aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-05-21 01:20:48 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-21 01:20:48 -0400
commit2f37dd131c5d3a2eac21cd5baf80658b1b02a8ac (patch)
treee0f191b15865268e694c02f1f02cbc26a168ddf9
parent3aa2fc1667acdd9cca816a2bc9529f494bd61b05 (diff)
parentffc83a79b44e02995ab5e93af07e26f6c7243c53 (diff)
Merge tag 'staging-4.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging
Pull staging and IIO driver updates from Greg KH: "Here's the big staging and iio driver update for 4.7-rc1. I think we almost broke even with this release, only adding a few more lines than we removed, which isn't bad overall given that there's a bunch of new iio drivers added. The Lustre developers seem to have woken up from their sleep and have been doing a great job in cleaning up the code and pruning unused or old cruft, the filesystem is almost readable :) Other than that, just a lot of basic coding style cleanups in the churn. All have been in linux-next for a while with no reported issues" * tag 'staging-4.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (938 commits) Staging: emxx_udc: emxx_udc: fixed coding style issue staging/gdm724x: fix "alignment should match open parenthesis" issues staging/gdm724x: Fix avoid CamelCase staging: unisys: rename misleading var ii with frag staging: unisys: visorhba: switch success handling to error handling staging: unisys: visorhba: main path needs to flow down the left margin staging: unisys: visorinput: handle_locking_key() simplifications staging: unisys: visorhba: fail gracefully for thread creation failures staging: unisys: visornic: comment restructuring and removing bad diction staging: unisys: fix format string %Lx to %llx for u64 staging: unisys: remove unused struct members staging: unisys: visorchannel: correct variable misspelling staging: unisys: visorhba: replace functionlike macro with function staging: dgnc: Need to check for NULL of ch staging: dgnc: remove redundant condition check staging: dgnc: fix 'line over 80 characters' staging: dgnc: clean up the dgnc_get_modem_info() staging: lustre: lnet: enable configuration per NI interface staging: lustre: o2iblnd: properly set ibr_why staging: lustre: o2iblnd: remove last of kiblnd_tunables_fini ...
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio68
-rw-r--r--Documentation/DocBook/device-drivers.tmpl2
-rw-r--r--Documentation/devicetree/bindings/iio/accel/mma8452.txt3
-rw-r--r--Documentation/devicetree/bindings/iio/adc/lpc1850-adc.txt21
-rw-r--r--Documentation/devicetree/bindings/iio/adc/mxs-lradc.txt (renamed from Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt)0
-rw-r--r--Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt6
-rw-r--r--Documentation/devicetree/bindings/iio/dac/ad5592r.txt155
-rw-r--r--Documentation/devicetree/bindings/iio/dac/lpc1850-dac.txt20
-rw-r--r--Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt13
-rw-r--r--Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt12
-rw-r--r--Documentation/devicetree/bindings/iio/potentiometer/ds1803.txt21
-rw-r--r--Documentation/devicetree/bindings/iio/potentiometer/mcp4131.txt84
-rw-r--r--Documentation/devicetree/bindings/iio/pressure/hp03.txt17
-rw-r--r--Documentation/devicetree/bindings/iio/pressure/ms5611.txt19
-rw-r--r--Documentation/devicetree/bindings/iio/st-sensors.txt6
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/driver-model/devres.txt4
-rw-r--r--Documentation/sync_file.txt69
-rw-r--r--MAINTAINERS12
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/dma-buf/Kconfig11
-rw-r--r--drivers/dma-buf/Makefile1
-rw-r--r--drivers/dma-buf/sync_file.c395
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c11
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c14
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c11
-rw-r--r--drivers/iio/accel/Kconfig5
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c127
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c7
-rw-r--r--drivers/iio/accel/bmc150-accel-spi.c8
-rw-r--r--drivers/iio/accel/bmc150-accel.h1
-rw-r--r--drivers/iio/accel/kxcjk-1013.c25
-rw-r--r--drivers/iio/accel/mma7455_core.c5
-rw-r--r--drivers/iio/accel/mma8452.c188
-rw-r--r--drivers/iio/accel/mma9553.c1
-rw-r--r--drivers/iio/accel/mxc4005.c29
-rw-r--r--drivers/iio/accel/st_accel.h1
-rw-r--r--drivers/iio/accel/st_accel_core.c105
-rw-r--r--drivers/iio/accel/st_accel_i2c.c4
-rw-r--r--drivers/iio/accel/stk8312.c1
-rw-r--r--drivers/iio/accel/stk8ba50.c1
-rw-r--r--drivers/iio/adc/Kconfig16
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/ad799x.c2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c102
-rw-r--r--drivers/iio/adc/at91_adc.c8
-rw-r--r--drivers/iio/adc/ina2xx-adc.c43
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c231
-rw-r--r--drivers/iio/adc/mcp3422.c6
-rw-r--r--drivers/iio/adc/mxs-lradc.c37
-rw-r--r--drivers/iio/adc/rockchip_saradc.c19
-rw-r--r--drivers/iio/adc/ti-adc081c.c118
-rw-r--r--drivers/iio/adc/vf610_adc.c24
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c2
-rw-r--r--drivers/iio/common/ms_sensors/ms_sensors_i2c.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c97
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c20
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c13
-rw-r--r--drivers/iio/dac/Kconfig37
-rw-r--r--drivers/iio/dac/Makefile4
-rw-r--r--drivers/iio/dac/ad5592r-base.c691
-rw-r--r--drivers/iio/dac/ad5592r-base.h76
-rw-r--r--drivers/iio/dac/ad5592r.c164
-rw-r--r--drivers/iio/dac/ad5593r.c131
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c210
-rw-r--r--drivers/iio/frequency/ad9523.c19
-rw-r--r--drivers/iio/gyro/Kconfig2
-rw-r--r--drivers/iio/gyro/bmg160_core.c137
-rw-r--r--drivers/iio/gyro/st_gyro.h1
-rw-r--r--drivers/iio/gyro/st_gyro_core.c4
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c5
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c1
-rw-r--r--drivers/iio/humidity/Kconfig10
-rw-r--r--drivers/iio/humidity/Makefile1
-rw-r--r--drivers/iio/humidity/am2315.c303
-rw-r--r--drivers/iio/humidity/dht11.c40
-rw-r--r--drivers/iio/imu/Kconfig2
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis.c7
-rw-r--r--drivers/iio/imu/bmi160/Kconfig32
-rw-r--r--drivers/iio/imu/bmi160/Makefile6
-rw-r--r--drivers/iio/imu/bmi160/bmi160.h10
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c596
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c72
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c63
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig10
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c73
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h16
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c20
-rw-r--r--drivers/iio/imu/kmx61.c1
-rw-r--r--drivers/iio/industrialio-core.c123
-rw-r--r--drivers/iio/inkern.c86
-rw-r--r--drivers/iio/light/Kconfig32
-rw-r--r--drivers/iio/light/Makefile3
-rw-r--r--drivers/iio/light/apds9960.c13
-rw-r--r--drivers/iio/light/bh1780.c297
-rw-r--r--drivers/iio/light/max44000.c639
-rw-r--r--drivers/iio/light/stk3310.c1
-rw-r--r--drivers/iio/light/tsl2563.c3
-rw-r--r--drivers/iio/light/veml6070.c218
-rw-r--r--drivers/iio/magnetometer/Kconfig33
-rw-r--r--drivers/iio/magnetometer/Makefile3
-rw-r--r--drivers/iio/magnetometer/ak8975.c232
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c156
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.h11
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c77
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_spi.c68
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c1
-rw-r--r--drivers/iio/potentiometer/Kconfig28
-rw-r--r--drivers/iio/potentiometer/Makefile2
-rw-r--r--drivers/iio/potentiometer/ds1803.c173
-rw-r--r--drivers/iio/potentiometer/mcp4131.c494
-rw-r--r--drivers/iio/potentiometer/mcp4531.c13
-rw-r--r--drivers/iio/potentiometer/tpl0102.c2
-rw-r--r--drivers/iio/pressure/Kconfig28
-rw-r--r--drivers/iio/pressure/Makefile2
-rw-r--r--drivers/iio/pressure/bmp280.c564
-rw-r--r--drivers/iio/pressure/hp03.c312
-rw-r--r--drivers/iio/pressure/hp206c.c426
-rw-r--r--drivers/iio/pressure/ms5611.h23
-rw-r--r--drivers/iio/pressure/ms5611_core.c148
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c25
-rw-r--r--drivers/iio/pressure/ms5611_spi.c34
-rw-r--r--drivers/iio/pressure/st_pressure_core.c10
-rw-r--r--drivers/staging/android/Kconfig17
-rw-r--r--drivers/staging/android/Makefile2
-rw-r--r--drivers/staging/android/ion/ion.c16
-rw-r--r--drivers/staging/android/ion/ion_chunk_heap.c4
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c2
-rw-r--r--drivers/staging/android/ion/ion_test.c2
-rw-r--r--drivers/staging/android/lowmemorykiller.c9
-rw-r--r--drivers/staging/android/sync.c356
-rw-r--r--drivers/staging/android/sync.h91
-rw-r--r--drivers/staging/android/sync_debug.c8
-rw-r--r--drivers/staging/android/timed_gpio.c166
-rw-r--r--drivers/staging/android/timed_gpio.h33
-rw-r--r--drivers/staging/android/timed_output.c110
-rw-r--r--drivers/staging/android/timed_output.h37
-rw-r--r--drivers/staging/board/armadillo800eva.c8
-rw-r--r--drivers/staging/comedi/comedi_buf.c10
-rw-r--r--drivers/staging/comedi/comedi_fops.c54
-rw-r--r--drivers/staging/comedi/comedidev.h4
-rw-r--r--drivers/staging/comedi/drivers.c40
-rw-r--r--drivers/staging/comedi/drivers/amcc_s5933.h24
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c12
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c104
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c71
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c189
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci263.c86
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c4
-rw-r--r--drivers/staging/comedi/drivers/comedi_8254.h14
-rw-r--r--drivers/staging/comedi/drivers/das1800.c1385
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c119
-rw-r--r--drivers/staging/comedi/drivers/mite.c1113
-rw-r--r--drivers/staging/comedi/drivers/mite.h329
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c1174
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.h33
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_common.c65
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c95
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_pci.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_regs.h82
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_c_common.c0
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c981
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c37
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c36
-rw-r--r--drivers/staging/comedi/drivers/ni_stc.h56
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c807
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.h66
-rw-r--r--drivers/staging/comedi/drivers/ni_tio_internal.h322
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c127
-rw-r--r--drivers/staging/comedi/drivers/plx9052.h122
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h2
-rw-r--r--drivers/staging/comedi/drivers/z8536.h89
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c2
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c52
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h23
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c28
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c131
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.c22
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c277
-rw-r--r--drivers/staging/dgnc/digi.h4
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c24
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h40
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c2
-rw-r--r--drivers/staging/fbtft/fbtft-io.c8
-rw-r--r--drivers/staging/fbtft/fbtft_device.c6
-rw-r--r--drivers/staging/fsl-mc/README.txt138
-rw-r--r--drivers/staging/fsl-mc/TODO13
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp.c77
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp-cmd.h7
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.c35
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.h10
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-cmd.h6
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c33
-rw-r--r--drivers/staging/fsl-mc/bus/dprc.c26
-rw-r--r--drivers/staging/fsl-mc/bus/mc-allocator.c79
-rw-r--r--drivers/staging/fsl-mc/bus/mc-bus.c90
-rw-r--r--drivers/staging/fsl-mc/bus/mc-msi.c14
-rw-r--r--drivers/staging/fsl-mc/include/dpbp-cmd.h4
-rw-r--r--drivers/staging/fsl-mc/include/dpbp.h51
-rw-r--r--drivers/staging/fsl-mc/include/dprc.h19
-rw-r--r--drivers/staging/fsl-mc/include/mc-private.h2
-rw-r--r--drivers/staging/fwserial/dma_fifo.c8
-rw-r--r--drivers/staging/fwserial/dma_fifo.h16
-rw-r--r--drivers/staging/fwserial/fwserial.c42
-rw-r--r--drivers/staging/fwserial/fwserial.h42
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c5
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c6
-rw-r--r--drivers/staging/gdm724x/hci_packet.h2
-rw-r--r--drivers/staging/gdm724x/netlink_k.c3
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c8
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.h2
-rw-r--r--drivers/staging/gs_fpgaboot/io.c1
-rw-r--r--drivers/staging/i4l/act2000/act2000_isa.c24
-rw-r--r--drivers/staging/i4l/pcbit/capi.h2
-rw-r--r--drivers/staging/i4l/pcbit/drv.c8
-rw-r--r--drivers/staging/i4l/pcbit/edss1.c2
-rw-r--r--drivers/staging/i4l/pcbit/layer2.h2
-rw-r--r--drivers/staging/iio/accel/Kconfig23
-rw-r--r--drivers/staging/iio/accel/Makefile6
-rw-r--r--drivers/staging/iio/accel/adis16201.h156
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16203.h132
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16204.h68
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c253
-rw-r--r--drivers/staging/iio/accel/adis16209.h39
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16220.h140
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c494
-rw-r--r--drivers/staging/iio/accel/adis16240.h50
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c5
-rw-r--r--drivers/staging/iio/adc/ad7192.c50
-rw-r--r--drivers/staging/iio/adc/ad7280a.c40
-rw-r--r--drivers/staging/iio/adc/ad7280a.h8
-rw-r--r--drivers/staging/iio/adc/ad7606.h28
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c18
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c5
-rw-r--r--drivers/staging/iio/adc/ad7780.c2
-rw-r--r--drivers/staging/iio/frequency/ad9832.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c45
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.h28
-rw-r--r--drivers/staging/iio/light/isl29028.c55
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c211
-rw-r--r--drivers/staging/iio/meter/ade7753.c4
-rw-r--r--drivers/staging/iio/meter/ade7754.c4
-rw-r--r--drivers/staging/iio/meter/ade7758.h16
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c77
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c4
-rw-r--r--drivers/staging/iio/meter/ade7759.c4
-rw-r--r--drivers/staging/iio/meter/ade7854.c3
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.h8
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.c15
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h51
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h79
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h136
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h18
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h15
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h161
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h31
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h75
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h12
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h80
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h4
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-dlc.h29
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h9
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c405
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h134
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c98
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c139
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c3
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c126
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c3
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c54
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c28
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c9
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c283
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c154
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c31
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c132
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c17
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c12
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c143
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c10
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c7
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c82
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c52
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c215
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h40
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c282
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h47
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c270
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c44
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c133
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h156
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h204
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c12
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c12
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c3
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h9
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c94
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h978
-rw-r--r--drivers/staging/lustre/lustre/include/lclient.h408
-rw-r--r--drivers/staging/lustre/lustre/include/linux/obd.h125
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h75
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h112
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h54
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_cfg.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h14
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h120
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h22
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h60
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h18
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_param.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h3
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h77
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cksum.h1
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h5
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h4
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c1203
-rw-r--r--drivers/staging/lustre/lustre/ldlm/l_lock.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c30
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h19
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c14
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c115
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c28
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c163
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c19
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c95
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c277
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c (renamed from drivers/staging/lustre/lustre/lclient/glimpse.c)87
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c327
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c (renamed from drivers/staging/lustre/lustre/lclient/lcommon_misc.c)45
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c71
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h266
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c176
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c48
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c29
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c33
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c143
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c367
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c314
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c17
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c14
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c270
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h332
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c928
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c53
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c141
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c211
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_req.c121
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c33
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c182
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h105
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c15
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c5
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h34
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c246
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c996
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c11
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c26
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c54
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c12
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c8
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c183
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c62
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c11
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c9
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c386
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c7
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_page.c4
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c8
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c24
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c5
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c26
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c12
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c430
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c2086
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c303
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c659
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c72
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c9
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c26
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c15
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c3
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c173
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c68
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c531
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h159
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h27
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c283
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c1698
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c38
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c544
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c423
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c11
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c12
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c31
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c11
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c3
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c21
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c52
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c12
-rw-r--r--drivers/staging/media/omap1/omap1_camera.c68
-rw-r--r--drivers/staging/media/omap4iss/iss.c2
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_errors.h8
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hal.h14
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_reg.h8
-rw-r--r--drivers/staging/netlogic/xlr_net.c2
-rw-r--r--drivers/staging/nvec/nvec.c11
-rw-r--r--drivers/staging/nvec/nvec_power.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c7
-rw-r--r--drivers/staging/octeon/ethernet-rx.h2
-rw-r--r--drivers/staging/octeon/ethernet-tx.c15
-rw-r--r--drivers/staging/octeon/ethernet.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c49
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c7
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c13
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c49
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_rf.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sreset.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/bb_cfg.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/fw.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_com.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c7
-rw-r--r--drivers/staging/rtl8188eu/hal/mac_cfg.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_HWConfig.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RTL8188E.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseq.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseqcmd.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c9
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_led.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c76
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h5
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h5
-rw-r--r--drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h5
-rw-r--r--drivers/staging/rtl8188eu/include/HalVerDef.h5
-rw-r--r--drivers/staging/rtl8188eu/include/basic_types.h5
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h5
-rw-r--r--drivers/staging/rtl8188eu/include/fw.h4
-rw-r--r--drivers/staging/rtl8188eu/include/hal_com.h5
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h5
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h5
-rw-r--r--drivers/staging/rtl8188eu/include/mlme_osdep.h5
-rw-r--r--drivers/staging/rtl8188eu/include/mp_custom_oid.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_HWConfig.h4
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RTL8188E.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegDefine11N.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_precomp.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_reg.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_types.h5
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h5
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h5
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseq.h5
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseqcmd.h5
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_cmd.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_dm.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_led.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_xmit.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_android.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ap.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_eeprom.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_event.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ht.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_set.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_iol.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_qos.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_sreset.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h5
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h5
-rw-r--r--drivers/staging/rtl8188eu/include/usb_hal.h5
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops_linux.h5
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h5
-rw-r--r--drivers/staging/rtl8188eu/include/wlan_bssdef.h5
-rw-r--r--drivers/staging/rtl8188eu/include/xmit_osdep.h5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c13
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c7
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c4
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c5
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c2
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c73
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c22
-rw-r--r--drivers/staging/rtl8712/basic_types.h4
-rw-r--r--drivers/staging/rtl8712/drv_types.h4
-rw-r--r--drivers/staging/rtl8712/ethernet.h4
-rw-r--r--drivers/staging/rtl8712/hal_init.c25
-rw-r--r--drivers/staging/rtl8712/ieee80211.c4
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c2
-rw-r--r--drivers/staging/rtl8712/os_intfs.c4
-rw-r--r--drivers/staging/rtl8712/osdep_service.h3
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c18
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c10
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c8
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c80
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c16
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c16
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c2
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c2
-rw-r--r--drivers/staging/rtl8723au/Kconfig7
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ap.c3
-rw-r--r--drivers/staging/rtl8723au/core/rtw_recv.c25
-rw-r--r--drivers/staging/rtl8723au/core/rtw_wlan_util.c10
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c2
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c2
-rw-r--r--drivers/staging/rtl8723au/include/rtw_mlme_ext.h2
-rw-r--r--drivers/staging/rtl8723au/include/rtw_recv.h2
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_intf.c5
-rw-r--r--drivers/staging/rts5208/ms.c16
-rw-r--r--drivers/staging/rts5208/rtsx_card.c21
-rw-r--r--drivers/staging/rts5208/rtsx_card.h2
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c35
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h3
-rw-r--r--drivers/staging/rts5208/sd.c16
-rw-r--r--drivers/staging/skein/skein_api.c3
-rw-r--r--drivers/staging/skein/skein_base.c90
-rw-r--r--drivers/staging/skein/skein_base.h45
-rw-r--r--drivers/staging/skein/skein_block.c92
-rw-r--r--drivers/staging/skein/skein_generic.c6
-rw-r--r--drivers/staging/skein/threefish_api.h2
-rw-r--r--drivers/staging/skein/threefish_block.c2144
-rw-r--r--drivers/staging/slicoss/slicoss.c8
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c2
-rw-r--r--drivers/staging/speakup/main.c6
-rw-r--r--drivers/staging/speakup/serialio.h3
-rw-r--r--drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset14
-rw-r--r--drivers/staging/unisys/Documentation/overview.txt19
-rw-r--r--drivers/staging/unisys/Documentation/proc-entries.txt93
-rw-r--r--drivers/staging/unisys/MAINTAINERS1
-rw-r--r--drivers/staging/unisys/include/channel.h10
-rw-r--r--drivers/staging/unisys/include/iochannel.h42
-rw-r--r--drivers/staging/unisys/include/visorbus.h127
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c394
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c5
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c442
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c114
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c24
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c223
-rw-r--r--drivers/staging/vme/devices/vme_pio2_gpio.c5
-rw-r--r--drivers/staging/vt6655/baseband.c24
-rw-r--r--drivers/staging/vt6655/baseband.h6
-rw-r--r--drivers/staging/vt6655/card.c95
-rw-r--r--drivers/staging/vt6655/card.h9
-rw-r--r--drivers/staging/vt6655/desc.h3
-rw-r--r--drivers/staging/vt6655/mac.c15
-rw-r--r--drivers/staging/vt6655/srom.c9
-rw-r--r--drivers/staging/vt6656/baseband.c26
-rw-r--r--drivers/staging/vt6656/main_usb.c6
-rw-r--r--drivers/staging/vt6656/wcmd.c8
-rw-r--r--drivers/staging/wilc1000/Kconfig1
-rw-r--r--drivers/staging/wilc1000/host_interface.c438
-rw-r--r--drivers/staging/wilc1000/host_interface.h8
-rw-r--r--drivers/staging/wilc1000/linux_mon.c24
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c98
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c3
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c71
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h15
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c53
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h6
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c7
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h21
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c4
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c8
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c5
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c4
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h1
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c28
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c2
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c5
-rw-r--r--drivers/staging/xgifb/vb_init.c16
-rw-r--r--drivers/staging/xgifb/vb_setmode.c22
-rw-r--r--drivers/staging/xgifb/vb_table.h135
-rw-r--r--drivers/staging/xgifb/vb_util.h8
-rw-r--r--include/dt-bindings/iio/adi,ad5592r.h16
-rw-r--r--include/linux/iio/buffer.h2
-rw-r--r--include/linux/iio/common/st_sensors.h9
-rw-r--r--include/linux/iio/consumer.h53
-rw-r--r--include/linux/iio/iio.h33
-rw-r--r--include/linux/iio/imu/adis.h1
-rw-r--r--include/linux/iio/magnetometer/ak8975.h16
-rw-r--r--include/linux/kernel.h7
-rw-r--r--include/linux/platform_data/invensense_mpu6050.h5
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h2
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/sync_file.h57
-rw-r--r--include/uapi/linux/iio/types.h2
-rw-r--r--include/uapi/linux/sync_file.h (renamed from drivers/staging/android/uapi/sync.h)44
-rw-r--r--tools/iio/generic_buffer.c116
-rw-r--r--tools/iio/iio_event_monitor.c18
-rw-r--r--tools/iio/iio_utils.h7
668 files changed, 25442 insertions, 25016 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 3c6624881375..df44998e7506 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -1233,7 +1233,7 @@ KernelVersion: 3.4
1233Contact: linux-iio@vger.kernel.org 1233Contact: linux-iio@vger.kernel.org
1234Description: 1234Description:
1235 Proximity measurement indicating that some 1235 Proximity measurement indicating that some
1236 object is near the sensor, usually be observing 1236 object is near the sensor, usually by observing
1237 reflectivity of infrared or ultrasound emitted. 1237 reflectivity of infrared or ultrasound emitted.
1238 Often these sensors are unit less and as such conversion 1238 Often these sensors are unit less and as such conversion
1239 to SI units is not possible. Higher proximity measurements 1239 to SI units is not possible. Higher proximity measurements
@@ -1255,12 +1255,23 @@ Description:
1255What: /sys/.../iio:deviceX/in_intensityY_raw 1255What: /sys/.../iio:deviceX/in_intensityY_raw
1256What: /sys/.../iio:deviceX/in_intensityY_ir_raw 1256What: /sys/.../iio:deviceX/in_intensityY_ir_raw
1257What: /sys/.../iio:deviceX/in_intensityY_both_raw 1257What: /sys/.../iio:deviceX/in_intensityY_both_raw
1258What: /sys/.../iio:deviceX/in_intensityY_uv_raw
1258KernelVersion: 3.4 1259KernelVersion: 3.4
1259Contact: linux-iio@vger.kernel.org 1260Contact: linux-iio@vger.kernel.org
1260Description: 1261Description:
1261 Unit-less light intensity. Modifiers both and ir indicate 1262 Unit-less light intensity. Modifiers both and ir indicate
1262 that measurements contains visible and infrared light 1263 that measurements contains visible and infrared light
1263 components or just infrared light, respectively. 1264 components or just infrared light, respectively. Modifier uv indicates
1265 that measurements contain ultraviolet light components.
1266
1267What: /sys/.../iio:deviceX/in_uvindex_input
1268KernelVersion: 4.6
1269Contact: linux-iio@vger.kernel.org
1270Description:
1271 UV light intensity index measuring the human skin's response to
1272 different wavelength of sunlight weighted according to the
1273 standardised CIE Erythemal Action Spectrum. UV index values range
1274 from 0 (low) to >=11 (extreme).
1264 1275
1265What: /sys/.../iio:deviceX/in_intensity_red_integration_time 1276What: /sys/.../iio:deviceX/in_intensity_red_integration_time
1266What: /sys/.../iio:deviceX/in_intensity_green_integration_time 1277What: /sys/.../iio:deviceX/in_intensity_green_integration_time
@@ -1501,3 +1512,56 @@ Contact: linux-iio@vger.kernel.org
1501Description: 1512Description:
1502 Raw (unscaled no offset etc.) pH reading of a substance as a negative 1513 Raw (unscaled no offset etc.) pH reading of a substance as a negative
1503 base-10 logarithm of hydrodium ions in a litre of water. 1514 base-10 logarithm of hydrodium ions in a litre of water.
1515
1516What: /sys/bus/iio/devices/iio:deviceX/mount_matrix
1517What: /sys/bus/iio/devices/iio:deviceX/in_mount_matrix
1518What: /sys/bus/iio/devices/iio:deviceX/out_mount_matrix
1519What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_mount_matrix
1520What: /sys/bus/iio/devices/iio:deviceX/in_accel_mount_matrix
1521KernelVersion: 4.6
1522Contact: linux-iio@vger.kernel.org
1523Description:
1524 Mounting matrix for IIO sensors. This is a rotation matrix which
1525 informs userspace about sensor chip's placement relative to the
1526 main hardware it is mounted on.
1527 Main hardware placement is defined according to the local
1528 reference frame related to the physical quantity the sensor
1529 measures.
1530 Given that the rotation matrix is defined in a board specific
1531 way (platform data and / or device-tree), the main hardware
1532 reference frame definition is left to the implementor's choice
1533 (see below for a magnetometer example).
1534 Applications should apply this rotation matrix to samples so
1535 that when main hardware reference frame is aligned onto local
1536 reference frame, then sensor chip reference frame is also
1537 perfectly aligned with it.
1538 Matrix is a 3x3 unitary matrix and typically looks like
1539 [0, 1, 0; 1, 0, 0; 0, 0, -1]. Identity matrix
1540 [1, 0, 0; 0, 1, 0; 0, 0, 1] means sensor chip and main hardware
1541 are perfectly aligned with each other.
1542
1543 For example, a mounting matrix for a magnetometer sensor informs
1544 userspace about sensor chip's ORIENTATION relative to the main
1545 hardware.
1546 More specifically, main hardware orientation is defined with
1547 respect to the LOCAL EARTH GEOMAGNETIC REFERENCE FRAME where :
1548 * Y is in the ground plane and positive towards magnetic North ;
1549 * X is in the ground plane, perpendicular to the North axis and
1550 positive towards the East ;
1551 * Z is perpendicular to the ground plane and positive upwards.
1552
1553 An implementor might consider that for a hand-held device, a
1554 'natural' orientation would be 'front facing camera at the top'.
1555 The main hardware reference frame could then be described as :
1556 * Y is in the plane of the screen and is positive towards the
1557 top of the screen ;
1558 * X is in the plane of the screen, perpendicular to Y axis, and
1559 positive towards the right hand side of the screen ;
1560 * Z is perpendicular to the screen plane and positive out of the
1561 screen.
1562 Another example for a quadrotor UAV might be :
1563 * Y is in the plane of the propellers and positive towards the
1564 front-view camera;
1565 * X is in the plane of the propellers, perpendicular to Y axis,
1566 and positive towards the starboard side of the UAV ;
1567 * Z is perpendicular to propellers plane and positive upwards.
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 893b2cabf7e4..de79efdad46c 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -136,6 +136,8 @@ X!Edrivers/base/interface.c
136!Iinclude/linux/seqno-fence.h 136!Iinclude/linux/seqno-fence.h
137!Edrivers/dma-buf/reservation.c 137!Edrivers/dma-buf/reservation.c
138!Iinclude/linux/reservation.h 138!Iinclude/linux/reservation.h
139!Edrivers/dma-buf/sync_file.c
140!Iinclude/linux/sync_file.h
139!Edrivers/base/dma-coherent.c 141!Edrivers/base/dma-coherent.c
140!Edrivers/base/dma-mapping.c 142!Edrivers/base/dma-mapping.c
141 </sect1> 143 </sect1>
diff --git a/Documentation/devicetree/bindings/iio/accel/mma8452.txt b/Documentation/devicetree/bindings/iio/accel/mma8452.txt
index 165937e1ac1c..45f5c5c5929c 100644
--- a/Documentation/devicetree/bindings/iio/accel/mma8452.txt
+++ b/Documentation/devicetree/bindings/iio/accel/mma8452.txt
@@ -1,4 +1,4 @@
1Freescale MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC or MMA8653FC 1Freescale MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC or FXLS8471Q
2triaxial accelerometer 2triaxial accelerometer
3 3
4Required properties: 4Required properties:
@@ -9,6 +9,7 @@ Required properties:
9 * "fsl,mma8453" 9 * "fsl,mma8453"
10 * "fsl,mma8652" 10 * "fsl,mma8652"
11 * "fsl,mma8653" 11 * "fsl,mma8653"
12 * "fsl,fxls8471"
12 13
13 - reg: the I2C address of the chip 14 - reg: the I2C address of the chip
14 15
diff --git a/Documentation/devicetree/bindings/iio/adc/lpc1850-adc.txt b/Documentation/devicetree/bindings/iio/adc/lpc1850-adc.txt
new file mode 100644
index 000000000000..0bcae5140bc5
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/lpc1850-adc.txt
@@ -0,0 +1,21 @@
1NXP LPC1850 ADC bindings
2
3Required properties:
4- compatible: Should be "nxp,lpc1850-adc"
5- reg: Offset and length of the register set for the ADC device
6- interrupts: The interrupt number for the ADC device
7- clocks: The root clock of the ADC controller
8- vref-supply: The regulator supply ADC reference voltage
9- resets: phandle to reset controller and line specifier
10
11Example:
12
13adc0: adc@400e3000 {
14 compatible = "nxp,lpc1850-adc";
15 reg = <0x400e3000 0x1000>;
16 interrupts = <17>;
17 clocks = <&ccu1 CLK_APB3_ADC0>;
18 vref-supply = <&reg_vdda>;
19 resets = <&rgu 40>;
20 status = "disabled";
21};
diff --git a/Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt b/Documentation/devicetree/bindings/iio/adc/mxs-lradc.txt
index 555fb117d4fa..555fb117d4fa 100644
--- a/Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/mxs-lradc.txt
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
index a9a5fe19ff2a..bf99e2f24788 100644
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
@@ -1,7 +1,11 @@
1Rockchip Successive Approximation Register (SAR) A/D Converter bindings 1Rockchip Successive Approximation Register (SAR) A/D Converter bindings
2 2
3Required properties: 3Required properties:
4- compatible: Should be "rockchip,saradc" or "rockchip,rk3066-tsadc" 4- compatible: should be "rockchip,<name>-saradc" or "rockchip,rk3066-tsadc"
5 - "rockchip,saradc": for rk3188, rk3288
6 - "rockchip,rk3066-tsadc": for rk3036
7 - "rockchip,rk3399-saradc": for rk3399
8
5- reg: physical base address of the controller and length of memory mapped 9- reg: physical base address of the controller and length of memory mapped
6 region. 10 region.
7- interrupts: The interrupt number to the cpu. The interrupt specifier format 11- interrupts: The interrupt number to the cpu. The interrupt specifier format
diff --git a/Documentation/devicetree/bindings/iio/dac/ad5592r.txt b/Documentation/devicetree/bindings/iio/dac/ad5592r.txt
new file mode 100644
index 000000000000..989f96f31c66
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/ad5592r.txt
@@ -0,0 +1,155 @@
1Analog Devices AD5592R/AD5593R DAC/ADC device driver
2
3Required properties for the AD5592R:
4 - compatible: Must be "adi,ad5592r"
5 - reg: SPI chip select number for the device
6 - spi-max-frequency: Max SPI frequency to use (< 30000000)
7 - spi-cpol: The AD5592R requires inverse clock polarity (CPOL) mode
8
9Required properties for the AD5593R:
10 - compatible: Must be "adi,ad5593r"
11 - reg: I2C address of the device
12
13Required properties for all supported chips:
14 - #address-cells: Should be 1.
15 - #size-cells: Should be 0.
16 - channel nodes:
17 Each child node represents one channel and has the following
18 Required properties:
19 * reg: Pin on which this channel is connected to.
20 * adi,mode: Mode or function of this channel.
21 Macros specifying the valid values
22 can be found in <dt-bindings/iio/adi,ad5592r.h>.
23
24 The following values are currently supported:
25 * CH_MODE_UNUSED (the pin is unused)
26 * CH_MODE_ADC (the pin is ADC input)
27 * CH_MODE_DAC (the pin is DAC output)
28 * CH_MODE_DAC_AND_ADC (the pin is DAC output
29 but can be monitored by an ADC, since
30 there is no disadvantage
31 this should be considered as the
32 preferred DAC mode)
33 * CH_MODE_GPIO (the pin is registered
34 with GPIOLIB)
35 Optional properties:
36 * adi,off-state: State of this channel when unused or the
37 device gets removed. Macros specifying the
38 valid values can be found in
39 <dt-bindings/iio/adi,ad5592r.h>.
40
41 * CH_OFFSTATE_PULLDOWN (the pin is pulled down)
42 * CH_OFFSTATE_OUT_LOW (the pin is output low)
43 * CH_OFFSTATE_OUT_HIGH (the pin is output high)
44 * CH_OFFSTATE_OUT_TRISTATE (the pin is
45 tristated output)
46
47
48Optional properties:
49 - vref-supply: Phandle to the external reference voltage supply. This should
50 only be set if there is an external reference voltage connected to the VREF
51 pin. If the property is not set the internal 2.5V reference is used.
52 - reset-gpios : GPIO spec for the RESET pin. If specified, it will be
53 asserted during driver probe.
54 - gpio-controller: Marks the device node as a GPIO controller.
55 - #gpio-cells: Should be 2. The first cell is the GPIO number and the second
56 cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>.
57
58AD5592R Example:
59
60 #include <dt-bindings/iio/adi,ad5592r.h>
61
62 vref: regulator-vref {
63 compatible = "regulator-fixed";
64 regulator-name = "vref-ad559x";
65 regulator-min-microvolt = <3300000>;
66 regulator-max-microvolt = <3300000>;
67 regulator-always-on;
68 };
69
70 ad5592r@0 {
71 #size-cells = <0>;
72 #address-cells = <1>;
73 #gpio-cells = <2>;
74 compatible = "adi,ad5592r";
75 reg = <0>;
76
77 spi-max-frequency = <1000000>;
78 spi-cpol;
79
80 vref-supply = <&vref>; /* optional */
81 reset-gpios = <&gpio0 86 0>; /* optional */
82 gpio-controller;
83
84 channel@0 {
85 reg = <0>;
86 adi,mode = <CH_MODE_DAC>;
87 };
88 channel@1 {
89 reg = <1>;
90 adi,mode = <CH_MODE_ADC>;
91 };
92 channel@2 {
93 reg = <2>;
94 adi,mode = <CH_MODE_DAC_AND_ADC>;
95 };
96 channel@3 {
97 reg = <3>;
98 adi,mode = <CH_MODE_DAC_AND_ADC>;
99 adi,off-state = <CH_OFFSTATE_PULLDOWN>;
100 };
101 channel@4 {
102 reg = <4>;
103 adi,mode = <CH_MODE_UNUSED>;
104 adi,off-state = <CH_OFFSTATE_PULLDOWN>;
105 };
106 channel@5 {
107 reg = <5>;
108 adi,mode = <CH_MODE_GPIO>;
109 adi,off-state = <CH_OFFSTATE_PULLDOWN>;
110 };
111 channel@6 {
112 reg = <6>;
113 adi,mode = <CH_MODE_GPIO>;
114 adi,off-state = <CH_OFFSTATE_PULLDOWN>;
115 };
116 channel@7 {
117 reg = <7>;
118 adi,mode = <CH_MODE_GPIO>;
119 adi,off-state = <CH_OFFSTATE_PULLDOWN>;
120 };
121 };
122
123AD5593R Example:
124
125 #include <dt-bindings/iio/adi,ad5592r.h>
126
127 ad5593r@10 {
128 #size-cells = <0>;
129 #address-cells = <1>;
130 #gpio-cells = <2>;
131 compatible = "adi,ad5593r";
132 reg = <0x10>;
133 gpio-controller;
134
135 channel@0 {
136 reg = <0>;
137 adi,mode = <CH_MODE_DAC>;
138 adi,off-state = <CH_OFFSTATE_PULLDOWN>;
139 };
140 channel@1 {
141 reg = <1>;
142 adi,mode = <CH_MODE_ADC>;
143 adi,off-state = <CH_OFFSTATE_PULLDOWN>;
144 };
145 channel@2 {
146 reg = <2>;
147 adi,mode = <CH_MODE_DAC_AND_ADC>;
148 adi,off-state = <CH_OFFSTATE_PULLDOWN>;
149 };
150 channel@6 {
151 reg = <6>;
152 adi,mode = <CH_MODE_GPIO>;
153 adi,off-state = <CH_OFFSTATE_PULLDOWN>;
154 };
155 };
diff --git a/Documentation/devicetree/bindings/iio/dac/lpc1850-dac.txt b/Documentation/devicetree/bindings/iio/dac/lpc1850-dac.txt
new file mode 100644
index 000000000000..7d6647d4af5e
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/lpc1850-dac.txt
@@ -0,0 +1,20 @@
1NXP LPC1850 DAC bindings
2
3Required properties:
4- compatible: Should be "nxp,lpc1850-dac"
5- reg: Offset and length of the register set for the DAC device
6- interrupts: The interrupt number for the DAC device
7- clocks: The root clock of the DAC controller
8- vref-supply: The regulator supply DAC reference voltage
9- resets: phandle to reset controller and line specifier
10
11Example:
12dac: dac@400e1000 {
13 compatible = "nxp,lpc1850-dac";
14 reg = <0x400e1000 0x1000>;
15 interrupts = <0>;
16 clocks = <&ccu1 CLK_APB3_DAC>;
17 vref-supply = <&reg_vdda>;
18 resets = <&rgu 42>;
19 status = "disabled";
20};
diff --git a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
index e4d8f1c52f4a..a9fc11e43b45 100644
--- a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
+++ b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
@@ -8,10 +8,23 @@ Required properties:
8 - interrupt-parent : should be the phandle for the interrupt controller 8 - interrupt-parent : should be the phandle for the interrupt controller
9 - interrupts : interrupt mapping for GPIO IRQ 9 - interrupts : interrupt mapping for GPIO IRQ
10 10
11Optional properties:
12 - mount-matrix: an optional 3x3 mounting rotation matrix
13
14
11Example: 15Example:
12 mpu6050@68 { 16 mpu6050@68 {
13 compatible = "invensense,mpu6050"; 17 compatible = "invensense,mpu6050";
14 reg = <0x68>; 18 reg = <0x68>;
15 interrupt-parent = <&gpio1>; 19 interrupt-parent = <&gpio1>;
16 interrupts = <18 1>; 20 interrupts = <18 1>;
21 mount-matrix = "-0.984807753012208", /* x0 */
22 "0", /* y0 */
23 "-0.173648177666930", /* z0 */
24 "0", /* x1 */
25 "-1", /* y1 */
26 "0", /* z1 */
27 "-0.173648177666930", /* x2 */
28 "0", /* y2 */
29 "0.984807753012208"; /* z2 */
17 }; 30 };
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt b/Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt
index 011679f1a425..e1e7dd3259f6 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt
+++ b/Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt
@@ -8,6 +8,8 @@ Required properties:
8Optional properties: 8Optional properties:
9 9
10 - gpios : should be device tree identifier of the magnetometer DRDY pin 10 - gpios : should be device tree identifier of the magnetometer DRDY pin
11 - vdd-supply: an optional regulator that needs to be on to provide VDD
12 - mount-matrix: an optional 3x3 mounting rotation matrix
11 13
12Example: 14Example:
13 15
@@ -15,4 +17,14 @@ ak8975@0c {
15 compatible = "asahi-kasei,ak8975"; 17 compatible = "asahi-kasei,ak8975";
16 reg = <0x0c>; 18 reg = <0x0c>;
17 gpios = <&gpj0 7 0>; 19 gpios = <&gpj0 7 0>;
20 vdd-supply = <&ldo_3v3_gnss>;
21 mount-matrix = "-0.984807753012208", /* x0 */
22 "0", /* y0 */
23 "-0.173648177666930", /* z0 */
24 "0", /* x1 */
25 "-1", /* y1 */
26 "0", /* z1 */
27 "-0.173648177666930", /* x2 */
28 "0", /* y2 */
29 "0.984807753012208"; /* z2 */
18}; 30};
diff --git a/Documentation/devicetree/bindings/iio/potentiometer/ds1803.txt b/Documentation/devicetree/bindings/iio/potentiometer/ds1803.txt
new file mode 100644
index 000000000000..df77bf552656
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/potentiometer/ds1803.txt
@@ -0,0 +1,21 @@
1* Maxim Integrated DS1803 digital potentiometer driver
2
3The node for this driver must be a child node of a I2C controller, hence
4all mandatory properties for your controller must be specified. See directory:
5
6 Documentation/devicetree/bindings/i2c
7
8for more details.
9
10Required properties:
11 - compatible: Must be one of the following, depending on the
12 model:
13 "maxim,ds1803-010",
14 "maxim,ds1803-050",
15 "maxim,ds1803-100"
16
17Example:
18ds1803: ds1803@1 {
19 reg = <0x28>;
20 compatible = "maxim,ds1803-010";
21};
diff --git a/Documentation/devicetree/bindings/iio/potentiometer/mcp4131.txt b/Documentation/devicetree/bindings/iio/potentiometer/mcp4131.txt
new file mode 100644
index 000000000000..3ccba16f7035
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/potentiometer/mcp4131.txt
@@ -0,0 +1,84 @@
1* Microchip MCP413X/414X/415X/416X/423X/424X/425X/426X Digital Potentiometer
2 driver
3
4The node for this driver must be a child node of a SPI controller, hence
5all mandatory properties described in
6
7 Documentation/devicetree/bindings/spi/spi-bus.txt
8
9must be specified.
10
11Required properties:
12 - compatible: Must be one of the following, depending on the
13 model:
14 "microchip,mcp4131-502"
15 "microchip,mcp4131-103"
16 "microchip,mcp4131-503"
17 "microchip,mcp4131-104"
18 "microchip,mcp4132-502"
19 "microchip,mcp4132-103"
20 "microchip,mcp4132-503"
21 "microchip,mcp4132-104"
22 "microchip,mcp4141-502"
23 "microchip,mcp4141-103"
24 "microchip,mcp4141-503"
25 "microchip,mcp4141-104"
26 "microchip,mcp4142-502"
27 "microchip,mcp4142-103"
28 "microchip,mcp4142-503"
29 "microchip,mcp4142-104"
30 "microchip,mcp4151-502"
31 "microchip,mcp4151-103"
32 "microchip,mcp4151-503"
33 "microchip,mcp4151-104"
34 "microchip,mcp4152-502"
35 "microchip,mcp4152-103"
36 "microchip,mcp4152-503"
37 "microchip,mcp4152-104"
38 "microchip,mcp4161-502"
39 "microchip,mcp4161-103"
40 "microchip,mcp4161-503"
41 "microchip,mcp4161-104"
42 "microchip,mcp4162-502"
43 "microchip,mcp4162-103"
44 "microchip,mcp4162-503"
45 "microchip,mcp4162-104"
46 "microchip,mcp4231-502"
47 "microchip,mcp4231-103"
48 "microchip,mcp4231-503"
49 "microchip,mcp4231-104"
50 "microchip,mcp4232-502"
51 "microchip,mcp4232-103"
52 "microchip,mcp4232-503"
53 "microchip,mcp4232-104"
54 "microchip,mcp4241-502"
55 "microchip,mcp4241-103"
56 "microchip,mcp4241-503"
57 "microchip,mcp4241-104"
58 "microchip,mcp4242-502"
59 "microchip,mcp4242-103"
60 "microchip,mcp4242-503"
61 "microchip,mcp4242-104"
62 "microchip,mcp4251-502"
63 "microchip,mcp4251-103"
64 "microchip,mcp4251-503"
65 "microchip,mcp4251-104"
66 "microchip,mcp4252-502"
67 "microchip,mcp4252-103"
68 "microchip,mcp4252-503"
69 "microchip,mcp4252-104"
70 "microchip,mcp4261-502"
71 "microchip,mcp4261-103"
72 "microchip,mcp4261-503"
73 "microchip,mcp4261-104"
74 "microchip,mcp4262-502"
75 "microchip,mcp4262-103"
76 "microchip,mcp4262-503"
77 "microchip,mcp4262-104"
78
79Example:
80mcp4131: mcp4131@0 {
81 compatible = "microchip,mcp4131-502";
82 reg = <0>;
83 spi-max-frequency = <500000>;
84};
diff --git a/Documentation/devicetree/bindings/iio/pressure/hp03.txt b/Documentation/devicetree/bindings/iio/pressure/hp03.txt
new file mode 100644
index 000000000000..54e7e70bcea5
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/pressure/hp03.txt
@@ -0,0 +1,17 @@
1HopeRF HP03 digital pressure/temperature sensors
2
3Required properties:
4- compatible: must be "hoperf,hp03"
5- xclr-gpio: must be device tree identifier of the XCLR pin.
6 The XCLR pin is a reset of the ADC in the chip,
7 it must be pulled HI before the conversion and
8 readout of the value from the ADC registers and
9 pulled LO afterward.
10
11Example:
12
13hp03@77 {
14 compatible = "hoperf,hp03";
15 reg = <0x77>;
16 xclr-gpio = <&portc 0 0x0>;
17};
diff --git a/Documentation/devicetree/bindings/iio/pressure/ms5611.txt b/Documentation/devicetree/bindings/iio/pressure/ms5611.txt
new file mode 100644
index 000000000000..17bca866c084
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/pressure/ms5611.txt
@@ -0,0 +1,19 @@
1MEAS ms5611 family pressure sensors
2
3Pressure sensors from MEAS Switzerland with SPI and I2C bus interfaces.
4
5Required properties:
6- compatible: "meas,ms5611" or "meas,ms5607"
7- reg: the I2C address or SPI chip select the device will respond to
8
9Optional properties:
10- vdd-supply: an optional regulator that needs to be on to provide VDD
11 power to the sensor.
12
13Example:
14
15ms5607@77 {
16 compatible = "meas,ms5607";
17 reg = <0x77>;
18 vdd-supply = <&ldo_3v3_gnss>;
19};
diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt
index d4b87cc1e446..5844cf72862d 100644
--- a/Documentation/devicetree/bindings/iio/st-sensors.txt
+++ b/Documentation/devicetree/bindings/iio/st-sensors.txt
@@ -16,6 +16,10 @@ Optional properties:
16- st,drdy-int-pin: the pin on the package that will be used to signal 16- st,drdy-int-pin: the pin on the package that will be used to signal
17 "data ready" (valid values: 1 or 2). This property is not configurable 17 "data ready" (valid values: 1 or 2). This property is not configurable
18 on all sensors. 18 on all sensors.
19- drive-open-drain: the interrupt/data ready line will be configured
20 as open drain, which is useful if several sensors share the same
21 interrupt line. (This binding is taken from pinctrl/pinctrl-bindings.txt)
22 This is a boolean property.
19 23
20Sensors may also have applicable pin control settings, those use the 24Sensors may also have applicable pin control settings, those use the
21standard bindings from pinctrl/pinctrl-bindings.txt. 25standard bindings from pinctrl/pinctrl-bindings.txt.
@@ -37,6 +41,7 @@ Accelerometers:
37- st,lsm330-accel 41- st,lsm330-accel
38- st,lsm303agr-accel 42- st,lsm303agr-accel
39- st,lis2dh12-accel 43- st,lis2dh12-accel
44- st,h3lis331dl-accel
40 45
41Gyroscopes: 46Gyroscopes:
42- st,l3g4200d-gyro 47- st,l3g4200d-gyro
@@ -46,6 +51,7 @@ Gyroscopes:
46- st,l3gd20-gyro 51- st,l3gd20-gyro
47- st,l3g4is-gyro 52- st,l3g4is-gyro
48- st,lsm330-gyro 53- st,lsm330-gyro
54- st,lsm9ds0-gyro
49 55
50Magnetometers: 56Magnetometers:
51- st,lsm303agr-magn 57- st,lsm303agr-magn
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 32f965807a07..4454483cc53f 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -151,6 +151,7 @@ lsi LSI Corp. (LSI Logic)
151lltc Linear Technology Corporation 151lltc Linear Technology Corporation
152marvell Marvell Technology Group Ltd. 152marvell Marvell Technology Group Ltd.
153maxim Maxim Integrated Products 153maxim Maxim Integrated Products
154meas Measurement Specialties
154mediatek MediaTek Inc. 155mediatek MediaTek Inc.
155melexis Melexis N.V. 156melexis Melexis N.V.
156merrii Merrii Technology Co., Ltd. 157merrii Merrii Technology Co., Ltd.
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 76a6c0a70dee..c63eea0c1c8c 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -268,6 +268,10 @@ IIO
268 devm_iio_kfifo_free() 268 devm_iio_kfifo_free()
269 devm_iio_trigger_alloc() 269 devm_iio_trigger_alloc()
270 devm_iio_trigger_free() 270 devm_iio_trigger_free()
271 devm_iio_channel_get()
272 devm_iio_channel_release()
273 devm_iio_channel_get_all()
274 devm_iio_channel_release_all()
271 275
272INPUT 276INPUT
273 devm_input_allocate_device() 277 devm_input_allocate_device()
diff --git a/Documentation/sync_file.txt b/Documentation/sync_file.txt
new file mode 100644
index 000000000000..eaf8297dbca2
--- /dev/null
+++ b/Documentation/sync_file.txt
@@ -0,0 +1,69 @@
1 Sync File API Guide
2 ~~~~~~~~~~~~~~~~~~~
3
4 Gustavo Padovan
5 <gustavo at padovan dot org>
6
7This document serves as a guide for device drivers writers on what the
8sync_file API is, and how drivers can support it. Sync file is the carrier of
9the fences(struct fence) that needs to synchronized between drivers or across
10process boundaries.
11
12The sync_file API is meant to be used to send and receive fence information
13to/from userspace. It enables userspace to do explicit fencing, where instead
14of attaching a fence to the buffer a producer driver (such as a GPU or V4L
15driver) sends the fence related to the buffer to userspace via a sync_file.
16
17The sync_file then can be sent to the consumer (DRM driver for example), that
18will not use the buffer for anything before the fence(s) signals, i.e., the
19driver that issued the fence is not using/processing the buffer anymore, so it
20signals that the buffer is ready to use. And vice-versa for the consumer ->
21producer part of the cycle.
22
23Sync files allows userspace awareness on buffer sharing synchronization between
24drivers.
25
26Sync file was originally added in the Android kernel but current Linux Desktop
27can benefit a lot from it.
28
29in-fences and out-fences
30------------------------
31
32Sync files can go either to or from userspace. When a sync_file is sent from
33the driver to userspace we call the fences it contains 'out-fences'. They are
34related to a buffer that the driver is processing or is going to process, so
35the driver an create out-fence to be able to notify, through fence_signal(),
36when it has finished using (or processing) that buffer. Out-fences are fences
37that the driver creates.
38
39On the other hand if the driver receives fence(s) through a sync_file from
40userspace we call these fence(s) 'in-fences'. Receiveing in-fences means that
41we need to wait for the fence(s) to signal before using any buffer related to
42the in-fences.
43
44Creating Sync Files
45-------------------
46
47When a driver needs to send an out-fence userspace it creates a sync_file.
48
49Interface:
50 struct sync_file *sync_file_create(struct fence *fence);
51
52The caller pass the out-fence and gets back the sync_file. That is just the
53first step, next it needs to install an fd on sync_file->file. So it gets an
54fd:
55
56 fd = get_unused_fd_flags(O_CLOEXEC);
57
58and installs it on sync_file->file:
59
60 fd_install(fd, sync_file->file);
61
62The sync_file fd now can be sent to userspace.
63
64If the creation process fail, or the sync_file needs to be released by any
65other reason fput(sync_file->file) should be used.
66
67References:
68[1] struct sync_file in include/linux/sync_file.h
69[2] All interfaces mentioned above defined in include/linux/sync_file.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 5504c0de47ad..49d1e8339e57 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -776,6 +776,15 @@ S: Supported
776F: drivers/android/ 776F: drivers/android/
777F: drivers/staging/android/ 777F: drivers/staging/android/
778 778
779ANDROID ION DRIVER
780M: Laura Abbott <labbott@redhat.com>
781M: Sumit Semwal <sumit.semwal@linaro.org>
782L: devel@driverdev.osuosl.org
783S: Supported
784F: drivers/staging/android/ion
785F: drivers/staging/android/uapi/ion.h
786F: drivers/staging/android/uapi/ion_test.h
787
779AOA (Apple Onboard Audio) ALSA DRIVER 788AOA (Apple Onboard Audio) ALSA DRIVER
780M: Johannes Berg <johannes@sipsolutions.net> 789M: Johannes Berg <johannes@sipsolutions.net>
781L: linuxppc-dev@lists.ozlabs.org 790L: linuxppc-dev@lists.ozlabs.org
@@ -4746,6 +4755,7 @@ F: sound/soc/fsl/mpc8610_hpcd.c
4746 4755
4747FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER 4756FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER
4748M: "J. German Rivera" <German.Rivera@freescale.com> 4757M: "J. German Rivera" <German.Rivera@freescale.com>
4758M: Stuart Yoder <stuart.yoder@nxp.com>
4749L: linux-kernel@vger.kernel.org 4759L: linux-kernel@vger.kernel.org
4750S: Maintained 4760S: Maintained
4751F: drivers/staging/fsl-mc/ 4761F: drivers/staging/fsl-mc/
@@ -5617,7 +5627,7 @@ IIO SUBSYSTEM AND DRIVERS
5617M: Jonathan Cameron <jic23@kernel.org> 5627M: Jonathan Cameron <jic23@kernel.org>
5618R: Hartmut Knaack <knaack.h@gmx.de> 5628R: Hartmut Knaack <knaack.h@gmx.de>
5619R: Lars-Peter Clausen <lars@metafoo.de> 5629R: Lars-Peter Clausen <lars@metafoo.de>
5620R: Peter Meerwald <pmeerw@pmeerw.net> 5630R: Peter Meerwald-Stadler <pmeerw@pmeerw.net>
5621L: linux-iio@vger.kernel.org 5631L: linux-iio@vger.kernel.org
5622S: Maintained 5632S: Maintained
5623F: drivers/iio/ 5633F: drivers/iio/
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d2ac339de85f..430f761b0d8d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -114,6 +114,8 @@ source "drivers/rtc/Kconfig"
114 114
115source "drivers/dma/Kconfig" 115source "drivers/dma/Kconfig"
116 116
117source "drivers/dma-buf/Kconfig"
118
117source "drivers/dca/Kconfig" 119source "drivers/dca/Kconfig"
118 120
119source "drivers/auxdisplay/Kconfig" 121source "drivers/auxdisplay/Kconfig"
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
new file mode 100644
index 000000000000..9824bc4addf8
--- /dev/null
+++ b/drivers/dma-buf/Kconfig
@@ -0,0 +1,11 @@
1menu "DMABUF options"
2
3config SYNC_FILE
4 bool "sync_file support for fences"
5 default n
6 select ANON_INODES
7 select DMA_SHARED_BUFFER
8 ---help---
9 This option enables the fence framework synchronization to export
10 sync_files to userspace that can represent one or more fences.
11endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 57a675f90cd0..4a424eca75ed 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1 +1,2 @@
1obj-y := dma-buf.o fence.o reservation.o seqno-fence.o 1obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
2obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
new file mode 100644
index 000000000000..f08cf2d8309e
--- /dev/null
+++ b/drivers/dma-buf/sync_file.c
@@ -0,0 +1,395 @@
1/*
2 * drivers/dma-buf/sync_file.c
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/export.h>
18#include <linux/file.h>
19#include <linux/fs.h>
20#include <linux/kernel.h>
21#include <linux/poll.h>
22#include <linux/sched.h>
23#include <linux/slab.h>
24#include <linux/uaccess.h>
25#include <linux/anon_inodes.h>
26#include <linux/sync_file.h>
27#include <uapi/linux/sync_file.h>
28
29static const struct file_operations sync_file_fops;
30
31static struct sync_file *sync_file_alloc(int size)
32{
33 struct sync_file *sync_file;
34
35 sync_file = kzalloc(size, GFP_KERNEL);
36 if (!sync_file)
37 return NULL;
38
39 sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops,
40 sync_file, 0);
41 if (IS_ERR(sync_file->file))
42 goto err;
43
44 kref_init(&sync_file->kref);
45
46 init_waitqueue_head(&sync_file->wq);
47
48 return sync_file;
49
50err:
51 kfree(sync_file);
52 return NULL;
53}
54
55static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
56{
57 struct sync_file_cb *check;
58 struct sync_file *sync_file;
59
60 check = container_of(cb, struct sync_file_cb, cb);
61 sync_file = check->sync_file;
62
63 if (atomic_dec_and_test(&sync_file->status))
64 wake_up_all(&sync_file->wq);
65}
66
67/**
68 * sync_file_create() - creates a sync file
69 * @fence: fence to add to the sync_fence
70 *
71 * Creates a sync_file containg @fence. Once this is called, the sync_file
72 * takes ownership of @fence. The sync_file can be released with
73 * fput(sync_file->file). Returns the sync_file or NULL in case of error.
74 */
75struct sync_file *sync_file_create(struct fence *fence)
76{
77 struct sync_file *sync_file;
78
79 sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]));
80 if (!sync_file)
81 return NULL;
82
83 sync_file->num_fences = 1;
84 atomic_set(&sync_file->status, 1);
85 snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d",
86 fence->ops->get_driver_name(fence),
87 fence->ops->get_timeline_name(fence), fence->context,
88 fence->seqno);
89
90 sync_file->cbs[0].fence = fence;
91 sync_file->cbs[0].sync_file = sync_file;
92 if (fence_add_callback(fence, &sync_file->cbs[0].cb,
93 fence_check_cb_func))
94 atomic_dec(&sync_file->status);
95
96 return sync_file;
97}
98EXPORT_SYMBOL(sync_file_create);
99
100/**
101 * sync_file_fdget() - get a sync_file from an fd
102 * @fd: fd referencing a fence
103 *
104 * Ensures @fd references a valid sync_file, increments the refcount of the
105 * backing file. Returns the sync_file or NULL in case of error.
106 */
107static struct sync_file *sync_file_fdget(int fd)
108{
109 struct file *file = fget(fd);
110
111 if (!file)
112 return NULL;
113
114 if (file->f_op != &sync_file_fops)
115 goto err;
116
117 return file->private_data;
118
119err:
120 fput(file);
121 return NULL;
122}
123
124static void sync_file_add_pt(struct sync_file *sync_file, int *i,
125 struct fence *fence)
126{
127 sync_file->cbs[*i].fence = fence;
128 sync_file->cbs[*i].sync_file = sync_file;
129
130 if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
131 fence_check_cb_func)) {
132 fence_get(fence);
133 (*i)++;
134 }
135}
136
137/**
138 * sync_file_merge() - merge two sync_files
139 * @name: name of new fence
140 * @a: sync_file a
141 * @b: sync_file b
142 *
143 * Creates a new sync_file which contains copies of all the fences in both
144 * @a and @b. @a and @b remain valid, independent sync_file. Returns the
145 * new merged sync_file or NULL in case of error.
146 */
147static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
148 struct sync_file *b)
149{
150 int num_fences = a->num_fences + b->num_fences;
151 struct sync_file *sync_file;
152 int i, i_a, i_b;
153 unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
154
155 sync_file = sync_file_alloc(size);
156 if (!sync_file)
157 return NULL;
158
159 atomic_set(&sync_file->status, num_fences);
160
161 /*
162 * Assume sync_file a and b are both ordered and have no
163 * duplicates with the same context.
164 *
165 * If a sync_file can only be created with sync_file_merge
166 * and sync_file_create, this is a reasonable assumption.
167 */
168 for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
169 struct fence *pt_a = a->cbs[i_a].fence;
170 struct fence *pt_b = b->cbs[i_b].fence;
171
172 if (pt_a->context < pt_b->context) {
173 sync_file_add_pt(sync_file, &i, pt_a);
174
175 i_a++;
176 } else if (pt_a->context > pt_b->context) {
177 sync_file_add_pt(sync_file, &i, pt_b);
178
179 i_b++;
180 } else {
181 if (pt_a->seqno - pt_b->seqno <= INT_MAX)
182 sync_file_add_pt(sync_file, &i, pt_a);
183 else
184 sync_file_add_pt(sync_file, &i, pt_b);
185
186 i_a++;
187 i_b++;
188 }
189 }
190
191 for (; i_a < a->num_fences; i_a++)
192 sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
193
194 for (; i_b < b->num_fences; i_b++)
195 sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
196
197 if (num_fences > i)
198 atomic_sub(num_fences - i, &sync_file->status);
199 sync_file->num_fences = i;
200
201 strlcpy(sync_file->name, name, sizeof(sync_file->name));
202 return sync_file;
203}
204
205static void sync_file_free(struct kref *kref)
206{
207 struct sync_file *sync_file = container_of(kref, struct sync_file,
208 kref);
209 int i;
210
211 for (i = 0; i < sync_file->num_fences; ++i) {
212 fence_remove_callback(sync_file->cbs[i].fence,
213 &sync_file->cbs[i].cb);
214 fence_put(sync_file->cbs[i].fence);
215 }
216
217 kfree(sync_file);
218}
219
220static int sync_file_release(struct inode *inode, struct file *file)
221{
222 struct sync_file *sync_file = file->private_data;
223
224 kref_put(&sync_file->kref, sync_file_free);
225 return 0;
226}
227
228static unsigned int sync_file_poll(struct file *file, poll_table *wait)
229{
230 struct sync_file *sync_file = file->private_data;
231 int status;
232
233 poll_wait(file, &sync_file->wq, wait);
234
235 status = atomic_read(&sync_file->status);
236
237 if (!status)
238 return POLLIN;
239 if (status < 0)
240 return POLLERR;
241 return 0;
242}
243
244static long sync_file_ioctl_merge(struct sync_file *sync_file,
245 unsigned long arg)
246{
247 int fd = get_unused_fd_flags(O_CLOEXEC);
248 int err;
249 struct sync_file *fence2, *fence3;
250 struct sync_merge_data data;
251
252 if (fd < 0)
253 return fd;
254
255 if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
256 err = -EFAULT;
257 goto err_put_fd;
258 }
259
260 if (data.flags || data.pad) {
261 err = -EINVAL;
262 goto err_put_fd;
263 }
264
265 fence2 = sync_file_fdget(data.fd2);
266 if (!fence2) {
267 err = -ENOENT;
268 goto err_put_fd;
269 }
270
271 data.name[sizeof(data.name) - 1] = '\0';
272 fence3 = sync_file_merge(data.name, sync_file, fence2);
273 if (!fence3) {
274 err = -ENOMEM;
275 goto err_put_fence2;
276 }
277
278 data.fence = fd;
279 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
280 err = -EFAULT;
281 goto err_put_fence3;
282 }
283
284 fd_install(fd, fence3->file);
285 fput(fence2->file);
286 return 0;
287
288err_put_fence3:
289 fput(fence3->file);
290
291err_put_fence2:
292 fput(fence2->file);
293
294err_put_fd:
295 put_unused_fd(fd);
296 return err;
297}
298
299static void sync_fill_fence_info(struct fence *fence,
300 struct sync_fence_info *info)
301{
302 strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
303 sizeof(info->obj_name));
304 strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
305 sizeof(info->driver_name));
306 if (fence_is_signaled(fence))
307 info->status = fence->status >= 0 ? 1 : fence->status;
308 else
309 info->status = 0;
310 info->timestamp_ns = ktime_to_ns(fence->timestamp);
311}
312
313static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
314 unsigned long arg)
315{
316 struct sync_file_info info;
317 struct sync_fence_info *fence_info = NULL;
318 __u32 size;
319 int ret, i;
320
321 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
322 return -EFAULT;
323
324 if (info.flags || info.pad)
325 return -EINVAL;
326
327 /*
328 * Passing num_fences = 0 means that userspace doesn't want to
329 * retrieve any sync_fence_info. If num_fences = 0 we skip filling
330 * sync_fence_info and return the actual number of fences on
331 * info->num_fences.
332 */
333 if (!info.num_fences)
334 goto no_fences;
335
336 if (info.num_fences < sync_file->num_fences)
337 return -EINVAL;
338
339 size = sync_file->num_fences * sizeof(*fence_info);
340 fence_info = kzalloc(size, GFP_KERNEL);
341 if (!fence_info)
342 return -ENOMEM;
343
344 for (i = 0; i < sync_file->num_fences; ++i)
345 sync_fill_fence_info(sync_file->cbs[i].fence, &fence_info[i]);
346
347 if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
348 size)) {
349 ret = -EFAULT;
350 goto out;
351 }
352
353no_fences:
354 strlcpy(info.name, sync_file->name, sizeof(info.name));
355 info.status = atomic_read(&sync_file->status);
356 if (info.status >= 0)
357 info.status = !info.status;
358
359 info.num_fences = sync_file->num_fences;
360
361 if (copy_to_user((void __user *)arg, &info, sizeof(info)))
362 ret = -EFAULT;
363 else
364 ret = 0;
365
366out:
367 kfree(fence_info);
368
369 return ret;
370}
371
372static long sync_file_ioctl(struct file *file, unsigned int cmd,
373 unsigned long arg)
374{
375 struct sync_file *sync_file = file->private_data;
376
377 switch (cmd) {
378 case SYNC_IOC_MERGE:
379 return sync_file_ioctl_merge(sync_file, arg);
380
381 case SYNC_IOC_FILE_INFO:
382 return sync_file_ioctl_fence_info(sync_file, arg);
383
384 default:
385 return -ENOTTY;
386 }
387}
388
389static const struct file_operations sync_file_fops = {
390 .release = sync_file_release,
391 .poll = sync_file_poll,
392 .unlocked_ioctl = sync_file_ioctl,
393 .compat_ioctl = sync_file_ioctl,
394};
395
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 236ada93df53..afdd55ddf821 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -28,11 +28,6 @@
28#define BO_LOCKED 0x4000 28#define BO_LOCKED 0x4000
29#define BO_PINNED 0x2000 29#define BO_PINNED 0x2000
30 30
31static inline void __user *to_user_ptr(u64 address)
32{
33 return (void __user *)(uintptr_t)address;
34}
35
36static struct etnaviv_gem_submit *submit_create(struct drm_device *dev, 31static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
37 struct etnaviv_gpu *gpu, size_t nr) 32 struct etnaviv_gpu *gpu, size_t nr)
38{ 33{
@@ -347,21 +342,21 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
347 cmdbuf->exec_state = args->exec_state; 342 cmdbuf->exec_state = args->exec_state;
348 cmdbuf->ctx = file->driver_priv; 343 cmdbuf->ctx = file->driver_priv;
349 344
350 ret = copy_from_user(bos, to_user_ptr(args->bos), 345 ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
351 args->nr_bos * sizeof(*bos)); 346 args->nr_bos * sizeof(*bos));
352 if (ret) { 347 if (ret) {
353 ret = -EFAULT; 348 ret = -EFAULT;
354 goto err_submit_cmds; 349 goto err_submit_cmds;
355 } 350 }
356 351
357 ret = copy_from_user(relocs, to_user_ptr(args->relocs), 352 ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
358 args->nr_relocs * sizeof(*relocs)); 353 args->nr_relocs * sizeof(*relocs));
359 if (ret) { 354 if (ret) {
360 ret = -EFAULT; 355 ret = -EFAULT;
361 goto err_submit_cmds; 356 goto err_submit_cmds;
362 } 357 }
363 358
364 ret = copy_from_user(stream, to_user_ptr(args->stream), 359 ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
365 args->stream_size); 360 args->stream_size);
366 if (ret) { 361 if (ret) {
367 ret = -EFAULT; 362 ret = -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index daba7ebb9699..5d7a7c4f5136 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3577,11 +3577,6 @@ static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
3577 return VGACNTRL; 3577 return VGACNTRL;
3578} 3578}
3579 3579
3580static inline void __user *to_user_ptr(u64 address)
3581{
3582 return (void __user *)(uintptr_t)address;
3583}
3584
3585static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) 3580static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
3586{ 3581{
3587 unsigned long j = msecs_to_jiffies(m); 3582 unsigned long j = msecs_to_jiffies(m);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f2cb9a9539ee..233adc31ef0c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -324,7 +324,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
324{ 324{
325 struct drm_device *dev = obj->base.dev; 325 struct drm_device *dev = obj->base.dev;
326 void *vaddr = obj->phys_handle->vaddr + args->offset; 326 void *vaddr = obj->phys_handle->vaddr + args->offset;
327 char __user *user_data = to_user_ptr(args->data_ptr); 327 char __user *user_data = u64_to_user_ptr(args->data_ptr);
328 int ret = 0; 328 int ret = 0;
329 329
330 /* We manually control the domain here and pretend that it 330 /* We manually control the domain here and pretend that it
@@ -605,7 +605,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
605 int needs_clflush = 0; 605 int needs_clflush = 0;
606 struct sg_page_iter sg_iter; 606 struct sg_page_iter sg_iter;
607 607
608 user_data = to_user_ptr(args->data_ptr); 608 user_data = u64_to_user_ptr(args->data_ptr);
609 remain = args->size; 609 remain = args->size;
610 610
611 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 611 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -692,7 +692,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
692 return 0; 692 return 0;
693 693
694 if (!access_ok(VERIFY_WRITE, 694 if (!access_ok(VERIFY_WRITE,
695 to_user_ptr(args->data_ptr), 695 u64_to_user_ptr(args->data_ptr),
696 args->size)) 696 args->size))
697 return -EFAULT; 697 return -EFAULT;
698 698
@@ -783,7 +783,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
783 if (ret) 783 if (ret)
784 goto out_unpin; 784 goto out_unpin;
785 785
786 user_data = to_user_ptr(args->data_ptr); 786 user_data = u64_to_user_ptr(args->data_ptr);
787 remain = args->size; 787 remain = args->size;
788 788
789 offset = i915_gem_obj_ggtt_offset(obj) + args->offset; 789 offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
@@ -907,7 +907,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
907 int needs_clflush_before = 0; 907 int needs_clflush_before = 0;
908 struct sg_page_iter sg_iter; 908 struct sg_page_iter sg_iter;
909 909
910 user_data = to_user_ptr(args->data_ptr); 910 user_data = u64_to_user_ptr(args->data_ptr);
911 remain = args->size; 911 remain = args->size;
912 912
913 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 913 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -1036,12 +1036,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1036 return 0; 1036 return 0;
1037 1037
1038 if (!access_ok(VERIFY_READ, 1038 if (!access_ok(VERIFY_READ,
1039 to_user_ptr(args->data_ptr), 1039 u64_to_user_ptr(args->data_ptr),
1040 args->size)) 1040 args->size))
1041 return -EFAULT; 1041 return -EFAULT;
1042 1042
1043 if (likely(!i915.prefault_disable)) { 1043 if (likely(!i915.prefault_disable)) {
1044 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), 1044 ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
1045 args->size); 1045 args->size);
1046 if (ret) 1046 if (ret)
1047 return -EFAULT; 1047 return -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b845f468dd74..a676eedb441b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -514,7 +514,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
514 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 514 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
515 int remain, ret; 515 int remain, ret;
516 516
517 user_relocs = to_user_ptr(entry->relocs_ptr); 517 user_relocs = u64_to_user_ptr(entry->relocs_ptr);
518 518
519 remain = entry->relocation_count; 519 remain = entry->relocation_count;
520 while (remain) { 520 while (remain) {
@@ -865,7 +865,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
865 u64 invalid_offset = (u64)-1; 865 u64 invalid_offset = (u64)-1;
866 int j; 866 int j;
867 867
868 user_relocs = to_user_ptr(exec[i].relocs_ptr); 868 user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
869 869
870 if (copy_from_user(reloc+total, user_relocs, 870 if (copy_from_user(reloc+total, user_relocs,
871 exec[i].relocation_count * sizeof(*reloc))) { 871 exec[i].relocation_count * sizeof(*reloc))) {
@@ -1009,7 +1009,7 @@ validate_exec_list(struct drm_device *dev,
1009 invalid_flags |= EXEC_OBJECT_NEEDS_GTT; 1009 invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
1010 1010
1011 for (i = 0; i < count; i++) { 1011 for (i = 0; i < count; i++) {
1012 char __user *ptr = to_user_ptr(exec[i].relocs_ptr); 1012 char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
1013 int length; /* limited by fault_in_pages_readable() */ 1013 int length; /* limited by fault_in_pages_readable() */
1014 1014
1015 if (exec[i].flags & invalid_flags) 1015 if (exec[i].flags & invalid_flags)
@@ -1696,7 +1696,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1696 return -ENOMEM; 1696 return -ENOMEM;
1697 } 1697 }
1698 ret = copy_from_user(exec_list, 1698 ret = copy_from_user(exec_list,
1699 to_user_ptr(args->buffers_ptr), 1699 u64_to_user_ptr(args->buffers_ptr),
1700 sizeof(*exec_list) * args->buffer_count); 1700 sizeof(*exec_list) * args->buffer_count);
1701 if (ret != 0) { 1701 if (ret != 0) {
1702 DRM_DEBUG("copy %d exec entries failed %d\n", 1702 DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1732,7 +1732,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1732 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); 1732 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1733 if (!ret) { 1733 if (!ret) {
1734 struct drm_i915_gem_exec_object __user *user_exec_list = 1734 struct drm_i915_gem_exec_object __user *user_exec_list =
1735 to_user_ptr(args->buffers_ptr); 1735 u64_to_user_ptr(args->buffers_ptr);
1736 1736
1737 /* Copy the new buffer offsets back to the user's exec list. */ 1737 /* Copy the new buffer offsets back to the user's exec list. */
1738 for (i = 0; i < args->buffer_count; i++) { 1738 for (i = 0; i < args->buffer_count; i++) {
@@ -1786,7 +1786,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1786 return -ENOMEM; 1786 return -ENOMEM;
1787 } 1787 }
1788 ret = copy_from_user(exec2_list, 1788 ret = copy_from_user(exec2_list,
1789 to_user_ptr(args->buffers_ptr), 1789 u64_to_user_ptr(args->buffers_ptr),
1790 sizeof(*exec2_list) * args->buffer_count); 1790 sizeof(*exec2_list) * args->buffer_count);
1791 if (ret != 0) { 1791 if (ret != 0) {
1792 DRM_DEBUG("copy %d exec entries failed %d\n", 1792 DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1799,7 +1799,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1799 if (!ret) { 1799 if (!ret) {
1800 /* Copy the new buffer offsets back to the user's exec list. */ 1800 /* Copy the new buffer offsets back to the user's exec list. */
1801 struct drm_i915_gem_exec_object2 __user *user_exec_list = 1801 struct drm_i915_gem_exec_object2 __user *user_exec_list =
1802 to_user_ptr(args->buffers_ptr); 1802 u64_to_user_ptr(args->buffers_ptr);
1803 int i; 1803 int i;
1804 1804
1805 for (i = 0; i < args->buffer_count; i++) { 1805 for (i = 0; i < args->buffer_count; i++) {
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 43d2181231c0..23d25283616c 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -28,11 +28,6 @@
28#define BO_LOCKED 0x4000 28#define BO_LOCKED 0x4000
29#define BO_PINNED 0x2000 29#define BO_PINNED 0x2000
30 30
31static inline void __user *to_user_ptr(u64 address)
32{
33 return (void __user *)(uintptr_t)address;
34}
35
36static struct msm_gem_submit *submit_create(struct drm_device *dev, 31static struct msm_gem_submit *submit_create(struct drm_device *dev,
37 struct msm_gpu *gpu, int nr) 32 struct msm_gpu *gpu, int nr)
38{ 33{
@@ -68,7 +63,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
68 struct drm_gem_object *obj; 63 struct drm_gem_object *obj;
69 struct msm_gem_object *msm_obj; 64 struct msm_gem_object *msm_obj;
70 void __user *userptr = 65 void __user *userptr =
71 to_user_ptr(args->bos + (i * sizeof(submit_bo))); 66 u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
72 67
73 ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); 68 ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
74 if (ret) { 69 if (ret) {
@@ -257,7 +252,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
257 for (i = 0; i < nr_relocs; i++) { 252 for (i = 0; i < nr_relocs; i++) {
258 struct drm_msm_gem_submit_reloc submit_reloc; 253 struct drm_msm_gem_submit_reloc submit_reloc;
259 void __user *userptr = 254 void __user *userptr =
260 to_user_ptr(relocs + (i * sizeof(submit_reloc))); 255 u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
261 uint32_t iova, off; 256 uint32_t iova, off;
262 bool valid; 257 bool valid;
263 258
@@ -356,7 +351,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
356 for (i = 0; i < args->nr_cmds; i++) { 351 for (i = 0; i < args->nr_cmds; i++) {
357 struct drm_msm_gem_submit_cmd submit_cmd; 352 struct drm_msm_gem_submit_cmd submit_cmd;
358 void __user *userptr = 353 void __user *userptr =
359 to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); 354 u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
360 struct msm_gem_object *msm_obj; 355 struct msm_gem_object *msm_obj;
361 uint32_t iova; 356 uint32_t iova;
362 357
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index b0d3ecf3318b..e4a758cd7d35 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -64,7 +64,7 @@ config IIO_ST_ACCEL_3AXIS
64 help 64 help
65 Say yes here to build support for STMicroelectronics accelerometers: 65 Say yes here to build support for STMicroelectronics accelerometers:
66 LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC, 66 LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
67 LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12. 67 LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL.
68 68
69 This driver can also be built as a module. If so, these modules 69 This driver can also be built as a module. If so, these modules
70 will be created: 70 will be created:
@@ -143,7 +143,8 @@ config MMA8452
143 select IIO_TRIGGERED_BUFFER 143 select IIO_TRIGGERED_BUFFER
144 help 144 help
145 Say yes here to build support for the following Freescale 3-axis 145 Say yes here to build support for the following Freescale 3-axis
146 accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC. 146 accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC,
147 FXLS8471Q.
147 148
148 To compile this driver as a module, choose M here: the module 149 To compile this driver as a module, choose M here: the module
149 will be called mma8452. 150 will be called mma8452.
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 2072a31e813b..197e693e7e7b 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -25,7 +25,6 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/acpi.h> 27#include <linux/acpi.h>
28#include <linux/gpio/consumer.h>
29#include <linux/pm.h> 28#include <linux/pm.h>
30#include <linux/pm_runtime.h> 29#include <linux/pm_runtime.h>
31#include <linux/iio/iio.h> 30#include <linux/iio/iio.h>
@@ -138,6 +137,7 @@ enum bmc150_accel_axis {
138 AXIS_X, 137 AXIS_X,
139 AXIS_Y, 138 AXIS_Y,
140 AXIS_Z, 139 AXIS_Z,
140 AXIS_MAX,
141}; 141};
142 142
143enum bmc150_power_modes { 143enum bmc150_power_modes {
@@ -188,7 +188,6 @@ enum bmc150_accel_trigger_id {
188 188
189struct bmc150_accel_data { 189struct bmc150_accel_data {
190 struct regmap *regmap; 190 struct regmap *regmap;
191 struct device *dev;
192 int irq; 191 int irq;
193 struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS]; 192 struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
194 atomic_t active_intr; 193 atomic_t active_intr;
@@ -246,16 +245,18 @@ static const struct {
246 {500000, BMC150_ACCEL_SLEEP_500_MS}, 245 {500000, BMC150_ACCEL_SLEEP_500_MS},
247 {1000000, BMC150_ACCEL_SLEEP_1_SEC} }; 246 {1000000, BMC150_ACCEL_SLEEP_1_SEC} };
248 247
249static const struct regmap_config bmc150_i2c_regmap_conf = { 248const struct regmap_config bmc150_regmap_conf = {
250 .reg_bits = 8, 249 .reg_bits = 8,
251 .val_bits = 8, 250 .val_bits = 8,
252 .max_register = 0x3f, 251 .max_register = 0x3f,
253}; 252};
253EXPORT_SYMBOL_GPL(bmc150_regmap_conf);
254 254
255static int bmc150_accel_set_mode(struct bmc150_accel_data *data, 255static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
256 enum bmc150_power_modes mode, 256 enum bmc150_power_modes mode,
257 int dur_us) 257 int dur_us)
258{ 258{
259 struct device *dev = regmap_get_device(data->regmap);
259 int i; 260 int i;
260 int ret; 261 int ret;
261 u8 lpw_bits; 262 u8 lpw_bits;
@@ -279,11 +280,11 @@ static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
279 lpw_bits = mode << BMC150_ACCEL_PMU_MODE_SHIFT; 280 lpw_bits = mode << BMC150_ACCEL_PMU_MODE_SHIFT;
280 lpw_bits |= (dur_val << BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT); 281 lpw_bits |= (dur_val << BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT);
281 282
282 dev_dbg(data->dev, "Set Mode bits %x\n", lpw_bits); 283 dev_dbg(dev, "Set Mode bits %x\n", lpw_bits);
283 284
284 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_LPW, lpw_bits); 285 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_LPW, lpw_bits);
285 if (ret < 0) { 286 if (ret < 0) {
286 dev_err(data->dev, "Error writing reg_pmu_lpw\n"); 287 dev_err(dev, "Error writing reg_pmu_lpw\n");
287 return ret; 288 return ret;
288 } 289 }
289 290
@@ -316,23 +317,24 @@ static int bmc150_accel_set_bw(struct bmc150_accel_data *data, int val,
316 317
317static int bmc150_accel_update_slope(struct bmc150_accel_data *data) 318static int bmc150_accel_update_slope(struct bmc150_accel_data *data)
318{ 319{
320 struct device *dev = regmap_get_device(data->regmap);
319 int ret; 321 int ret;
320 322
321 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_6, 323 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_6,
322 data->slope_thres); 324 data->slope_thres);
323 if (ret < 0) { 325 if (ret < 0) {
324 dev_err(data->dev, "Error writing reg_int_6\n"); 326 dev_err(dev, "Error writing reg_int_6\n");
325 return ret; 327 return ret;
326 } 328 }
327 329
328 ret = regmap_update_bits(data->regmap, BMC150_ACCEL_REG_INT_5, 330 ret = regmap_update_bits(data->regmap, BMC150_ACCEL_REG_INT_5,
329 BMC150_ACCEL_SLOPE_DUR_MASK, data->slope_dur); 331 BMC150_ACCEL_SLOPE_DUR_MASK, data->slope_dur);
330 if (ret < 0) { 332 if (ret < 0) {
331 dev_err(data->dev, "Error updating reg_int_5\n"); 333 dev_err(dev, "Error updating reg_int_5\n");
332 return ret; 334 return ret;
333 } 335 }
334 336
335 dev_dbg(data->dev, "%s: %x %x\n", __func__, data->slope_thres, 337 dev_dbg(dev, "%s: %x %x\n", __func__, data->slope_thres,
336 data->slope_dur); 338 data->slope_dur);
337 339
338 return ret; 340 return ret;
@@ -378,20 +380,21 @@ static int bmc150_accel_get_startup_times(struct bmc150_accel_data *data)
378 380
379static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on) 381static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
380{ 382{
383 struct device *dev = regmap_get_device(data->regmap);
381 int ret; 384 int ret;
382 385
383 if (on) { 386 if (on) {
384 ret = pm_runtime_get_sync(data->dev); 387 ret = pm_runtime_get_sync(dev);
385 } else { 388 } else {
386 pm_runtime_mark_last_busy(data->dev); 389 pm_runtime_mark_last_busy(dev);
387 ret = pm_runtime_put_autosuspend(data->dev); 390 ret = pm_runtime_put_autosuspend(dev);
388 } 391 }
389 392
390 if (ret < 0) { 393 if (ret < 0) {
391 dev_err(data->dev, 394 dev_err(dev,
392 "Failed: bmc150_accel_set_power_state for %d\n", on); 395 "Failed: bmc150_accel_set_power_state for %d\n", on);
393 if (on) 396 if (on)
394 pm_runtime_put_noidle(data->dev); 397 pm_runtime_put_noidle(dev);
395 398
396 return ret; 399 return ret;
397 } 400 }
@@ -445,6 +448,7 @@ static void bmc150_accel_interrupts_setup(struct iio_dev *indio_dev,
445static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i, 448static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
446 bool state) 449 bool state)
447{ 450{
451 struct device *dev = regmap_get_device(data->regmap);
448 struct bmc150_accel_interrupt *intr = &data->interrupts[i]; 452 struct bmc150_accel_interrupt *intr = &data->interrupts[i];
449 const struct bmc150_accel_interrupt_info *info = intr->info; 453 const struct bmc150_accel_interrupt_info *info = intr->info;
450 int ret; 454 int ret;
@@ -474,7 +478,7 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
474 ret = regmap_update_bits(data->regmap, info->map_reg, info->map_bitmask, 478 ret = regmap_update_bits(data->regmap, info->map_reg, info->map_bitmask,
475 (state ? info->map_bitmask : 0)); 479 (state ? info->map_bitmask : 0));
476 if (ret < 0) { 480 if (ret < 0) {
477 dev_err(data->dev, "Error updating reg_int_map\n"); 481 dev_err(dev, "Error updating reg_int_map\n");
478 goto out_fix_power_state; 482 goto out_fix_power_state;
479 } 483 }
480 484
@@ -482,7 +486,7 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
482 ret = regmap_update_bits(data->regmap, info->en_reg, info->en_bitmask, 486 ret = regmap_update_bits(data->regmap, info->en_reg, info->en_bitmask,
483 (state ? info->en_bitmask : 0)); 487 (state ? info->en_bitmask : 0));
484 if (ret < 0) { 488 if (ret < 0) {
485 dev_err(data->dev, "Error updating reg_int_en\n"); 489 dev_err(dev, "Error updating reg_int_en\n");
486 goto out_fix_power_state; 490 goto out_fix_power_state;
487 } 491 }
488 492
@@ -500,6 +504,7 @@ out_fix_power_state:
500 504
501static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val) 505static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
502{ 506{
507 struct device *dev = regmap_get_device(data->regmap);
503 int ret, i; 508 int ret, i;
504 509
505 for (i = 0; i < ARRAY_SIZE(data->chip_info->scale_table); ++i) { 510 for (i = 0; i < ARRAY_SIZE(data->chip_info->scale_table); ++i) {
@@ -508,8 +513,7 @@ static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
508 BMC150_ACCEL_REG_PMU_RANGE, 513 BMC150_ACCEL_REG_PMU_RANGE,
509 data->chip_info->scale_table[i].reg_range); 514 data->chip_info->scale_table[i].reg_range);
510 if (ret < 0) { 515 if (ret < 0) {
511 dev_err(data->dev, 516 dev_err(dev, "Error writing pmu_range\n");
512 "Error writing pmu_range\n");
513 return ret; 517 return ret;
514 } 518 }
515 519
@@ -523,6 +527,7 @@ static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
523 527
524static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val) 528static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val)
525{ 529{
530 struct device *dev = regmap_get_device(data->regmap);
526 int ret; 531 int ret;
527 unsigned int value; 532 unsigned int value;
528 533
@@ -530,7 +535,7 @@ static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val)
530 535
531 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_TEMP, &value); 536 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_TEMP, &value);
532 if (ret < 0) { 537 if (ret < 0) {
533 dev_err(data->dev, "Error reading reg_temp\n"); 538 dev_err(dev, "Error reading reg_temp\n");
534 mutex_unlock(&data->mutex); 539 mutex_unlock(&data->mutex);
535 return ret; 540 return ret;
536 } 541 }
@@ -545,6 +550,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
545 struct iio_chan_spec const *chan, 550 struct iio_chan_spec const *chan,
546 int *val) 551 int *val)
547{ 552{
553 struct device *dev = regmap_get_device(data->regmap);
548 int ret; 554 int ret;
549 int axis = chan->scan_index; 555 int axis = chan->scan_index;
550 __le16 raw_val; 556 __le16 raw_val;
@@ -559,7 +565,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
559 ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis), 565 ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
560 &raw_val, sizeof(raw_val)); 566 &raw_val, sizeof(raw_val));
561 if (ret < 0) { 567 if (ret < 0) {
562 dev_err(data->dev, "Error reading axis %d\n", axis); 568 dev_err(dev, "Error reading axis %d\n", axis);
563 bmc150_accel_set_power_state(data, false); 569 bmc150_accel_set_power_state(data, false);
564 mutex_unlock(&data->mutex); 570 mutex_unlock(&data->mutex);
565 return ret; 571 return ret;
@@ -831,6 +837,7 @@ static int bmc150_accel_set_watermark(struct iio_dev *indio_dev, unsigned val)
831static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data, 837static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data,
832 char *buffer, int samples) 838 char *buffer, int samples)
833{ 839{
840 struct device *dev = regmap_get_device(data->regmap);
834 int sample_length = 3 * 2; 841 int sample_length = 3 * 2;
835 int ret; 842 int ret;
836 int total_length = samples * sample_length; 843 int total_length = samples * sample_length;
@@ -854,7 +861,8 @@ static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data,
854 } 861 }
855 862
856 if (ret) 863 if (ret)
857 dev_err(data->dev, "Error transferring data from fifo in single steps of %zu\n", 864 dev_err(dev,
865 "Error transferring data from fifo in single steps of %zu\n",
858 step); 866 step);
859 867
860 return ret; 868 return ret;
@@ -864,6 +872,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
864 unsigned samples, bool irq) 872 unsigned samples, bool irq)
865{ 873{
866 struct bmc150_accel_data *data = iio_priv(indio_dev); 874 struct bmc150_accel_data *data = iio_priv(indio_dev);
875 struct device *dev = regmap_get_device(data->regmap);
867 int ret, i; 876 int ret, i;
868 u8 count; 877 u8 count;
869 u16 buffer[BMC150_ACCEL_FIFO_LENGTH * 3]; 878 u16 buffer[BMC150_ACCEL_FIFO_LENGTH * 3];
@@ -873,7 +882,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
873 882
874 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_FIFO_STATUS, &val); 883 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_FIFO_STATUS, &val);
875 if (ret < 0) { 884 if (ret < 0) {
876 dev_err(data->dev, "Error reading reg_fifo_status\n"); 885 dev_err(dev, "Error reading reg_fifo_status\n");
877 return ret; 886 return ret;
878 } 887 }
879 888
@@ -1105,27 +1114,23 @@ static const struct iio_info bmc150_accel_info_fifo = {
1105 .driver_module = THIS_MODULE, 1114 .driver_module = THIS_MODULE,
1106}; 1115};
1107 1116
1117static const unsigned long bmc150_accel_scan_masks[] = {
1118 BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
1119 0};
1120
1108static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p) 1121static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
1109{ 1122{
1110 struct iio_poll_func *pf = p; 1123 struct iio_poll_func *pf = p;
1111 struct iio_dev *indio_dev = pf->indio_dev; 1124 struct iio_dev *indio_dev = pf->indio_dev;
1112 struct bmc150_accel_data *data = iio_priv(indio_dev); 1125 struct bmc150_accel_data *data = iio_priv(indio_dev);
1113 int bit, ret, i = 0; 1126 int ret;
1114 unsigned int raw_val;
1115 1127
1116 mutex_lock(&data->mutex); 1128 mutex_lock(&data->mutex);
1117 for_each_set_bit(bit, indio_dev->active_scan_mask, 1129 ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_REG_XOUT_L,
1118 indio_dev->masklength) { 1130 data->buffer, AXIS_MAX * 2);
1119 ret = regmap_bulk_read(data->regmap,
1120 BMC150_ACCEL_AXIS_TO_REG(bit), &raw_val,
1121 2);
1122 if (ret < 0) {
1123 mutex_unlock(&data->mutex);
1124 goto err_read;
1125 }
1126 data->buffer[i++] = raw_val;
1127 }
1128 mutex_unlock(&data->mutex); 1131 mutex_unlock(&data->mutex);
1132 if (ret < 0)
1133 goto err_read;
1129 1134
1130 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, 1135 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
1131 pf->timestamp); 1136 pf->timestamp);
@@ -1139,6 +1144,7 @@ static int bmc150_accel_trig_try_reen(struct iio_trigger *trig)
1139{ 1144{
1140 struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig); 1145 struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
1141 struct bmc150_accel_data *data = t->data; 1146 struct bmc150_accel_data *data = t->data;
1147 struct device *dev = regmap_get_device(data->regmap);
1142 int ret; 1148 int ret;
1143 1149
1144 /* new data interrupts don't need ack */ 1150 /* new data interrupts don't need ack */
@@ -1152,8 +1158,7 @@ static int bmc150_accel_trig_try_reen(struct iio_trigger *trig)
1152 BMC150_ACCEL_INT_MODE_LATCH_RESET); 1158 BMC150_ACCEL_INT_MODE_LATCH_RESET);
1153 mutex_unlock(&data->mutex); 1159 mutex_unlock(&data->mutex);
1154 if (ret < 0) { 1160 if (ret < 0) {
1155 dev_err(data->dev, 1161 dev_err(dev, "Error writing reg_int_rst_latch\n");
1156 "Error writing reg_int_rst_latch\n");
1157 return ret; 1162 return ret;
1158 } 1163 }
1159 1164
@@ -1204,13 +1209,14 @@ static const struct iio_trigger_ops bmc150_accel_trigger_ops = {
1204static int bmc150_accel_handle_roc_event(struct iio_dev *indio_dev) 1209static int bmc150_accel_handle_roc_event(struct iio_dev *indio_dev)
1205{ 1210{
1206 struct bmc150_accel_data *data = iio_priv(indio_dev); 1211 struct bmc150_accel_data *data = iio_priv(indio_dev);
1212 struct device *dev = regmap_get_device(data->regmap);
1207 int dir; 1213 int dir;
1208 int ret; 1214 int ret;
1209 unsigned int val; 1215 unsigned int val;
1210 1216
1211 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_INT_STATUS_2, &val); 1217 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_INT_STATUS_2, &val);
1212 if (ret < 0) { 1218 if (ret < 0) {
1213 dev_err(data->dev, "Error reading reg_int_status_2\n"); 1219 dev_err(dev, "Error reading reg_int_status_2\n");
1214 return ret; 1220 return ret;
1215 } 1221 }
1216 1222
@@ -1253,6 +1259,7 @@ static irqreturn_t bmc150_accel_irq_thread_handler(int irq, void *private)
1253{ 1259{
1254 struct iio_dev *indio_dev = private; 1260 struct iio_dev *indio_dev = private;
1255 struct bmc150_accel_data *data = iio_priv(indio_dev); 1261 struct bmc150_accel_data *data = iio_priv(indio_dev);
1262 struct device *dev = regmap_get_device(data->regmap);
1256 bool ack = false; 1263 bool ack = false;
1257 int ret; 1264 int ret;
1258 1265
@@ -1276,7 +1283,7 @@ static irqreturn_t bmc150_accel_irq_thread_handler(int irq, void *private)
1276 BMC150_ACCEL_INT_MODE_LATCH_INT | 1283 BMC150_ACCEL_INT_MODE_LATCH_INT |
1277 BMC150_ACCEL_INT_MODE_LATCH_RESET); 1284 BMC150_ACCEL_INT_MODE_LATCH_RESET);
1278 if (ret) 1285 if (ret)
1279 dev_err(data->dev, "Error writing reg_int_rst_latch\n"); 1286 dev_err(dev, "Error writing reg_int_rst_latch\n");
1280 1287
1281 ret = IRQ_HANDLED; 1288 ret = IRQ_HANDLED;
1282 } else { 1289 } else {
@@ -1347,13 +1354,14 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
1347static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev, 1354static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
1348 struct bmc150_accel_data *data) 1355 struct bmc150_accel_data *data)
1349{ 1356{
1357 struct device *dev = regmap_get_device(data->regmap);
1350 int i, ret; 1358 int i, ret;
1351 1359
1352 for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) { 1360 for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
1353 struct bmc150_accel_trigger *t = &data->triggers[i]; 1361 struct bmc150_accel_trigger *t = &data->triggers[i];
1354 1362
1355 t->indio_trig = devm_iio_trigger_alloc(data->dev, 1363 t->indio_trig = devm_iio_trigger_alloc(dev,
1356 bmc150_accel_triggers[i].name, 1364 bmc150_accel_triggers[i].name,
1357 indio_dev->name, 1365 indio_dev->name,
1358 indio_dev->id); 1366 indio_dev->id);
1359 if (!t->indio_trig) { 1367 if (!t->indio_trig) {
@@ -1361,7 +1369,7 @@ static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
1361 break; 1369 break;
1362 } 1370 }
1363 1371
1364 t->indio_trig->dev.parent = data->dev; 1372 t->indio_trig->dev.parent = dev;
1365 t->indio_trig->ops = &bmc150_accel_trigger_ops; 1373 t->indio_trig->ops = &bmc150_accel_trigger_ops;
1366 t->intr = bmc150_accel_triggers[i].intr; 1374 t->intr = bmc150_accel_triggers[i].intr;
1367 t->data = data; 1375 t->data = data;
@@ -1385,12 +1393,13 @@ static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
1385 1393
1386static int bmc150_accel_fifo_set_mode(struct bmc150_accel_data *data) 1394static int bmc150_accel_fifo_set_mode(struct bmc150_accel_data *data)
1387{ 1395{
1396 struct device *dev = regmap_get_device(data->regmap);
1388 u8 reg = BMC150_ACCEL_REG_FIFO_CONFIG1; 1397 u8 reg = BMC150_ACCEL_REG_FIFO_CONFIG1;
1389 int ret; 1398 int ret;
1390 1399
1391 ret = regmap_write(data->regmap, reg, data->fifo_mode); 1400 ret = regmap_write(data->regmap, reg, data->fifo_mode);
1392 if (ret < 0) { 1401 if (ret < 0) {
1393 dev_err(data->dev, "Error writing reg_fifo_config1\n"); 1402 dev_err(dev, "Error writing reg_fifo_config1\n");
1394 return ret; 1403 return ret;
1395 } 1404 }
1396 1405
@@ -1400,7 +1409,7 @@ static int bmc150_accel_fifo_set_mode(struct bmc150_accel_data *data)
1400 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_FIFO_CONFIG0, 1409 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_FIFO_CONFIG0,
1401 data->watermark); 1410 data->watermark);
1402 if (ret < 0) 1411 if (ret < 0)
1403 dev_err(data->dev, "Error writing reg_fifo_config0\n"); 1412 dev_err(dev, "Error writing reg_fifo_config0\n");
1404 1413
1405 return ret; 1414 return ret;
1406} 1415}
@@ -1484,17 +1493,17 @@ static const struct iio_buffer_setup_ops bmc150_accel_buffer_ops = {
1484 1493
1485static int bmc150_accel_chip_init(struct bmc150_accel_data *data) 1494static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
1486{ 1495{
1496 struct device *dev = regmap_get_device(data->regmap);
1487 int ret, i; 1497 int ret, i;
1488 unsigned int val; 1498 unsigned int val;
1489 1499
1490 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val); 1500 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
1491 if (ret < 0) { 1501 if (ret < 0) {
1492 dev_err(data->dev, 1502 dev_err(dev, "Error: Reading chip id\n");
1493 "Error: Reading chip id\n");
1494 return ret; 1503 return ret;
1495 } 1504 }
1496 1505
1497 dev_dbg(data->dev, "Chip Id %x\n", val); 1506 dev_dbg(dev, "Chip Id %x\n", val);
1498 for (i = 0; i < ARRAY_SIZE(bmc150_accel_chip_info_tbl); i++) { 1507 for (i = 0; i < ARRAY_SIZE(bmc150_accel_chip_info_tbl); i++) {
1499 if (bmc150_accel_chip_info_tbl[i].chip_id == val) { 1508 if (bmc150_accel_chip_info_tbl[i].chip_id == val) {
1500 data->chip_info = &bmc150_accel_chip_info_tbl[i]; 1509 data->chip_info = &bmc150_accel_chip_info_tbl[i];
@@ -1503,7 +1512,7 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
1503 } 1512 }
1504 1513
1505 if (!data->chip_info) { 1514 if (!data->chip_info) {
1506 dev_err(data->dev, "Invalid chip %x\n", val); 1515 dev_err(dev, "Invalid chip %x\n", val);
1507 return -ENODEV; 1516 return -ENODEV;
1508 } 1517 }
1509 1518
@@ -1520,8 +1529,7 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
1520 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_RANGE, 1529 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_RANGE,
1521 BMC150_ACCEL_DEF_RANGE_4G); 1530 BMC150_ACCEL_DEF_RANGE_4G);
1522 if (ret < 0) { 1531 if (ret < 0) {
1523 dev_err(data->dev, 1532 dev_err(dev, "Error writing reg_pmu_range\n");
1524 "Error writing reg_pmu_range\n");
1525 return ret; 1533 return ret;
1526 } 1534 }
1527 1535
@@ -1539,8 +1547,7 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
1539 BMC150_ACCEL_INT_MODE_LATCH_INT | 1547 BMC150_ACCEL_INT_MODE_LATCH_INT |
1540 BMC150_ACCEL_INT_MODE_LATCH_RESET); 1548 BMC150_ACCEL_INT_MODE_LATCH_RESET);
1541 if (ret < 0) { 1549 if (ret < 0) {
1542 dev_err(data->dev, 1550 dev_err(dev, "Error writing reg_int_rst_latch\n");
1543 "Error writing reg_int_rst_latch\n");
1544 return ret; 1551 return ret;
1545 } 1552 }
1546 1553
@@ -1560,7 +1567,6 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
1560 1567
1561 data = iio_priv(indio_dev); 1568 data = iio_priv(indio_dev);
1562 dev_set_drvdata(dev, indio_dev); 1569 dev_set_drvdata(dev, indio_dev);
1563 data->dev = dev;
1564 data->irq = irq; 1570 data->irq = irq;
1565 1571
1566 data->regmap = regmap; 1572 data->regmap = regmap;
@@ -1575,6 +1581,7 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
1575 indio_dev->channels = data->chip_info->channels; 1581 indio_dev->channels = data->chip_info->channels;
1576 indio_dev->num_channels = data->chip_info->num_channels; 1582 indio_dev->num_channels = data->chip_info->num_channels;
1577 indio_dev->name = name ? name : data->chip_info->name; 1583 indio_dev->name = name ? name : data->chip_info->name;
1584 indio_dev->available_scan_masks = bmc150_accel_scan_masks;
1578 indio_dev->modes = INDIO_DIRECT_MODE; 1585 indio_dev->modes = INDIO_DIRECT_MODE;
1579 indio_dev->info = &bmc150_accel_info; 1586 indio_dev->info = &bmc150_accel_info;
1580 1587
@@ -1583,13 +1590,13 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
1583 bmc150_accel_trigger_handler, 1590 bmc150_accel_trigger_handler,
1584 &bmc150_accel_buffer_ops); 1591 &bmc150_accel_buffer_ops);
1585 if (ret < 0) { 1592 if (ret < 0) {
1586 dev_err(data->dev, "Failed: iio triggered buffer setup\n"); 1593 dev_err(dev, "Failed: iio triggered buffer setup\n");
1587 return ret; 1594 return ret;
1588 } 1595 }
1589 1596
1590 if (data->irq > 0) { 1597 if (data->irq > 0) {
1591 ret = devm_request_threaded_irq( 1598 ret = devm_request_threaded_irq(
1592 data->dev, data->irq, 1599 dev, data->irq,
1593 bmc150_accel_irq_handler, 1600 bmc150_accel_irq_handler,
1594 bmc150_accel_irq_thread_handler, 1601 bmc150_accel_irq_thread_handler,
1595 IRQF_TRIGGER_RISING, 1602 IRQF_TRIGGER_RISING,
@@ -1607,7 +1614,7 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
1607 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH, 1614 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
1608 BMC150_ACCEL_INT_MODE_LATCH_RESET); 1615 BMC150_ACCEL_INT_MODE_LATCH_RESET);
1609 if (ret < 0) { 1616 if (ret < 0) {
1610 dev_err(data->dev, "Error writing reg_int_rst_latch\n"); 1617 dev_err(dev, "Error writing reg_int_rst_latch\n");
1611 goto err_buffer_cleanup; 1618 goto err_buffer_cleanup;
1612 } 1619 }
1613 1620
@@ -1656,9 +1663,9 @@ int bmc150_accel_core_remove(struct device *dev)
1656 1663
1657 iio_device_unregister(indio_dev); 1664 iio_device_unregister(indio_dev);
1658 1665
1659 pm_runtime_disable(data->dev); 1666 pm_runtime_disable(dev);
1660 pm_runtime_set_suspended(data->dev); 1667 pm_runtime_set_suspended(dev);
1661 pm_runtime_put_noidle(data->dev); 1668 pm_runtime_put_noidle(dev);
1662 1669
1663 bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1); 1670 bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
1664 1671
@@ -1707,7 +1714,7 @@ static int bmc150_accel_runtime_suspend(struct device *dev)
1707 struct bmc150_accel_data *data = iio_priv(indio_dev); 1714 struct bmc150_accel_data *data = iio_priv(indio_dev);
1708 int ret; 1715 int ret;
1709 1716
1710 dev_dbg(data->dev, __func__); 1717 dev_dbg(dev, __func__);
1711 ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0); 1718 ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
1712 if (ret < 0) 1719 if (ret < 0)
1713 return -EAGAIN; 1720 return -EAGAIN;
@@ -1722,7 +1729,7 @@ static int bmc150_accel_runtime_resume(struct device *dev)
1722 int ret; 1729 int ret;
1723 int sleep_val; 1730 int sleep_val;
1724 1731
1725 dev_dbg(data->dev, __func__); 1732 dev_dbg(dev, __func__);
1726 1733
1727 ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); 1734 ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
1728 if (ret < 0) 1735 if (ret < 0)
diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
index b41404ba32fc..8ca8041267ef 100644
--- a/drivers/iio/accel/bmc150-accel-i2c.c
+++ b/drivers/iio/accel/bmc150-accel-i2c.c
@@ -28,11 +28,6 @@
28 28
29#include "bmc150-accel.h" 29#include "bmc150-accel.h"
30 30
31static const struct regmap_config bmc150_i2c_regmap_conf = {
32 .reg_bits = 8,
33 .val_bits = 8,
34};
35
36static int bmc150_accel_probe(struct i2c_client *client, 31static int bmc150_accel_probe(struct i2c_client *client,
37 const struct i2c_device_id *id) 32 const struct i2c_device_id *id)
38{ 33{
@@ -43,7 +38,7 @@ static int bmc150_accel_probe(struct i2c_client *client,
43 i2c_check_functionality(client->adapter, 38 i2c_check_functionality(client->adapter,
44 I2C_FUNC_SMBUS_READ_I2C_BLOCK); 39 I2C_FUNC_SMBUS_READ_I2C_BLOCK);
45 40
46 regmap = devm_regmap_init_i2c(client, &bmc150_i2c_regmap_conf); 41 regmap = devm_regmap_init_i2c(client, &bmc150_regmap_conf);
47 if (IS_ERR(regmap)) { 42 if (IS_ERR(regmap)) {
48 dev_err(&client->dev, "Failed to initialize i2c regmap\n"); 43 dev_err(&client->dev, "Failed to initialize i2c regmap\n");
49 return PTR_ERR(regmap); 44 return PTR_ERR(regmap);
diff --git a/drivers/iio/accel/bmc150-accel-spi.c b/drivers/iio/accel/bmc150-accel-spi.c
index 16b66f2a7204..006794a70a1f 100644
--- a/drivers/iio/accel/bmc150-accel-spi.c
+++ b/drivers/iio/accel/bmc150-accel-spi.c
@@ -25,18 +25,12 @@
25 25
26#include "bmc150-accel.h" 26#include "bmc150-accel.h"
27 27
28static const struct regmap_config bmc150_spi_regmap_conf = {
29 .reg_bits = 8,
30 .val_bits = 8,
31 .max_register = 0x3f,
32};
33
34static int bmc150_accel_probe(struct spi_device *spi) 28static int bmc150_accel_probe(struct spi_device *spi)
35{ 29{
36 struct regmap *regmap; 30 struct regmap *regmap;
37 const struct spi_device_id *id = spi_get_device_id(spi); 31 const struct spi_device_id *id = spi_get_device_id(spi);
38 32
39 regmap = devm_regmap_init_spi(spi, &bmc150_spi_regmap_conf); 33 regmap = devm_regmap_init_spi(spi, &bmc150_regmap_conf);
40 if (IS_ERR(regmap)) { 34 if (IS_ERR(regmap)) {
41 dev_err(&spi->dev, "Failed to initialize spi regmap\n"); 35 dev_err(&spi->dev, "Failed to initialize spi regmap\n");
42 return PTR_ERR(regmap); 36 return PTR_ERR(regmap);
diff --git a/drivers/iio/accel/bmc150-accel.h b/drivers/iio/accel/bmc150-accel.h
index ba0335987f94..38a8b11f8c19 100644
--- a/drivers/iio/accel/bmc150-accel.h
+++ b/drivers/iio/accel/bmc150-accel.h
@@ -16,5 +16,6 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
16 const char *name, bool block_supported); 16 const char *name, bool block_supported);
17int bmc150_accel_core_remove(struct device *dev); 17int bmc150_accel_core_remove(struct device *dev);
18extern const struct dev_pm_ops bmc150_accel_pm_ops; 18extern const struct dev_pm_ops bmc150_accel_pm_ops;
19extern const struct regmap_config bmc150_regmap_conf;
19 20
20#endif /* _BMC150_ACCEL_H_ */ 21#endif /* _BMC150_ACCEL_H_ */
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index edec1d099e91..bfe219a8bea2 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -20,7 +20,6 @@
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/string.h> 21#include <linux/string.h>
22#include <linux/acpi.h> 22#include <linux/acpi.h>
23#include <linux/gpio/consumer.h>
24#include <linux/pm.h> 23#include <linux/pm.h>
25#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
26#include <linux/iio/iio.h> 25#include <linux/iio/iio.h>
@@ -115,6 +114,7 @@ enum kxcjk1013_axis {
115 AXIS_X, 114 AXIS_X,
116 AXIS_Y, 115 AXIS_Y,
117 AXIS_Z, 116 AXIS_Z,
117 AXIS_MAX,
118}; 118};
119 119
120enum kxcjk1013_mode { 120enum kxcjk1013_mode {
@@ -922,7 +922,7 @@ static const struct iio_event_spec kxcjk1013_event = {
922 .realbits = 12, \ 922 .realbits = 12, \
923 .storagebits = 16, \ 923 .storagebits = 16, \
924 .shift = 4, \ 924 .shift = 4, \
925 .endianness = IIO_CPU, \ 925 .endianness = IIO_LE, \
926 }, \ 926 }, \
927 .event_spec = &kxcjk1013_event, \ 927 .event_spec = &kxcjk1013_event, \
928 .num_event_specs = 1 \ 928 .num_event_specs = 1 \
@@ -953,25 +953,23 @@ static const struct iio_info kxcjk1013_info = {
953 .driver_module = THIS_MODULE, 953 .driver_module = THIS_MODULE,
954}; 954};
955 955
956static const unsigned long kxcjk1013_scan_masks[] = {0x7, 0};
957
956static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p) 958static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
957{ 959{
958 struct iio_poll_func *pf = p; 960 struct iio_poll_func *pf = p;
959 struct iio_dev *indio_dev = pf->indio_dev; 961 struct iio_dev *indio_dev = pf->indio_dev;
960 struct kxcjk1013_data *data = iio_priv(indio_dev); 962 struct kxcjk1013_data *data = iio_priv(indio_dev);
961 int bit, ret, i = 0; 963 int ret;
962 964
963 mutex_lock(&data->mutex); 965 mutex_lock(&data->mutex);
964 966 ret = i2c_smbus_read_i2c_block_data_or_emulated(data->client,
965 for_each_set_bit(bit, indio_dev->active_scan_mask, 967 KXCJK1013_REG_XOUT_L,
966 indio_dev->masklength) { 968 AXIS_MAX * 2,
967 ret = kxcjk1013_get_acc_reg(data, bit); 969 (u8 *)data->buffer);
968 if (ret < 0) {
969 mutex_unlock(&data->mutex);
970 goto err;
971 }
972 data->buffer[i++] = ret;
973 }
974 mutex_unlock(&data->mutex); 970 mutex_unlock(&data->mutex);
971 if (ret < 0)
972 goto err;
975 973
976 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, 974 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
977 data->timestamp); 975 data->timestamp);
@@ -1204,6 +1202,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
1204 indio_dev->dev.parent = &client->dev; 1202 indio_dev->dev.parent = &client->dev;
1205 indio_dev->channels = kxcjk1013_channels; 1203 indio_dev->channels = kxcjk1013_channels;
1206 indio_dev->num_channels = ARRAY_SIZE(kxcjk1013_channels); 1204 indio_dev->num_channels = ARRAY_SIZE(kxcjk1013_channels);
1205 indio_dev->available_scan_masks = kxcjk1013_scan_masks;
1207 indio_dev->name = name; 1206 indio_dev->name = name;
1208 indio_dev->modes = INDIO_DIRECT_MODE; 1207 indio_dev->modes = INDIO_DIRECT_MODE;
1209 indio_dev->info = &kxcjk1013_info; 1208 indio_dev->info = &kxcjk1013_info;
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index c633cc2c0789..c902f54c23f5 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -55,11 +55,11 @@
55 55
56struct mma7455_data { 56struct mma7455_data {
57 struct regmap *regmap; 57 struct regmap *regmap;
58 struct device *dev;
59}; 58};
60 59
61static int mma7455_drdy(struct mma7455_data *mma7455) 60static int mma7455_drdy(struct mma7455_data *mma7455)
62{ 61{
62 struct device *dev = regmap_get_device(mma7455->regmap);
63 unsigned int reg; 63 unsigned int reg;
64 int tries = 3; 64 int tries = 3;
65 int ret; 65 int ret;
@@ -75,7 +75,7 @@ static int mma7455_drdy(struct mma7455_data *mma7455)
75 msleep(20); 75 msleep(20);
76 } 76 }
77 77
78 dev_warn(mma7455->dev, "data not ready\n"); 78 dev_warn(dev, "data not ready\n");
79 79
80 return -EIO; 80 return -EIO;
81} 81}
@@ -260,7 +260,6 @@ int mma7455_core_probe(struct device *dev, struct regmap *regmap,
260 dev_set_drvdata(dev, indio_dev); 260 dev_set_drvdata(dev, indio_dev);
261 mma7455 = iio_priv(indio_dev); 261 mma7455 = iio_priv(indio_dev);
262 mma7455->regmap = regmap; 262 mma7455->regmap = regmap;
263 mma7455->dev = dev;
264 263
265 indio_dev->info = &mma7455_info; 264 indio_dev->info = &mma7455_info;
266 indio_dev->name = name; 265 indio_dev->name = name;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 7f4994f32a90..e225d3c53bd5 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -6,6 +6,7 @@
6 * MMA8453Q (10 bit) 6 * MMA8453Q (10 bit)
7 * MMA8652FC (12 bit) 7 * MMA8652FC (12 bit)
8 * MMA8653FC (10 bit) 8 * MMA8653FC (10 bit)
9 * FXLS8471Q (14 bit)
9 * 10 *
10 * Copyright 2015 Martin Kepplinger <martin.kepplinger@theobroma-systems.com> 11 * Copyright 2015 Martin Kepplinger <martin.kepplinger@theobroma-systems.com>
11 * Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net> 12 * Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
@@ -16,7 +17,7 @@
16 * 17 *
17 * 7-bit I2C slave address 0x1c/0x1d (pin selectable) 18 * 7-bit I2C slave address 0x1c/0x1d (pin selectable)
18 * 19 *
19 * TODO: orientation events, autosleep 20 * TODO: orientation events
20 */ 21 */
21 22
22#include <linux/module.h> 23#include <linux/module.h>
@@ -31,6 +32,7 @@
31#include <linux/delay.h> 32#include <linux/delay.h>
32#include <linux/of_device.h> 33#include <linux/of_device.h>
33#include <linux/of_irq.h> 34#include <linux/of_irq.h>
35#include <linux/pm_runtime.h>
34 36
35#define MMA8452_STATUS 0x00 37#define MMA8452_STATUS 0x00
36#define MMA8452_STATUS_DRDY (BIT(2) | BIT(1) | BIT(0)) 38#define MMA8452_STATUS_DRDY (BIT(2) | BIT(1) | BIT(0))
@@ -91,6 +93,9 @@
91#define MMA8453_DEVICE_ID 0x3a 93#define MMA8453_DEVICE_ID 0x3a
92#define MMA8652_DEVICE_ID 0x4a 94#define MMA8652_DEVICE_ID 0x4a
93#define MMA8653_DEVICE_ID 0x5a 95#define MMA8653_DEVICE_ID 0x5a
96#define FXLS8471_DEVICE_ID 0x6a
97
98#define MMA8452_AUTO_SUSPEND_DELAY_MS 2000
94 99
95struct mma8452_data { 100struct mma8452_data {
96 struct i2c_client *client; 101 struct i2c_client *client;
@@ -172,6 +177,31 @@ static int mma8452_drdy(struct mma8452_data *data)
172 return -EIO; 177 return -EIO;
173} 178}
174 179
180static int mma8452_set_runtime_pm_state(struct i2c_client *client, bool on)
181{
182#ifdef CONFIG_PM
183 int ret;
184
185 if (on) {
186 ret = pm_runtime_get_sync(&client->dev);
187 } else {
188 pm_runtime_mark_last_busy(&client->dev);
189 ret = pm_runtime_put_autosuspend(&client->dev);
190 }
191
192 if (ret < 0) {
193 dev_err(&client->dev,
194 "failed to change power state to %d\n", on);
195 if (on)
196 pm_runtime_put_noidle(&client->dev);
197
198 return ret;
199 }
200#endif
201
202 return 0;
203}
204
175static int mma8452_read(struct mma8452_data *data, __be16 buf[3]) 205static int mma8452_read(struct mma8452_data *data, __be16 buf[3])
176{ 206{
177 int ret = mma8452_drdy(data); 207 int ret = mma8452_drdy(data);
@@ -179,8 +209,16 @@ static int mma8452_read(struct mma8452_data *data, __be16 buf[3])
179 if (ret < 0) 209 if (ret < 0)
180 return ret; 210 return ret;
181 211
182 return i2c_smbus_read_i2c_block_data(data->client, MMA8452_OUT_X, 212 ret = mma8452_set_runtime_pm_state(data->client, true);
183 3 * sizeof(__be16), (u8 *)buf); 213 if (ret)
214 return ret;
215
216 ret = i2c_smbus_read_i2c_block_data(data->client, MMA8452_OUT_X,
217 3 * sizeof(__be16), (u8 *)buf);
218
219 ret = mma8452_set_runtime_pm_state(data->client, false);
220
221 return ret;
184} 222}
185 223
186static ssize_t mma8452_show_int_plus_micros(char *buf, const int (*vals)[2], 224static ssize_t mma8452_show_int_plus_micros(char *buf, const int (*vals)[2],
@@ -357,7 +395,8 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
357 return IIO_VAL_INT_PLUS_MICRO; 395 return IIO_VAL_INT_PLUS_MICRO;
358 case IIO_CHAN_INFO_CALIBBIAS: 396 case IIO_CHAN_INFO_CALIBBIAS:
359 ret = i2c_smbus_read_byte_data(data->client, 397 ret = i2c_smbus_read_byte_data(data->client,
360 MMA8452_OFF_X + chan->scan_index); 398 MMA8452_OFF_X +
399 chan->scan_index);
361 if (ret < 0) 400 if (ret < 0)
362 return ret; 401 return ret;
363 402
@@ -392,24 +431,47 @@ static int mma8452_active(struct mma8452_data *data)
392 data->ctrl_reg1); 431 data->ctrl_reg1);
393} 432}
394 433
434/* returns >0 if active, 0 if in standby and <0 on error */
435static int mma8452_is_active(struct mma8452_data *data)
436{
437 int reg;
438
439 reg = i2c_smbus_read_byte_data(data->client, MMA8452_CTRL_REG1);
440 if (reg < 0)
441 return reg;
442
443 return reg & MMA8452_CTRL_ACTIVE;
444}
445
395static int mma8452_change_config(struct mma8452_data *data, u8 reg, u8 val) 446static int mma8452_change_config(struct mma8452_data *data, u8 reg, u8 val)
396{ 447{
397 int ret; 448 int ret;
449 int is_active;
398 450
399 mutex_lock(&data->lock); 451 mutex_lock(&data->lock);
400 452
401 /* config can only be changed when in standby */ 453 is_active = mma8452_is_active(data);
402 ret = mma8452_standby(data); 454 if (is_active < 0) {
403 if (ret < 0) 455 ret = is_active;
404 goto fail; 456 goto fail;
457 }
458
459 /* config can only be changed when in standby */
460 if (is_active > 0) {
461 ret = mma8452_standby(data);
462 if (ret < 0)
463 goto fail;
464 }
405 465
406 ret = i2c_smbus_write_byte_data(data->client, reg, val); 466 ret = i2c_smbus_write_byte_data(data->client, reg, val);
407 if (ret < 0) 467 if (ret < 0)
408 goto fail; 468 goto fail;
409 469
410 ret = mma8452_active(data); 470 if (is_active > 0) {
411 if (ret < 0) 471 ret = mma8452_active(data);
412 goto fail; 472 if (ret < 0)
473 goto fail;
474 }
413 475
414 ret = 0; 476 ret = 0;
415fail: 477fail:
@@ -418,7 +480,7 @@ fail:
418 return ret; 480 return ret;
419} 481}
420 482
421/* returns >0 if in freefall mode, 0 if not or <0 if an error occured */ 483/* returns >0 if in freefall mode, 0 if not or <0 if an error occurred */
422static int mma8452_freefall_mode_enabled(struct mma8452_data *data) 484static int mma8452_freefall_mode_enabled(struct mma8452_data *data)
423{ 485{
424 int val; 486 int val;
@@ -668,7 +730,8 @@ static int mma8452_read_event_config(struct iio_dev *indio_dev,
668 if (ret < 0) 730 if (ret < 0)
669 return ret; 731 return ret;
670 732
671 return !!(ret & BIT(chan->scan_index + chip->ev_cfg_chan_shift)); 733 return !!(ret & BIT(chan->scan_index +
734 chip->ev_cfg_chan_shift));
672 default: 735 default:
673 return -EINVAL; 736 return -EINVAL;
674 } 737 }
@@ -682,7 +745,11 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev,
682{ 745{
683 struct mma8452_data *data = iio_priv(indio_dev); 746 struct mma8452_data *data = iio_priv(indio_dev);
684 const struct mma_chip_info *chip = data->chip_info; 747 const struct mma_chip_info *chip = data->chip_info;
685 int val; 748 int val, ret;
749
750 ret = mma8452_set_runtime_pm_state(data->client, state);
751 if (ret)
752 return ret;
686 753
687 switch (dir) { 754 switch (dir) {
688 case IIO_EV_DIR_FALLING: 755 case IIO_EV_DIR_FALLING:
@@ -990,6 +1057,7 @@ enum {
990 mma8453, 1057 mma8453,
991 mma8652, 1058 mma8652,
992 mma8653, 1059 mma8653,
1060 fxls8471,
993}; 1061};
994 1062
995static const struct mma_chip_info mma_chip_info_table[] = { 1063static const struct mma_chip_info mma_chip_info_table[] = {
@@ -1003,7 +1071,7 @@ static const struct mma_chip_info mma_chip_info_table[] = {
1003 * bit. 1071 * bit.
1004 * The userspace interface uses m/s^2 and we declare micro units 1072 * The userspace interface uses m/s^2 and we declare micro units
1005 * So scale factor for 12 bit here is given by: 1073 * So scale factor for 12 bit here is given by:
1006 * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665 1074 * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
1007 */ 1075 */
1008 .mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} }, 1076 .mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} },
1009 .ev_cfg = MMA8452_TRANSIENT_CFG, 1077 .ev_cfg = MMA8452_TRANSIENT_CFG,
@@ -1081,6 +1149,22 @@ static const struct mma_chip_info mma_chip_info_table[] = {
1081 .ev_ths_mask = MMA8452_FF_MT_THS_MASK, 1149 .ev_ths_mask = MMA8452_FF_MT_THS_MASK,
1082 .ev_count = MMA8452_FF_MT_COUNT, 1150 .ev_count = MMA8452_FF_MT_COUNT,
1083 }, 1151 },
1152 [fxls8471] = {
1153 .chip_id = FXLS8471_DEVICE_ID,
1154 .channels = mma8451_channels,
1155 .num_channels = ARRAY_SIZE(mma8451_channels),
1156 .mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} },
1157 .ev_cfg = MMA8452_TRANSIENT_CFG,
1158 .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
1159 .ev_cfg_chan_shift = 1,
1160 .ev_src = MMA8452_TRANSIENT_SRC,
1161 .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
1162 .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
1163 .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
1164 .ev_ths = MMA8452_TRANSIENT_THS,
1165 .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
1166 .ev_count = MMA8452_TRANSIENT_COUNT,
1167 },
1084}; 1168};
1085 1169
1086static struct attribute *mma8452_attributes[] = { 1170static struct attribute *mma8452_attributes[] = {
@@ -1114,7 +1198,11 @@ static int mma8452_data_rdy_trigger_set_state(struct iio_trigger *trig,
1114{ 1198{
1115 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig); 1199 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
1116 struct mma8452_data *data = iio_priv(indio_dev); 1200 struct mma8452_data *data = iio_priv(indio_dev);
1117 int reg; 1201 int reg, ret;
1202
1203 ret = mma8452_set_runtime_pm_state(data->client, state);
1204 if (ret)
1205 return ret;
1118 1206
1119 reg = i2c_smbus_read_byte_data(data->client, MMA8452_CTRL_REG4); 1207 reg = i2c_smbus_read_byte_data(data->client, MMA8452_CTRL_REG4);
1120 if (reg < 0) 1208 if (reg < 0)
@@ -1206,6 +1294,7 @@ static const struct of_device_id mma8452_dt_ids[] = {
1206 { .compatible = "fsl,mma8453", .data = &mma_chip_info_table[mma8453] }, 1294 { .compatible = "fsl,mma8453", .data = &mma_chip_info_table[mma8453] },
1207 { .compatible = "fsl,mma8652", .data = &mma_chip_info_table[mma8652] }, 1295 { .compatible = "fsl,mma8652", .data = &mma_chip_info_table[mma8652] },
1208 { .compatible = "fsl,mma8653", .data = &mma_chip_info_table[mma8653] }, 1296 { .compatible = "fsl,mma8653", .data = &mma_chip_info_table[mma8653] },
1297 { .compatible = "fsl,fxls8471", .data = &mma_chip_info_table[fxls8471] },
1209 { } 1298 { }
1210}; 1299};
1211MODULE_DEVICE_TABLE(of, mma8452_dt_ids); 1300MODULE_DEVICE_TABLE(of, mma8452_dt_ids);
@@ -1243,6 +1332,7 @@ static int mma8452_probe(struct i2c_client *client,
1243 case MMA8453_DEVICE_ID: 1332 case MMA8453_DEVICE_ID:
1244 case MMA8652_DEVICE_ID: 1333 case MMA8652_DEVICE_ID:
1245 case MMA8653_DEVICE_ID: 1334 case MMA8653_DEVICE_ID:
1335 case FXLS8471_DEVICE_ID:
1246 if (ret == data->chip_info->chip_id) 1336 if (ret == data->chip_info->chip_id)
1247 break; 1337 break;
1248 default: 1338 default:
@@ -1340,6 +1430,15 @@ static int mma8452_probe(struct i2c_client *client,
1340 goto buffer_cleanup; 1430 goto buffer_cleanup;
1341 } 1431 }
1342 1432
1433 ret = pm_runtime_set_active(&client->dev);
1434 if (ret < 0)
1435 goto buffer_cleanup;
1436
1437 pm_runtime_enable(&client->dev);
1438 pm_runtime_set_autosuspend_delay(&client->dev,
1439 MMA8452_AUTO_SUSPEND_DELAY_MS);
1440 pm_runtime_use_autosuspend(&client->dev);
1441
1343 ret = iio_device_register(indio_dev); 1442 ret = iio_device_register(indio_dev);
1344 if (ret < 0) 1443 if (ret < 0)
1345 goto buffer_cleanup; 1444 goto buffer_cleanup;
@@ -1364,6 +1463,11 @@ static int mma8452_remove(struct i2c_client *client)
1364 struct iio_dev *indio_dev = i2c_get_clientdata(client); 1463 struct iio_dev *indio_dev = i2c_get_clientdata(client);
1365 1464
1366 iio_device_unregister(indio_dev); 1465 iio_device_unregister(indio_dev);
1466
1467 pm_runtime_disable(&client->dev);
1468 pm_runtime_set_suspended(&client->dev);
1469 pm_runtime_put_noidle(&client->dev);
1470
1367 iio_triggered_buffer_cleanup(indio_dev); 1471 iio_triggered_buffer_cleanup(indio_dev);
1368 mma8452_trigger_cleanup(indio_dev); 1472 mma8452_trigger_cleanup(indio_dev);
1369 mma8452_standby(iio_priv(indio_dev)); 1473 mma8452_standby(iio_priv(indio_dev));
@@ -1371,6 +1475,45 @@ static int mma8452_remove(struct i2c_client *client)
1371 return 0; 1475 return 0;
1372} 1476}
1373 1477
1478#ifdef CONFIG_PM
1479static int mma8452_runtime_suspend(struct device *dev)
1480{
1481 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
1482 struct mma8452_data *data = iio_priv(indio_dev);
1483 int ret;
1484
1485 mutex_lock(&data->lock);
1486 ret = mma8452_standby(data);
1487 mutex_unlock(&data->lock);
1488 if (ret < 0) {
1489 dev_err(&data->client->dev, "powering off device failed\n");
1490 return -EAGAIN;
1491 }
1492
1493 return 0;
1494}
1495
1496static int mma8452_runtime_resume(struct device *dev)
1497{
1498 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
1499 struct mma8452_data *data = iio_priv(indio_dev);
1500 int ret, sleep_val;
1501
1502 ret = mma8452_active(data);
1503 if (ret < 0)
1504 return ret;
1505
1506 ret = mma8452_get_odr_index(data);
1507 sleep_val = 1000 / mma8452_samp_freq[ret][0];
1508 if (sleep_val < 20)
1509 usleep_range(sleep_val * 1000, 20000);
1510 else
1511 msleep_interruptible(sleep_val);
1512
1513 return 0;
1514}
1515#endif
1516
1374#ifdef CONFIG_PM_SLEEP 1517#ifdef CONFIG_PM_SLEEP
1375static int mma8452_suspend(struct device *dev) 1518static int mma8452_suspend(struct device *dev)
1376{ 1519{
@@ -1383,18 +1526,21 @@ static int mma8452_resume(struct device *dev)
1383 return mma8452_active(iio_priv(i2c_get_clientdata( 1526 return mma8452_active(iio_priv(i2c_get_clientdata(
1384 to_i2c_client(dev)))); 1527 to_i2c_client(dev))));
1385} 1528}
1386
1387static SIMPLE_DEV_PM_OPS(mma8452_pm_ops, mma8452_suspend, mma8452_resume);
1388#define MMA8452_PM_OPS (&mma8452_pm_ops)
1389#else
1390#define MMA8452_PM_OPS NULL
1391#endif 1529#endif
1392 1530
1531static const struct dev_pm_ops mma8452_pm_ops = {
1532 SET_SYSTEM_SLEEP_PM_OPS(mma8452_suspend, mma8452_resume)
1533 SET_RUNTIME_PM_OPS(mma8452_runtime_suspend,
1534 mma8452_runtime_resume, NULL)
1535};
1536
1393static const struct i2c_device_id mma8452_id[] = { 1537static const struct i2c_device_id mma8452_id[] = {
1538 { "mma8451", mma8451 },
1394 { "mma8452", mma8452 }, 1539 { "mma8452", mma8452 },
1395 { "mma8453", mma8453 }, 1540 { "mma8453", mma8453 },
1396 { "mma8652", mma8652 }, 1541 { "mma8652", mma8652 },
1397 { "mma8653", mma8653 }, 1542 { "mma8653", mma8653 },
1543 { "fxls8471", fxls8471 },
1398 { } 1544 { }
1399}; 1545};
1400MODULE_DEVICE_TABLE(i2c, mma8452_id); 1546MODULE_DEVICE_TABLE(i2c, mma8452_id);
@@ -1403,7 +1549,7 @@ static struct i2c_driver mma8452_driver = {
1403 .driver = { 1549 .driver = {
1404 .name = "mma8452", 1550 .name = "mma8452",
1405 .of_match_table = of_match_ptr(mma8452_dt_ids), 1551 .of_match_table = of_match_ptr(mma8452_dt_ids),
1406 .pm = MMA8452_PM_OPS, 1552 .pm = &mma8452_pm_ops,
1407 }, 1553 },
1408 .probe = mma8452_probe, 1554 .probe = mma8452_probe,
1409 .remove = mma8452_remove, 1555 .remove = mma8452_remove,
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index fa7d36217c4b..bb05f3efddca 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -17,7 +17,6 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/acpi.h> 19#include <linux/acpi.h>
20#include <linux/gpio/consumer.h>
21#include <linux/iio/iio.h> 20#include <linux/iio/iio.h>
22#include <linux/iio/sysfs.h> 21#include <linux/iio/sysfs.h>
23#include <linux/iio/events.h> 22#include <linux/iio/events.h>
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index e72e218c2696..c23f47af7256 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -17,7 +17,6 @@
17#include <linux/i2c.h> 17#include <linux/i2c.h>
18#include <linux/iio/iio.h> 18#include <linux/iio/iio.h>
19#include <linux/acpi.h> 19#include <linux/acpi.h>
20#include <linux/gpio/consumer.h>
21#include <linux/regmap.h> 20#include <linux/regmap.h>
22#include <linux/iio/sysfs.h> 21#include <linux/iio/sysfs.h>
23#include <linux/iio/trigger.h> 22#include <linux/iio/trigger.h>
@@ -380,31 +379,6 @@ static const struct iio_trigger_ops mxc4005_trigger_ops = {
380 .owner = THIS_MODULE, 379 .owner = THIS_MODULE,
381}; 380};
382 381
383static int mxc4005_gpio_probe(struct i2c_client *client,
384 struct mxc4005_data *data)
385{
386 struct device *dev;
387 struct gpio_desc *gpio;
388 int ret;
389
390 if (!client)
391 return -EINVAL;
392
393 dev = &client->dev;
394
395 gpio = devm_gpiod_get_index(dev, "mxc4005_int", 0, GPIOD_IN);
396 if (IS_ERR(gpio)) {
397 dev_err(dev, "failed to get acpi gpio index\n");
398 return PTR_ERR(gpio);
399 }
400
401 ret = gpiod_to_irq(gpio);
402
403 dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
404
405 return ret;
406}
407
408static int mxc4005_chip_init(struct mxc4005_data *data) 382static int mxc4005_chip_init(struct mxc4005_data *data)
409{ 383{
410 int ret; 384 int ret;
@@ -470,9 +444,6 @@ static int mxc4005_probe(struct i2c_client *client,
470 return ret; 444 return ret;
471 } 445 }
472 446
473 if (client->irq < 0)
474 client->irq = mxc4005_gpio_probe(client, data);
475
476 if (client->irq > 0) { 447 if (client->irq > 0) {
477 data->dready_trig = devm_iio_trigger_alloc(&client->dev, 448 data->dready_trig = devm_iio_trigger_alloc(&client->dev,
478 "%s-dev%d", 449 "%s-dev%d",
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 5d4a1897b293..57f83a67948c 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/iio/common/st_sensors.h> 15#include <linux/iio/common/st_sensors.h>
16 16
17#define H3LIS331DL_DRIVER_NAME "h3lis331dl_accel"
17#define LIS3LV02DL_ACCEL_DEV_NAME "lis3lv02dl_accel" 18#define LIS3LV02DL_ACCEL_DEV_NAME "lis3lv02dl_accel"
18#define LSM303DLHC_ACCEL_DEV_NAME "lsm303dlhc_accel" 19#define LSM303DLHC_ACCEL_DEV_NAME "lsm303dlhc_accel"
19#define LIS3DH_ACCEL_DEV_NAME "lis3dh" 20#define LIS3DH_ACCEL_DEV_NAME "lis3dh"
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index a03a1417dd63..dc73f2d85e6d 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -39,6 +39,9 @@
39#define ST_ACCEL_FS_AVL_6G 6 39#define ST_ACCEL_FS_AVL_6G 6
40#define ST_ACCEL_FS_AVL_8G 8 40#define ST_ACCEL_FS_AVL_8G 8
41#define ST_ACCEL_FS_AVL_16G 16 41#define ST_ACCEL_FS_AVL_16G 16
42#define ST_ACCEL_FS_AVL_100G 100
43#define ST_ACCEL_FS_AVL_200G 200
44#define ST_ACCEL_FS_AVL_400G 400
42 45
43/* CUSTOM VALUES FOR SENSOR 1 */ 46/* CUSTOM VALUES FOR SENSOR 1 */
44#define ST_ACCEL_1_WAI_EXP 0x33 47#define ST_ACCEL_1_WAI_EXP 0x33
@@ -96,6 +99,8 @@
96#define ST_ACCEL_2_DRDY_IRQ_INT2_MASK 0x10 99#define ST_ACCEL_2_DRDY_IRQ_INT2_MASK 0x10
97#define ST_ACCEL_2_IHL_IRQ_ADDR 0x22 100#define ST_ACCEL_2_IHL_IRQ_ADDR 0x22
98#define ST_ACCEL_2_IHL_IRQ_MASK 0x80 101#define ST_ACCEL_2_IHL_IRQ_MASK 0x80
102#define ST_ACCEL_2_OD_IRQ_ADDR 0x22
103#define ST_ACCEL_2_OD_IRQ_MASK 0x40
99#define ST_ACCEL_2_MULTIREAD_BIT true 104#define ST_ACCEL_2_MULTIREAD_BIT true
100 105
101/* CUSTOM VALUES FOR SENSOR 3 */ 106/* CUSTOM VALUES FOR SENSOR 3 */
@@ -177,10 +182,39 @@
177#define ST_ACCEL_5_DRDY_IRQ_INT2_MASK 0x20 182#define ST_ACCEL_5_DRDY_IRQ_INT2_MASK 0x20
178#define ST_ACCEL_5_IHL_IRQ_ADDR 0x22 183#define ST_ACCEL_5_IHL_IRQ_ADDR 0x22
179#define ST_ACCEL_5_IHL_IRQ_MASK 0x80 184#define ST_ACCEL_5_IHL_IRQ_MASK 0x80
185#define ST_ACCEL_5_OD_IRQ_ADDR 0x22
186#define ST_ACCEL_5_OD_IRQ_MASK 0x40
180#define ST_ACCEL_5_IG1_EN_ADDR 0x21 187#define ST_ACCEL_5_IG1_EN_ADDR 0x21
181#define ST_ACCEL_5_IG1_EN_MASK 0x08 188#define ST_ACCEL_5_IG1_EN_MASK 0x08
182#define ST_ACCEL_5_MULTIREAD_BIT false 189#define ST_ACCEL_5_MULTIREAD_BIT false
183 190
191/* CUSTOM VALUES FOR SENSOR 6 */
192#define ST_ACCEL_6_WAI_EXP 0x32
193#define ST_ACCEL_6_ODR_ADDR 0x20
194#define ST_ACCEL_6_ODR_MASK 0x18
195#define ST_ACCEL_6_ODR_AVL_50HZ_VAL 0x00
196#define ST_ACCEL_6_ODR_AVL_100HZ_VAL 0x01
197#define ST_ACCEL_6_ODR_AVL_400HZ_VAL 0x02
198#define ST_ACCEL_6_ODR_AVL_1000HZ_VAL 0x03
199#define ST_ACCEL_6_PW_ADDR 0x20
200#define ST_ACCEL_6_PW_MASK 0x20
201#define ST_ACCEL_6_FS_ADDR 0x23
202#define ST_ACCEL_6_FS_MASK 0x30
203#define ST_ACCEL_6_FS_AVL_100_VAL 0x00
204#define ST_ACCEL_6_FS_AVL_200_VAL 0x01
205#define ST_ACCEL_6_FS_AVL_400_VAL 0x03
206#define ST_ACCEL_6_FS_AVL_100_GAIN IIO_G_TO_M_S_2(49000)
207#define ST_ACCEL_6_FS_AVL_200_GAIN IIO_G_TO_M_S_2(98000)
208#define ST_ACCEL_6_FS_AVL_400_GAIN IIO_G_TO_M_S_2(195000)
209#define ST_ACCEL_6_BDU_ADDR 0x23
210#define ST_ACCEL_6_BDU_MASK 0x80
211#define ST_ACCEL_6_DRDY_IRQ_ADDR 0x22
212#define ST_ACCEL_6_DRDY_IRQ_INT1_MASK 0x02
213#define ST_ACCEL_6_DRDY_IRQ_INT2_MASK 0x10
214#define ST_ACCEL_6_IHL_IRQ_ADDR 0x22
215#define ST_ACCEL_6_IHL_IRQ_MASK 0x80
216#define ST_ACCEL_6_MULTIREAD_BIT true
217
184static const struct iio_chan_spec st_accel_8bit_channels[] = { 218static const struct iio_chan_spec st_accel_8bit_channels[] = {
185 ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, 219 ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
186 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 220 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -302,6 +336,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
302 .mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK, 336 .mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK,
303 .addr_ihl = ST_ACCEL_1_IHL_IRQ_ADDR, 337 .addr_ihl = ST_ACCEL_1_IHL_IRQ_ADDR,
304 .mask_ihl = ST_ACCEL_1_IHL_IRQ_MASK, 338 .mask_ihl = ST_ACCEL_1_IHL_IRQ_MASK,
339 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
305 }, 340 },
306 .multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT, 341 .multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
307 .bootime = 2, 342 .bootime = 2,
@@ -367,6 +402,9 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
367 .mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK, 402 .mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK,
368 .addr_ihl = ST_ACCEL_2_IHL_IRQ_ADDR, 403 .addr_ihl = ST_ACCEL_2_IHL_IRQ_ADDR,
369 .mask_ihl = ST_ACCEL_2_IHL_IRQ_MASK, 404 .mask_ihl = ST_ACCEL_2_IHL_IRQ_MASK,
405 .addr_od = ST_ACCEL_2_OD_IRQ_ADDR,
406 .mask_od = ST_ACCEL_2_OD_IRQ_MASK,
407 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
370 }, 408 },
371 .multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT, 409 .multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
372 .bootime = 2, 410 .bootime = 2,
@@ -444,6 +482,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
444 .mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK, 482 .mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK,
445 .addr_ihl = ST_ACCEL_3_IHL_IRQ_ADDR, 483 .addr_ihl = ST_ACCEL_3_IHL_IRQ_ADDR,
446 .mask_ihl = ST_ACCEL_3_IHL_IRQ_MASK, 484 .mask_ihl = ST_ACCEL_3_IHL_IRQ_MASK,
485 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
447 .ig1 = { 486 .ig1 = {
448 .en_addr = ST_ACCEL_3_IG1_EN_ADDR, 487 .en_addr = ST_ACCEL_3_IG1_EN_ADDR,
449 .en_mask = ST_ACCEL_3_IG1_EN_MASK, 488 .en_mask = ST_ACCEL_3_IG1_EN_MASK,
@@ -502,6 +541,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
502 .drdy_irq = { 541 .drdy_irq = {
503 .addr = ST_ACCEL_4_DRDY_IRQ_ADDR, 542 .addr = ST_ACCEL_4_DRDY_IRQ_ADDR,
504 .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK, 543 .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK,
544 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
505 }, 545 },
506 .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT, 546 .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT,
507 .bootime = 2, /* guess */ 547 .bootime = 2, /* guess */
@@ -553,10 +593,75 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
553 .mask_int2 = ST_ACCEL_5_DRDY_IRQ_INT2_MASK, 593 .mask_int2 = ST_ACCEL_5_DRDY_IRQ_INT2_MASK,
554 .addr_ihl = ST_ACCEL_5_IHL_IRQ_ADDR, 594 .addr_ihl = ST_ACCEL_5_IHL_IRQ_ADDR,
555 .mask_ihl = ST_ACCEL_5_IHL_IRQ_MASK, 595 .mask_ihl = ST_ACCEL_5_IHL_IRQ_MASK,
596 .addr_od = ST_ACCEL_5_OD_IRQ_ADDR,
597 .mask_od = ST_ACCEL_5_OD_IRQ_MASK,
598 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
556 }, 599 },
557 .multi_read_bit = ST_ACCEL_5_MULTIREAD_BIT, 600 .multi_read_bit = ST_ACCEL_5_MULTIREAD_BIT,
558 .bootime = 2, /* guess */ 601 .bootime = 2, /* guess */
559 }, 602 },
603 {
604 .wai = ST_ACCEL_6_WAI_EXP,
605 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
606 .sensors_supported = {
607 [0] = H3LIS331DL_DRIVER_NAME,
608 },
609 .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
610 .odr = {
611 .addr = ST_ACCEL_6_ODR_ADDR,
612 .mask = ST_ACCEL_6_ODR_MASK,
613 .odr_avl = {
614 { 50, ST_ACCEL_6_ODR_AVL_50HZ_VAL },
615 { 100, ST_ACCEL_6_ODR_AVL_100HZ_VAL, },
616 { 400, ST_ACCEL_6_ODR_AVL_400HZ_VAL, },
617 { 1000, ST_ACCEL_6_ODR_AVL_1000HZ_VAL, },
618 },
619 },
620 .pw = {
621 .addr = ST_ACCEL_6_PW_ADDR,
622 .mask = ST_ACCEL_6_PW_MASK,
623 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
624 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
625 },
626 .enable_axis = {
627 .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
628 .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
629 },
630 .fs = {
631 .addr = ST_ACCEL_6_FS_ADDR,
632 .mask = ST_ACCEL_6_FS_MASK,
633 .fs_avl = {
634 [0] = {
635 .num = ST_ACCEL_FS_AVL_100G,
636 .value = ST_ACCEL_6_FS_AVL_100_VAL,
637 .gain = ST_ACCEL_6_FS_AVL_100_GAIN,
638 },
639 [1] = {
640 .num = ST_ACCEL_FS_AVL_200G,
641 .value = ST_ACCEL_6_FS_AVL_200_VAL,
642 .gain = ST_ACCEL_6_FS_AVL_200_GAIN,
643 },
644 [2] = {
645 .num = ST_ACCEL_FS_AVL_400G,
646 .value = ST_ACCEL_6_FS_AVL_400_VAL,
647 .gain = ST_ACCEL_6_FS_AVL_400_GAIN,
648 },
649 },
650 },
651 .bdu = {
652 .addr = ST_ACCEL_6_BDU_ADDR,
653 .mask = ST_ACCEL_6_BDU_MASK,
654 },
655 .drdy_irq = {
656 .addr = ST_ACCEL_6_DRDY_IRQ_ADDR,
657 .mask_int1 = ST_ACCEL_6_DRDY_IRQ_INT1_MASK,
658 .mask_int2 = ST_ACCEL_6_DRDY_IRQ_INT2_MASK,
659 .addr_ihl = ST_ACCEL_6_IHL_IRQ_ADDR,
660 .mask_ihl = ST_ACCEL_6_IHL_IRQ_MASK,
661 },
662 .multi_read_bit = ST_ACCEL_6_MULTIREAD_BIT,
663 .bootime = 2,
664 },
560}; 665};
561 666
562static int st_accel_read_raw(struct iio_dev *indio_dev, 667static int st_accel_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 294a32f89367..7333ee9fb11b 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -76,6 +76,10 @@ static const struct of_device_id st_accel_of_match[] = {
76 .compatible = "st,lis2dh12-accel", 76 .compatible = "st,lis2dh12-accel",
77 .data = LIS2DH12_ACCEL_DEV_NAME, 77 .data = LIS2DH12_ACCEL_DEV_NAME,
78 }, 78 },
79 {
80 .compatible = "st,h3lis331dl-accel",
81 .data = H3LIS331DL_DRIVER_NAME,
82 },
79 {}, 83 {},
80}; 84};
81MODULE_DEVICE_TABLE(of, st_accel_of_match); 85MODULE_DEVICE_TABLE(of, st_accel_of_match);
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index 85fe7f7247c1..e31023dc5f1b 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -11,7 +11,6 @@
11 */ 11 */
12 12
13#include <linux/acpi.h> 13#include <linux/acpi.h>
14#include <linux/gpio/consumer.h>
15#include <linux/i2c.h> 14#include <linux/i2c.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
17#include <linux/kernel.h> 16#include <linux/kernel.h>
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 5709d9eb8f34..300d955bad00 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -11,7 +11,6 @@
11 */ 11 */
12 12
13#include <linux/acpi.h> 13#include <linux/acpi.h>
14#include <linux/gpio/consumer.h>
15#include <linux/i2c.h> 14#include <linux/i2c.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
17#include <linux/kernel.h> 16#include <linux/kernel.h>
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 82c718c515a0..25378c5882e2 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -242,6 +242,16 @@ config LP8788_ADC
242 To compile this driver as a module, choose M here: the module will be 242 To compile this driver as a module, choose M here: the module will be
243 called lp8788_adc. 243 called lp8788_adc.
244 244
245config LPC18XX_ADC
246 tristate "NXP LPC18xx ADC driver"
247 depends on ARCH_LPC18XX || COMPILE_TEST
248 depends on OF && HAS_IOMEM
249 help
250 Say yes here to build support for NXP LPC18XX ADC.
251
252 To compile this driver as a module, choose M here: the module will be
253 called lpc18xx_adc.
254
245config MAX1027 255config MAX1027
246 tristate "Maxim max1027 ADC driver" 256 tristate "Maxim max1027 ADC driver"
247 depends on SPI 257 depends on SPI
@@ -375,11 +385,11 @@ config ROCKCHIP_SARADC
375 module will be called rockchip_saradc. 385 module will be called rockchip_saradc.
376 386
377config TI_ADC081C 387config TI_ADC081C
378 tristate "Texas Instruments ADC081C021/027" 388 tristate "Texas Instruments ADC081C/ADC101C/ADC121C family"
379 depends on I2C 389 depends on I2C
380 help 390 help
381 If you say yes here you get support for Texas Instruments ADC081C021 391 If you say yes here you get support for Texas Instruments ADC081C,
382 and ADC081C027 ADC chips. 392 ADC101C and ADC121C ADC chips.
383 393
384 This driver can also be built as a module. If so, the module will be 394 This driver can also be built as a module. If so, the module will be
385 called ti-adc081c. 395 called ti-adc081c.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 0cb79210a4b0..38638d46f972 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_HI8435) += hi8435.o
25obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o 25obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o
26obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o 26obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
27obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o 27obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
28obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
28obj-$(CONFIG_MAX1027) += max1027.o 29obj-$(CONFIG_MAX1027) += max1027.o
29obj-$(CONFIG_MAX1363) += max1363.o 30obj-$(CONFIG_MAX1363) += max1363.o
30obj-$(CONFIG_MCP320X) += mcp320x.o 31obj-$(CONFIG_MCP320X) += mcp320x.o
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 01d71588d752..a3f5254f4e51 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -477,7 +477,7 @@ static int ad799x_read_event_value(struct iio_dev *indio_dev,
477 if (ret < 0) 477 if (ret < 0)
478 return ret; 478 return ret;
479 *val = (ret >> chan->scan_type.shift) & 479 *val = (ret >> chan->scan_type.shift) &
480 GENMASK(chan->scan_type.realbits - 1 , 0); 480 GENMASK(chan->scan_type.realbits - 1, 0);
481 481
482 return IIO_VAL_INT; 482 return IIO_VAL_INT;
483} 483}
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 2e154cb51685..e10dca3ed74b 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -66,8 +66,10 @@
66#define AT91_SAMA5D2_MR_PRESCAL(v) ((v) << AT91_SAMA5D2_MR_PRESCAL_OFFSET) 66#define AT91_SAMA5D2_MR_PRESCAL(v) ((v) << AT91_SAMA5D2_MR_PRESCAL_OFFSET)
67#define AT91_SAMA5D2_MR_PRESCAL_OFFSET 8 67#define AT91_SAMA5D2_MR_PRESCAL_OFFSET 8
68#define AT91_SAMA5D2_MR_PRESCAL_MAX 0xff 68#define AT91_SAMA5D2_MR_PRESCAL_MAX 0xff
69#define AT91_SAMA5D2_MR_PRESCAL_MASK GENMASK(15, 8)
69/* Startup Time */ 70/* Startup Time */
70#define AT91_SAMA5D2_MR_STARTUP(v) ((v) << 16) 71#define AT91_SAMA5D2_MR_STARTUP(v) ((v) << 16)
72#define AT91_SAMA5D2_MR_STARTUP_MASK GENMASK(19, 16)
71/* Analog Change */ 73/* Analog Change */
72#define AT91_SAMA5D2_MR_ANACH BIT(23) 74#define AT91_SAMA5D2_MR_ANACH BIT(23)
73/* Tracking Time */ 75/* Tracking Time */
@@ -92,13 +94,13 @@
92/* Last Converted Data Register */ 94/* Last Converted Data Register */
93#define AT91_SAMA5D2_LCDR 0x20 95#define AT91_SAMA5D2_LCDR 0x20
94/* Interrupt Enable Register */ 96/* Interrupt Enable Register */
95#define AT91_SAMA5D2_IER 0x24 97#define AT91_SAMA5D2_IER 0x24
96/* Interrupt Disable Register */ 98/* Interrupt Disable Register */
97#define AT91_SAMA5D2_IDR 0x28 99#define AT91_SAMA5D2_IDR 0x28
98/* Interrupt Mask Register */ 100/* Interrupt Mask Register */
99#define AT91_SAMA5D2_IMR 0x2c 101#define AT91_SAMA5D2_IMR 0x2c
100/* Interrupt Status Register */ 102/* Interrupt Status Register */
101#define AT91_SAMA5D2_ISR 0x30 103#define AT91_SAMA5D2_ISR 0x30
102/* Last Channel Trigger Mode Register */ 104/* Last Channel Trigger Mode Register */
103#define AT91_SAMA5D2_LCTMR 0x34 105#define AT91_SAMA5D2_LCTMR 0x34
104/* Last Channel Compare Window Register */ 106/* Last Channel Compare Window Register */
@@ -106,17 +108,20 @@
106/* Overrun Status Register */ 108/* Overrun Status Register */
107#define AT91_SAMA5D2_OVER 0x3c 109#define AT91_SAMA5D2_OVER 0x3c
108/* Extended Mode Register */ 110/* Extended Mode Register */
109#define AT91_SAMA5D2_EMR 0x40 111#define AT91_SAMA5D2_EMR 0x40
110/* Compare Window Register */ 112/* Compare Window Register */
111#define AT91_SAMA5D2_CWR 0x44 113#define AT91_SAMA5D2_CWR 0x44
112/* Channel Gain Register */ 114/* Channel Gain Register */
113#define AT91_SAMA5D2_CGR 0x48 115#define AT91_SAMA5D2_CGR 0x48
116
114/* Channel Offset Register */ 117/* Channel Offset Register */
115#define AT91_SAMA5D2_COR 0x4c 118#define AT91_SAMA5D2_COR 0x4c
119#define AT91_SAMA5D2_COR_DIFF_OFFSET 16
120
116/* Channel Data Register 0 */ 121/* Channel Data Register 0 */
117#define AT91_SAMA5D2_CDR0 0x50 122#define AT91_SAMA5D2_CDR0 0x50
118/* Analog Control Register */ 123/* Analog Control Register */
119#define AT91_SAMA5D2_ACR 0x94 124#define AT91_SAMA5D2_ACR 0x94
120/* Touchscreen Mode Register */ 125/* Touchscreen Mode Register */
121#define AT91_SAMA5D2_TSMR 0xb0 126#define AT91_SAMA5D2_TSMR 0xb0
122/* Touchscreen X Position Register */ 127/* Touchscreen X Position Register */
@@ -130,7 +135,7 @@
130/* Correction Select Register */ 135/* Correction Select Register */
131#define AT91_SAMA5D2_COSR 0xd0 136#define AT91_SAMA5D2_COSR 0xd0
132/* Correction Value Register */ 137/* Correction Value Register */
133#define AT91_SAMA5D2_CVR 0xd4 138#define AT91_SAMA5D2_CVR 0xd4
134/* Channel Error Correction Register */ 139/* Channel Error Correction Register */
135#define AT91_SAMA5D2_CECR 0xd8 140#define AT91_SAMA5D2_CECR 0xd8
136/* Write Protection Mode Register */ 141/* Write Protection Mode Register */
@@ -140,7 +145,7 @@
140/* Version Register */ 145/* Version Register */
141#define AT91_SAMA5D2_VERSION 0xfc 146#define AT91_SAMA5D2_VERSION 0xfc
142 147
143#define AT91_AT91_SAMA5D2_CHAN(num, addr) \ 148#define AT91_SAMA5D2_CHAN_SINGLE(num, addr) \
144 { \ 149 { \
145 .type = IIO_VOLTAGE, \ 150 .type = IIO_VOLTAGE, \
146 .channel = num, \ 151 .channel = num, \
@@ -156,6 +161,24 @@
156 .indexed = 1, \ 161 .indexed = 1, \
157 } 162 }
158 163
164#define AT91_SAMA5D2_CHAN_DIFF(num, num2, addr) \
165 { \
166 .type = IIO_VOLTAGE, \
167 .differential = 1, \
168 .channel = num, \
169 .channel2 = num2, \
170 .address = addr, \
171 .scan_type = { \
172 .sign = 's', \
173 .realbits = 12, \
174 }, \
175 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
176 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
177 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
178 .datasheet_name = "CH"#num"-CH"#num2, \
179 .indexed = 1, \
180 }
181
159#define at91_adc_readl(st, reg) readl_relaxed(st->base + reg) 182#define at91_adc_readl(st, reg) readl_relaxed(st->base + reg)
160#define at91_adc_writel(st, reg, val) writel_relaxed(val, st->base + reg) 183#define at91_adc_writel(st, reg, val) writel_relaxed(val, st->base + reg)
161 184
@@ -185,18 +208,24 @@ struct at91_adc_state {
185}; 208};
186 209
187static const struct iio_chan_spec at91_adc_channels[] = { 210static const struct iio_chan_spec at91_adc_channels[] = {
188 AT91_AT91_SAMA5D2_CHAN(0, 0x50), 211 AT91_SAMA5D2_CHAN_SINGLE(0, 0x50),
189 AT91_AT91_SAMA5D2_CHAN(1, 0x54), 212 AT91_SAMA5D2_CHAN_SINGLE(1, 0x54),
190 AT91_AT91_SAMA5D2_CHAN(2, 0x58), 213 AT91_SAMA5D2_CHAN_SINGLE(2, 0x58),
191 AT91_AT91_SAMA5D2_CHAN(3, 0x5c), 214 AT91_SAMA5D2_CHAN_SINGLE(3, 0x5c),
192 AT91_AT91_SAMA5D2_CHAN(4, 0x60), 215 AT91_SAMA5D2_CHAN_SINGLE(4, 0x60),
193 AT91_AT91_SAMA5D2_CHAN(5, 0x64), 216 AT91_SAMA5D2_CHAN_SINGLE(5, 0x64),
194 AT91_AT91_SAMA5D2_CHAN(6, 0x68), 217 AT91_SAMA5D2_CHAN_SINGLE(6, 0x68),
195 AT91_AT91_SAMA5D2_CHAN(7, 0x6c), 218 AT91_SAMA5D2_CHAN_SINGLE(7, 0x6c),
196 AT91_AT91_SAMA5D2_CHAN(8, 0x70), 219 AT91_SAMA5D2_CHAN_SINGLE(8, 0x70),
197 AT91_AT91_SAMA5D2_CHAN(9, 0x74), 220 AT91_SAMA5D2_CHAN_SINGLE(9, 0x74),
198 AT91_AT91_SAMA5D2_CHAN(10, 0x78), 221 AT91_SAMA5D2_CHAN_SINGLE(10, 0x78),
199 AT91_AT91_SAMA5D2_CHAN(11, 0x7c), 222 AT91_SAMA5D2_CHAN_SINGLE(11, 0x7c),
223 AT91_SAMA5D2_CHAN_DIFF(0, 1, 0x50),
224 AT91_SAMA5D2_CHAN_DIFF(2, 3, 0x58),
225 AT91_SAMA5D2_CHAN_DIFF(4, 5, 0x60),
226 AT91_SAMA5D2_CHAN_DIFF(6, 7, 0x68),
227 AT91_SAMA5D2_CHAN_DIFF(8, 9, 0x70),
228 AT91_SAMA5D2_CHAN_DIFF(10, 11, 0x78),
200}; 229};
201 230
202static unsigned at91_adc_startup_time(unsigned startup_time_min, 231static unsigned at91_adc_startup_time(unsigned startup_time_min,
@@ -226,7 +255,7 @@ static unsigned at91_adc_startup_time(unsigned startup_time_min,
226static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq) 255static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq)
227{ 256{
228 struct iio_dev *indio_dev = iio_priv_to_dev(st); 257 struct iio_dev *indio_dev = iio_priv_to_dev(st);
229 unsigned f_per, prescal, startup; 258 unsigned f_per, prescal, startup, mr;
230 259
231 f_per = clk_get_rate(st->per_clk); 260 f_per = clk_get_rate(st->per_clk);
232 prescal = (f_per / (2 * freq)) - 1; 261 prescal = (f_per / (2 * freq)) - 1;
@@ -234,10 +263,11 @@ static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq)
234 startup = at91_adc_startup_time(st->soc_info.startup_time, 263 startup = at91_adc_startup_time(st->soc_info.startup_time,
235 freq / 1000); 264 freq / 1000);
236 265
237 at91_adc_writel(st, AT91_SAMA5D2_MR, 266 mr = at91_adc_readl(st, AT91_SAMA5D2_MR);
238 AT91_SAMA5D2_MR_TRANSFER(2) 267 mr &= ~(AT91_SAMA5D2_MR_STARTUP_MASK | AT91_SAMA5D2_MR_PRESCAL_MASK);
239 | AT91_SAMA5D2_MR_STARTUP(startup) 268 mr |= AT91_SAMA5D2_MR_STARTUP(startup);
240 | AT91_SAMA5D2_MR_PRESCAL(prescal)); 269 mr |= AT91_SAMA5D2_MR_PRESCAL(prescal);
270 at91_adc_writel(st, AT91_SAMA5D2_MR, mr);
241 271
242 dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n", 272 dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n",
243 freq, startup, prescal); 273 freq, startup, prescal);
@@ -278,6 +308,7 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
278 int *val, int *val2, long mask) 308 int *val, int *val2, long mask)
279{ 309{
280 struct at91_adc_state *st = iio_priv(indio_dev); 310 struct at91_adc_state *st = iio_priv(indio_dev);
311 u32 cor = 0;
281 int ret; 312 int ret;
282 313
283 switch (mask) { 314 switch (mask) {
@@ -286,6 +317,11 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
286 317
287 st->chan = chan; 318 st->chan = chan;
288 319
320 if (chan->differential)
321 cor = (BIT(chan->channel) | BIT(chan->channel2)) <<
322 AT91_SAMA5D2_COR_DIFF_OFFSET;
323
324 at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
289 at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel)); 325 at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
290 at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel)); 326 at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel));
291 at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START); 327 at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
@@ -298,6 +334,8 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
298 334
299 if (ret > 0) { 335 if (ret > 0) {
300 *val = st->conversion_value; 336 *val = st->conversion_value;
337 if (chan->scan_type.sign == 's')
338 *val = sign_extend32(*val, 11);
301 ret = IIO_VAL_INT; 339 ret = IIO_VAL_INT;
302 st->conversion_done = false; 340 st->conversion_done = false;
303 } 341 }
@@ -310,6 +348,8 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
310 348
311 case IIO_CHAN_INFO_SCALE: 349 case IIO_CHAN_INFO_SCALE:
312 *val = st->vref_uv / 1000; 350 *val = st->vref_uv / 1000;
351 if (chan->differential)
352 *val *= 2;
313 *val2 = chan->scan_type.realbits; 353 *val2 = chan->scan_type.realbits;
314 return IIO_VAL_FRACTIONAL_LOG2; 354 return IIO_VAL_FRACTIONAL_LOG2;
315 355
@@ -444,6 +484,12 @@ static int at91_adc_probe(struct platform_device *pdev)
444 484
445 at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST); 485 at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST);
446 at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff); 486 at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff);
487 /*
488 * Transfer field must be set to 2 according to the datasheet and
489 * allows different analog settings for each channel.
490 */
491 at91_adc_writel(st, AT91_SAMA5D2_MR,
492 AT91_SAMA5D2_MR_TRANSFER(2) | AT91_SAMA5D2_MR_ANACH);
447 493
448 at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate); 494 at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
449 495
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index f284cd6a93d6..52430ba171f3 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -797,8 +797,8 @@ static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
797 * Startup Time = <lookup_table_value> / ADC Clock 797 * Startup Time = <lookup_table_value> / ADC Clock
798 */ 798 */
799 const int startup_lookup[] = { 799 const int startup_lookup[] = {
800 0 , 8 , 16 , 24 , 800 0, 8, 16, 24,
801 64 , 80 , 96 , 112, 801 64, 80, 96, 112,
802 512, 576, 640, 704, 802 512, 576, 640, 704,
803 768, 832, 896, 960 803 768, 832, 896, 960
804 }; 804 };
@@ -924,14 +924,14 @@ static int at91_adc_probe_dt(struct at91_adc_state *st,
924 ret = -EINVAL; 924 ret = -EINVAL;
925 goto error_ret; 925 goto error_ret;
926 } 926 }
927 trig->name = name; 927 trig->name = name;
928 928
929 if (of_property_read_u32(trig_node, "trigger-value", &prop)) { 929 if (of_property_read_u32(trig_node, "trigger-value", &prop)) {
930 dev_err(&idev->dev, "Missing trigger-value property in the DT.\n"); 930 dev_err(&idev->dev, "Missing trigger-value property in the DT.\n");
931 ret = -EINVAL; 931 ret = -EINVAL;
932 goto error_ret; 932 goto error_ret;
933 } 933 }
934 trig->value = prop; 934 trig->value = prop;
935 trig->is_external = of_property_read_bool(trig_node, "trigger-external"); 935 trig->is_external = of_property_read_bool(trig_node, "trigger-external");
936 i++; 936 i++;
937 } 937 }
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 65909d5858b1..502f2fbe8aef 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -185,9 +185,9 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev,
185 case IIO_CHAN_INFO_SCALE: 185 case IIO_CHAN_INFO_SCALE:
186 switch (chan->address) { 186 switch (chan->address) {
187 case INA2XX_SHUNT_VOLTAGE: 187 case INA2XX_SHUNT_VOLTAGE:
188 /* processed (mV) = raw*1000/shunt_div */ 188 /* processed (mV) = raw/shunt_div */
189 *val2 = chip->config->shunt_div; 189 *val2 = chip->config->shunt_div;
190 *val = 1000; 190 *val = 1;
191 return IIO_VAL_FRACTIONAL; 191 return IIO_VAL_FRACTIONAL;
192 192
193 case INA2XX_BUS_VOLTAGE: 193 case INA2XX_BUS_VOLTAGE:
@@ -350,6 +350,23 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
350 return len; 350 return len;
351} 351}
352 352
353/*
354 * Set current LSB to 1mA, shunt is in uOhms
355 * (equation 13 in datasheet). We hardcode a Current_LSB
356 * of 1.0 x10-6. The only remaining parameter is RShunt.
357 * There is no need to expose the CALIBRATION register
358 * to the user for now. But we need to reset this register
359 * if the user updates RShunt after driver init, e.g upon
360 * reading an EEPROM/Probe-type value.
361 */
362static int ina2xx_set_calibration(struct ina2xx_chip_info *chip)
363{
364 u16 regval = DIV_ROUND_CLOSEST(chip->config->calibration_factor,
365 chip->shunt_resistor);
366
367 return regmap_write(chip->regmap, INA2XX_CALIBRATION, regval);
368}
369
353static int set_shunt_resistor(struct ina2xx_chip_info *chip, unsigned int val) 370static int set_shunt_resistor(struct ina2xx_chip_info *chip, unsigned int val)
354{ 371{
355 if (val <= 0 || val > chip->config->calibration_factor) 372 if (val <= 0 || val > chip->config->calibration_factor)
@@ -385,6 +402,11 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
385 if (ret) 402 if (ret)
386 return ret; 403 return ret;
387 404
405 /* Update the Calibration register */
406 ret = ina2xx_set_calibration(chip);
407 if (ret)
408 return ret;
409
388 return len; 410 return len;
389} 411}
390 412
@@ -602,24 +624,11 @@ static const struct iio_info ina2xx_info = {
602/* Initialize the configuration and calibration registers. */ 624/* Initialize the configuration and calibration registers. */
603static int ina2xx_init(struct ina2xx_chip_info *chip, unsigned int config) 625static int ina2xx_init(struct ina2xx_chip_info *chip, unsigned int config)
604{ 626{
605 u16 regval; 627 int ret = regmap_write(chip->regmap, INA2XX_CONFIG, config);
606 int ret;
607
608 ret = regmap_write(chip->regmap, INA2XX_CONFIG, config);
609 if (ret) 628 if (ret)
610 return ret; 629 return ret;
611 630
612 /* 631 return ina2xx_set_calibration(chip);
613 * Set current LSB to 1mA, shunt is in uOhms
614 * (equation 13 in datasheet). We hardcode a Current_LSB
615 * of 1.0 x10-6. The only remaining parameter is RShunt.
616 * There is no need to expose the CALIBRATION register
617 * to the user for now.
618 */
619 regval = DIV_ROUND_CLOSEST(chip->config->calibration_factor,
620 chip->shunt_resistor);
621
622 return regmap_write(chip->regmap, INA2XX_CALIBRATION, regval);
623} 632}
624 633
625static int ina2xx_probe(struct i2c_client *client, 634static int ina2xx_probe(struct i2c_client *client,
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
new file mode 100644
index 000000000000..3ef18f4b27f0
--- /dev/null
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -0,0 +1,231 @@
1/*
2 * IIO ADC driver for NXP LPC18xx ADC
3 *
4 * Copyright (C) 2016 Joachim Eastwood <manabian@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * UNSUPPORTED hardware features:
11 * - Hardware triggers
12 * - Burst mode
13 * - Interrupts
14 * - DMA
15 */
16
17#include <linux/clk.h>
18#include <linux/err.h>
19#include <linux/iio/iio.h>
20#include <linux/iio/driver.h>
21#include <linux/io.h>
22#include <linux/iopoll.h>
23#include <linux/module.h>
24#include <linux/mutex.h>
25#include <linux/of.h>
26#include <linux/of_device.h>
27#include <linux/platform_device.h>
28#include <linux/regulator/consumer.h>
29
30/* LPC18XX ADC registers and bits */
31#define LPC18XX_ADC_CR 0x000
32#define LPC18XX_ADC_CR_CLKDIV_SHIFT 8
33#define LPC18XX_ADC_CR_PDN BIT(21)
34#define LPC18XX_ADC_CR_START_NOW (0x1 << 24)
35#define LPC18XX_ADC_GDR 0x004
36
37/* Data register bits */
38#define LPC18XX_ADC_SAMPLE_SHIFT 6
39#define LPC18XX_ADC_SAMPLE_MASK 0x3ff
40#define LPC18XX_ADC_CONV_DONE BIT(31)
41
42/* Clock should be 4.5 MHz or less */
43#define LPC18XX_ADC_CLK_TARGET 4500000
44
45struct lpc18xx_adc {
46 struct regulator *vref;
47 void __iomem *base;
48 struct device *dev;
49 struct mutex lock;
50 struct clk *clk;
51 u32 cr_reg;
52};
53
54#define LPC18XX_ADC_CHAN(_idx) { \
55 .type = IIO_VOLTAGE, \
56 .indexed = 1, \
57 .channel = _idx, \
58 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
59 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
60}
61
62static const struct iio_chan_spec lpc18xx_adc_iio_channels[] = {
63 LPC18XX_ADC_CHAN(0),
64 LPC18XX_ADC_CHAN(1),
65 LPC18XX_ADC_CHAN(2),
66 LPC18XX_ADC_CHAN(3),
67 LPC18XX_ADC_CHAN(4),
68 LPC18XX_ADC_CHAN(5),
69 LPC18XX_ADC_CHAN(6),
70 LPC18XX_ADC_CHAN(7),
71};
72
73static int lpc18xx_adc_read_chan(struct lpc18xx_adc *adc, unsigned int ch)
74{
75 int ret;
76 u32 reg;
77
78 reg = adc->cr_reg | BIT(ch) | LPC18XX_ADC_CR_START_NOW;
79 writel(reg, adc->base + LPC18XX_ADC_CR);
80
81 ret = readl_poll_timeout(adc->base + LPC18XX_ADC_GDR, reg,
82 reg & LPC18XX_ADC_CONV_DONE, 3, 9);
83 if (ret) {
84 dev_warn(adc->dev, "adc read timed out\n");
85 return ret;
86 }
87
88 return (reg >> LPC18XX_ADC_SAMPLE_SHIFT) & LPC18XX_ADC_SAMPLE_MASK;
89}
90
91static int lpc18xx_adc_read_raw(struct iio_dev *indio_dev,
92 struct iio_chan_spec const *chan,
93 int *val, int *val2, long mask)
94{
95 struct lpc18xx_adc *adc = iio_priv(indio_dev);
96
97 switch (mask) {
98 case IIO_CHAN_INFO_RAW:
99 mutex_lock(&adc->lock);
100 *val = lpc18xx_adc_read_chan(adc, chan->channel);
101 mutex_unlock(&adc->lock);
102 if (*val < 0)
103 return *val;
104
105 return IIO_VAL_INT;
106
107 case IIO_CHAN_INFO_SCALE:
108 *val = regulator_get_voltage(adc->vref) / 1000;
109 *val2 = 10;
110
111 return IIO_VAL_FRACTIONAL_LOG2;
112 }
113
114 return -EINVAL;
115}
116
117static const struct iio_info lpc18xx_adc_info = {
118 .read_raw = lpc18xx_adc_read_raw,
119 .driver_module = THIS_MODULE,
120};
121
122static int lpc18xx_adc_probe(struct platform_device *pdev)
123{
124 struct iio_dev *indio_dev;
125 struct lpc18xx_adc *adc;
126 struct resource *res;
127 unsigned int clkdiv;
128 unsigned long rate;
129 int ret;
130
131 indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
132 if (!indio_dev)
133 return -ENOMEM;
134
135 platform_set_drvdata(pdev, indio_dev);
136 adc = iio_priv(indio_dev);
137 adc->dev = &pdev->dev;
138 mutex_init(&adc->lock);
139
140 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
141 adc->base = devm_ioremap_resource(&pdev->dev, res);
142 if (IS_ERR(adc->base))
143 return PTR_ERR(adc->base);
144
145 adc->clk = devm_clk_get(&pdev->dev, NULL);
146 if (IS_ERR(adc->clk)) {
147 dev_err(&pdev->dev, "error getting clock\n");
148 return PTR_ERR(adc->clk);
149 }
150
151 rate = clk_get_rate(adc->clk);
152 clkdiv = DIV_ROUND_UP(rate, LPC18XX_ADC_CLK_TARGET);
153
154 adc->vref = devm_regulator_get(&pdev->dev, "vref");
155 if (IS_ERR(adc->vref)) {
156 dev_err(&pdev->dev, "error getting regulator\n");
157 return PTR_ERR(adc->vref);
158 }
159
160 indio_dev->name = dev_name(&pdev->dev);
161 indio_dev->dev.parent = &pdev->dev;
162 indio_dev->info = &lpc18xx_adc_info;
163 indio_dev->modes = INDIO_DIRECT_MODE;
164 indio_dev->channels = lpc18xx_adc_iio_channels;
165 indio_dev->num_channels = ARRAY_SIZE(lpc18xx_adc_iio_channels);
166
167 ret = regulator_enable(adc->vref);
168 if (ret) {
169 dev_err(&pdev->dev, "unable to enable regulator\n");
170 return ret;
171 }
172
173 ret = clk_prepare_enable(adc->clk);
174 if (ret) {
175 dev_err(&pdev->dev, "unable to enable clock\n");
176 goto dis_reg;
177 }
178
179 adc->cr_reg = (clkdiv << LPC18XX_ADC_CR_CLKDIV_SHIFT) |
180 LPC18XX_ADC_CR_PDN;
181 writel(adc->cr_reg, adc->base + LPC18XX_ADC_CR);
182
183 ret = iio_device_register(indio_dev);
184 if (ret) {
185 dev_err(&pdev->dev, "unable to register device\n");
186 goto dis_clk;
187 }
188
189 return 0;
190
191dis_clk:
192 writel(0, adc->base + LPC18XX_ADC_CR);
193 clk_disable_unprepare(adc->clk);
194dis_reg:
195 regulator_disable(adc->vref);
196 return ret;
197}
198
199static int lpc18xx_adc_remove(struct platform_device *pdev)
200{
201 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
202 struct lpc18xx_adc *adc = iio_priv(indio_dev);
203
204 iio_device_unregister(indio_dev);
205
206 writel(0, adc->base + LPC18XX_ADC_CR);
207 clk_disable_unprepare(adc->clk);
208 regulator_disable(adc->vref);
209
210 return 0;
211}
212
213static const struct of_device_id lpc18xx_adc_match[] = {
214 { .compatible = "nxp,lpc1850-adc" },
215 { /* sentinel */ }
216};
217MODULE_DEVICE_TABLE(of, lpc18xx_adc_match);
218
219static struct platform_driver lpc18xx_adc_driver = {
220 .probe = lpc18xx_adc_probe,
221 .remove = lpc18xx_adc_remove,
222 .driver = {
223 .name = "lpc18xx-adc",
224 .of_match_table = lpc18xx_adc_match,
225 },
226};
227module_platform_driver(lpc18xx_adc_driver);
228
229MODULE_DESCRIPTION("LPC18xx ADC driver");
230MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
231MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index d7b36efd2f3c..d1172dc1e8e2 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -61,9 +61,9 @@
61 61
62static const int mcp3422_scales[4][4] = { 62static const int mcp3422_scales[4][4] = {
63 { 1000000, 500000, 250000, 125000 }, 63 { 1000000, 500000, 250000, 125000 },
64 { 250000 , 125000, 62500 , 31250 }, 64 { 250000, 125000, 62500, 31250 },
65 { 62500 , 31250 , 15625 , 7812 }, 65 { 62500, 31250, 15625, 7812 },
66 { 15625 , 7812 , 3906 , 1953 } }; 66 { 15625, 7812, 3906, 1953 } };
67 67
68/* Constant msleep times for data acquisitions */ 68/* Constant msleep times for data acquisitions */
69static const int mcp3422_read_times[4] = { 69static const int mcp3422_read_times[4] = {
diff --git a/drivers/iio/adc/mxs-lradc.c b/drivers/iio/adc/mxs-lradc.c
index 33051b87aac2..ad26da1edbee 100644
--- a/drivers/iio/adc/mxs-lradc.c
+++ b/drivers/iio/adc/mxs-lradc.c
@@ -686,6 +686,17 @@ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc)
686 686
687static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc) 687static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
688{ 688{
689 /* Configure the touchscreen type */
690 if (lradc->soc == IMX28_LRADC) {
691 mxs_lradc_reg_clear(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
692 LRADC_CTRL0);
693
694 if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE)
695 mxs_lradc_reg_set(lradc,
696 LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
697 LRADC_CTRL0);
698 }
699
689 mxs_lradc_setup_touch_detection(lradc); 700 mxs_lradc_setup_touch_detection(lradc);
690 701
691 lradc->cur_plate = LRADC_TOUCH; 702 lradc->cur_plate = LRADC_TOUCH;
@@ -1127,6 +1138,7 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
1127 __set_bit(EV_ABS, input->evbit); 1138 __set_bit(EV_ABS, input->evbit);
1128 __set_bit(EV_KEY, input->evbit); 1139 __set_bit(EV_KEY, input->evbit);
1129 __set_bit(BTN_TOUCH, input->keybit); 1140 __set_bit(BTN_TOUCH, input->keybit);
1141 __set_bit(INPUT_PROP_DIRECT, input->propbit);
1130 input_set_abs_params(input, ABS_X, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0); 1142 input_set_abs_params(input, ABS_X, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
1131 input_set_abs_params(input, ABS_Y, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0); 1143 input_set_abs_params(input, ABS_Y, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
1132 input_set_abs_params(input, ABS_PRESSURE, 0, LRADC_SINGLE_SAMPLE_MASK, 1144 input_set_abs_params(input, ABS_PRESSURE, 0, LRADC_SINGLE_SAMPLE_MASK,
@@ -1475,18 +1487,13 @@ static const struct iio_chan_spec mx28_lradc_chan_spec[] = {
1475 MXS_ADC_CHAN(15, IIO_VOLTAGE, "VDD5V"), 1487 MXS_ADC_CHAN(15, IIO_VOLTAGE, "VDD5V"),
1476}; 1488};
1477 1489
1478static int mxs_lradc_hw_init(struct mxs_lradc *lradc) 1490static void mxs_lradc_hw_init(struct mxs_lradc *lradc)
1479{ 1491{
1480 /* The ADC always uses DELAY CHANNEL 0. */ 1492 /* The ADC always uses DELAY CHANNEL 0. */
1481 const u32 adc_cfg = 1493 const u32 adc_cfg =
1482 (1 << (LRADC_DELAY_TRIGGER_DELAYS_OFFSET + 0)) | 1494 (1 << (LRADC_DELAY_TRIGGER_DELAYS_OFFSET + 0)) |
1483 (LRADC_DELAY_TIMER_PER << LRADC_DELAY_DELAY_OFFSET); 1495 (LRADC_DELAY_TIMER_PER << LRADC_DELAY_DELAY_OFFSET);
1484 1496
1485 int ret = stmp_reset_block(lradc->base);
1486
1487 if (ret)
1488 return ret;
1489
1490 /* Configure DELAY CHANNEL 0 for generic ADC sampling. */ 1497 /* Configure DELAY CHANNEL 0 for generic ADC sampling. */
1491 mxs_lradc_reg_wrt(lradc, adc_cfg, LRADC_DELAY(0)); 1498 mxs_lradc_reg_wrt(lradc, adc_cfg, LRADC_DELAY(0));
1492 1499
@@ -1495,20 +1502,8 @@ static int mxs_lradc_hw_init(struct mxs_lradc *lradc)
1495 mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2)); 1502 mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
1496 mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3)); 1503 mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
1497 1504
1498 /* Configure the touchscreen type */
1499 if (lradc->soc == IMX28_LRADC) {
1500 mxs_lradc_reg_clear(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
1501 LRADC_CTRL0);
1502
1503 if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE)
1504 mxs_lradc_reg_set(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
1505 LRADC_CTRL0);
1506 }
1507
1508 /* Start internal temperature sensing. */ 1505 /* Start internal temperature sensing. */
1509 mxs_lradc_reg_wrt(lradc, 0, LRADC_CTRL2); 1506 mxs_lradc_reg_wrt(lradc, 0, LRADC_CTRL2);
1510
1511 return 0;
1512} 1507}
1513 1508
1514static void mxs_lradc_hw_stop(struct mxs_lradc *lradc) 1509static void mxs_lradc_hw_stop(struct mxs_lradc *lradc)
@@ -1708,11 +1703,13 @@ static int mxs_lradc_probe(struct platform_device *pdev)
1708 } 1703 }
1709 } 1704 }
1710 1705
1711 /* Configure the hardware. */ 1706 ret = stmp_reset_block(lradc->base);
1712 ret = mxs_lradc_hw_init(lradc);
1713 if (ret) 1707 if (ret)
1714 goto err_dev; 1708 goto err_dev;
1715 1709
1710 /* Configure the hardware. */
1711 mxs_lradc_hw_init(lradc);
1712
1716 /* Register the touchscreen input device. */ 1713 /* Register the touchscreen input device. */
1717 if (touch_ret == 0) { 1714 if (touch_ret == 0) {
1718 ret = mxs_lradc_ts_register(lradc); 1715 ret = mxs_lradc_ts_register(lradc);
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 9c311c1e1ac7..f9ad6c2d6821 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -159,6 +159,22 @@ static const struct rockchip_saradc_data rk3066_tsadc_data = {
159 .clk_rate = 50000, 159 .clk_rate = 50000,
160}; 160};
161 161
162static const struct iio_chan_spec rockchip_rk3399_saradc_iio_channels[] = {
163 ADC_CHANNEL(0, "adc0"),
164 ADC_CHANNEL(1, "adc1"),
165 ADC_CHANNEL(2, "adc2"),
166 ADC_CHANNEL(3, "adc3"),
167 ADC_CHANNEL(4, "adc4"),
168 ADC_CHANNEL(5, "adc5"),
169};
170
171static const struct rockchip_saradc_data rk3399_saradc_data = {
172 .num_bits = 10,
173 .channels = rockchip_rk3399_saradc_iio_channels,
174 .num_channels = ARRAY_SIZE(rockchip_rk3399_saradc_iio_channels),
175 .clk_rate = 1000000,
176};
177
162static const struct of_device_id rockchip_saradc_match[] = { 178static const struct of_device_id rockchip_saradc_match[] = {
163 { 179 {
164 .compatible = "rockchip,saradc", 180 .compatible = "rockchip,saradc",
@@ -166,6 +182,9 @@ static const struct of_device_id rockchip_saradc_match[] = {
166 }, { 182 }, {
167 .compatible = "rockchip,rk3066-tsadc", 183 .compatible = "rockchip,rk3066-tsadc",
168 .data = &rk3066_tsadc_data, 184 .data = &rk3066_tsadc_data,
185 }, {
186 .compatible = "rockchip,rk3399-saradc",
187 .data = &rk3399_saradc_data,
169 }, 188 },
170 {}, 189 {},
171}; 190};
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index ecbc12138d58..9fd032d9f402 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -1,9 +1,21 @@
1/* 1/*
2 * TI ADC081C/ADC101C/ADC121C 8/10/12-bit ADC driver
3 *
2 * Copyright (C) 2012 Avionic Design GmbH 4 * Copyright (C) 2012 Avionic Design GmbH
5 * Copyright (C) 2016 Intel
3 * 6 *
4 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 *
11 * Datasheets:
12 * http://www.ti.com/lit/ds/symlink/adc081c021.pdf
13 * http://www.ti.com/lit/ds/symlink/adc101c021.pdf
14 * http://www.ti.com/lit/ds/symlink/adc121c021.pdf
15 *
16 * The devices have a very similar interface and differ mostly in the number of
17 * bits handled. For the 8-bit and 10-bit models the least-significant 4 or 2
18 * bits of value registers are reserved.
7 */ 19 */
8 20
9#include <linux/err.h> 21#include <linux/err.h>
@@ -12,11 +24,17 @@
12#include <linux/of.h> 24#include <linux/of.h>
13 25
14#include <linux/iio/iio.h> 26#include <linux/iio/iio.h>
27#include <linux/iio/buffer.h>
28#include <linux/iio/trigger_consumer.h>
29#include <linux/iio/triggered_buffer.h>
15#include <linux/regulator/consumer.h> 30#include <linux/regulator/consumer.h>
16 31
17struct adc081c { 32struct adc081c {
18 struct i2c_client *i2c; 33 struct i2c_client *i2c;
19 struct regulator *ref; 34 struct regulator *ref;
35
36 /* 8, 10 or 12 */
37 int bits;
20}; 38};
21 39
22#define REG_CONV_RES 0x00 40#define REG_CONV_RES 0x00
@@ -34,7 +52,7 @@ static int adc081c_read_raw(struct iio_dev *iio,
34 if (err < 0) 52 if (err < 0)
35 return err; 53 return err;
36 54
37 *value = (err >> 4) & 0xff; 55 *value = (err & 0xFFF) >> (12 - adc->bits);
38 return IIO_VAL_INT; 56 return IIO_VAL_INT;
39 57
40 case IIO_CHAN_INFO_SCALE: 58 case IIO_CHAN_INFO_SCALE:
@@ -43,7 +61,7 @@ static int adc081c_read_raw(struct iio_dev *iio,
43 return err; 61 return err;
44 62
45 *value = err / 1000; 63 *value = err / 1000;
46 *shift = 8; 64 *shift = adc->bits;
47 65
48 return IIO_VAL_FRACTIONAL_LOG2; 66 return IIO_VAL_FRACTIONAL_LOG2;
49 67
@@ -54,10 +72,53 @@ static int adc081c_read_raw(struct iio_dev *iio,
54 return -EINVAL; 72 return -EINVAL;
55} 73}
56 74
57static const struct iio_chan_spec adc081c_channel = { 75#define ADCxx1C_CHAN(_bits) { \
58 .type = IIO_VOLTAGE, 76 .type = IIO_VOLTAGE, \
59 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), 77 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
60 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 78 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
79 .scan_type = { \
80 .sign = 'u', \
81 .realbits = (_bits), \
82 .storagebits = 16, \
83 .shift = 12 - (_bits), \
84 .endianness = IIO_CPU, \
85 }, \
86}
87
88#define DEFINE_ADCxx1C_CHANNELS(_name, _bits) \
89 static const struct iio_chan_spec _name ## _channels[] = { \
90 ADCxx1C_CHAN((_bits)), \
91 IIO_CHAN_SOFT_TIMESTAMP(1), \
92 }; \
93
94#define ADC081C_NUM_CHANNELS 2
95
96struct adcxx1c_model {
97 const struct iio_chan_spec* channels;
98 int bits;
99};
100
101#define ADCxx1C_MODEL(_name, _bits) \
102 { \
103 .channels = _name ## _channels, \
104 .bits = (_bits), \
105 }
106
107DEFINE_ADCxx1C_CHANNELS(adc081c, 8);
108DEFINE_ADCxx1C_CHANNELS(adc101c, 10);
109DEFINE_ADCxx1C_CHANNELS(adc121c, 12);
110
111/* Model ids are indexes in _models array */
112enum adcxx1c_model_id {
113 ADC081C = 0,
114 ADC101C = 1,
115 ADC121C = 2,
116};
117
118static struct adcxx1c_model adcxx1c_models[] = {
119 ADCxx1C_MODEL(adc081c, 8),
120 ADCxx1C_MODEL(adc101c, 10),
121 ADCxx1C_MODEL(adc121c, 12),
61}; 122};
62 123
63static const struct iio_info adc081c_info = { 124static const struct iio_info adc081c_info = {
@@ -65,11 +126,30 @@ static const struct iio_info adc081c_info = {
65 .driver_module = THIS_MODULE, 126 .driver_module = THIS_MODULE,
66}; 127};
67 128
129static irqreturn_t adc081c_trigger_handler(int irq, void *p)
130{
131 struct iio_poll_func *pf = p;
132 struct iio_dev *indio_dev = pf->indio_dev;
133 struct adc081c *data = iio_priv(indio_dev);
134 u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */
135 int ret;
136
137 ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES);
138 if (ret < 0)
139 goto out;
140 buf[0] = ret;
141 iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
142out:
143 iio_trigger_notify_done(indio_dev->trig);
144 return IRQ_HANDLED;
145}
146
68static int adc081c_probe(struct i2c_client *client, 147static int adc081c_probe(struct i2c_client *client,
69 const struct i2c_device_id *id) 148 const struct i2c_device_id *id)
70{ 149{
71 struct iio_dev *iio; 150 struct iio_dev *iio;
72 struct adc081c *adc; 151 struct adc081c *adc;
152 struct adcxx1c_model *model = &adcxx1c_models[id->driver_data];
73 int err; 153 int err;
74 154
75 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) 155 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
@@ -81,6 +161,7 @@ static int adc081c_probe(struct i2c_client *client,
81 161
82 adc = iio_priv(iio); 162 adc = iio_priv(iio);
83 adc->i2c = client; 163 adc->i2c = client;
164 adc->bits = model->bits;
84 165
85 adc->ref = devm_regulator_get(&client->dev, "vref"); 166 adc->ref = devm_regulator_get(&client->dev, "vref");
86 if (IS_ERR(adc->ref)) 167 if (IS_ERR(adc->ref))
@@ -95,18 +176,26 @@ static int adc081c_probe(struct i2c_client *client,
95 iio->modes = INDIO_DIRECT_MODE; 176 iio->modes = INDIO_DIRECT_MODE;
96 iio->info = &adc081c_info; 177 iio->info = &adc081c_info;
97 178
98 iio->channels = &adc081c_channel; 179 iio->channels = model->channels;
99 iio->num_channels = 1; 180 iio->num_channels = ADC081C_NUM_CHANNELS;
181
182 err = iio_triggered_buffer_setup(iio, NULL, adc081c_trigger_handler, NULL);
183 if (err < 0) {
184 dev_err(&client->dev, "iio triggered buffer setup failed\n");
185 goto err_regulator_disable;
186 }
100 187
101 err = iio_device_register(iio); 188 err = iio_device_register(iio);
102 if (err < 0) 189 if (err < 0)
103 goto regulator_disable; 190 goto err_buffer_cleanup;
104 191
105 i2c_set_clientdata(client, iio); 192 i2c_set_clientdata(client, iio);
106 193
107 return 0; 194 return 0;
108 195
109regulator_disable: 196err_buffer_cleanup:
197 iio_triggered_buffer_cleanup(iio);
198err_regulator_disable:
110 regulator_disable(adc->ref); 199 regulator_disable(adc->ref);
111 200
112 return err; 201 return err;
@@ -118,13 +207,16 @@ static int adc081c_remove(struct i2c_client *client)
118 struct adc081c *adc = iio_priv(iio); 207 struct adc081c *adc = iio_priv(iio);
119 208
120 iio_device_unregister(iio); 209 iio_device_unregister(iio);
210 iio_triggered_buffer_cleanup(iio);
121 regulator_disable(adc->ref); 211 regulator_disable(adc->ref);
122 212
123 return 0; 213 return 0;
124} 214}
125 215
126static const struct i2c_device_id adc081c_id[] = { 216static const struct i2c_device_id adc081c_id[] = {
127 { "adc081c", 0 }, 217 { "adc081c", ADC081C },
218 { "adc101c", ADC101C },
219 { "adc121c", ADC121C },
128 { } 220 { }
129}; 221};
130MODULE_DEVICE_TABLE(i2c, adc081c_id); 222MODULE_DEVICE_TABLE(i2c, adc081c_id);
@@ -132,6 +224,8 @@ MODULE_DEVICE_TABLE(i2c, adc081c_id);
132#ifdef CONFIG_OF 224#ifdef CONFIG_OF
133static const struct of_device_id adc081c_of_match[] = { 225static const struct of_device_id adc081c_of_match[] = {
134 { .compatible = "ti,adc081c" }, 226 { .compatible = "ti,adc081c" },
227 { .compatible = "ti,adc101c" },
228 { .compatible = "ti,adc121c" },
135 { } 229 { }
136}; 230};
137MODULE_DEVICE_TABLE(of, adc081c_of_match); 231MODULE_DEVICE_TABLE(of, adc081c_of_match);
@@ -149,5 +243,5 @@ static struct i2c_driver adc081c_driver = {
149module_i2c_driver(adc081c_driver); 243module_i2c_driver(adc081c_driver);
150 244
151MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); 245MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
152MODULE_DESCRIPTION("Texas Instruments ADC081C021/027 driver"); 246MODULE_DESCRIPTION("Texas Instruments ADC081C/ADC101C/ADC121C driver");
153MODULE_LICENSE("GPL v2"); 247MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index b10f629cc44b..653bf1379d2e 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -714,19 +714,19 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
714 int i; 714 int i;
715 715
716 switch (mask) { 716 switch (mask) {
717 case IIO_CHAN_INFO_SAMP_FREQ: 717 case IIO_CHAN_INFO_SAMP_FREQ:
718 for (i = 0; 718 for (i = 0;
719 i < ARRAY_SIZE(info->sample_freq_avail); 719 i < ARRAY_SIZE(info->sample_freq_avail);
720 i++) 720 i++)
721 if (val == info->sample_freq_avail[i]) { 721 if (val == info->sample_freq_avail[i]) {
722 info->adc_feature.sample_rate = i; 722 info->adc_feature.sample_rate = i;
723 vf610_adc_sample_set(info); 723 vf610_adc_sample_set(info);
724 return 0; 724 return 0;
725 } 725 }
726 break; 726 break;
727 727
728 default: 728 default:
729 break; 729 break;
730 } 730 }
731 731
732 return -EINVAL; 732 return -EINVAL;
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 595511022795..5b41f9d0d4f3 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -115,7 +115,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
115 return ret; 115 return ret;
116 } 116 }
117 117
118 return 0; 118 return 0;
119#else 119#else
120 atomic_set(&st->user_requested_state, state); 120 atomic_set(&st->user_requested_state, state);
121 return _hid_sensor_power_state(st, state); 121 return _hid_sensor_power_state(st, state);
diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
index 669dc7c270f5..ecf7721ecaf4 100644
--- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
@@ -106,7 +106,7 @@ int ms_sensors_convert_and_read(void *cli, u8 conv, u8 rd,
106 unsigned int delay, u32 *adc) 106 unsigned int delay, u32 *adc)
107{ 107{
108 int ret; 108 int ret;
109 __be32 buf = 0; 109 __be32 buf = 0;
110 struct i2c_client *client = (struct i2c_client *)cli; 110 struct i2c_client *client = (struct i2c_client *)cli;
111 111
112 /* Trigger conversion */ 112 /* Trigger conversion */
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index e18bc6782256..c55898543a47 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -24,81 +24,30 @@
24 24
25int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf) 25int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
26{ 26{
27 u8 *addr; 27 int i, len;
28 int i, n = 0, len; 28 int total = 0;
29 struct st_sensor_data *sdata = iio_priv(indio_dev); 29 struct st_sensor_data *sdata = iio_priv(indio_dev);
30 unsigned int num_data_channels = sdata->num_data_channels; 30 unsigned int num_data_channels = sdata->num_data_channels;
31 unsigned int byte_for_channel =
32 indio_dev->channels[0].scan_type.storagebits >> 3;
33
34 addr = kmalloc(num_data_channels, GFP_KERNEL);
35 if (!addr) {
36 len = -ENOMEM;
37 goto st_sensors_get_buffer_element_error;
38 }
39 31
40 for (i = 0; i < num_data_channels; i++) { 32 for (i = 0; i < num_data_channels; i++) {
33 unsigned int bytes_to_read;
34
41 if (test_bit(i, indio_dev->active_scan_mask)) { 35 if (test_bit(i, indio_dev->active_scan_mask)) {
42 addr[n] = indio_dev->channels[i].address; 36 bytes_to_read = indio_dev->channels[i].scan_type.storagebits >> 3;
43 n++;
44 }
45 }
46 switch (n) {
47 case 1:
48 len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
49 addr[0], byte_for_channel, buf, sdata->multiread_bit);
50 break;
51 case 2:
52 if ((addr[1] - addr[0]) == byte_for_channel) {
53 len = sdata->tf->read_multiple_byte(&sdata->tb, 37 len = sdata->tf->read_multiple_byte(&sdata->tb,
54 sdata->dev, addr[0], byte_for_channel * n, 38 sdata->dev, indio_dev->channels[i].address,
55 buf, sdata->multiread_bit); 39 bytes_to_read,
56 } else { 40 buf + total, sdata->multiread_bit);
57 u8 *rx_array;
58 rx_array = kmalloc(byte_for_channel * num_data_channels,
59 GFP_KERNEL);
60 if (!rx_array) {
61 len = -ENOMEM;
62 goto st_sensors_free_memory;
63 }
64 41
65 len = sdata->tf->read_multiple_byte(&sdata->tb, 42 if (len < bytes_to_read)
66 sdata->dev, addr[0], 43 return -EIO;
67 byte_for_channel * num_data_channels, 44
68 rx_array, sdata->multiread_bit); 45 /* Advance the buffer pointer */
69 if (len < 0) { 46 total += len;
70 kfree(rx_array);
71 goto st_sensors_free_memory;
72 }
73
74 for (i = 0; i < n * byte_for_channel; i++) {
75 if (i < n)
76 buf[i] = rx_array[i];
77 else
78 buf[i] = rx_array[n + i];
79 }
80 kfree(rx_array);
81 len = byte_for_channel * n;
82 } 47 }
83 break;
84 case 3:
85 len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
86 addr[0], byte_for_channel * num_data_channels,
87 buf, sdata->multiread_bit);
88 break;
89 default:
90 len = -EINVAL;
91 goto st_sensors_free_memory;
92 }
93 if (len != byte_for_channel * n) {
94 len = -EIO;
95 goto st_sensors_free_memory;
96 } 48 }
97 49
98st_sensors_free_memory: 50 return total;
99 kfree(addr);
100st_sensors_get_buffer_element_error:
101 return len;
102} 51}
103EXPORT_SYMBOL(st_sensors_get_buffer_element); 52EXPORT_SYMBOL(st_sensors_get_buffer_element);
104 53
@@ -109,6 +58,24 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
109 struct iio_dev *indio_dev = pf->indio_dev; 58 struct iio_dev *indio_dev = pf->indio_dev;
110 struct st_sensor_data *sdata = iio_priv(indio_dev); 59 struct st_sensor_data *sdata = iio_priv(indio_dev);
111 60
61 /* If we have a status register, check if this IRQ came from us */
62 if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
63 u8 status;
64
65 len = sdata->tf->read_byte(&sdata->tb, sdata->dev,
66 sdata->sensor_settings->drdy_irq.addr_stat_drdy,
67 &status);
68 if (len < 0)
69 dev_err(sdata->dev, "could not read channel status\n");
70
71 /*
72 * If this was not caused by any channels on this sensor,
73 * return IRQ_NONE
74 */
75 if (!(status & (u8)indio_dev->active_scan_mask[0]))
76 return IRQ_NONE;
77 }
78
112 len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data); 79 len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
113 if (len < 0) 80 if (len < 0)
114 goto st_sensors_get_buffer_element_error; 81 goto st_sensors_get_buffer_element_error;
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index f5a2d445d0c0..dffe00692169 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -301,6 +301,14 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
301 return -EINVAL; 301 return -EINVAL;
302 } 302 }
303 303
304 if (pdata->open_drain) {
305 if (!sdata->sensor_settings->drdy_irq.addr_od)
306 dev_err(&indio_dev->dev,
307 "open drain requested but unsupported.\n");
308 else
309 sdata->int_pin_open_drain = true;
310 }
311
304 return 0; 312 return 0;
305} 313}
306 314
@@ -321,6 +329,8 @@ static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev,
321 else 329 else
322 pdata->drdy_int_pin = defdata ? defdata->drdy_int_pin : 0; 330 pdata->drdy_int_pin = defdata ? defdata->drdy_int_pin : 0;
323 331
332 pdata->open_drain = of_property_read_bool(np, "drive-open-drain");
333
324 return pdata; 334 return pdata;
325} 335}
326#else 336#else
@@ -374,6 +384,16 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
374 return err; 384 return err;
375 } 385 }
376 386
387 if (sdata->int_pin_open_drain) {
388 dev_info(&indio_dev->dev,
389 "set interrupt line to open drain mode\n");
390 err = st_sensors_write_data_with_mask(indio_dev,
391 sdata->sensor_settings->drdy_irq.addr_od,
392 sdata->sensor_settings->drdy_irq.mask_od, 1);
393 if (err < 0)
394 return err;
395 }
396
377 err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); 397 err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
378 398
379 return err; 399 return err;
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 6a8c98327945..da72279fcf99 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -64,6 +64,19 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
64 "rising edge\n", irq_trig); 64 "rising edge\n", irq_trig);
65 irq_trig = IRQF_TRIGGER_RISING; 65 irq_trig = IRQF_TRIGGER_RISING;
66 } 66 }
67
68 /*
69 * If the interrupt pin is Open Drain, by definition this
70 * means that the interrupt line may be shared with other
71 * peripherals. But to do this we also need to have a status
72 * register and mask to figure out if this sensor was firing
73 * the IRQ or not, so we can tell the interrupt handle that
74 * it was "our" interrupt.
75 */
76 if (sdata->int_pin_open_drain &&
77 sdata->sensor_settings->drdy_irq.addr_stat_drdy)
78 irq_trig |= IRQF_SHARED;
79
67 err = request_threaded_irq(irq, 80 err = request_threaded_irq(irq,
68 iio_trigger_generic_data_rdy_poll, 81 iio_trigger_generic_data_rdy_poll,
69 NULL, 82 NULL,
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 6abcfb8597d9..e63b957c985f 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -74,6 +74,33 @@ config AD5449
74 To compile this driver as a module, choose M here: the 74 To compile this driver as a module, choose M here: the
75 module will be called ad5449. 75 module will be called ad5449.
76 76
77config AD5592R_BASE
78 tristate
79
80config AD5592R
81 tristate "Analog Devices AD5592R ADC/DAC driver"
82 depends on SPI_MASTER
83 select GPIOLIB
84 select AD5592R_BASE
85 help
86 Say yes here to build support for Analog Devices AD5592R
87 Digital to Analog / Analog to Digital Converter.
88
89 To compile this driver as a module, choose M here: the
90 module will be called ad5592r.
91
92config AD5593R
93 tristate "Analog Devices AD5593R ADC/DAC driver"
94 depends on I2C
95 select GPIOLIB
96 select AD5592R_BASE
97 help
98 Say yes here to build support for Analog Devices AD5593R
99 Digital to Analog / Analog to Digital Converter.
100
101 To compile this driver as a module, choose M here: the
102 module will be called ad5593r.
103
77config AD5504 104config AD5504
78 tristate "Analog Devices AD5504/AD5501 DAC SPI driver" 105 tristate "Analog Devices AD5504/AD5501 DAC SPI driver"
79 depends on SPI 106 depends on SPI
@@ -154,6 +181,16 @@ config AD7303
154 To compile this driver as module choose M here: the module will be called 181 To compile this driver as module choose M here: the module will be called
155 ad7303. 182 ad7303.
156 183
184config LPC18XX_DAC
185 tristate "NXP LPC18xx DAC driver"
186 depends on ARCH_LPC18XX || COMPILE_TEST
187 depends on OF && HAS_IOMEM
188 help
189 Say yes here to build support for NXP LPC18XX DAC.
190
191 To compile this driver as a module, choose M here: the module will be
192 called lpc18xx_dac.
193
157config M62332 194config M62332
158 tristate "Mitsubishi M62332 DAC driver" 195 tristate "Mitsubishi M62332 DAC driver"
159 depends on I2C 196 depends on I2C
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 67b48429686d..8b78d5ca9b11 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -11,12 +11,16 @@ obj-$(CONFIG_AD5064) += ad5064.o
11obj-$(CONFIG_AD5504) += ad5504.o 11obj-$(CONFIG_AD5504) += ad5504.o
12obj-$(CONFIG_AD5446) += ad5446.o 12obj-$(CONFIG_AD5446) += ad5446.o
13obj-$(CONFIG_AD5449) += ad5449.o 13obj-$(CONFIG_AD5449) += ad5449.o
14obj-$(CONFIG_AD5592R_BASE) += ad5592r-base.o
15obj-$(CONFIG_AD5592R) += ad5592r.o
16obj-$(CONFIG_AD5593R) += ad5593r.o
14obj-$(CONFIG_AD5755) += ad5755.o 17obj-$(CONFIG_AD5755) += ad5755.o
15obj-$(CONFIG_AD5761) += ad5761.o 18obj-$(CONFIG_AD5761) += ad5761.o
16obj-$(CONFIG_AD5764) += ad5764.o 19obj-$(CONFIG_AD5764) += ad5764.o
17obj-$(CONFIG_AD5791) += ad5791.o 20obj-$(CONFIG_AD5791) += ad5791.o
18obj-$(CONFIG_AD5686) += ad5686.o 21obj-$(CONFIG_AD5686) += ad5686.o
19obj-$(CONFIG_AD7303) += ad7303.o 22obj-$(CONFIG_AD7303) += ad7303.o
23obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
20obj-$(CONFIG_M62332) += m62332.o 24obj-$(CONFIG_M62332) += m62332.o
21obj-$(CONFIG_MAX517) += max517.o 25obj-$(CONFIG_MAX517) += max517.o
22obj-$(CONFIG_MAX5821) += max5821.o 26obj-$(CONFIG_MAX5821) += max5821.o
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
new file mode 100644
index 000000000000..948f600e7059
--- /dev/null
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -0,0 +1,691 @@
1/*
2 * AD5592R Digital <-> Analog converters driver
3 *
4 * Copyright 2014-2016 Analog Devices Inc.
5 * Author: Paul Cercueil <paul.cercueil@analog.com>
6 *
7 * Licensed under the GPL-2.
8 */
9
10#include <linux/bitops.h>
11#include <linux/delay.h>
12#include <linux/iio/iio.h>
13#include <linux/module.h>
14#include <linux/mutex.h>
15#include <linux/of.h>
16#include <linux/regulator/consumer.h>
17#include <linux/gpio/consumer.h>
18#include <linux/gpio/driver.h>
19#include <linux/gpio.h>
20#include <linux/property.h>
21
22#include <dt-bindings/iio/adi,ad5592r.h>
23
24#include "ad5592r-base.h"
25
26static int ad5592r_gpio_get(struct gpio_chip *chip, unsigned offset)
27{
28 struct ad5592r_state *st = gpiochip_get_data(chip);
29 int ret = 0;
30 u8 val;
31
32 mutex_lock(&st->gpio_lock);
33
34 if (st->gpio_out & BIT(offset))
35 val = st->gpio_val;
36 else
37 ret = st->ops->gpio_read(st, &val);
38
39 mutex_unlock(&st->gpio_lock);
40
41 if (ret < 0)
42 return ret;
43
44 return !!(val & BIT(offset));
45}
46
47static void ad5592r_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
48{
49 struct ad5592r_state *st = gpiochip_get_data(chip);
50
51 mutex_lock(&st->gpio_lock);
52
53 if (value)
54 st->gpio_val |= BIT(offset);
55 else
56 st->gpio_val &= ~BIT(offset);
57
58 st->ops->reg_write(st, AD5592R_REG_GPIO_SET, st->gpio_val);
59
60 mutex_unlock(&st->gpio_lock);
61}
62
63static int ad5592r_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
64{
65 struct ad5592r_state *st = gpiochip_get_data(chip);
66 int ret;
67
68 mutex_lock(&st->gpio_lock);
69
70 st->gpio_out &= ~BIT(offset);
71 st->gpio_in |= BIT(offset);
72
73 ret = st->ops->reg_write(st, AD5592R_REG_GPIO_OUT_EN, st->gpio_out);
74 if (ret < 0)
75 goto err_unlock;
76
77 ret = st->ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
78
79err_unlock:
80 mutex_unlock(&st->gpio_lock);
81
82 return ret;
83}
84
85static int ad5592r_gpio_direction_output(struct gpio_chip *chip,
86 unsigned offset, int value)
87{
88 struct ad5592r_state *st = gpiochip_get_data(chip);
89 int ret;
90
91 mutex_lock(&st->gpio_lock);
92
93 if (value)
94 st->gpio_val |= BIT(offset);
95 else
96 st->gpio_val &= ~BIT(offset);
97
98 st->gpio_in &= ~BIT(offset);
99 st->gpio_out |= BIT(offset);
100
101 ret = st->ops->reg_write(st, AD5592R_REG_GPIO_SET, st->gpio_val);
102 if (ret < 0)
103 goto err_unlock;
104
105 ret = st->ops->reg_write(st, AD5592R_REG_GPIO_OUT_EN, st->gpio_out);
106 if (ret < 0)
107 goto err_unlock;
108
109 ret = st->ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
110
111err_unlock:
112 mutex_unlock(&st->gpio_lock);
113
114 return ret;
115}
116
117static int ad5592r_gpio_request(struct gpio_chip *chip, unsigned offset)
118{
119 struct ad5592r_state *st = gpiochip_get_data(chip);
120
121 if (!(st->gpio_map & BIT(offset))) {
122 dev_err(st->dev, "GPIO %d is reserved by alternate function\n",
123 offset);
124 return -ENODEV;
125 }
126
127 return 0;
128}
129
130static int ad5592r_gpio_init(struct ad5592r_state *st)
131{
132 if (!st->gpio_map)
133 return 0;
134
135 st->gpiochip.label = dev_name(st->dev);
136 st->gpiochip.base = -1;
137 st->gpiochip.ngpio = 8;
138 st->gpiochip.parent = st->dev;
139 st->gpiochip.can_sleep = true;
140 st->gpiochip.direction_input = ad5592r_gpio_direction_input;
141 st->gpiochip.direction_output = ad5592r_gpio_direction_output;
142 st->gpiochip.get = ad5592r_gpio_get;
143 st->gpiochip.set = ad5592r_gpio_set;
144 st->gpiochip.request = ad5592r_gpio_request;
145 st->gpiochip.owner = THIS_MODULE;
146
147 mutex_init(&st->gpio_lock);
148
149 return gpiochip_add_data(&st->gpiochip, st);
150}
151
152static void ad5592r_gpio_cleanup(struct ad5592r_state *st)
153{
154 if (st->gpio_map)
155 gpiochip_remove(&st->gpiochip);
156}
157
158static int ad5592r_reset(struct ad5592r_state *st)
159{
160 struct gpio_desc *gpio;
161 struct iio_dev *iio_dev = iio_priv_to_dev(st);
162
163 gpio = devm_gpiod_get_optional(st->dev, "reset", GPIOD_OUT_LOW);
164 if (IS_ERR(gpio))
165 return PTR_ERR(gpio);
166
167 if (gpio) {
168 udelay(1);
169 gpiod_set_value(gpio, 1);
170 } else {
171 mutex_lock(&iio_dev->mlock);
172 /* Writing this magic value resets the device */
173 st->ops->reg_write(st, AD5592R_REG_RESET, 0xdac);
174 mutex_unlock(&iio_dev->mlock);
175 }
176
177 udelay(250);
178
179 return 0;
180}
181
182static int ad5592r_get_vref(struct ad5592r_state *st)
183{
184 int ret;
185
186 if (st->reg) {
187 ret = regulator_get_voltage(st->reg);
188 if (ret < 0)
189 return ret;
190
191 return ret / 1000;
192 } else {
193 return 2500;
194 }
195}
196
197static int ad5592r_set_channel_modes(struct ad5592r_state *st)
198{
199 const struct ad5592r_rw_ops *ops = st->ops;
200 int ret;
201 unsigned i;
202 struct iio_dev *iio_dev = iio_priv_to_dev(st);
203 u8 pulldown = 0, tristate = 0, dac = 0, adc = 0;
204 u16 read_back;
205
206 for (i = 0; i < st->num_channels; i++) {
207 switch (st->channel_modes[i]) {
208 case CH_MODE_DAC:
209 dac |= BIT(i);
210 break;
211
212 case CH_MODE_ADC:
213 adc |= BIT(i);
214 break;
215
216 case CH_MODE_DAC_AND_ADC:
217 dac |= BIT(i);
218 adc |= BIT(i);
219 break;
220
221 case CH_MODE_GPIO:
222 st->gpio_map |= BIT(i);
223 st->gpio_in |= BIT(i); /* Default to input */
224 break;
225
226 case CH_MODE_UNUSED:
227 /* fall-through */
228 default:
229 switch (st->channel_offstate[i]) {
230 case CH_OFFSTATE_OUT_TRISTATE:
231 tristate |= BIT(i);
232 break;
233
234 case CH_OFFSTATE_OUT_LOW:
235 st->gpio_out |= BIT(i);
236 break;
237
238 case CH_OFFSTATE_OUT_HIGH:
239 st->gpio_out |= BIT(i);
240 st->gpio_val |= BIT(i);
241 break;
242
243 case CH_OFFSTATE_PULLDOWN:
244 /* fall-through */
245 default:
246 pulldown |= BIT(i);
247 break;
248 }
249 }
250 }
251
252 mutex_lock(&iio_dev->mlock);
253
254 /* Pull down unused pins to GND */
255 ret = ops->reg_write(st, AD5592R_REG_PULLDOWN, pulldown);
256 if (ret)
257 goto err_unlock;
258
259 ret = ops->reg_write(st, AD5592R_REG_TRISTATE, tristate);
260 if (ret)
261 goto err_unlock;
262
263 /* Configure pins that we use */
264 ret = ops->reg_write(st, AD5592R_REG_DAC_EN, dac);
265 if (ret)
266 goto err_unlock;
267
268 ret = ops->reg_write(st, AD5592R_REG_ADC_EN, adc);
269 if (ret)
270 goto err_unlock;
271
272 ret = ops->reg_write(st, AD5592R_REG_GPIO_SET, st->gpio_val);
273 if (ret)
274 goto err_unlock;
275
276 ret = ops->reg_write(st, AD5592R_REG_GPIO_OUT_EN, st->gpio_out);
277 if (ret)
278 goto err_unlock;
279
280 ret = ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
281 if (ret)
282 goto err_unlock;
283
284 /* Verify that we can read back at least one register */
285 ret = ops->reg_read(st, AD5592R_REG_ADC_EN, &read_back);
286 if (!ret && (read_back & 0xff) != adc)
287 ret = -EIO;
288
289err_unlock:
290 mutex_unlock(&iio_dev->mlock);
291 return ret;
292}
293
294static int ad5592r_reset_channel_modes(struct ad5592r_state *st)
295{
296 int i;
297
298 for (i = 0; i < ARRAY_SIZE(st->channel_modes); i++)
299 st->channel_modes[i] = CH_MODE_UNUSED;
300
301 return ad5592r_set_channel_modes(st);
302}
303
304static int ad5592r_write_raw(struct iio_dev *iio_dev,
305 struct iio_chan_spec const *chan, int val, int val2, long mask)
306{
307 struct ad5592r_state *st = iio_priv(iio_dev);
308 int ret;
309
310 switch (mask) {
311 case IIO_CHAN_INFO_RAW:
312
313 if (val >= (1 << chan->scan_type.realbits) || val < 0)
314 return -EINVAL;
315
316 if (!chan->output)
317 return -EINVAL;
318
319 mutex_lock(&iio_dev->mlock);
320 ret = st->ops->write_dac(st, chan->channel, val);
321 if (!ret)
322 st->cached_dac[chan->channel] = val;
323 mutex_unlock(&iio_dev->mlock);
324 return ret;
325 case IIO_CHAN_INFO_SCALE:
326 if (chan->type == IIO_VOLTAGE) {
327 bool gain;
328
329 if (val == st->scale_avail[0][0] &&
330 val2 == st->scale_avail[0][1])
331 gain = false;
332 else if (val == st->scale_avail[1][0] &&
333 val2 == st->scale_avail[1][1])
334 gain = true;
335 else
336 return -EINVAL;
337
338 mutex_lock(&iio_dev->mlock);
339
340 ret = st->ops->reg_read(st, AD5592R_REG_CTRL,
341 &st->cached_gp_ctrl);
342 if (ret < 0) {
343 mutex_unlock(&iio_dev->mlock);
344 return ret;
345 }
346
347 if (chan->output) {
348 if (gain)
349 st->cached_gp_ctrl |=
350 AD5592R_REG_CTRL_DAC_RANGE;
351 else
352 st->cached_gp_ctrl &=
353 ~AD5592R_REG_CTRL_DAC_RANGE;
354 } else {
355 if (gain)
356 st->cached_gp_ctrl |=
357 AD5592R_REG_CTRL_ADC_RANGE;
358 else
359 st->cached_gp_ctrl &=
360 ~AD5592R_REG_CTRL_ADC_RANGE;
361 }
362
363 ret = st->ops->reg_write(st, AD5592R_REG_CTRL,
364 st->cached_gp_ctrl);
365 mutex_unlock(&iio_dev->mlock);
366
367 return ret;
368 }
369 break;
370 default:
371 return -EINVAL;
372 }
373
374 return 0;
375}
376
377static int ad5592r_read_raw(struct iio_dev *iio_dev,
378 struct iio_chan_spec const *chan,
379 int *val, int *val2, long m)
380{
381 struct ad5592r_state *st = iio_priv(iio_dev);
382 u16 read_val;
383 int ret;
384
385 switch (m) {
386 case IIO_CHAN_INFO_RAW:
387 mutex_lock(&iio_dev->mlock);
388
389 if (!chan->output) {
390 ret = st->ops->read_adc(st, chan->channel, &read_val);
391 if (ret)
392 goto unlock;
393
394 if ((read_val >> 12 & 0x7) != (chan->channel & 0x7)) {
395 dev_err(st->dev, "Error while reading channel %u\n",
396 chan->channel);
397 ret = -EIO;
398 goto unlock;
399 }
400
401 read_val &= GENMASK(11, 0);
402
403 } else {
404 read_val = st->cached_dac[chan->channel];
405 }
406
407 dev_dbg(st->dev, "Channel %u read: 0x%04hX\n",
408 chan->channel, read_val);
409
410 *val = (int) read_val;
411 ret = IIO_VAL_INT;
412 break;
413 case IIO_CHAN_INFO_SCALE:
414 *val = ad5592r_get_vref(st);
415
416 if (chan->type == IIO_TEMP) {
417 s64 tmp = *val * (3767897513LL / 25LL);
418 *val = div_s64_rem(tmp, 1000000000LL, val2);
419
420 ret = IIO_VAL_INT_PLUS_MICRO;
421 } else {
422 int mult;
423
424 mutex_lock(&iio_dev->mlock);
425
426 if (chan->output)
427 mult = !!(st->cached_gp_ctrl &
428 AD5592R_REG_CTRL_DAC_RANGE);
429 else
430 mult = !!(st->cached_gp_ctrl &
431 AD5592R_REG_CTRL_ADC_RANGE);
432
433 *val *= ++mult;
434
435 *val2 = chan->scan_type.realbits;
436 ret = IIO_VAL_FRACTIONAL_LOG2;
437 }
438 break;
439 case IIO_CHAN_INFO_OFFSET:
440 ret = ad5592r_get_vref(st);
441
442 mutex_lock(&iio_dev->mlock);
443
444 if (st->cached_gp_ctrl & AD5592R_REG_CTRL_ADC_RANGE)
445 *val = (-34365 * 25) / ret;
446 else
447 *val = (-75365 * 25) / ret;
448 ret = IIO_VAL_INT;
449 break;
450 default:
451 ret = -EINVAL;
452 }
453
454unlock:
455 mutex_unlock(&iio_dev->mlock);
456 return ret;
457}
458
459static int ad5592r_write_raw_get_fmt(struct iio_dev *indio_dev,
460 struct iio_chan_spec const *chan, long mask)
461{
462 switch (mask) {
463 case IIO_CHAN_INFO_SCALE:
464 return IIO_VAL_INT_PLUS_NANO;
465
466 default:
467 return IIO_VAL_INT_PLUS_MICRO;
468 }
469
470 return -EINVAL;
471}
472
473static const struct iio_info ad5592r_info = {
474 .read_raw = ad5592r_read_raw,
475 .write_raw = ad5592r_write_raw,
476 .write_raw_get_fmt = ad5592r_write_raw_get_fmt,
477 .driver_module = THIS_MODULE,
478};
479
480static ssize_t ad5592r_show_scale_available(struct iio_dev *iio_dev,
481 uintptr_t private,
482 const struct iio_chan_spec *chan,
483 char *buf)
484{
485 struct ad5592r_state *st = iio_priv(iio_dev);
486
487 return sprintf(buf, "%d.%09u %d.%09u\n",
488 st->scale_avail[0][0], st->scale_avail[0][1],
489 st->scale_avail[1][0], st->scale_avail[1][1]);
490}
491
492static struct iio_chan_spec_ext_info ad5592r_ext_info[] = {
493 {
494 .name = "scale_available",
495 .read = ad5592r_show_scale_available,
496 .shared = true,
497 },
498 {},
499};
500
501static void ad5592r_setup_channel(struct iio_dev *iio_dev,
502 struct iio_chan_spec *chan, bool output, unsigned id)
503{
504 chan->type = IIO_VOLTAGE;
505 chan->indexed = 1;
506 chan->output = output;
507 chan->channel = id;
508 chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
509 chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
510 chan->scan_type.sign = 'u';
511 chan->scan_type.realbits = 12;
512 chan->scan_type.storagebits = 16;
513 chan->ext_info = ad5592r_ext_info;
514}
515
516static int ad5592r_alloc_channels(struct ad5592r_state *st)
517{
518 unsigned i, curr_channel = 0,
519 num_channels = st->num_channels;
520 struct iio_dev *iio_dev = iio_priv_to_dev(st);
521 struct iio_chan_spec *channels;
522 struct fwnode_handle *child;
523 u32 reg, tmp;
524 int ret;
525
526 device_for_each_child_node(st->dev, child) {
527 ret = fwnode_property_read_u32(child, "reg", &reg);
528 if (ret || reg > ARRAY_SIZE(st->channel_modes))
529 continue;
530
531 ret = fwnode_property_read_u32(child, "adi,mode", &tmp);
532 if (!ret)
533 st->channel_modes[reg] = tmp;
534
535 fwnode_property_read_u32(child, "adi,off-state", &tmp);
536 if (!ret)
537 st->channel_offstate[reg] = tmp;
538 }
539
540 channels = devm_kzalloc(st->dev,
541 (1 + 2 * num_channels) * sizeof(*channels), GFP_KERNEL);
542 if (!channels)
543 return -ENOMEM;
544
545 for (i = 0; i < num_channels; i++) {
546 switch (st->channel_modes[i]) {
547 case CH_MODE_DAC:
548 ad5592r_setup_channel(iio_dev, &channels[curr_channel],
549 true, i);
550 curr_channel++;
551 break;
552
553 case CH_MODE_ADC:
554 ad5592r_setup_channel(iio_dev, &channels[curr_channel],
555 false, i);
556 curr_channel++;
557 break;
558
559 case CH_MODE_DAC_AND_ADC:
560 ad5592r_setup_channel(iio_dev, &channels[curr_channel],
561 true, i);
562 curr_channel++;
563 ad5592r_setup_channel(iio_dev, &channels[curr_channel],
564 false, i);
565 curr_channel++;
566 break;
567
568 default:
569 continue;
570 }
571 }
572
573 channels[curr_channel].type = IIO_TEMP;
574 channels[curr_channel].channel = 8;
575 channels[curr_channel].info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
576 BIT(IIO_CHAN_INFO_SCALE) |
577 BIT(IIO_CHAN_INFO_OFFSET);
578 curr_channel++;
579
580 iio_dev->num_channels = curr_channel;
581 iio_dev->channels = channels;
582
583 return 0;
584}
585
586static void ad5592r_init_scales(struct ad5592r_state *st, int vref_mV)
587{
588 s64 tmp = (s64)vref_mV * 1000000000LL >> 12;
589
590 st->scale_avail[0][0] =
591 div_s64_rem(tmp, 1000000000LL, &st->scale_avail[0][1]);
592 st->scale_avail[1][0] =
593 div_s64_rem(tmp * 2, 1000000000LL, &st->scale_avail[1][1]);
594}
595
596int ad5592r_probe(struct device *dev, const char *name,
597 const struct ad5592r_rw_ops *ops)
598{
599 struct iio_dev *iio_dev;
600 struct ad5592r_state *st;
601 int ret;
602
603 iio_dev = devm_iio_device_alloc(dev, sizeof(*st));
604 if (!iio_dev)
605 return -ENOMEM;
606
607 st = iio_priv(iio_dev);
608 st->dev = dev;
609 st->ops = ops;
610 st->num_channels = 8;
611 dev_set_drvdata(dev, iio_dev);
612
613 st->reg = devm_regulator_get_optional(dev, "vref");
614 if (IS_ERR(st->reg)) {
615 if ((PTR_ERR(st->reg) != -ENODEV) && dev->of_node)
616 return PTR_ERR(st->reg);
617
618 st->reg = NULL;
619 } else {
620 ret = regulator_enable(st->reg);
621 if (ret)
622 return ret;
623 }
624
625 iio_dev->dev.parent = dev;
626 iio_dev->name = name;
627 iio_dev->info = &ad5592r_info;
628 iio_dev->modes = INDIO_DIRECT_MODE;
629
630 ad5592r_init_scales(st, ad5592r_get_vref(st));
631
632 ret = ad5592r_reset(st);
633 if (ret)
634 goto error_disable_reg;
635
636 ret = ops->reg_write(st, AD5592R_REG_PD,
637 (st->reg == NULL) ? AD5592R_REG_PD_EN_REF : 0);
638 if (ret)
639 goto error_disable_reg;
640
641 ret = ad5592r_alloc_channels(st);
642 if (ret)
643 goto error_disable_reg;
644
645 ret = ad5592r_set_channel_modes(st);
646 if (ret)
647 goto error_reset_ch_modes;
648
649 ret = iio_device_register(iio_dev);
650 if (ret)
651 goto error_reset_ch_modes;
652
653 ret = ad5592r_gpio_init(st);
654 if (ret)
655 goto error_dev_unregister;
656
657 return 0;
658
659error_dev_unregister:
660 iio_device_unregister(iio_dev);
661
662error_reset_ch_modes:
663 ad5592r_reset_channel_modes(st);
664
665error_disable_reg:
666 if (st->reg)
667 regulator_disable(st->reg);
668
669 return ret;
670}
671EXPORT_SYMBOL_GPL(ad5592r_probe);
672
673int ad5592r_remove(struct device *dev)
674{
675 struct iio_dev *iio_dev = dev_get_drvdata(dev);
676 struct ad5592r_state *st = iio_priv(iio_dev);
677
678 iio_device_unregister(iio_dev);
679 ad5592r_reset_channel_modes(st);
680 ad5592r_gpio_cleanup(st);
681
682 if (st->reg)
683 regulator_disable(st->reg);
684
685 return 0;
686}
687EXPORT_SYMBOL_GPL(ad5592r_remove);
688
689MODULE_AUTHOR("Paul Cercueil <paul.cercueil@analog.com>");
690MODULE_DESCRIPTION("Analog Devices AD5592R multi-channel converters");
691MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5592r-base.h b/drivers/iio/dac/ad5592r-base.h
new file mode 100644
index 000000000000..841457e93f85
--- /dev/null
+++ b/drivers/iio/dac/ad5592r-base.h
@@ -0,0 +1,76 @@
1/*
2 * AD5592R / AD5593R Digital <-> Analog converters driver
3 *
4 * Copyright 2015-2016 Analog Devices Inc.
5 * Author: Paul Cercueil <paul.cercueil@analog.com>
6 *
7 * Licensed under the GPL-2.
8 */
9
10#ifndef __DRIVERS_IIO_DAC_AD5592R_BASE_H__
11#define __DRIVERS_IIO_DAC_AD5592R_BASE_H__
12
13#include <linux/types.h>
14#include <linux/cache.h>
15#include <linux/mutex.h>
16#include <linux/gpio/driver.h>
17
18struct device;
19struct ad5592r_state;
20
/*
 * On-chip register/command addresses. The bus-specific glue places the
 * address in the upper bits of each 16-bit transfer.
 */
enum ad5592r_registers {
	AD5592R_REG_NOOP = 0x0,
	AD5592R_REG_DAC_READBACK = 0x1,
	AD5592R_REG_ADC_SEQ = 0x2,		/* ADC channel sequence selection */
	AD5592R_REG_CTRL = 0x3,
	AD5592R_REG_ADC_EN = 0x4,
	AD5592R_REG_DAC_EN = 0x5,
	AD5592R_REG_PULLDOWN = 0x6,
	AD5592R_REG_LDAC = 0x7,
	AD5592R_REG_GPIO_OUT_EN = 0x8,
	AD5592R_REG_GPIO_SET = 0x9,
	AD5592R_REG_GPIO_IN_EN = 0xA,
	AD5592R_REG_PD = 0xB,			/* power-down / internal ref enable */
	AD5592R_REG_OPEN_DRAIN = 0xC,
	AD5592R_REG_TRISTATE = 0xD,
	AD5592R_REG_RESET = 0xF,
};

/* Bit in AD5592R_REG_PD enabling the internal reference. */
#define AD5592R_REG_PD_EN_REF BIT(9)
#define AD5592R_REG_CTRL_ADC_RANGE BIT(5)
#define AD5592R_REG_CTRL_DAC_RANGE BIT(4)

/*
 * Bus-specific (SPI vs I2C) accessors; filled in by ad5592r.c/ad5593r.c
 * and passed to ad5592r_probe().
 */
struct ad5592r_rw_ops {
	int (*write_dac)(struct ad5592r_state *st, unsigned chan, u16 value);
	int (*read_adc)(struct ad5592r_state *st, unsigned chan, u16 *value);
	int (*reg_write)(struct ad5592r_state *st, u8 reg, u16 value);
	int (*reg_read)(struct ad5592r_state *st, u8 reg, u16 *value);
	int (*gpio_read)(struct ad5592r_state *st, u8 *value);
};

struct ad5592r_state {
	struct device *dev;		/* underlying SPI or I2C device */
	struct regulator *reg;		/* external vref, NULL if internal ref is used */
	struct gpio_chip gpiochip;
	struct mutex gpio_lock;	/* Protect cached gpio_out, gpio_val, etc. */
	unsigned int num_channels;
	const struct ad5592r_rw_ops *ops;	/* bus-specific accessors */
	int scale_avail[2][2];
	u16 cached_dac[8];		/* last value written per DAC channel */
	u16 cached_gp_ctrl;
	u8 channel_modes[8];
	u8 channel_offstate[8];
	u8 gpio_map;
	u8 gpio_out;
	u8 gpio_in;
	u8 gpio_val;

	/* DMA-safe transfer buffers; must not share a cacheline with the above. */
	__be16 spi_msg ____cacheline_aligned;
	__be16 spi_msg_nop;
};
71
72int ad5592r_probe(struct device *dev, const char *name,
73 const struct ad5592r_rw_ops *ops);
74int ad5592r_remove(struct device *dev);
75
76#endif /* __DRIVERS_IIO_DAC_AD5592R_BASE_H__ */
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
new file mode 100644
index 000000000000..0b235a2c7359
--- /dev/null
+++ b/drivers/iio/dac/ad5592r.c
@@ -0,0 +1,164 @@
1/*
2 * AD5592R Digital <-> Analog converters driver
3 *
4 * Copyright 2015-2016 Analog Devices Inc.
5 * Author: Paul Cercueil <paul.cercueil@analog.com>
6 *
7 * Licensed under the GPL-2.
8 */
9
10#include "ad5592r-base.h"
11
12#include <linux/bitops.h>
13#include <linux/module.h>
14#include <linux/of.h>
15#include <linux/spi/spi.h>
16
17#define AD5592R_GPIO_READBACK_EN BIT(10)
18#define AD5592R_LDAC_READBACK_EN BIT(6)
19
20static int ad5592r_spi_wnop_r16(struct ad5592r_state *st, u16 *buf)
21{
22 struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
23 struct spi_transfer t = {
24 .tx_buf = &st->spi_msg_nop,
25 .rx_buf = buf,
26 .len = 2
27 };
28
29 st->spi_msg_nop = 0; /* NOP */
30
31 return spi_sync_transfer(spi, &t, 1);
32}
33
34static int ad5592r_write_dac(struct ad5592r_state *st, unsigned chan, u16 value)
35{
36 struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
37
38 st->spi_msg = cpu_to_be16(BIT(15) | (chan << 12) | value);
39
40 return spi_write(spi, &st->spi_msg, sizeof(st->spi_msg));
41}
42
43static int ad5592r_read_adc(struct ad5592r_state *st, unsigned chan, u16 *value)
44{
45 struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
46 int ret;
47
48 st->spi_msg = cpu_to_be16((AD5592R_REG_ADC_SEQ << 11) | BIT(chan));
49
50 ret = spi_write(spi, &st->spi_msg, sizeof(st->spi_msg));
51 if (ret)
52 return ret;
53
54 /*
55 * Invalid data:
56 * See Figure 40. Single-Channel ADC Conversion Sequence
57 */
58 ret = ad5592r_spi_wnop_r16(st, &st->spi_msg);
59 if (ret)
60 return ret;
61
62 ret = ad5592r_spi_wnop_r16(st, &st->spi_msg);
63 if (ret)
64 return ret;
65
66 *value = be16_to_cpu(st->spi_msg);
67
68 return 0;
69}
70
71static int ad5592r_reg_write(struct ad5592r_state *st, u8 reg, u16 value)
72{
73 struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
74
75 st->spi_msg = cpu_to_be16((reg << 11) | value);
76
77 return spi_write(spi, &st->spi_msg, sizeof(st->spi_msg));
78}
79
80static int ad5592r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value)
81{
82 struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
83 int ret;
84
85 st->spi_msg = cpu_to_be16((AD5592R_REG_LDAC << 11) |
86 AD5592R_LDAC_READBACK_EN | (reg << 2));
87
88 ret = spi_write(spi, &st->spi_msg, sizeof(st->spi_msg));
89 if (ret)
90 return ret;
91
92 ret = ad5592r_spi_wnop_r16(st, &st->spi_msg);
93 if (ret)
94 return ret;
95
96 *value = be16_to_cpu(st->spi_msg);
97
98 return 0;
99}
100
101static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value)
102{
103 int ret;
104
105 ret = ad5592r_reg_write(st, AD5592R_REG_GPIO_IN_EN,
106 AD5592R_GPIO_READBACK_EN | st->gpio_in);
107 if (ret)
108 return ret;
109
110 ret = ad5592r_spi_wnop_r16(st, &st->spi_msg);
111 if (ret)
112 return ret;
113
114 *value = (u8) be16_to_cpu(st->spi_msg);
115
116 return 0;
117}
118
119static const struct ad5592r_rw_ops ad5592r_rw_ops = {
120 .write_dac = ad5592r_write_dac,
121 .read_adc = ad5592r_read_adc,
122 .reg_write = ad5592r_reg_write,
123 .reg_read = ad5592r_reg_read,
124 .gpio_read = ad5593r_gpio_read,
125};
126
127static int ad5592r_spi_probe(struct spi_device *spi)
128{
129 const struct spi_device_id *id = spi_get_device_id(spi);
130
131 return ad5592r_probe(&spi->dev, id->name, &ad5592r_rw_ops);
132}
133
/* Thin wrapper delegating teardown to the bus-agnostic core. */
static int ad5592r_spi_remove(struct spi_device *spi)
{
	return ad5592r_remove(&spi->dev);
}
138
139static const struct spi_device_id ad5592r_spi_ids[] = {
140 { .name = "ad5592r", },
141 {}
142};
143MODULE_DEVICE_TABLE(spi, ad5592r_spi_ids);
144
145static const struct of_device_id ad5592r_of_match[] = {
146 { .compatible = "adi,ad5592r", },
147 {},
148};
149MODULE_DEVICE_TABLE(of, ad5592r_of_match);
150
151static struct spi_driver ad5592r_spi_driver = {
152 .driver = {
153 .name = "ad5592r",
154 .of_match_table = of_match_ptr(ad5592r_of_match),
155 },
156 .probe = ad5592r_spi_probe,
157 .remove = ad5592r_spi_remove,
158 .id_table = ad5592r_spi_ids,
159};
160module_spi_driver(ad5592r_spi_driver);
161
162MODULE_AUTHOR("Paul Cercueil <paul.cercueil@analog.com>");
163MODULE_DESCRIPTION("Analog Devices AD5592R multi-channel converters");
164MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
new file mode 100644
index 000000000000..dca158a88f47
--- /dev/null
+++ b/drivers/iio/dac/ad5593r.c
@@ -0,0 +1,131 @@
1/*
2 * AD5593R Digital <-> Analog converters driver
3 *
4 * Copyright 2015-2016 Analog Devices Inc.
5 * Author: Paul Cercueil <paul.cercueil@analog.com>
6 *
7 * Licensed under the GPL-2.
8 */
9
10#include "ad5592r-base.h"
11
12#include <linux/bitops.h>
13#include <linux/i2c.h>
14#include <linux/module.h>
15#include <linux/of.h>
16
17#define AD5593R_MODE_CONF (0 << 4)
18#define AD5593R_MODE_DAC_WRITE (1 << 4)
19#define AD5593R_MODE_ADC_READBACK (4 << 4)
20#define AD5593R_MODE_DAC_READBACK (5 << 4)
21#define AD5593R_MODE_GPIO_READBACK (6 << 4)
22#define AD5593R_MODE_REG_READBACK (7 << 4)
23
/*
 * Write a raw DAC value over I2C; the pointer byte carries the DAC-write
 * mode in its upper nibble and the channel in its lower nibble.
 */
static int ad5593r_write_dac(struct ad5592r_state *st, unsigned chan, u16 value)
{
	struct i2c_client *i2c = to_i2c_client(st->dev);

	return i2c_smbus_write_word_swapped(i2c,
			AD5593R_MODE_DAC_WRITE | chan, value);
}
31
32static int ad5593r_read_adc(struct ad5592r_state *st, unsigned chan, u16 *value)
33{
34 struct i2c_client *i2c = to_i2c_client(st->dev);
35 s32 val;
36
37 val = i2c_smbus_write_word_swapped(i2c,
38 AD5593R_MODE_CONF | AD5592R_REG_ADC_SEQ, BIT(chan));
39 if (val < 0)
40 return (int) val;
41
42 val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_ADC_READBACK);
43 if (val < 0)
44 return (int) val;
45
46 *value = (u16) val;
47
48 return 0;
49}
50
/* Write @value to configuration register @reg via the CONF pointer mode. */
static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value)
{
	struct i2c_client *i2c = to_i2c_client(st->dev);

	return i2c_smbus_write_word_swapped(i2c,
			AD5593R_MODE_CONF | reg, value);
}
58
59static int ad5593r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value)
60{
61 struct i2c_client *i2c = to_i2c_client(st->dev);
62 s32 val;
63
64 val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_REG_READBACK | reg);
65 if (val < 0)
66 return (int) val;
67
68 *value = (u16) val;
69
70 return 0;
71}
72
/* Read the GPIO input levels via the GPIO-readback pointer mode. */
static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value)
{
	struct i2c_client *i2c = to_i2c_client(st->dev);
	s32 val;

	val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_GPIO_READBACK);
	if (val < 0)
		return (int) val;

	/* Pin levels are in the low byte of the returned word. */
	*value = (u8) val;

	return 0;
}
86
87static const struct ad5592r_rw_ops ad5593r_rw_ops = {
88 .write_dac = ad5593r_write_dac,
89 .read_adc = ad5593r_read_adc,
90 .reg_write = ad5593r_reg_write,
91 .reg_read = ad5593r_reg_read,
92 .gpio_read = ad5593r_gpio_read,
93};
94
/*
 * I2C probe: delegate to the bus-agnostic core with the I2C ops table.
 * NOTE(review): id may be NULL for pure-OF instantiation with no matching
 * id-table entry — confirm before dereferencing id->name unconditionally.
 */
static int ad5593r_i2c_probe(struct i2c_client *i2c,
		const struct i2c_device_id *id)
{
	return ad5592r_probe(&i2c->dev, id->name, &ad5593r_rw_ops);
}
100
/* Thin wrapper delegating teardown to the bus-agnostic core. */
static int ad5593r_i2c_remove(struct i2c_client *i2c)
{
	return ad5592r_remove(&i2c->dev);
}
105
106static const struct i2c_device_id ad5593r_i2c_ids[] = {
107 { .name = "ad5593r", },
108 {},
109};
110MODULE_DEVICE_TABLE(i2c, ad5593r_i2c_ids);
111
112static const struct of_device_id ad5593r_of_match[] = {
113 { .compatible = "adi,ad5593r", },
114 {},
115};
116MODULE_DEVICE_TABLE(of, ad5593r_of_match);
117
118static struct i2c_driver ad5593r_driver = {
119 .driver = {
120 .name = "ad5593r",
121 .of_match_table = of_match_ptr(ad5593r_of_match),
122 },
123 .probe = ad5593r_i2c_probe,
124 .remove = ad5593r_i2c_remove,
125 .id_table = ad5593r_i2c_ids,
126};
127module_i2c_driver(ad5593r_driver);
128
129MODULE_AUTHOR("Paul Cercueil <paul.cercueil@analog.com>");
130MODULE_DESCRIPTION("Analog Devices AD5592R multi-channel converters");
131MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c
new file mode 100644
index 000000000000..55d1456a059d
--- /dev/null
+++ b/drivers/iio/dac/lpc18xx_dac.c
@@ -0,0 +1,210 @@
1/*
2 * IIO DAC driver for NXP LPC18xx DAC
3 *
4 * Copyright (C) 2016 Joachim Eastwood <manabian@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * UNSUPPORTED hardware features:
11 * - Interrupts
12 * - DMA
13 */
14
15#include <linux/clk.h>
16#include <linux/err.h>
17#include <linux/iio/iio.h>
18#include <linux/iio/driver.h>
19#include <linux/io.h>
20#include <linux/iopoll.h>
21#include <linux/module.h>
22#include <linux/mutex.h>
23#include <linux/of.h>
24#include <linux/of_device.h>
25#include <linux/platform_device.h>
26#include <linux/regulator/consumer.h>
27
28/* LPC18XX DAC registers and bits */
29#define LPC18XX_DAC_CR 0x000
30#define LPC18XX_DAC_CR_VALUE_SHIFT 6
31#define LPC18XX_DAC_CR_VALUE_MASK 0x3ff
32#define LPC18XX_DAC_CR_BIAS BIT(16)
33#define LPC18XX_DAC_CTRL 0x004
34#define LPC18XX_DAC_CTRL_DMA_ENA BIT(3)
35
/* Per-device state for the LPC18xx DAC. */
struct lpc18xx_dac {
	struct regulator *vref;	/* reference voltage supply, sets the scale */
	void __iomem *base;	/* mapped DAC register block */
	struct mutex lock;	/* serializes the CR + CTRL write pair */
	struct clk *clk;
};

/* Single 10-bit output voltage channel. */
static const struct iio_chan_spec lpc18xx_dac_iio_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.output = 1,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
				      BIT(IIO_CHAN_INFO_SCALE),
	},
};
51
52static int lpc18xx_dac_read_raw(struct iio_dev *indio_dev,
53 struct iio_chan_spec const *chan,
54 int *val, int *val2, long mask)
55{
56 struct lpc18xx_dac *dac = iio_priv(indio_dev);
57 u32 reg;
58
59 switch (mask) {
60 case IIO_CHAN_INFO_RAW:
61 reg = readl(dac->base + LPC18XX_DAC_CR);
62 *val = reg >> LPC18XX_DAC_CR_VALUE_SHIFT;
63 *val &= LPC18XX_DAC_CR_VALUE_MASK;
64
65 return IIO_VAL_INT;
66
67 case IIO_CHAN_INFO_SCALE:
68 *val = regulator_get_voltage(dac->vref) / 1000;
69 *val2 = 10;
70
71 return IIO_VAL_FRACTIONAL_LOG2;
72 }
73
74 return -EINVAL;
75}
76
77static int lpc18xx_dac_write_raw(struct iio_dev *indio_dev,
78 struct iio_chan_spec const *chan,
79 int val, int val2, long mask)
80{
81 struct lpc18xx_dac *dac = iio_priv(indio_dev);
82 u32 reg;
83
84 switch (mask) {
85 case IIO_CHAN_INFO_RAW:
86 if (val < 0 || val > LPC18XX_DAC_CR_VALUE_MASK)
87 return -EINVAL;
88
89 reg = LPC18XX_DAC_CR_BIAS;
90 reg |= val << LPC18XX_DAC_CR_VALUE_SHIFT;
91
92 mutex_lock(&dac->lock);
93 writel(reg, dac->base + LPC18XX_DAC_CR);
94 writel(LPC18XX_DAC_CTRL_DMA_ENA, dac->base + LPC18XX_DAC_CTRL);
95 mutex_unlock(&dac->lock);
96
97 return 0;
98 }
99
100 return -EINVAL;
101}
102
103static const struct iio_info lpc18xx_dac_info = {
104 .read_raw = lpc18xx_dac_read_raw,
105 .write_raw = lpc18xx_dac_write_raw,
106 .driver_module = THIS_MODULE,
107};
108
/*
 * Probe: map registers, acquire clock and vref regulator, enable both,
 * reset the DAC registers, and register the IIO device. Uses goto-based
 * cleanup so each acquired resource is released on later failures.
 */
static int lpc18xx_dac_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct lpc18xx_dac *dac;
	struct resource *res;
	int ret;

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*dac));
	if (!indio_dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, indio_dev);
	dac = iio_priv(indio_dev);
	mutex_init(&dac->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dac->base))
		return PTR_ERR(dac->base);

	dac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dac->clk)) {
		dev_err(&pdev->dev, "error getting clock\n");
		return PTR_ERR(dac->clk);
	}

	dac->vref = devm_regulator_get(&pdev->dev, "vref");
	if (IS_ERR(dac->vref)) {
		dev_err(&pdev->dev, "error getting regulator\n");
		return PTR_ERR(dac->vref);
	}

	indio_dev->name = dev_name(&pdev->dev);
	indio_dev->dev.parent = &pdev->dev;
	indio_dev->info = &lpc18xx_dac_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = lpc18xx_dac_iio_channels;
	indio_dev->num_channels = ARRAY_SIZE(lpc18xx_dac_iio_channels);

	/* From here on, failures must undo enables — see the goto labels. */
	ret = regulator_enable(dac->vref);
	if (ret) {
		dev_err(&pdev->dev, "unable to enable regulator\n");
		return ret;
	}

	ret = clk_prepare_enable(dac->clk);
	if (ret) {
		dev_err(&pdev->dev, "unable to enable clock\n");
		goto dis_reg;
	}

	/* Start from a known state: DMA off, output code zero. */
	writel(0, dac->base + LPC18XX_DAC_CTRL);
	writel(0, dac->base + LPC18XX_DAC_CR);

	ret = iio_device_register(indio_dev);
	if (ret) {
		dev_err(&pdev->dev, "unable to register device\n");
		goto dis_clk;
	}

	return 0;

dis_clk:
	clk_disable_unprepare(dac->clk);
dis_reg:
	regulator_disable(dac->vref);
	return ret;
}
177
/* Remove: unregister from IIO, quiesce the DAC, release clock and vref. */
static int lpc18xx_dac_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct lpc18xx_dac *dac = iio_priv(indio_dev);

	/* Unregister first so userspace cannot touch the hardware below. */
	iio_device_unregister(indio_dev);

	writel(0, dac->base + LPC18XX_DAC_CTRL);
	clk_disable_unprepare(dac->clk);
	regulator_disable(dac->vref);

	return 0;
}
191
192static const struct of_device_id lpc18xx_dac_match[] = {
193 { .compatible = "nxp,lpc1850-dac" },
194 { /* sentinel */ }
195};
196MODULE_DEVICE_TABLE(of, lpc18xx_dac_match);
197
198static struct platform_driver lpc18xx_dac_driver = {
199 .probe = lpc18xx_dac_probe,
200 .remove = lpc18xx_dac_remove,
201 .driver = {
202 .name = "lpc18xx-dac",
203 .of_match_table = lpc18xx_dac_match,
204 },
205};
206module_platform_driver(lpc18xx_dac_driver);
207
208MODULE_DESCRIPTION("LPC18xx DAC driver");
209MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
210MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 44a30f286de1..99eba524f6dd 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -284,7 +284,7 @@ struct ad9523_state {
284 } data[2] ____cacheline_aligned; 284 } data[2] ____cacheline_aligned;
285}; 285};
286 286
287static int ad9523_read(struct iio_dev *indio_dev, unsigned addr) 287static int ad9523_read(struct iio_dev *indio_dev, unsigned int addr)
288{ 288{
289 struct ad9523_state *st = iio_priv(indio_dev); 289 struct ad9523_state *st = iio_priv(indio_dev);
290 int ret; 290 int ret;
@@ -318,7 +318,8 @@ static int ad9523_read(struct iio_dev *indio_dev, unsigned addr)
318 return ret; 318 return ret;
319}; 319};
320 320
321static int ad9523_write(struct iio_dev *indio_dev, unsigned addr, unsigned val) 321static int ad9523_write(struct iio_dev *indio_dev,
322 unsigned int addr, unsigned int val)
322{ 323{
323 struct ad9523_state *st = iio_priv(indio_dev); 324 struct ad9523_state *st = iio_priv(indio_dev);
324 int ret; 325 int ret;
@@ -351,11 +352,11 @@ static int ad9523_io_update(struct iio_dev *indio_dev)
351} 352}
352 353
353static int ad9523_vco_out_map(struct iio_dev *indio_dev, 354static int ad9523_vco_out_map(struct iio_dev *indio_dev,
354 unsigned ch, unsigned out) 355 unsigned int ch, unsigned int out)
355{ 356{
356 struct ad9523_state *st = iio_priv(indio_dev); 357 struct ad9523_state *st = iio_priv(indio_dev);
357 int ret; 358 int ret;
358 unsigned mask; 359 unsigned int mask;
359 360
360 switch (ch) { 361 switch (ch) {
361 case 0 ... 3: 362 case 0 ... 3:
@@ -405,7 +406,7 @@ static int ad9523_vco_out_map(struct iio_dev *indio_dev,
405} 406}
406 407
407static int ad9523_set_clock_provider(struct iio_dev *indio_dev, 408static int ad9523_set_clock_provider(struct iio_dev *indio_dev,
408 unsigned ch, unsigned long freq) 409 unsigned int ch, unsigned long freq)
409{ 410{
410 struct ad9523_state *st = iio_priv(indio_dev); 411 struct ad9523_state *st = iio_priv(indio_dev);
411 long tmp1, tmp2; 412 long tmp1, tmp2;
@@ -619,7 +620,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
619 long m) 620 long m)
620{ 621{
621 struct ad9523_state *st = iio_priv(indio_dev); 622 struct ad9523_state *st = iio_priv(indio_dev);
622 unsigned code; 623 unsigned int code;
623 int ret; 624 int ret;
624 625
625 mutex_lock(&indio_dev->mlock); 626 mutex_lock(&indio_dev->mlock);
@@ -655,7 +656,7 @@ static int ad9523_write_raw(struct iio_dev *indio_dev,
655 long mask) 656 long mask)
656{ 657{
657 struct ad9523_state *st = iio_priv(indio_dev); 658 struct ad9523_state *st = iio_priv(indio_dev);
658 unsigned reg; 659 unsigned int reg;
659 int ret, tmp, code; 660 int ret, tmp, code;
660 661
661 mutex_lock(&indio_dev->mlock); 662 mutex_lock(&indio_dev->mlock);
@@ -709,8 +710,8 @@ out:
709} 710}
710 711
711static int ad9523_reg_access(struct iio_dev *indio_dev, 712static int ad9523_reg_access(struct iio_dev *indio_dev,
712 unsigned reg, unsigned writeval, 713 unsigned int reg, unsigned int writeval,
713 unsigned *readval) 714 unsigned int *readval)
714{ 715{
715 int ret; 716 int ret;
716 717
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index e816d29d6a62..205a84420ae9 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -93,7 +93,7 @@ config IIO_ST_GYRO_3AXIS
93 select IIO_TRIGGERED_BUFFER if (IIO_BUFFER) 93 select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
94 help 94 help
95 Say yes here to build support for STMicroelectronics gyroscopes: 95 Say yes here to build support for STMicroelectronics gyroscopes:
96 L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330. 96 L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330, LSM9DS0.
97 97
98 This driver can also be built as a module. If so, these modules 98 This driver can also be built as a module. If so, these modules
99 will be created: 99 will be created:
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 4dac567e75b4..7ccc044063f6 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -17,7 +17,6 @@
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/acpi.h> 19#include <linux/acpi.h>
20#include <linux/gpio/consumer.h>
21#include <linux/pm.h> 20#include <linux/pm.h>
22#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
23#include <linux/iio/iio.h> 22#include <linux/iio/iio.h>
@@ -31,7 +30,6 @@
31#include "bmg160.h" 30#include "bmg160.h"
32 31
33#define BMG160_IRQ_NAME "bmg160_event" 32#define BMG160_IRQ_NAME "bmg160_event"
34#define BMG160_GPIO_NAME "gpio_int"
35 33
36#define BMG160_REG_CHIP_ID 0x00 34#define BMG160_REG_CHIP_ID 0x00
37#define BMG160_CHIP_ID_VAL 0x0F 35#define BMG160_CHIP_ID_VAL 0x0F
@@ -97,7 +95,6 @@
97#define BMG160_AUTO_SUSPEND_DELAY_MS 2000 95#define BMG160_AUTO_SUSPEND_DELAY_MS 2000
98 96
99struct bmg160_data { 97struct bmg160_data {
100 struct device *dev;
101 struct regmap *regmap; 98 struct regmap *regmap;
102 struct iio_trigger *dready_trig; 99 struct iio_trigger *dready_trig;
103 struct iio_trigger *motion_trig; 100 struct iio_trigger *motion_trig;
@@ -116,6 +113,7 @@ enum bmg160_axis {
116 AXIS_X, 113 AXIS_X,
117 AXIS_Y, 114 AXIS_Y,
118 AXIS_Z, 115 AXIS_Z,
116 AXIS_MAX,
119}; 117};
120 118
121static const struct { 119static const struct {
@@ -138,11 +136,12 @@ static const struct {
138 136
139static int bmg160_set_mode(struct bmg160_data *data, u8 mode) 137static int bmg160_set_mode(struct bmg160_data *data, u8 mode)
140{ 138{
139 struct device *dev = regmap_get_device(data->regmap);
141 int ret; 140 int ret;
142 141
143 ret = regmap_write(data->regmap, BMG160_REG_PMU_LPW, mode); 142 ret = regmap_write(data->regmap, BMG160_REG_PMU_LPW, mode);
144 if (ret < 0) { 143 if (ret < 0) {
145 dev_err(data->dev, "Error writing reg_pmu_lpw\n"); 144 dev_err(dev, "Error writing reg_pmu_lpw\n");
146 return ret; 145 return ret;
147 } 146 }
148 147
@@ -163,6 +162,7 @@ static int bmg160_convert_freq_to_bit(int val)
163 162
164static int bmg160_set_bw(struct bmg160_data *data, int val) 163static int bmg160_set_bw(struct bmg160_data *data, int val)
165{ 164{
165 struct device *dev = regmap_get_device(data->regmap);
166 int ret; 166 int ret;
167 int bw_bits; 167 int bw_bits;
168 168
@@ -172,7 +172,7 @@ static int bmg160_set_bw(struct bmg160_data *data, int val)
172 172
173 ret = regmap_write(data->regmap, BMG160_REG_PMU_BW, bw_bits); 173 ret = regmap_write(data->regmap, BMG160_REG_PMU_BW, bw_bits);
174 if (ret < 0) { 174 if (ret < 0) {
175 dev_err(data->dev, "Error writing reg_pmu_bw\n"); 175 dev_err(dev, "Error writing reg_pmu_bw\n");
176 return ret; 176 return ret;
177 } 177 }
178 178
@@ -183,18 +183,19 @@ static int bmg160_set_bw(struct bmg160_data *data, int val)
183 183
184static int bmg160_chip_init(struct bmg160_data *data) 184static int bmg160_chip_init(struct bmg160_data *data)
185{ 185{
186 struct device *dev = regmap_get_device(data->regmap);
186 int ret; 187 int ret;
187 unsigned int val; 188 unsigned int val;
188 189
189 ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val); 190 ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
190 if (ret < 0) { 191 if (ret < 0) {
191 dev_err(data->dev, "Error reading reg_chip_id\n"); 192 dev_err(dev, "Error reading reg_chip_id\n");
192 return ret; 193 return ret;
193 } 194 }
194 195
195 dev_dbg(data->dev, "Chip Id %x\n", val); 196 dev_dbg(dev, "Chip Id %x\n", val);
196 if (val != BMG160_CHIP_ID_VAL) { 197 if (val != BMG160_CHIP_ID_VAL) {
197 dev_err(data->dev, "invalid chip %x\n", val); 198 dev_err(dev, "invalid chip %x\n", val);
198 return -ENODEV; 199 return -ENODEV;
199 } 200 }
200 201
@@ -213,14 +214,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
213 /* Set Default Range */ 214 /* Set Default Range */
214 ret = regmap_write(data->regmap, BMG160_REG_RANGE, BMG160_RANGE_500DPS); 215 ret = regmap_write(data->regmap, BMG160_REG_RANGE, BMG160_RANGE_500DPS);
215 if (ret < 0) { 216 if (ret < 0) {
216 dev_err(data->dev, "Error writing reg_range\n"); 217 dev_err(dev, "Error writing reg_range\n");
217 return ret; 218 return ret;
218 } 219 }
219 data->dps_range = BMG160_RANGE_500DPS; 220 data->dps_range = BMG160_RANGE_500DPS;
220 221
221 ret = regmap_read(data->regmap, BMG160_REG_SLOPE_THRES, &val); 222 ret = regmap_read(data->regmap, BMG160_REG_SLOPE_THRES, &val);
222 if (ret < 0) { 223 if (ret < 0) {
223 dev_err(data->dev, "Error reading reg_slope_thres\n"); 224 dev_err(dev, "Error reading reg_slope_thres\n");
224 return ret; 225 return ret;
225 } 226 }
226 data->slope_thres = val; 227 data->slope_thres = val;
@@ -229,7 +230,7 @@ static int bmg160_chip_init(struct bmg160_data *data)
229 ret = regmap_update_bits(data->regmap, BMG160_REG_INT_EN_1, 230 ret = regmap_update_bits(data->regmap, BMG160_REG_INT_EN_1,
230 BMG160_INT1_BIT_OD, 0); 231 BMG160_INT1_BIT_OD, 0);
231 if (ret < 0) { 232 if (ret < 0) {
232 dev_err(data->dev, "Error updating bits in reg_int_en_1\n"); 233 dev_err(dev, "Error updating bits in reg_int_en_1\n");
233 return ret; 234 return ret;
234 } 235 }
235 236
@@ -237,7 +238,7 @@ static int bmg160_chip_init(struct bmg160_data *data)
237 BMG160_INT_MODE_LATCH_INT | 238 BMG160_INT_MODE_LATCH_INT |
238 BMG160_INT_MODE_LATCH_RESET); 239 BMG160_INT_MODE_LATCH_RESET);
239 if (ret < 0) { 240 if (ret < 0) {
240 dev_err(data->dev, 241 dev_err(dev,
241 "Error writing reg_motion_intr\n"); 242 "Error writing reg_motion_intr\n");
242 return ret; 243 return ret;
243 } 244 }
@@ -248,20 +249,21 @@ static int bmg160_chip_init(struct bmg160_data *data)
248static int bmg160_set_power_state(struct bmg160_data *data, bool on) 249static int bmg160_set_power_state(struct bmg160_data *data, bool on)
249{ 250{
250#ifdef CONFIG_PM 251#ifdef CONFIG_PM
252 struct device *dev = regmap_get_device(data->regmap);
251 int ret; 253 int ret;
252 254
253 if (on) 255 if (on)
254 ret = pm_runtime_get_sync(data->dev); 256 ret = pm_runtime_get_sync(dev);
255 else { 257 else {
256 pm_runtime_mark_last_busy(data->dev); 258 pm_runtime_mark_last_busy(dev);
257 ret = pm_runtime_put_autosuspend(data->dev); 259 ret = pm_runtime_put_autosuspend(dev);
258 } 260 }
259 261
260 if (ret < 0) { 262 if (ret < 0) {
261 dev_err(data->dev, 263 dev_err(dev, "Failed: bmg160_set_power_state for %d\n", on);
262 "Failed: bmg160_set_power_state for %d\n", on); 264
263 if (on) 265 if (on)
264 pm_runtime_put_noidle(data->dev); 266 pm_runtime_put_noidle(dev);
265 267
266 return ret; 268 return ret;
267 } 269 }
@@ -273,6 +275,7 @@ static int bmg160_set_power_state(struct bmg160_data *data, bool on)
273static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data, 275static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
274 bool status) 276 bool status)
275{ 277{
278 struct device *dev = regmap_get_device(data->regmap);
276 int ret; 279 int ret;
277 280
278 /* Enable/Disable INT_MAP0 mapping */ 281 /* Enable/Disable INT_MAP0 mapping */
@@ -280,7 +283,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
280 BMG160_INT_MAP_0_BIT_ANY, 283 BMG160_INT_MAP_0_BIT_ANY,
281 (status ? BMG160_INT_MAP_0_BIT_ANY : 0)); 284 (status ? BMG160_INT_MAP_0_BIT_ANY : 0));
282 if (ret < 0) { 285 if (ret < 0) {
283 dev_err(data->dev, "Error updating bits reg_int_map0\n"); 286 dev_err(dev, "Error updating bits reg_int_map0\n");
284 return ret; 287 return ret;
285 } 288 }
286 289
@@ -290,8 +293,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
290 ret = regmap_write(data->regmap, BMG160_REG_SLOPE_THRES, 293 ret = regmap_write(data->regmap, BMG160_REG_SLOPE_THRES,
291 data->slope_thres); 294 data->slope_thres);
292 if (ret < 0) { 295 if (ret < 0) {
293 dev_err(data->dev, 296 dev_err(dev, "Error writing reg_slope_thres\n");
294 "Error writing reg_slope_thres\n");
295 return ret; 297 return ret;
296 } 298 }
297 299
@@ -299,8 +301,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
299 BMG160_INT_MOTION_X | BMG160_INT_MOTION_Y | 301 BMG160_INT_MOTION_X | BMG160_INT_MOTION_Y |
300 BMG160_INT_MOTION_Z); 302 BMG160_INT_MOTION_Z);
301 if (ret < 0) { 303 if (ret < 0) {
302 dev_err(data->dev, 304 dev_err(dev, "Error writing reg_motion_intr\n");
303 "Error writing reg_motion_intr\n");
304 return ret; 305 return ret;
305 } 306 }
306 307
@@ -315,8 +316,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
315 BMG160_INT_MODE_LATCH_INT | 316 BMG160_INT_MODE_LATCH_INT |
316 BMG160_INT_MODE_LATCH_RESET); 317 BMG160_INT_MODE_LATCH_RESET);
317 if (ret < 0) { 318 if (ret < 0) {
318 dev_err(data->dev, 319 dev_err(dev, "Error writing reg_rst_latch\n");
319 "Error writing reg_rst_latch\n");
320 return ret; 320 return ret;
321 } 321 }
322 } 322 }
@@ -329,7 +329,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
329 } 329 }
330 330
331 if (ret < 0) { 331 if (ret < 0) {
332 dev_err(data->dev, "Error writing reg_int_en0\n"); 332 dev_err(dev, "Error writing reg_int_en0\n");
333 return ret; 333 return ret;
334 } 334 }
335 335
@@ -339,6 +339,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
339static int bmg160_setup_new_data_interrupt(struct bmg160_data *data, 339static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
340 bool status) 340 bool status)
341{ 341{
342 struct device *dev = regmap_get_device(data->regmap);
342 int ret; 343 int ret;
343 344
344 /* Enable/Disable INT_MAP1 mapping */ 345 /* Enable/Disable INT_MAP1 mapping */
@@ -346,7 +347,7 @@ static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
346 BMG160_INT_MAP_1_BIT_NEW_DATA, 347 BMG160_INT_MAP_1_BIT_NEW_DATA,
347 (status ? BMG160_INT_MAP_1_BIT_NEW_DATA : 0)); 348 (status ? BMG160_INT_MAP_1_BIT_NEW_DATA : 0));
348 if (ret < 0) { 349 if (ret < 0) {
349 dev_err(data->dev, "Error updating bits in reg_int_map1\n"); 350 dev_err(dev, "Error updating bits in reg_int_map1\n");
350 return ret; 351 return ret;
351 } 352 }
352 353
@@ -355,9 +356,8 @@ static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
355 BMG160_INT_MODE_NON_LATCH_INT | 356 BMG160_INT_MODE_NON_LATCH_INT |
356 BMG160_INT_MODE_LATCH_RESET); 357 BMG160_INT_MODE_LATCH_RESET);
357 if (ret < 0) { 358 if (ret < 0) {
358 dev_err(data->dev, 359 dev_err(dev, "Error writing reg_rst_latch\n");
359 "Error writing reg_rst_latch\n"); 360 return ret;
360 return ret;
361 } 361 }
362 362
363 ret = regmap_write(data->regmap, BMG160_REG_INT_EN_0, 363 ret = regmap_write(data->regmap, BMG160_REG_INT_EN_0,
@@ -369,16 +369,15 @@ static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
369 BMG160_INT_MODE_LATCH_INT | 369 BMG160_INT_MODE_LATCH_INT |
370 BMG160_INT_MODE_LATCH_RESET); 370 BMG160_INT_MODE_LATCH_RESET);
371 if (ret < 0) { 371 if (ret < 0) {
372 dev_err(data->dev, 372 dev_err(dev, "Error writing reg_rst_latch\n");
373 "Error writing reg_rst_latch\n"); 373 return ret;
374 return ret;
375 } 374 }
376 375
377 ret = regmap_write(data->regmap, BMG160_REG_INT_EN_0, 0); 376 ret = regmap_write(data->regmap, BMG160_REG_INT_EN_0, 0);
378 } 377 }
379 378
380 if (ret < 0) { 379 if (ret < 0) {
381 dev_err(data->dev, "Error writing reg_int_en0\n"); 380 dev_err(dev, "Error writing reg_int_en0\n");
382 return ret; 381 return ret;
383 } 382 }
384 383
@@ -401,6 +400,7 @@ static int bmg160_get_bw(struct bmg160_data *data, int *val)
401 400
402static int bmg160_set_scale(struct bmg160_data *data, int val) 401static int bmg160_set_scale(struct bmg160_data *data, int val)
403{ 402{
403 struct device *dev = regmap_get_device(data->regmap);
404 int ret, i; 404 int ret, i;
405 405
406 for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) { 406 for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
@@ -408,8 +408,7 @@ static int bmg160_set_scale(struct bmg160_data *data, int val)
408 ret = regmap_write(data->regmap, BMG160_REG_RANGE, 408 ret = regmap_write(data->regmap, BMG160_REG_RANGE,
409 bmg160_scale_table[i].dps_range); 409 bmg160_scale_table[i].dps_range);
410 if (ret < 0) { 410 if (ret < 0) {
411 dev_err(data->dev, 411 dev_err(dev, "Error writing reg_range\n");
412 "Error writing reg_range\n");
413 return ret; 412 return ret;
414 } 413 }
415 data->dps_range = bmg160_scale_table[i].dps_range; 414 data->dps_range = bmg160_scale_table[i].dps_range;
@@ -422,6 +421,7 @@ static int bmg160_set_scale(struct bmg160_data *data, int val)
422 421
423static int bmg160_get_temp(struct bmg160_data *data, int *val) 422static int bmg160_get_temp(struct bmg160_data *data, int *val)
424{ 423{
424 struct device *dev = regmap_get_device(data->regmap);
425 int ret; 425 int ret;
426 unsigned int raw_val; 426 unsigned int raw_val;
427 427
@@ -434,7 +434,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
434 434
435 ret = regmap_read(data->regmap, BMG160_REG_TEMP, &raw_val); 435 ret = regmap_read(data->regmap, BMG160_REG_TEMP, &raw_val);
436 if (ret < 0) { 436 if (ret < 0) {
437 dev_err(data->dev, "Error reading reg_temp\n"); 437 dev_err(dev, "Error reading reg_temp\n");
438 bmg160_set_power_state(data, false); 438 bmg160_set_power_state(data, false);
439 mutex_unlock(&data->mutex); 439 mutex_unlock(&data->mutex);
440 return ret; 440 return ret;
@@ -451,6 +451,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
451 451
452static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val) 452static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
453{ 453{
454 struct device *dev = regmap_get_device(data->regmap);
454 int ret; 455 int ret;
455 __le16 raw_val; 456 __le16 raw_val;
456 457
@@ -464,7 +465,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
464 ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val, 465 ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
465 sizeof(raw_val)); 466 sizeof(raw_val));
466 if (ret < 0) { 467 if (ret < 0) {
467 dev_err(data->dev, "Error reading axis %d\n", axis); 468 dev_err(dev, "Error reading axis %d\n", axis);
468 bmg160_set_power_state(data, false); 469 bmg160_set_power_state(data, false);
469 mutex_unlock(&data->mutex); 470 mutex_unlock(&data->mutex);
470 return ret; 471 return ret;
@@ -764,26 +765,23 @@ static const struct iio_info bmg160_info = {
764 .driver_module = THIS_MODULE, 765 .driver_module = THIS_MODULE,
765}; 766};
766 767
768static const unsigned long bmg160_accel_scan_masks[] = {
769 BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
770 0};
771
767static irqreturn_t bmg160_trigger_handler(int irq, void *p) 772static irqreturn_t bmg160_trigger_handler(int irq, void *p)
768{ 773{
769 struct iio_poll_func *pf = p; 774 struct iio_poll_func *pf = p;
770 struct iio_dev *indio_dev = pf->indio_dev; 775 struct iio_dev *indio_dev = pf->indio_dev;
771 struct bmg160_data *data = iio_priv(indio_dev); 776 struct bmg160_data *data = iio_priv(indio_dev);
772 int bit, ret, i = 0; 777 int ret;
773 unsigned int val;
774 778
775 mutex_lock(&data->mutex); 779 mutex_lock(&data->mutex);
776 for_each_set_bit(bit, indio_dev->active_scan_mask, 780 ret = regmap_bulk_read(data->regmap, BMG160_REG_XOUT_L,
777 indio_dev->masklength) { 781 data->buffer, AXIS_MAX * 2);
778 ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(bit),
779 &val, 2);
780 if (ret < 0) {
781 mutex_unlock(&data->mutex);
782 goto err;
783 }
784 data->buffer[i++] = val;
785 }
786 mutex_unlock(&data->mutex); 782 mutex_unlock(&data->mutex);
783 if (ret < 0)
784 goto err;
787 785
788 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, 786 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
789 pf->timestamp); 787 pf->timestamp);
@@ -797,6 +795,7 @@ static int bmg160_trig_try_reen(struct iio_trigger *trig)
797{ 795{
798 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig); 796 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
799 struct bmg160_data *data = iio_priv(indio_dev); 797 struct bmg160_data *data = iio_priv(indio_dev);
798 struct device *dev = regmap_get_device(data->regmap);
800 int ret; 799 int ret;
801 800
802 /* new data interrupts don't need ack */ 801 /* new data interrupts don't need ack */
@@ -808,7 +807,7 @@ static int bmg160_trig_try_reen(struct iio_trigger *trig)
808 BMG160_INT_MODE_LATCH_INT | 807 BMG160_INT_MODE_LATCH_INT |
809 BMG160_INT_MODE_LATCH_RESET); 808 BMG160_INT_MODE_LATCH_RESET);
810 if (ret < 0) { 809 if (ret < 0) {
811 dev_err(data->dev, "Error writing reg_rst_latch\n"); 810 dev_err(dev, "Error writing reg_rst_latch\n");
812 return ret; 811 return ret;
813 } 812 }
814 813
@@ -868,13 +867,14 @@ static irqreturn_t bmg160_event_handler(int irq, void *private)
868{ 867{
869 struct iio_dev *indio_dev = private; 868 struct iio_dev *indio_dev = private;
870 struct bmg160_data *data = iio_priv(indio_dev); 869 struct bmg160_data *data = iio_priv(indio_dev);
870 struct device *dev = regmap_get_device(data->regmap);
871 int ret; 871 int ret;
872 int dir; 872 int dir;
873 unsigned int val; 873 unsigned int val;
874 874
875 ret = regmap_read(data->regmap, BMG160_REG_INT_STATUS_2, &val); 875 ret = regmap_read(data->regmap, BMG160_REG_INT_STATUS_2, &val);
876 if (ret < 0) { 876 if (ret < 0) {
877 dev_err(data->dev, "Error reading reg_int_status2\n"); 877 dev_err(dev, "Error reading reg_int_status2\n");
878 goto ack_intr_status; 878 goto ack_intr_status;
879 } 879 }
880 880
@@ -911,8 +911,7 @@ ack_intr_status:
911 BMG160_INT_MODE_LATCH_INT | 911 BMG160_INT_MODE_LATCH_INT |
912 BMG160_INT_MODE_LATCH_RESET); 912 BMG160_INT_MODE_LATCH_RESET);
913 if (ret < 0) 913 if (ret < 0)
914 dev_err(data->dev, 914 dev_err(dev, "Error writing reg_rst_latch\n");
915 "Error writing reg_rst_latch\n");
916 } 915 }
917 916
918 return IRQ_HANDLED; 917 return IRQ_HANDLED;
@@ -956,29 +955,6 @@ static const struct iio_buffer_setup_ops bmg160_buffer_setup_ops = {
956 .postdisable = bmg160_buffer_postdisable, 955 .postdisable = bmg160_buffer_postdisable,
957}; 956};
958 957
959static int bmg160_gpio_probe(struct bmg160_data *data)
960
961{
962 struct device *dev;
963 struct gpio_desc *gpio;
964
965 dev = data->dev;
966
967 /* data ready gpio interrupt pin */
968 gpio = devm_gpiod_get_index(dev, BMG160_GPIO_NAME, 0, GPIOD_IN);
969 if (IS_ERR(gpio)) {
970 dev_err(dev, "acpi gpio get index failed\n");
971 return PTR_ERR(gpio);
972 }
973
974 data->irq = gpiod_to_irq(gpio);
975
976 dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio),
977 data->irq);
978
979 return 0;
980}
981
982static const char *bmg160_match_acpi_device(struct device *dev) 958static const char *bmg160_match_acpi_device(struct device *dev)
983{ 959{
984 const struct acpi_device_id *id; 960 const struct acpi_device_id *id;
@@ -1003,7 +979,6 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
1003 979
1004 data = iio_priv(indio_dev); 980 data = iio_priv(indio_dev);
1005 dev_set_drvdata(dev, indio_dev); 981 dev_set_drvdata(dev, indio_dev);
1006 data->dev = dev;
1007 data->irq = irq; 982 data->irq = irq;
1008 data->regmap = regmap; 983 data->regmap = regmap;
1009 984
@@ -1020,12 +995,10 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
1020 indio_dev->channels = bmg160_channels; 995 indio_dev->channels = bmg160_channels;
1021 indio_dev->num_channels = ARRAY_SIZE(bmg160_channels); 996 indio_dev->num_channels = ARRAY_SIZE(bmg160_channels);
1022 indio_dev->name = name; 997 indio_dev->name = name;
998 indio_dev->available_scan_masks = bmg160_accel_scan_masks;
1023 indio_dev->modes = INDIO_DIRECT_MODE; 999 indio_dev->modes = INDIO_DIRECT_MODE;
1024 indio_dev->info = &bmg160_info; 1000 indio_dev->info = &bmg160_info;
1025 1001
1026 if (data->irq <= 0)
1027 bmg160_gpio_probe(data);
1028
1029 if (data->irq > 0) { 1002 if (data->irq > 0) {
1030 ret = devm_request_threaded_irq(dev, 1003 ret = devm_request_threaded_irq(dev,
1031 data->irq, 1004 data->irq,
@@ -1168,7 +1141,7 @@ static int bmg160_runtime_suspend(struct device *dev)
1168 1141
1169 ret = bmg160_set_mode(data, BMG160_MODE_SUSPEND); 1142 ret = bmg160_set_mode(data, BMG160_MODE_SUSPEND);
1170 if (ret < 0) { 1143 if (ret < 0) {
1171 dev_err(data->dev, "set mode failed\n"); 1144 dev_err(dev, "set mode failed\n");
1172 return -EAGAIN; 1145 return -EAGAIN;
1173 } 1146 }
1174 1147
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index 5353d6328c54..a5c5c4e29add 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -21,6 +21,7 @@
21#define L3GD20_GYRO_DEV_NAME "l3gd20" 21#define L3GD20_GYRO_DEV_NAME "l3gd20"
22#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui" 22#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
23#define LSM330_GYRO_DEV_NAME "lsm330_gyro" 23#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
24#define LSM9DS0_GYRO_DEV_NAME "lsm9ds0_gyro"
24 25
25/** 26/**
26 * struct st_sensors_platform_data - gyro platform data 27 * struct st_sensors_platform_data - gyro platform data
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 110f95b6e52f..52a3c87c375c 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -190,6 +190,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
190 * drain settings, but only for INT1 and not 190 * drain settings, but only for INT1 and not
191 * for the DRDY line on INT2. 191 * for the DRDY line on INT2.
192 */ 192 */
193 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
193 }, 194 },
194 .multi_read_bit = ST_GYRO_1_MULTIREAD_BIT, 195 .multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
195 .bootime = 2, 196 .bootime = 2,
@@ -203,6 +204,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
203 [2] = LSM330DLC_GYRO_DEV_NAME, 204 [2] = LSM330DLC_GYRO_DEV_NAME,
204 [3] = L3G4IS_GYRO_DEV_NAME, 205 [3] = L3G4IS_GYRO_DEV_NAME,
205 [4] = LSM330_GYRO_DEV_NAME, 206 [4] = LSM330_GYRO_DEV_NAME,
207 [5] = LSM9DS0_GYRO_DEV_NAME,
206 }, 208 },
207 .ch = (struct iio_chan_spec *)st_gyro_16bit_channels, 209 .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
208 .odr = { 210 .odr = {
@@ -258,6 +260,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
258 * drain settings, but only for INT1 and not 260 * drain settings, but only for INT1 and not
259 * for the DRDY line on INT2. 261 * for the DRDY line on INT2.
260 */ 262 */
263 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
261 }, 264 },
262 .multi_read_bit = ST_GYRO_2_MULTIREAD_BIT, 265 .multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
263 .bootime = 2, 266 .bootime = 2,
@@ -322,6 +325,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
322 * drain settings, but only for INT1 and not 325 * drain settings, but only for INT1 and not
323 * for the DRDY line on INT2. 326 * for the DRDY line on INT2.
324 */ 327 */
328 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
325 }, 329 },
326 .multi_read_bit = ST_GYRO_3_MULTIREAD_BIT, 330 .multi_read_bit = ST_GYRO_3_MULTIREAD_BIT,
327 .bootime = 2, 331 .bootime = 2,
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 6848451f817a..40056b821036 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -48,6 +48,10 @@ static const struct of_device_id st_gyro_of_match[] = {
48 .compatible = "st,lsm330-gyro", 48 .compatible = "st,lsm330-gyro",
49 .data = LSM330_GYRO_DEV_NAME, 49 .data = LSM330_GYRO_DEV_NAME,
50 }, 50 },
51 {
52 .compatible = "st,lsm9ds0-gyro",
53 .data = LSM9DS0_GYRO_DEV_NAME,
54 },
51 {}, 55 {},
52}; 56};
53MODULE_DEVICE_TABLE(of, st_gyro_of_match); 57MODULE_DEVICE_TABLE(of, st_gyro_of_match);
@@ -93,6 +97,7 @@ static const struct i2c_device_id st_gyro_id_table[] = {
93 { L3GD20_GYRO_DEV_NAME }, 97 { L3GD20_GYRO_DEV_NAME },
94 { L3G4IS_GYRO_DEV_NAME }, 98 { L3G4IS_GYRO_DEV_NAME },
95 { LSM330_GYRO_DEV_NAME }, 99 { LSM330_GYRO_DEV_NAME },
100 { LSM9DS0_GYRO_DEV_NAME },
96 {}, 101 {},
97}; 102};
98MODULE_DEVICE_TABLE(i2c, st_gyro_id_table); 103MODULE_DEVICE_TABLE(i2c, st_gyro_id_table);
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index d2b7a5fa344c..fbf2faed501c 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -54,6 +54,7 @@ static const struct spi_device_id st_gyro_id_table[] = {
54 { L3GD20_GYRO_DEV_NAME }, 54 { L3GD20_GYRO_DEV_NAME },
55 { L3G4IS_GYRO_DEV_NAME }, 55 { L3G4IS_GYRO_DEV_NAME },
56 { LSM330_GYRO_DEV_NAME }, 56 { LSM330_GYRO_DEV_NAME },
57 { LSM9DS0_GYRO_DEV_NAME },
57 {}, 58 {},
58}; 59};
59MODULE_DEVICE_TABLE(spi, st_gyro_id_table); 60MODULE_DEVICE_TABLE(spi, st_gyro_id_table);
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 866dda133336..738a86d9e4a9 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -3,6 +3,16 @@
3# 3#
4menu "Humidity sensors" 4menu "Humidity sensors"
5 5
6config AM2315
7 tristate "Aosong AM2315 relative humidity and temperature sensor"
8 depends on I2C
9 help
10 If you say yes here you get support for the Aosong AM2315
11 relative humidity and ambient temperature sensor.
12
13 This driver can also be built as a module. If so, the module will
14 be called am2315.
15
6config DHT11 16config DHT11
7 tristate "DHT11 (and compatible sensors) driver" 17 tristate "DHT11 (and compatible sensors) driver"
8 depends on GPIOLIB || COMPILE_TEST 18 depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index c9f089a9a6b8..4a73442fcd9c 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -2,6 +2,7 @@
2# Makefile for IIO humidity sensor drivers 2# Makefile for IIO humidity sensor drivers
3# 3#
4 4
5obj-$(CONFIG_AM2315) += am2315.o
5obj-$(CONFIG_DHT11) += dht11.o 6obj-$(CONFIG_DHT11) += dht11.o
6obj-$(CONFIG_HDC100X) += hdc100x.o 7obj-$(CONFIG_HDC100X) += hdc100x.o
7obj-$(CONFIG_HTU21) += htu21.o 8obj-$(CONFIG_HTU21) += htu21.o
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
new file mode 100644
index 000000000000..3be6d209a159
--- /dev/null
+++ b/drivers/iio/humidity/am2315.c
@@ -0,0 +1,303 @@
1/**
2 * Aosong AM2315 relative humidity and temperature
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This file is subject to the terms and conditions of version 2 of
7 * the GNU General Public License. See the file COPYING in the main
8 * directory of this archive for more details.
9 *
10 * 7-bit I2C address: 0x5C.
11 */
12
13#include <linux/acpi.h>
14#include <linux/delay.h>
15#include <linux/i2c.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/iio/buffer.h>
19#include <linux/iio/iio.h>
20#include <linux/iio/sysfs.h>
21#include <linux/iio/trigger_consumer.h>
22#include <linux/iio/triggered_buffer.h>
23
24#define AM2315_REG_HUM_MSB 0x00
25#define AM2315_REG_HUM_LSB 0x01
26#define AM2315_REG_TEMP_MSB 0x02
27#define AM2315_REG_TEMP_LSB 0x03
28
29#define AM2315_FUNCTION_READ 0x03
30#define AM2315_HUM_OFFSET 2
31#define AM2315_TEMP_OFFSET 4
32#define AM2315_ALL_CHANNEL_MASK GENMASK(1, 0)
33
34#define AM2315_DRIVER_NAME "am2315"
35
36struct am2315_data {
37 struct i2c_client *client;
38 struct mutex lock;
39 s16 buffer[8]; /* 2x16-bit channels + 2x16 padding + 4x16 timestamp */
40};
41
42struct am2315_sensor_data {
43 s16 hum_data;
44 s16 temp_data;
45};
46
47static const struct iio_chan_spec am2315_channels[] = {
48 {
49 .type = IIO_HUMIDITYRELATIVE,
50 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
51 BIT(IIO_CHAN_INFO_SCALE),
52 .scan_index = 0,
53 .scan_type = {
54 .sign = 's',
55 .realbits = 16,
56 .storagebits = 16,
57 .endianness = IIO_CPU,
58 },
59 },
60 {
61 .type = IIO_TEMP,
62 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
63 BIT(IIO_CHAN_INFO_SCALE),
64 .scan_index = 1,
65 .scan_type = {
66 .sign = 's',
67 .realbits = 16,
68 .storagebits = 16,
69 .endianness = IIO_CPU,
70 },
71 },
72 IIO_CHAN_SOFT_TIMESTAMP(2),
73};
74
75/* CRC calculation algorithm, as specified in the datasheet (page 13). */
76static u16 am2315_crc(u8 *data, u8 nr_bytes)
77{
78 int i;
79 u16 crc = 0xffff;
80
81 while (nr_bytes--) {
82 crc ^= *data++;
83 for (i = 0; i < 8; i++) {
84 if (crc & 0x01) {
85 crc >>= 1;
86 crc ^= 0xA001;
87 } else {
88 crc >>= 1;
89 }
90 }
91 }
92
93 return crc;
94}
95
96/* Simple function that sends a few bytes to the device to wake it up. */
97static void am2315_ping(struct i2c_client *client)
98{
99 i2c_smbus_read_byte_data(client, AM2315_REG_HUM_MSB);
100}
101
102static int am2315_read_data(struct am2315_data *data,
103 struct am2315_sensor_data *sensor_data)
104{
105 int ret;
106 /* tx_buf format: <function code> <start addr> <nr of regs to read> */
107 u8 tx_buf[3] = { AM2315_FUNCTION_READ, AM2315_REG_HUM_MSB, 4 };
108 /*
109 * rx_buf format:
110 * <function code> <number of registers read>
111 * <humidity MSB> <humidity LSB> <temp MSB> <temp LSB>
112 * <CRC LSB> <CRC MSB>
113 */
114 u8 rx_buf[8];
115 u16 crc;
116
117 /* First wake up the device. */
118 am2315_ping(data->client);
119
120 mutex_lock(&data->lock);
121 ret = i2c_master_send(data->client, tx_buf, sizeof(tx_buf));
122 if (ret < 0) {
123 dev_err(&data->client->dev, "failed to send read request\n");
124 goto exit_unlock;
125 }
126 /* Wait 2-3 ms, then read back the data sent by the device. */
127 usleep_range(2000, 3000);
128 /* Do a bulk data read, then pick out what we need. */
129 ret = i2c_master_recv(data->client, rx_buf, sizeof(rx_buf));
130 if (ret < 0) {
131 dev_err(&data->client->dev, "failed to read sensor data\n");
132 goto exit_unlock;
133 }
134 mutex_unlock(&data->lock);
135 /*
136 * Do a CRC check on the data and compare it to the value
137 * calculated by the device.
138 */
139 crc = am2315_crc(rx_buf, sizeof(rx_buf) - 2);
140 if ((crc & 0xff) != rx_buf[6] || (crc >> 8) != rx_buf[7]) {
141 dev_err(&data->client->dev, "failed to verify sensor data\n");
142 return -EIO;
143 }
144
145 sensor_data->hum_data = (rx_buf[AM2315_HUM_OFFSET] << 8) |
146 rx_buf[AM2315_HUM_OFFSET + 1];
147 sensor_data->temp_data = (rx_buf[AM2315_TEMP_OFFSET] << 8) |
148 rx_buf[AM2315_TEMP_OFFSET + 1];
149
150 return ret;
151
152exit_unlock:
153 mutex_unlock(&data->lock);
154 return ret;
155}
156
157static irqreturn_t am2315_trigger_handler(int irq, void *p)
158{
159 int i;
160 int ret;
161 int bit;
162 struct iio_poll_func *pf = p;
163 struct iio_dev *indio_dev = pf->indio_dev;
164 struct am2315_data *data = iio_priv(indio_dev);
165 struct am2315_sensor_data sensor_data;
166
167 ret = am2315_read_data(data, &sensor_data);
168 if (ret < 0) {
169 mutex_unlock(&data->lock);
170 goto err;
171 }
172
173 mutex_lock(&data->lock);
174 if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
175 data->buffer[0] = sensor_data.hum_data;
176 data->buffer[1] = sensor_data.temp_data;
177 } else {
178 i = 0;
179 for_each_set_bit(bit, indio_dev->active_scan_mask,
180 indio_dev->masklength) {
181 data->buffer[i] = (bit ? sensor_data.temp_data :
182 sensor_data.hum_data);
183 i++;
184 }
185 }
186 mutex_unlock(&data->lock);
187
188 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
189 pf->timestamp);
190err:
191 iio_trigger_notify_done(indio_dev->trig);
192 return IRQ_HANDLED;
193}
194
195static int am2315_read_raw(struct iio_dev *indio_dev,
196 struct iio_chan_spec const *chan,
197 int *val, int *val2, long mask)
198{
199 int ret;
200 struct am2315_sensor_data sensor_data;
201 struct am2315_data *data = iio_priv(indio_dev);
202
203 switch (mask) {
204 case IIO_CHAN_INFO_RAW:
205 ret = am2315_read_data(data, &sensor_data);
206 if (ret < 0)
207 return ret;
208 *val = (chan->type == IIO_HUMIDITYRELATIVE) ?
209 sensor_data.hum_data : sensor_data.temp_data;
210 return IIO_VAL_INT;
211 case IIO_CHAN_INFO_SCALE:
212 *val = 100;
213 return IIO_VAL_INT;
214 }
215
216 return -EINVAL;
217}
218
219static const struct iio_info am2315_info = {
220 .driver_module = THIS_MODULE,
221 .read_raw = am2315_read_raw,
222};
223
224static int am2315_probe(struct i2c_client *client,
225 const struct i2c_device_id *id)
226{
227 int ret;
228 struct iio_dev *indio_dev;
229 struct am2315_data *data;
230
231 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
232 if (!indio_dev) {
233 dev_err(&client->dev, "iio allocation failed!\n");
234 return -ENOMEM;
235 }
236
237 data = iio_priv(indio_dev);
238 data->client = client;
239 i2c_set_clientdata(client, indio_dev);
240 mutex_init(&data->lock);
241
242 indio_dev->dev.parent = &client->dev;
243 indio_dev->info = &am2315_info;
244 indio_dev->name = AM2315_DRIVER_NAME;
245 indio_dev->modes = INDIO_DIRECT_MODE;
246 indio_dev->channels = am2315_channels;
247 indio_dev->num_channels = ARRAY_SIZE(am2315_channels);
248
249 ret = iio_triggered_buffer_setup(indio_dev, NULL,
250 am2315_trigger_handler, NULL);
251 if (ret < 0) {
252 dev_err(&client->dev, "iio triggered buffer setup failed\n");
253 return ret;
254 }
255
256 ret = iio_device_register(indio_dev);
257 if (ret < 0)
258 goto err_buffer_cleanup;
259
260 return 0;
261
262err_buffer_cleanup:
263 iio_triggered_buffer_cleanup(indio_dev);
264 return ret;
265}
266
267static int am2315_remove(struct i2c_client *client)
268{
269 struct iio_dev *indio_dev = i2c_get_clientdata(client);
270
271 iio_device_unregister(indio_dev);
272 iio_triggered_buffer_cleanup(indio_dev);
273
274 return 0;
275}
276
277static const struct i2c_device_id am2315_i2c_id[] = {
278 {"am2315", 0},
279 {}
280};
281
282static const struct acpi_device_id am2315_acpi_id[] = {
283 {"AOS2315", 0},
284 {}
285};
286
287MODULE_DEVICE_TABLE(acpi, am2315_acpi_id);
288
289static struct i2c_driver am2315_driver = {
290 .driver = {
291 .name = "am2315",
292 .acpi_match_table = ACPI_PTR(am2315_acpi_id),
293 },
294 .probe = am2315_probe,
295 .remove = am2315_remove,
296 .id_table = am2315_i2c_id,
297};
298
299module_i2c_driver(am2315_driver);
300
301MODULE_AUTHOR("Tiberiu Breana <tiberiu.a.breana@intel.com>");
302MODULE_DESCRIPTION("Aosong AM2315 relative humidity and temperature");
303MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 20b500da94db..9c47bc98f3ac 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -96,6 +96,24 @@ struct dht11 {
96 struct {s64 ts; int value; } edges[DHT11_EDGES_PER_READ]; 96 struct {s64 ts; int value; } edges[DHT11_EDGES_PER_READ];
97}; 97};
98 98
99#ifdef CONFIG_DYNAMIC_DEBUG
100/*
101 * dht11_edges_print: show the data as actually received by the
102 * driver.
103 */
104static void dht11_edges_print(struct dht11 *dht11)
105{
106 int i;
107
108 dev_dbg(dht11->dev, "%d edges detected:\n", dht11->num_edges);
109 for (i = 1; i < dht11->num_edges; ++i) {
110 dev_dbg(dht11->dev, "%d: %lld ns %s\n", i,
111 dht11->edges[i].ts - dht11->edges[i - 1].ts,
112 dht11->edges[i - 1].value ? "high" : "low");
113 }
114}
115#endif /* CONFIG_DYNAMIC_DEBUG */
116
99static unsigned char dht11_decode_byte(char *bits) 117static unsigned char dht11_decode_byte(char *bits)
100{ 118{
101 unsigned char ret = 0; 119 unsigned char ret = 0;
@@ -119,8 +137,12 @@ static int dht11_decode(struct dht11 *dht11, int offset)
119 for (i = 0; i < DHT11_BITS_PER_READ; ++i) { 137 for (i = 0; i < DHT11_BITS_PER_READ; ++i) {
120 t = dht11->edges[offset + 2 * i + 2].ts - 138 t = dht11->edges[offset + 2 * i + 2].ts -
121 dht11->edges[offset + 2 * i + 1].ts; 139 dht11->edges[offset + 2 * i + 1].ts;
122 if (!dht11->edges[offset + 2 * i + 1].value) 140 if (!dht11->edges[offset + 2 * i + 1].value) {
123 return -EIO; /* lost synchronisation */ 141 dev_dbg(dht11->dev,
142 "lost synchronisation at edge %d\n",
143 offset + 2 * i + 1);
144 return -EIO;
145 }
124 bits[i] = t > DHT11_THRESHOLD; 146 bits[i] = t > DHT11_THRESHOLD;
125 } 147 }
126 148
@@ -130,8 +152,10 @@ static int dht11_decode(struct dht11 *dht11, int offset)
130 temp_dec = dht11_decode_byte(&bits[24]); 152 temp_dec = dht11_decode_byte(&bits[24]);
131 checksum = dht11_decode_byte(&bits[32]); 153 checksum = dht11_decode_byte(&bits[32]);
132 154
133 if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum) 155 if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum) {
156 dev_dbg(dht11->dev, "invalid checksum\n");
134 return -EIO; 157 return -EIO;
158 }
135 159
136 dht11->timestamp = ktime_get_boot_ns(); 160 dht11->timestamp = ktime_get_boot_ns();
137 if (hum_int < 20) { /* DHT22 */ 161 if (hum_int < 20) { /* DHT22 */
@@ -182,6 +206,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
182 mutex_lock(&dht11->lock); 206 mutex_lock(&dht11->lock);
183 if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) { 207 if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
184 timeres = ktime_get_resolution_ns(); 208 timeres = ktime_get_resolution_ns();
209 dev_dbg(dht11->dev, "current timeresolution: %dns\n", timeres);
185 if (timeres > DHT11_MIN_TIMERES) { 210 if (timeres > DHT11_MIN_TIMERES) {
186 dev_err(dht11->dev, "timeresolution %dns too low\n", 211 dev_err(dht11->dev, "timeresolution %dns too low\n",
187 timeres); 212 timeres);
@@ -219,10 +244,13 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
219 244
220 free_irq(dht11->irq, iio_dev); 245 free_irq(dht11->irq, iio_dev);
221 246
247#ifdef CONFIG_DYNAMIC_DEBUG
248 dht11_edges_print(dht11);
249#endif
250
222 if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) { 251 if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
223 dev_err(&iio_dev->dev, 252 dev_err(dht11->dev, "Only %d signal edges detected\n",
224 "Only %d signal edges detected\n", 253 dht11->num_edges);
225 dht11->num_edges);
226 ret = -ETIMEDOUT; 254 ret = -ETIMEDOUT;
227 } 255 }
228 if (ret < 0) 256 if (ret < 0)
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 5e610f7de5aa..1f1ad41ef881 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -25,6 +25,8 @@ config ADIS16480
25 Say yes here to build support for Analog Devices ADIS16375, ADIS16480, 25 Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
26 ADIS16485, ADIS16488 inertial sensors. 26 ADIS16485, ADIS16488 inertial sensors.
27 27
28source "drivers/iio/imu/bmi160/Kconfig"
29
28config KMX61 30config KMX61
29 tristate "Kionix KMX61 6-axis accelerometer and magnetometer" 31 tristate "Kionix KMX61 6-axis accelerometer and magnetometer"
30 depends on I2C 32 depends on I2C
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index e1e6e3d70e26..c71bcd30dc38 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -13,6 +13,7 @@ adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_trigger.o
13adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o 13adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
14obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o 14obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
15 15
16obj-y += bmi160/
16obj-y += inv_mpu6050/ 17obj-y += inv_mpu6050/
17 18
18obj-$(CONFIG_KMX61) += kmx61.o 19obj-$(CONFIG_KMX61) += kmx61.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 911255d41c1a..ad6f91d06185 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -324,7 +324,12 @@ static int adis_self_test(struct adis *adis)
324 324
325 msleep(adis->data->startup_delay); 325 msleep(adis->data->startup_delay);
326 326
327 return adis_check_status(adis); 327 ret = adis_check_status(adis);
328
329 if (adis->data->self_test_no_autoclear)
330 adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0x00);
331
332 return ret;
328} 333}
329 334
330/** 335/**
diff --git a/drivers/iio/imu/bmi160/Kconfig b/drivers/iio/imu/bmi160/Kconfig
new file mode 100644
index 000000000000..005c17ccc2b0
--- /dev/null
+++ b/drivers/iio/imu/bmi160/Kconfig
@@ -0,0 +1,32 @@
1#
2# BMI160 IMU driver
3#
4
5config BMI160
6 tristate
7 select IIO_BUFFER
8 select IIO_TRIGGERED_BUFFER
9
10config BMI160_I2C
11 tristate "Bosch BMI160 I2C driver"
12 depends on I2C
13 select BMI160
14 select REGMAP_I2C
15 help
16 If you say yes here you get support for BMI160 IMU on I2C with
17 accelerometer, gyroscope and external BMG160 magnetometer.
18
19 This driver can also be built as a module. If so, the module will be
20 called bmi160_i2c.
21
22config BMI160_SPI
23 tristate "Bosch BMI160 SPI driver"
24 depends on SPI
25 select BMI160
26 select REGMAP_SPI
27 help
28 If you say yes here you get support for BMI160 IMU on SPI with
29 accelerometer, gyroscope and external BMG160 magnetometer.
30
31 This driver can also be built as a module. If so, the module will be
32 called bmi160_spi.
diff --git a/drivers/iio/imu/bmi160/Makefile b/drivers/iio/imu/bmi160/Makefile
new file mode 100644
index 000000000000..10365e493ae2
--- /dev/null
+++ b/drivers/iio/imu/bmi160/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for Bosch BMI160 IMU
3#
4obj-$(CONFIG_BMI160) += bmi160_core.o
5obj-$(CONFIG_BMI160_I2C) += bmi160_i2c.o
6obj-$(CONFIG_BMI160_SPI) += bmi160_spi.o
diff --git a/drivers/iio/imu/bmi160/bmi160.h b/drivers/iio/imu/bmi160/bmi160.h
new file mode 100644
index 000000000000..d2ae6ed70271
--- /dev/null
+++ b/drivers/iio/imu/bmi160/bmi160.h
@@ -0,0 +1,10 @@
1#ifndef BMI160_H_
2#define BMI160_H_
3
4extern const struct regmap_config bmi160_regmap_config;
5
6int bmi160_core_probe(struct device *dev, struct regmap *regmap,
7 const char *name, bool use_spi);
8void bmi160_core_remove(struct device *dev);
9
10#endif /* BMI160_H_ */
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
new file mode 100644
index 000000000000..0bf92b06d7d8
--- /dev/null
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -0,0 +1,596 @@
1/*
2 * BMI160 - Bosch IMU (accel, gyro plus external magnetometer)
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This file is subject to the terms and conditions of version 2 of
7 * the GNU General Public License. See the file COPYING in the main
8 * directory of this archive for more details.
9 *
10 * IIO core driver for BMI160, with support for I2C/SPI busses
11 *
12 * TODO: magnetometer, interrupts, hardware FIFO
13 */
14#include <linux/module.h>
15#include <linux/regmap.h>
16#include <linux/acpi.h>
17#include <linux/delay.h>
18
19#include <linux/iio/iio.h>
20#include <linux/iio/triggered_buffer.h>
21#include <linux/iio/trigger_consumer.h>
22#include <linux/iio/buffer.h>
23
24#include "bmi160.h"
25
26#define BMI160_REG_CHIP_ID 0x00
27#define BMI160_CHIP_ID_VAL 0xD1
28
29#define BMI160_REG_PMU_STATUS 0x03
30
31/* X axis data low byte address, the rest can be obtained using axis offset */
32#define BMI160_REG_DATA_MAGN_XOUT_L 0x04
33#define BMI160_REG_DATA_GYRO_XOUT_L 0x0C
34#define BMI160_REG_DATA_ACCEL_XOUT_L 0x12
35
36#define BMI160_REG_ACCEL_CONFIG 0x40
37#define BMI160_ACCEL_CONFIG_ODR_MASK GENMASK(3, 0)
38#define BMI160_ACCEL_CONFIG_BWP_MASK GENMASK(6, 4)
39
40#define BMI160_REG_ACCEL_RANGE 0x41
41#define BMI160_ACCEL_RANGE_2G 0x03
42#define BMI160_ACCEL_RANGE_4G 0x05
43#define BMI160_ACCEL_RANGE_8G 0x08
44#define BMI160_ACCEL_RANGE_16G 0x0C
45
46#define BMI160_REG_GYRO_CONFIG 0x42
47#define BMI160_GYRO_CONFIG_ODR_MASK GENMASK(3, 0)
48#define BMI160_GYRO_CONFIG_BWP_MASK GENMASK(5, 4)
49
50#define BMI160_REG_GYRO_RANGE 0x43
51#define BMI160_GYRO_RANGE_2000DPS 0x00
52#define BMI160_GYRO_RANGE_1000DPS 0x01
53#define BMI160_GYRO_RANGE_500DPS 0x02
54#define BMI160_GYRO_RANGE_250DPS 0x03
55#define BMI160_GYRO_RANGE_125DPS 0x04
56
57#define BMI160_REG_CMD 0x7E
58#define BMI160_CMD_ACCEL_PM_SUSPEND 0x10
59#define BMI160_CMD_ACCEL_PM_NORMAL 0x11
60#define BMI160_CMD_ACCEL_PM_LOW_POWER 0x12
61#define BMI160_CMD_GYRO_PM_SUSPEND 0x14
62#define BMI160_CMD_GYRO_PM_NORMAL 0x15
63#define BMI160_CMD_GYRO_PM_FAST_STARTUP 0x17
64#define BMI160_CMD_SOFTRESET 0xB6
65
66#define BMI160_REG_DUMMY 0x7F
67
68#define BMI160_ACCEL_PMU_MIN_USLEEP 3200
69#define BMI160_ACCEL_PMU_MAX_USLEEP 3800
70#define BMI160_GYRO_PMU_MIN_USLEEP 55000
71#define BMI160_GYRO_PMU_MAX_USLEEP 80000
72#define BMI160_SOFTRESET_USLEEP 1000
73
74#define BMI160_CHANNEL(_type, _axis, _index) { \
75 .type = _type, \
76 .modified = 1, \
77 .channel2 = IIO_MOD_##_axis, \
78 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
79 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
80 BIT(IIO_CHAN_INFO_SAMP_FREQ), \
81 .scan_index = _index, \
82 .scan_type = { \
83 .sign = 's', \
84 .realbits = 16, \
85 .storagebits = 16, \
86 .endianness = IIO_LE, \
87 }, \
88}
89
90/* scan indexes follow DATA register order */
91enum bmi160_scan_axis {
92 BMI160_SCAN_EXT_MAGN_X = 0,
93 BMI160_SCAN_EXT_MAGN_Y,
94 BMI160_SCAN_EXT_MAGN_Z,
95 BMI160_SCAN_RHALL,
96 BMI160_SCAN_GYRO_X,
97 BMI160_SCAN_GYRO_Y,
98 BMI160_SCAN_GYRO_Z,
99 BMI160_SCAN_ACCEL_X,
100 BMI160_SCAN_ACCEL_Y,
101 BMI160_SCAN_ACCEL_Z,
102 BMI160_SCAN_TIMESTAMP,
103};
104
105enum bmi160_sensor_type {
106 BMI160_ACCEL = 0,
107 BMI160_GYRO,
108 BMI160_EXT_MAGN,
109 BMI160_NUM_SENSORS /* must be last */
110};
111
112struct bmi160_data {
113 struct regmap *regmap;
114};
115
116const struct regmap_config bmi160_regmap_config = {
117 .reg_bits = 8,
118 .val_bits = 8,
119};
120EXPORT_SYMBOL(bmi160_regmap_config);
121
122struct bmi160_regs {
123 u8 data; /* LSB byte register for X-axis */
124 u8 config;
125 u8 config_odr_mask;
126 u8 config_bwp_mask;
127 u8 range;
128 u8 pmu_cmd_normal;
129 u8 pmu_cmd_suspend;
130};
131
132static struct bmi160_regs bmi160_regs[] = {
133 [BMI160_ACCEL] = {
134 .data = BMI160_REG_DATA_ACCEL_XOUT_L,
135 .config = BMI160_REG_ACCEL_CONFIG,
136 .config_odr_mask = BMI160_ACCEL_CONFIG_ODR_MASK,
137 .config_bwp_mask = BMI160_ACCEL_CONFIG_BWP_MASK,
138 .range = BMI160_REG_ACCEL_RANGE,
139 .pmu_cmd_normal = BMI160_CMD_ACCEL_PM_NORMAL,
140 .pmu_cmd_suspend = BMI160_CMD_ACCEL_PM_SUSPEND,
141 },
142 [BMI160_GYRO] = {
143 .data = BMI160_REG_DATA_GYRO_XOUT_L,
144 .config = BMI160_REG_GYRO_CONFIG,
145 .config_odr_mask = BMI160_GYRO_CONFIG_ODR_MASK,
146 .config_bwp_mask = BMI160_GYRO_CONFIG_BWP_MASK,
147 .range = BMI160_REG_GYRO_RANGE,
148 .pmu_cmd_normal = BMI160_CMD_GYRO_PM_NORMAL,
149 .pmu_cmd_suspend = BMI160_CMD_GYRO_PM_SUSPEND,
150 },
151};
152
153struct bmi160_pmu_time {
154 unsigned long min;
155 unsigned long max;
156};
157
158static struct bmi160_pmu_time bmi160_pmu_time[] = {
159 [BMI160_ACCEL] = {
160 .min = BMI160_ACCEL_PMU_MIN_USLEEP,
161 .max = BMI160_ACCEL_PMU_MAX_USLEEP
162 },
163 [BMI160_GYRO] = {
164 .min = BMI160_GYRO_PMU_MIN_USLEEP,
165 .max = BMI160_GYRO_PMU_MIN_USLEEP,
166 },
167};
168
169struct bmi160_scale {
170 u8 bits;
171 int uscale;
172};
173
174struct bmi160_odr {
175 u8 bits;
176 int odr;
177 int uodr;
178};
179
180static const struct bmi160_scale bmi160_accel_scale[] = {
181 { BMI160_ACCEL_RANGE_2G, 598},
182 { BMI160_ACCEL_RANGE_4G, 1197},
183 { BMI160_ACCEL_RANGE_8G, 2394},
184 { BMI160_ACCEL_RANGE_16G, 4788},
185};
186
187static const struct bmi160_scale bmi160_gyro_scale[] = {
188 { BMI160_GYRO_RANGE_2000DPS, 1065},
189 { BMI160_GYRO_RANGE_1000DPS, 532},
190 { BMI160_GYRO_RANGE_500DPS, 266},
191 { BMI160_GYRO_RANGE_250DPS, 133},
192 { BMI160_GYRO_RANGE_125DPS, 66},
193};
194
195struct bmi160_scale_item {
196 const struct bmi160_scale *tbl;
197 int num;
198};
199
200static const struct bmi160_scale_item bmi160_scale_table[] = {
201 [BMI160_ACCEL] = {
202 .tbl = bmi160_accel_scale,
203 .num = ARRAY_SIZE(bmi160_accel_scale),
204 },
205 [BMI160_GYRO] = {
206 .tbl = bmi160_gyro_scale,
207 .num = ARRAY_SIZE(bmi160_gyro_scale),
208 },
209};
210
211static const struct bmi160_odr bmi160_accel_odr[] = {
212 {0x01, 0, 78125},
213 {0x02, 1, 5625},
214 {0x03, 3, 125},
215 {0x04, 6, 25},
216 {0x05, 12, 5},
217 {0x06, 25, 0},
218 {0x07, 50, 0},
219 {0x08, 100, 0},
220 {0x09, 200, 0},
221 {0x0A, 400, 0},
222 {0x0B, 800, 0},
223 {0x0C, 1600, 0},
224};
225
226static const struct bmi160_odr bmi160_gyro_odr[] = {
227 {0x06, 25, 0},
228 {0x07, 50, 0},
229 {0x08, 100, 0},
230 {0x09, 200, 0},
231 {0x0A, 400, 0},
232 {0x0B, 8000, 0},
233 {0x0C, 1600, 0},
234 {0x0D, 3200, 0},
235};
236
237struct bmi160_odr_item {
238 const struct bmi160_odr *tbl;
239 int num;
240};
241
242static const struct bmi160_odr_item bmi160_odr_table[] = {
243 [BMI160_ACCEL] = {
244 .tbl = bmi160_accel_odr,
245 .num = ARRAY_SIZE(bmi160_accel_odr),
246 },
247 [BMI160_GYRO] = {
248 .tbl = bmi160_gyro_odr,
249 .num = ARRAY_SIZE(bmi160_gyro_odr),
250 },
251};
252
253static const struct iio_chan_spec bmi160_channels[] = {
254 BMI160_CHANNEL(IIO_ACCEL, X, BMI160_SCAN_ACCEL_X),
255 BMI160_CHANNEL(IIO_ACCEL, Y, BMI160_SCAN_ACCEL_Y),
256 BMI160_CHANNEL(IIO_ACCEL, Z, BMI160_SCAN_ACCEL_Z),
257 BMI160_CHANNEL(IIO_ANGL_VEL, X, BMI160_SCAN_GYRO_X),
258 BMI160_CHANNEL(IIO_ANGL_VEL, Y, BMI160_SCAN_GYRO_Y),
259 BMI160_CHANNEL(IIO_ANGL_VEL, Z, BMI160_SCAN_GYRO_Z),
260 IIO_CHAN_SOFT_TIMESTAMP(BMI160_SCAN_TIMESTAMP),
261};
262
263static enum bmi160_sensor_type bmi160_to_sensor(enum iio_chan_type iio_type)
264{
265 switch (iio_type) {
266 case IIO_ACCEL:
267 return BMI160_ACCEL;
268 case IIO_ANGL_VEL:
269 return BMI160_GYRO;
270 default:
271 return -EINVAL;
272 }
273}
274
275static
276int bmi160_set_mode(struct bmi160_data *data, enum bmi160_sensor_type t,
277 bool mode)
278{
279 int ret;
280 u8 cmd;
281
282 if (mode)
283 cmd = bmi160_regs[t].pmu_cmd_normal;
284 else
285 cmd = bmi160_regs[t].pmu_cmd_suspend;
286
287 ret = regmap_write(data->regmap, BMI160_REG_CMD, cmd);
288 if (ret < 0)
289 return ret;
290
291 usleep_range(bmi160_pmu_time[t].min, bmi160_pmu_time[t].max);
292
293 return 0;
294}
295
296static
297int bmi160_set_scale(struct bmi160_data *data, enum bmi160_sensor_type t,
298 int uscale)
299{
300 int i;
301
302 for (i = 0; i < bmi160_scale_table[t].num; i++)
303 if (bmi160_scale_table[t].tbl[i].uscale == uscale)
304 break;
305
306 if (i == bmi160_scale_table[t].num)
307 return -EINVAL;
308
309 return regmap_write(data->regmap, bmi160_regs[t].range,
310 bmi160_scale_table[t].tbl[i].bits);
311}
312
313static
314int bmi160_get_scale(struct bmi160_data *data, enum bmi160_sensor_type t,
315 int *uscale)
316{
317 int i, ret, val;
318
319 ret = regmap_read(data->regmap, bmi160_regs[t].range, &val);
320 if (ret < 0)
321 return ret;
322
323 for (i = 0; i < bmi160_scale_table[t].num; i++)
324 if (bmi160_scale_table[t].tbl[i].bits == val) {
325 *uscale = bmi160_scale_table[t].tbl[i].uscale;
326 return 0;
327 }
328
329 return -EINVAL;
330}
331
332static int bmi160_get_data(struct bmi160_data *data, int chan_type,
333 int axis, int *val)
334{
335 u8 reg;
336 int ret;
337 __le16 sample;
338 enum bmi160_sensor_type t = bmi160_to_sensor(chan_type);
339
340 reg = bmi160_regs[t].data + (axis - IIO_MOD_X) * sizeof(__le16);
341
342 ret = regmap_bulk_read(data->regmap, reg, &sample, sizeof(__le16));
343 if (ret < 0)
344 return ret;
345
346 *val = sign_extend32(le16_to_cpu(sample), 15);
347
348 return 0;
349}
350
351static
352int bmi160_set_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
353 int odr, int uodr)
354{
355 int i;
356
357 for (i = 0; i < bmi160_odr_table[t].num; i++)
358 if (bmi160_odr_table[t].tbl[i].odr == odr &&
359 bmi160_odr_table[t].tbl[i].uodr == uodr)
360 break;
361
362 if (i >= bmi160_odr_table[t].num)
363 return -EINVAL;
364
365 return regmap_update_bits(data->regmap,
366 bmi160_regs[t].config,
367 bmi160_odr_table[t].tbl[i].bits,
368 bmi160_regs[t].config_odr_mask);
369}
370
371static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
372 int *odr, int *uodr)
373{
374 int i, val, ret;
375
376 ret = regmap_read(data->regmap, bmi160_regs[t].config, &val);
377 if (ret < 0)
378 return ret;
379
380 val &= bmi160_regs[t].config_odr_mask;
381
382 for (i = 0; i < bmi160_odr_table[t].num; i++)
383 if (val == bmi160_odr_table[t].tbl[i].bits)
384 break;
385
386 if (i >= bmi160_odr_table[t].num)
387 return -EINVAL;
388
389 *odr = bmi160_odr_table[t].tbl[i].odr;
390 *uodr = bmi160_odr_table[t].tbl[i].uodr;
391
392 return 0;
393}
394
395static irqreturn_t bmi160_trigger_handler(int irq, void *p)
396{
397 struct iio_poll_func *pf = p;
398 struct iio_dev *indio_dev = pf->indio_dev;
399 struct bmi160_data *data = iio_priv(indio_dev);
400 s16 buf[16]; /* 3 sens x 3 axis x s16 + 3 x s16 pad + 4 x s16 tstamp */
401 int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
402 __le16 sample;
403
404 for_each_set_bit(i, indio_dev->active_scan_mask,
405 indio_dev->masklength) {
406 ret = regmap_bulk_read(data->regmap, base + i * sizeof(__le16),
407 &sample, sizeof(__le16));
408 if (ret < 0)
409 goto done;
410 buf[j++] = sample;
411 }
412
413 iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
414done:
415 iio_trigger_notify_done(indio_dev->trig);
416 return IRQ_HANDLED;
417}
418
419static int bmi160_read_raw(struct iio_dev *indio_dev,
420 struct iio_chan_spec const *chan,
421 int *val, int *val2, long mask)
422{
423 int ret;
424 struct bmi160_data *data = iio_priv(indio_dev);
425
426 switch (mask) {
427 case IIO_CHAN_INFO_RAW:
428 ret = bmi160_get_data(data, chan->type, chan->channel2, val);
429 if (ret < 0)
430 return ret;
431 return IIO_VAL_INT;
432 case IIO_CHAN_INFO_SCALE:
433 *val = 0;
434 ret = bmi160_get_scale(data,
435 bmi160_to_sensor(chan->type), val2);
436 return ret < 0 ? ret : IIO_VAL_INT_PLUS_MICRO;
437 case IIO_CHAN_INFO_SAMP_FREQ:
438 ret = bmi160_get_odr(data, bmi160_to_sensor(chan->type),
439 val, val2);
440 return ret < 0 ? ret : IIO_VAL_INT_PLUS_MICRO;
441 default:
442 return -EINVAL;
443 }
444
445 return 0;
446}
447
448static int bmi160_write_raw(struct iio_dev *indio_dev,
449 struct iio_chan_spec const *chan,
450 int val, int val2, long mask)
451{
452 struct bmi160_data *data = iio_priv(indio_dev);
453
454 switch (mask) {
455 case IIO_CHAN_INFO_SCALE:
456 return bmi160_set_scale(data,
457 bmi160_to_sensor(chan->type), val2);
458 break;
459 case IIO_CHAN_INFO_SAMP_FREQ:
460 return bmi160_set_odr(data, bmi160_to_sensor(chan->type),
461 val, val2);
462 default:
463 return -EINVAL;
464 }
465
466 return 0;
467}
468
469static const struct iio_info bmi160_info = {
470 .driver_module = THIS_MODULE,
471 .read_raw = bmi160_read_raw,
472 .write_raw = bmi160_write_raw,
473};
474
475static const char *bmi160_match_acpi_device(struct device *dev)
476{
477 const struct acpi_device_id *id;
478
479 id = acpi_match_device(dev->driver->acpi_match_table, dev);
480 if (!id)
481 return NULL;
482
483 return dev_name(dev);
484}
485
486static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
487{
488 int ret;
489 unsigned int val;
490 struct device *dev = regmap_get_device(data->regmap);
491
492 ret = regmap_write(data->regmap, BMI160_REG_CMD, BMI160_CMD_SOFTRESET);
493 if (ret < 0)
494 return ret;
495
496 usleep_range(BMI160_SOFTRESET_USLEEP, BMI160_SOFTRESET_USLEEP + 1);
497
498 /*
499 * CS rising edge is needed before starting SPI, so do a dummy read
500 * See Section 3.2.1, page 86 of the datasheet
501 */
502 if (use_spi) {
503 ret = regmap_read(data->regmap, BMI160_REG_DUMMY, &val);
504 if (ret < 0)
505 return ret;
506 }
507
508 ret = regmap_read(data->regmap, BMI160_REG_CHIP_ID, &val);
509 if (ret < 0) {
510 dev_err(dev, "Error reading chip id\n");
511 return ret;
512 }
513 if (val != BMI160_CHIP_ID_VAL) {
514 dev_err(dev, "Wrong chip id, got %x expected %x\n",
515 val, BMI160_CHIP_ID_VAL);
516 return -ENODEV;
517 }
518
519 ret = bmi160_set_mode(data, BMI160_ACCEL, true);
520 if (ret < 0)
521 return ret;
522
523 ret = bmi160_set_mode(data, BMI160_GYRO, true);
524 if (ret < 0)
525 return ret;
526
527 return 0;
528}
529
530static void bmi160_chip_uninit(struct bmi160_data *data)
531{
532 bmi160_set_mode(data, BMI160_GYRO, false);
533 bmi160_set_mode(data, BMI160_ACCEL, false);
534}
535
536int bmi160_core_probe(struct device *dev, struct regmap *regmap,
537 const char *name, bool use_spi)
538{
539 struct iio_dev *indio_dev;
540 struct bmi160_data *data;
541 int ret;
542
543 indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
544 if (!indio_dev)
545 return -ENOMEM;
546
547 data = iio_priv(indio_dev);
548 dev_set_drvdata(dev, indio_dev);
549 data->regmap = regmap;
550
551 ret = bmi160_chip_init(data, use_spi);
552 if (ret < 0)
553 return ret;
554
555 if (!name && ACPI_HANDLE(dev))
556 name = bmi160_match_acpi_device(dev);
557
558 indio_dev->dev.parent = dev;
559 indio_dev->channels = bmi160_channels;
560 indio_dev->num_channels = ARRAY_SIZE(bmi160_channels);
561 indio_dev->name = name;
562 indio_dev->modes = INDIO_DIRECT_MODE;
563 indio_dev->info = &bmi160_info;
564
565 ret = iio_triggered_buffer_setup(indio_dev, NULL,
566 bmi160_trigger_handler, NULL);
567 if (ret < 0)
568 goto uninit;
569
570 ret = iio_device_register(indio_dev);
571 if (ret < 0)
572 goto buffer_cleanup;
573
574 return 0;
575buffer_cleanup:
576 iio_triggered_buffer_cleanup(indio_dev);
577uninit:
578 bmi160_chip_uninit(data);
579 return ret;
580}
581EXPORT_SYMBOL_GPL(bmi160_core_probe);
582
/* Common remove path: tear down in reverse order of bmi160_core_probe(). */
void bmi160_core_remove(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	iio_device_unregister(indio_dev);
	iio_triggered_buffer_cleanup(indio_dev);
	bmi160_chip_uninit(iio_priv(indio_dev));
}
EXPORT_SYMBOL_GPL(bmi160_core_remove);
593
594MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
595MODULE_DESCRIPTION("Bosch BMI160 driver");
596MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
new file mode 100644
index 000000000000..07a179d8fb48
--- /dev/null
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -0,0 +1,72 @@
1/*
2 * BMI160 - Bosch IMU, I2C bits
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This file is subject to the terms and conditions of version 2 of
7 * the GNU General Public License. See the file COPYING in the main
8 * directory of this archive for more details.
9 *
10 * 7-bit I2C slave address is:
11 * - 0x68 if SDO is pulled to GND
12 * - 0x69 if SDO is pulled to VDDIO
13 */
14#include <linux/module.h>
15#include <linux/i2c.h>
16#include <linux/regmap.h>
17#include <linux/acpi.h>
18
19#include "bmi160.h"
20
21static int bmi160_i2c_probe(struct i2c_client *client,
22 const struct i2c_device_id *id)
23{
24 struct regmap *regmap;
25 const char *name = NULL;
26
27 regmap = devm_regmap_init_i2c(client, &bmi160_regmap_config);
28 if (IS_ERR(regmap)) {
29 dev_err(&client->dev, "Failed to register i2c regmap %d\n",
30 (int)PTR_ERR(regmap));
31 return PTR_ERR(regmap);
32 }
33
34 if (id)
35 name = id->name;
36
37 return bmi160_core_probe(&client->dev, regmap, name, false);
38}
39
40static int bmi160_i2c_remove(struct i2c_client *client)
41{
42 bmi160_core_remove(&client->dev);
43
44 return 0;
45}
46
47static const struct i2c_device_id bmi160_i2c_id[] = {
48 {"bmi160", 0},
49 {}
50};
51MODULE_DEVICE_TABLE(i2c, bmi160_i2c_id);
52
53static const struct acpi_device_id bmi160_acpi_match[] = {
54 {"BMI0160", 0},
55 { },
56};
57MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
58
59static struct i2c_driver bmi160_i2c_driver = {
60 .driver = {
61 .name = "bmi160_i2c",
62 .acpi_match_table = ACPI_PTR(bmi160_acpi_match),
63 },
64 .probe = bmi160_i2c_probe,
65 .remove = bmi160_i2c_remove,
66 .id_table = bmi160_i2c_id,
67};
68module_i2c_driver(bmi160_i2c_driver);
69
70MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
71MODULE_DESCRIPTION("BMI160 I2C driver");
72MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/bmi160/bmi160_spi.c b/drivers/iio/imu/bmi160/bmi160_spi.c
new file mode 100644
index 000000000000..1ec8b12bd984
--- /dev/null
+++ b/drivers/iio/imu/bmi160/bmi160_spi.c
@@ -0,0 +1,63 @@
1/*
2 * BMI160 - Bosch IMU, SPI bits
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This file is subject to the terms and conditions of version 2 of
7 * the GNU General Public License. See the file COPYING in the main
8 * directory of this archive for more details.
9 */
10#include <linux/module.h>
11#include <linux/spi/spi.h>
12#include <linux/regmap.h>
13#include <linux/acpi.h>
14
15#include "bmi160.h"
16
17static int bmi160_spi_probe(struct spi_device *spi)
18{
19 struct regmap *regmap;
20 const struct spi_device_id *id = spi_get_device_id(spi);
21
22 regmap = devm_regmap_init_spi(spi, &bmi160_regmap_config);
23 if (IS_ERR(regmap)) {
24 dev_err(&spi->dev, "Failed to register spi regmap %d\n",
25 (int)PTR_ERR(regmap));
26 return PTR_ERR(regmap);
27 }
28 return bmi160_core_probe(&spi->dev, regmap, id->name, true);
29}
30
31static int bmi160_spi_remove(struct spi_device *spi)
32{
33 bmi160_core_remove(&spi->dev);
34
35 return 0;
36}
37
38static const struct spi_device_id bmi160_spi_id[] = {
39 {"bmi160", 0},
40 {}
41};
42MODULE_DEVICE_TABLE(spi, bmi160_spi_id);
43
44static const struct acpi_device_id bmi160_acpi_match[] = {
45 {"BMI0160", 0},
46 { },
47};
48MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
49
50static struct spi_driver bmi160_spi_driver = {
51 .probe = bmi160_spi_probe,
52 .remove = bmi160_spi_remove,
53 .id_table = bmi160_spi_id,
54 .driver = {
55 .acpi_match_table = ACPI_PTR(bmi160_acpi_match),
56 .name = "bmi160_spi",
57 },
58};
59module_spi_driver(bmi160_spi_driver);
60
61MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
62MODULE_DESCRIPTION("Bosch BMI160 SPI driver");
63MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 847455a2d6bb..f756feecfa4c 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -13,10 +13,8 @@ config INV_MPU6050_I2C
13 select INV_MPU6050_IIO 13 select INV_MPU6050_IIO
14 select REGMAP_I2C 14 select REGMAP_I2C
15 help 15 help
16 This driver supports the Invensense MPU6050 devices. 16 This driver supports the Invensense MPU6050/6500/9150 motion tracking
17 This driver can also support MPU6500 in MPU6050 compatibility mode 17 devices over I2C.
18 and also in MPU6500 mode with some limitations.
19 It is a gyroscope/accelerometer combo device.
20 This driver can be built as a module. The module will be called 18 This driver can be built as a module. The module will be called
21 inv-mpu6050-i2c. 19 inv-mpu6050-i2c.
22 20
@@ -26,7 +24,7 @@ config INV_MPU6050_SPI
26 select INV_MPU6050_IIO 24 select INV_MPU6050_IIO
27 select REGMAP_SPI 25 select REGMAP_SPI
28 help 26 help
29 This driver supports the Invensense MPU6050 devices. 27 This driver supports the Invensense MPU6000/6500/9150 motion tracking
30 It is a gyroscope/accelerometer combo device. 28 devices over SPI.
31 This driver can be built as a module. The module will be called 29 This driver can be built as a module. The module will be called
32 inv-mpu6050-spi. 30 inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 0c2bded2b5b7..ee40dae5ab58 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -87,16 +87,29 @@ static const struct inv_mpu6050_chip_config chip_config_6050 = {
87 .accl_fs = INV_MPU6050_FS_02G, 87 .accl_fs = INV_MPU6050_FS_02G,
88}; 88};
89 89
90/* Indexed by enum inv_devices */
90static const struct inv_mpu6050_hw hw_info[] = { 91static const struct inv_mpu6050_hw hw_info[] = {
91 { 92 {
92 .num_reg = 117, 93 .whoami = INV_MPU6050_WHOAMI_VALUE,
94 .name = "MPU6050",
95 .reg = &reg_set_6050,
96 .config = &chip_config_6050,
97 },
98 {
99 .whoami = INV_MPU6500_WHOAMI_VALUE,
93 .name = "MPU6500", 100 .name = "MPU6500",
94 .reg = &reg_set_6500, 101 .reg = &reg_set_6500,
95 .config = &chip_config_6050, 102 .config = &chip_config_6050,
96 }, 103 },
97 { 104 {
98 .num_reg = 117, 105 .whoami = INV_MPU6000_WHOAMI_VALUE,
99 .name = "MPU6050", 106 .name = "MPU6000",
107 .reg = &reg_set_6050,
108 .config = &chip_config_6050,
109 },
110 {
111 .whoami = INV_MPU9150_WHOAMI_VALUE,
112 .name = "MPU9150",
100 .reg = &reg_set_6050, 113 .reg = &reg_set_6050,
101 .config = &chip_config_6050, 114 .config = &chip_config_6050,
102 }, 115 },
@@ -599,6 +612,10 @@ inv_fifo_rate_show(struct device *dev, struct device_attribute *attr,
599/** 612/**
600 * inv_attr_show() - calling this function will show current 613 * inv_attr_show() - calling this function will show current
601 * parameters. 614 * parameters.
615 *
616 * Deprecated in favor of IIO mounting matrix API.
617 *
618 * See inv_get_mount_matrix()
602 */ 619 */
603static ssize_t inv_attr_show(struct device *dev, struct device_attribute *attr, 620static ssize_t inv_attr_show(struct device *dev, struct device_attribute *attr,
604 char *buf) 621 char *buf)
@@ -643,6 +660,18 @@ static int inv_mpu6050_validate_trigger(struct iio_dev *indio_dev,
643 return 0; 660 return 0;
644} 661}
645 662
663static const struct iio_mount_matrix *
664inv_get_mount_matrix(const struct iio_dev *indio_dev,
665 const struct iio_chan_spec *chan)
666{
667 return &((struct inv_mpu6050_state *)iio_priv(indio_dev))->orientation;
668}
669
670static const struct iio_chan_spec_ext_info inv_ext_info[] = {
671 IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, inv_get_mount_matrix),
672 { },
673};
674
646#define INV_MPU6050_CHAN(_type, _channel2, _index) \ 675#define INV_MPU6050_CHAN(_type, _channel2, _index) \
647 { \ 676 { \
648 .type = _type, \ 677 .type = _type, \
@@ -659,6 +688,7 @@ static int inv_mpu6050_validate_trigger(struct iio_dev *indio_dev,
659 .shift = 0, \ 688 .shift = 0, \
660 .endianness = IIO_BE, \ 689 .endianness = IIO_BE, \
661 }, \ 690 }, \
691 .ext_info = inv_ext_info, \
662 } 692 }
663 693
664static const struct iio_chan_spec inv_mpu_channels[] = { 694static const struct iio_chan_spec inv_mpu_channels[] = {
@@ -691,14 +721,16 @@ static IIO_CONST_ATTR(in_accel_scale_available,
691 "0.000598 0.001196 0.002392 0.004785"); 721 "0.000598 0.001196 0.002392 0.004785");
692static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR, inv_fifo_rate_show, 722static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR, inv_fifo_rate_show,
693 inv_mpu6050_fifo_rate_store); 723 inv_mpu6050_fifo_rate_store);
724
725/* Deprecated: kept for userspace backward compatibility. */
694static IIO_DEVICE_ATTR(in_gyro_matrix, S_IRUGO, inv_attr_show, NULL, 726static IIO_DEVICE_ATTR(in_gyro_matrix, S_IRUGO, inv_attr_show, NULL,
695 ATTR_GYRO_MATRIX); 727 ATTR_GYRO_MATRIX);
696static IIO_DEVICE_ATTR(in_accel_matrix, S_IRUGO, inv_attr_show, NULL, 728static IIO_DEVICE_ATTR(in_accel_matrix, S_IRUGO, inv_attr_show, NULL,
697 ATTR_ACCL_MATRIX); 729 ATTR_ACCL_MATRIX);
698 730
699static struct attribute *inv_attributes[] = { 731static struct attribute *inv_attributes[] = {
700 &iio_dev_attr_in_gyro_matrix.dev_attr.attr, 732 &iio_dev_attr_in_gyro_matrix.dev_attr.attr, /* deprecated */
701 &iio_dev_attr_in_accel_matrix.dev_attr.attr, 733 &iio_dev_attr_in_accel_matrix.dev_attr.attr, /* deprecated */
702 &iio_dev_attr_sampling_frequency.dev_attr.attr, 734 &iio_dev_attr_sampling_frequency.dev_attr.attr,
703 &iio_const_attr_sampling_frequency_available.dev_attr.attr, 735 &iio_const_attr_sampling_frequency_available.dev_attr.attr,
704 &iio_const_attr_in_accel_scale_available.dev_attr.attr, 736 &iio_const_attr_in_accel_scale_available.dev_attr.attr,
@@ -725,6 +757,7 @@ static const struct iio_info mpu_info = {
725static int inv_check_and_setup_chip(struct inv_mpu6050_state *st) 757static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
726{ 758{
727 int result; 759 int result;
760 unsigned int regval;
728 761
729 st->hw = &hw_info[st->chip_type]; 762 st->hw = &hw_info[st->chip_type];
730 st->reg = hw_info[st->chip_type].reg; 763 st->reg = hw_info[st->chip_type].reg;
@@ -735,6 +768,17 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
735 if (result) 768 if (result)
736 return result; 769 return result;
737 msleep(INV_MPU6050_POWER_UP_TIME); 770 msleep(INV_MPU6050_POWER_UP_TIME);
771
772 /* check chip self-identification */
773 result = regmap_read(st->map, INV_MPU6050_REG_WHOAMI, &regval);
774 if (result)
775 return result;
776 if (regval != st->hw->whoami) {
777 dev_warn(regmap_get_device(st->map),
778 "whoami mismatch got %#02x expected %#02hhx for %s\n",
779 regval, st->hw->whoami, st->hw->name);
780 }
781
738 /* 782 /*
739 * toggle power state. After reset, the sleep bit could be on 783 * toggle power state. After reset, the sleep bit could be on
740 * or off depending on the OTP settings. Toggling power would 784 * or off depending on the OTP settings. Toggling power would
@@ -773,14 +817,31 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
773 if (!indio_dev) 817 if (!indio_dev)
774 return -ENOMEM; 818 return -ENOMEM;
775 819
820 BUILD_BUG_ON(ARRAY_SIZE(hw_info) != INV_NUM_PARTS);
821 if (chip_type < 0 || chip_type >= INV_NUM_PARTS) {
822 dev_err(dev, "Bad invensense chip_type=%d name=%s\n",
823 chip_type, name);
824 return -ENODEV;
825 }
776 st = iio_priv(indio_dev); 826 st = iio_priv(indio_dev);
777 st->chip_type = chip_type; 827 st->chip_type = chip_type;
778 st->powerup_count = 0; 828 st->powerup_count = 0;
779 st->irq = irq; 829 st->irq = irq;
780 st->map = regmap; 830 st->map = regmap;
831
781 pdata = dev_get_platdata(dev); 832 pdata = dev_get_platdata(dev);
782 if (pdata) 833 if (!pdata) {
834 result = of_iio_read_mount_matrix(dev, "mount-matrix",
835 &st->orientation);
836 if (result) {
837 dev_err(dev, "Failed to retrieve mounting matrix %d\n",
838 result);
839 return result;
840 }
841 } else {
783 st->plat_data = *pdata; 842 st->plat_data = *pdata;
843 }
844
784 /* power is turned on inside check chip type*/ 845 /* power is turned on inside check chip type*/
785 result = inv_check_and_setup_chip(st); 846 result = inv_check_and_setup_chip(st);
786 if (result) 847 if (result)
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 9ba1179105bd..e1fd7fa53e3b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -169,13 +169,14 @@ static int inv_mpu_remove(struct i2c_client *client)
169static const struct i2c_device_id inv_mpu_id[] = { 169static const struct i2c_device_id inv_mpu_id[] = {
170 {"mpu6050", INV_MPU6050}, 170 {"mpu6050", INV_MPU6050},
171 {"mpu6500", INV_MPU6500}, 171 {"mpu6500", INV_MPU6500},
172 {"mpu9150", INV_MPU9150},
172 {} 173 {}
173}; 174};
174 175
175MODULE_DEVICE_TABLE(i2c, inv_mpu_id); 176MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
176 177
177static const struct acpi_device_id inv_acpi_match[] = { 178static const struct acpi_device_id inv_acpi_match[] = {
178 {"INVN6500", 0}, 179 {"INVN6500", INV_MPU6500},
179 { }, 180 { },
180}; 181};
181 182
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index bb3cef6d7059..3bf8544ccc9f 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -69,6 +69,7 @@ enum inv_devices {
69 INV_MPU6050, 69 INV_MPU6050,
70 INV_MPU6500, 70 INV_MPU6500,
71 INV_MPU6000, 71 INV_MPU6000,
72 INV_MPU9150,
72 INV_NUM_PARTS 73 INV_NUM_PARTS
73}; 74};
74 75
@@ -94,13 +95,13 @@ struct inv_mpu6050_chip_config {
94 95
95/** 96/**
96 * struct inv_mpu6050_hw - Other important hardware information. 97 * struct inv_mpu6050_hw - Other important hardware information.
97 * @num_reg: Number of registers on device. 98 * @whoami: Self identification byte from WHO_AM_I register
98 * @name: name of the chip. 99 * @name: name of the chip.
99 * @reg: register map of the chip. 100 * @reg: register map of the chip.
100 * @config: configuration of the chip. 101 * @config: configuration of the chip.
101 */ 102 */
102struct inv_mpu6050_hw { 103struct inv_mpu6050_hw {
103 u8 num_reg; 104 u8 whoami;
104 u8 *name; 105 u8 *name;
105 const struct inv_mpu6050_reg_map *reg; 106 const struct inv_mpu6050_reg_map *reg;
106 const struct inv_mpu6050_chip_config *config; 107 const struct inv_mpu6050_chip_config *config;
@@ -115,7 +116,8 @@ struct inv_mpu6050_hw {
115 * @hw: Other hardware-specific information. 116 * @hw: Other hardware-specific information.
116 * @chip_type: chip type. 117 * @chip_type: chip type.
117 * @time_stamp_lock: spin lock to time stamp. 118 * @time_stamp_lock: spin lock to time stamp.
118 * @plat_data: platform data. 119 * @plat_data: platform data (deprecated in favor of @orientation).
120 * @orientation: sensor chip orientation relative to main hardware.
119 * @timestamps: kfifo queue to store time stamp. 121 * @timestamps: kfifo queue to store time stamp.
120 * @map regmap pointer. 122 * @map regmap pointer.
121 * @irq interrupt number. 123 * @irq interrupt number.
@@ -132,6 +134,7 @@ struct inv_mpu6050_state {
132 struct i2c_client *mux_client; 134 struct i2c_client *mux_client;
133 unsigned int powerup_count; 135 unsigned int powerup_count;
134 struct inv_mpu6050_platform_data plat_data; 136 struct inv_mpu6050_platform_data plat_data;
137 struct iio_mount_matrix orientation;
135 DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE); 138 DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
136 struct regmap *map; 139 struct regmap *map;
137 int irq; 140 int irq;
@@ -216,6 +219,13 @@ struct inv_mpu6050_state {
216#define INV_MPU6050_MIN_FIFO_RATE 4 219#define INV_MPU6050_MIN_FIFO_RATE 4
217#define INV_MPU6050_ONE_K_HZ 1000 220#define INV_MPU6050_ONE_K_HZ 1000
218 221
222#define INV_MPU6050_REG_WHOAMI 117
223
224#define INV_MPU6000_WHOAMI_VALUE 0x68
225#define INV_MPU6050_WHOAMI_VALUE 0x68
226#define INV_MPU6500_WHOAMI_VALUE 0x70
227#define INV_MPU9150_WHOAMI_VALUE 0x68
228
219/* scan element definition */ 229/* scan element definition */
220enum inv_mpu6050_scan { 230enum inv_mpu6050_scan {
221 INV_MPU6050_SCAN_ACCL_X, 231 INV_MPU6050_SCAN_ACCL_X,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 7bcb8d839f05..190a4a51c830 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -44,9 +44,19 @@ static int inv_mpu_i2c_disable(struct iio_dev *indio_dev)
44static int inv_mpu_probe(struct spi_device *spi) 44static int inv_mpu_probe(struct spi_device *spi)
45{ 45{
46 struct regmap *regmap; 46 struct regmap *regmap;
47 const struct spi_device_id *id = spi_get_device_id(spi); 47 const struct spi_device_id *spi_id;
48 const char *name = id ? id->name : NULL; 48 const struct acpi_device_id *acpi_id;
49 const int chip_type = id ? id->driver_data : 0; 49 const char *name = NULL;
50 enum inv_devices chip_type;
51
52 if ((spi_id = spi_get_device_id(spi))) {
53 chip_type = (enum inv_devices)spi_id->driver_data;
54 name = spi_id->name;
55 } else if ((acpi_id = acpi_match_device(spi->dev.driver->acpi_match_table, &spi->dev))) {
56 chip_type = (enum inv_devices)acpi_id->driver_data;
57 } else {
58 return -ENODEV;
59 }
50 60
51 regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config); 61 regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
52 if (IS_ERR(regmap)) { 62 if (IS_ERR(regmap)) {
@@ -70,13 +80,15 @@ static int inv_mpu_remove(struct spi_device *spi)
70 */ 80 */
71static const struct spi_device_id inv_mpu_id[] = { 81static const struct spi_device_id inv_mpu_id[] = {
72 {"mpu6000", INV_MPU6000}, 82 {"mpu6000", INV_MPU6000},
83 {"mpu6500", INV_MPU6500},
84 {"mpu9150", INV_MPU9150},
73 {} 85 {}
74}; 86};
75 87
76MODULE_DEVICE_TABLE(spi, inv_mpu_id); 88MODULE_DEVICE_TABLE(spi, inv_mpu_id);
77 89
78static const struct acpi_device_id inv_acpi_match[] = { 90static const struct acpi_device_id inv_acpi_match[] = {
79 {"INVN6000", 0}, 91 {"INVN6000", INV_MPU6000},
80 { }, 92 { },
81}; 93};
82MODULE_DEVICE_TABLE(acpi, inv_acpi_match); 94MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index e5306b4e020e..2e7dd5754a56 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -14,7 +14,6 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/i2c.h> 15#include <linux/i2c.h>
16#include <linux/acpi.h> 16#include <linux/acpi.h>
17#include <linux/gpio/consumer.h>
18#include <linux/interrupt.h> 17#include <linux/interrupt.h>
19#include <linux/pm.h> 18#include <linux/pm.h>
20#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 70cb7eb0a75c..e6319a9346b2 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/anon_inodes.h> 26#include <linux/anon_inodes.h>
27#include <linux/debugfs.h> 27#include <linux/debugfs.h>
28#include <linux/mutex.h>
28#include <linux/iio/iio.h> 29#include <linux/iio/iio.h>
29#include "iio_core.h" 30#include "iio_core.h"
30#include "iio_core_trigger.h" 31#include "iio_core_trigger.h"
@@ -78,6 +79,7 @@ static const char * const iio_chan_type_name_spec[] = {
78 [IIO_CONCENTRATION] = "concentration", 79 [IIO_CONCENTRATION] = "concentration",
79 [IIO_RESISTANCE] = "resistance", 80 [IIO_RESISTANCE] = "resistance",
80 [IIO_PH] = "ph", 81 [IIO_PH] = "ph",
82 [IIO_UVINDEX] = "uvindex",
81}; 83};
82 84
83static const char * const iio_modifier_names[] = { 85static const char * const iio_modifier_names[] = {
@@ -100,6 +102,7 @@ static const char * const iio_modifier_names[] = {
100 [IIO_MOD_LIGHT_RED] = "red", 102 [IIO_MOD_LIGHT_RED] = "red",
101 [IIO_MOD_LIGHT_GREEN] = "green", 103 [IIO_MOD_LIGHT_GREEN] = "green",
102 [IIO_MOD_LIGHT_BLUE] = "blue", 104 [IIO_MOD_LIGHT_BLUE] = "blue",
105 [IIO_MOD_LIGHT_UV] = "uv",
103 [IIO_MOD_QUATERNION] = "quaternion", 106 [IIO_MOD_QUATERNION] = "quaternion",
104 [IIO_MOD_TEMP_AMBIENT] = "ambient", 107 [IIO_MOD_TEMP_AMBIENT] = "ambient",
105 [IIO_MOD_TEMP_OBJECT] = "object", 108 [IIO_MOD_TEMP_OBJECT] = "object",
@@ -409,6 +412,88 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
409} 412}
410EXPORT_SYMBOL_GPL(iio_enum_write); 413EXPORT_SYMBOL_GPL(iio_enum_write);
411 414
415static const struct iio_mount_matrix iio_mount_idmatrix = {
416 .rotation = {
417 "1", "0", "0",
418 "0", "1", "0",
419 "0", "0", "1"
420 }
421};
422
423static int iio_setup_mount_idmatrix(const struct device *dev,
424 struct iio_mount_matrix *matrix)
425{
426 *matrix = iio_mount_idmatrix;
427 dev_info(dev, "mounting matrix not found: using identity...\n");
428 return 0;
429}
430
431ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
432 const struct iio_chan_spec *chan, char *buf)
433{
434 const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *)
435 priv)(indio_dev, chan);
436
437 if (IS_ERR(mtx))
438 return PTR_ERR(mtx);
439
440 if (!mtx)
441 mtx = &iio_mount_idmatrix;
442
443 return snprintf(buf, PAGE_SIZE, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
444 mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
445 mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
446 mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
447}
448EXPORT_SYMBOL_GPL(iio_show_mount_matrix);
449
450/**
451 * of_iio_read_mount_matrix() - retrieve iio device mounting matrix from
452 * device-tree "mount-matrix" property
453 * @dev: device the mounting matrix property is assigned to
454 * @propname: device specific mounting matrix property name
455 * @matrix: where to store retrieved matrix
456 *
457 * If device is assigned no mounting matrix property, a default 3x3 identity
458 * matrix will be filled in.
459 *
460 * Return: 0 if success, or a negative error code on failure.
461 */
462#ifdef CONFIG_OF
463int of_iio_read_mount_matrix(const struct device *dev,
464 const char *propname,
465 struct iio_mount_matrix *matrix)
466{
467 if (dev->of_node) {
468 int err = of_property_read_string_array(dev->of_node,
469 propname, matrix->rotation,
470 ARRAY_SIZE(iio_mount_idmatrix.rotation));
471
472 if (err == ARRAY_SIZE(iio_mount_idmatrix.rotation))
473 return 0;
474
475 if (err >= 0)
476 /* Invalid number of matrix entries. */
477 return -EINVAL;
478
479 if (err != -EINVAL)
480 /* Invalid matrix declaration format. */
481 return err;
482 }
483
484 /* Matrix was not declared at all: fallback to identity. */
485 return iio_setup_mount_idmatrix(dev, matrix);
486}
487#else
488int of_iio_read_mount_matrix(const struct device *dev,
489 const char *propname,
490 struct iio_mount_matrix *matrix)
491{
492 return iio_setup_mount_idmatrix(dev, matrix);
493}
494#endif
495EXPORT_SYMBOL(of_iio_read_mount_matrix);
496
412/** 497/**
413 * iio_format_value() - Formats a IIO value into its string representation 498 * iio_format_value() - Formats a IIO value into its string representation
414 * @buf: The buffer to which the formatted value gets written 499 * @buf: The buffer to which the formatted value gets written
@@ -1375,6 +1460,44 @@ void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev)
1375} 1460}
1376EXPORT_SYMBOL_GPL(devm_iio_device_unregister); 1461EXPORT_SYMBOL_GPL(devm_iio_device_unregister);
1377 1462
1463/**
1464 * iio_device_claim_direct_mode - Keep device in direct mode
1465 * @indio_dev: the iio_dev associated with the device
1466 *
1467 * If the device is in direct mode it is guaranteed to stay
1468 * that way until iio_device_release_direct_mode() is called.
1469 *
1470 * Use with iio_device_release_direct_mode()
1471 *
1472 * Returns: 0 on success, -EBUSY on failure
1473 */
1474int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
1475{
1476 mutex_lock(&indio_dev->mlock);
1477
1478 if (iio_buffer_enabled(indio_dev)) {
1479 mutex_unlock(&indio_dev->mlock);
1480 return -EBUSY;
1481 }
1482 return 0;
1483}
1484EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);
1485
1486/**
1487 * iio_device_release_direct_mode - releases claim on direct mode
1488 * @indio_dev: the iio_dev associated with the device
1489 *
1490 * Release the claim. Device is no longer guaranteed to stay
1491 * in direct mode.
1492 *
1493 * Use with iio_device_claim_direct_mode()
1494 */
1495void iio_device_release_direct_mode(struct iio_dev *indio_dev)
1496{
1497 mutex_unlock(&indio_dev->mlock);
1498}
1499EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
1500
1378subsys_initcall(iio_init); 1501subsys_initcall(iio_init);
1379module_exit(iio_exit); 1502module_exit(iio_exit);
1380 1503
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 734a0042de0c..c4757e6367e7 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -356,6 +356,54 @@ void iio_channel_release(struct iio_channel *channel)
356} 356}
357EXPORT_SYMBOL_GPL(iio_channel_release); 357EXPORT_SYMBOL_GPL(iio_channel_release);
358 358
359static void devm_iio_channel_free(struct device *dev, void *res)
360{
361 struct iio_channel *channel = *(struct iio_channel **)res;
362
363 iio_channel_release(channel);
364}
365
366static int devm_iio_channel_match(struct device *dev, void *res, void *data)
367{
368 struct iio_channel **r = res;
369
370 if (!r || !*r) {
371 WARN_ON(!r || !*r);
372 return 0;
373 }
374
375 return *r == data;
376}
377
378struct iio_channel *devm_iio_channel_get(struct device *dev,
379 const char *channel_name)
380{
381 struct iio_channel **ptr, *channel;
382
383 ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
384 if (!ptr)
385 return ERR_PTR(-ENOMEM);
386
387 channel = iio_channel_get(dev, channel_name);
388 if (IS_ERR(channel)) {
389 devres_free(ptr);
390 return channel;
391 }
392
393 *ptr = channel;
394 devres_add(dev, ptr);
395
396 return channel;
397}
398EXPORT_SYMBOL_GPL(devm_iio_channel_get);
399
400void devm_iio_channel_release(struct device *dev, struct iio_channel *channel)
401{
402 WARN_ON(devres_release(dev, devm_iio_channel_free,
403 devm_iio_channel_match, channel));
404}
405EXPORT_SYMBOL_GPL(devm_iio_channel_release);
406
359struct iio_channel *iio_channel_get_all(struct device *dev) 407struct iio_channel *iio_channel_get_all(struct device *dev)
360{ 408{
361 const char *name; 409 const char *name;
@@ -441,6 +489,42 @@ void iio_channel_release_all(struct iio_channel *channels)
441} 489}
442EXPORT_SYMBOL_GPL(iio_channel_release_all); 490EXPORT_SYMBOL_GPL(iio_channel_release_all);
443 491
492static void devm_iio_channel_free_all(struct device *dev, void *res)
493{
494 struct iio_channel *channels = *(struct iio_channel **)res;
495
496 iio_channel_release_all(channels);
497}
498
499struct iio_channel *devm_iio_channel_get_all(struct device *dev)
500{
501 struct iio_channel **ptr, *channels;
502
503 ptr = devres_alloc(devm_iio_channel_free_all, sizeof(*ptr), GFP_KERNEL);
504 if (!ptr)
505 return ERR_PTR(-ENOMEM);
506
507 channels = iio_channel_get_all(dev);
508 if (IS_ERR(channels)) {
509 devres_free(ptr);
510 return channels;
511 }
512
513 *ptr = channels;
514 devres_add(dev, ptr);
515
516 return channels;
517}
518EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
519
520void devm_iio_channel_release_all(struct device *dev,
521 struct iio_channel *channels)
522{
523 WARN_ON(devres_release(dev, devm_iio_channel_free_all,
524 devm_iio_channel_match, channels));
525}
526EXPORT_SYMBOL_GPL(devm_iio_channel_release_all);
527
444static int iio_channel_read(struct iio_channel *chan, int *val, int *val2, 528static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
445 enum iio_chan_info_enum info) 529 enum iio_chan_info_enum info)
446{ 530{
@@ -452,7 +536,7 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
452 if (val2 == NULL) 536 if (val2 == NULL)
453 val2 = &unused; 537 val2 = &unused;
454 538
455 if(!iio_channel_has_info(chan->channel, info)) 539 if (!iio_channel_has_info(chan->channel, info))
456 return -EINVAL; 540 return -EINVAL;
457 541
458 if (chan->indio_dev->info->read_raw_multi) { 542 if (chan->indio_dev->info->read_raw_multi) {
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index cfd3df8416bb..7c566f516572 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -73,6 +73,17 @@ config BH1750
73 To compile this driver as a module, choose M here: the module will 73 To compile this driver as a module, choose M here: the module will
74 be called bh1750. 74 be called bh1750.
75 75
76config BH1780
77 tristate "ROHM BH1780 ambient light sensor"
78 depends on I2C
79 depends on !SENSORS_BH1780
80 help
81 Say Y here to build support for the ROHM BH1780GLI ambient
82 light sensor.
83
84 To compile this driver as a module, choose M here: the module will
85 be called bh1780.
86
76config CM32181 87config CM32181
77 depends on I2C 88 depends on I2C
78 tristate "CM32181 driver" 89 tristate "CM32181 driver"
@@ -223,6 +234,17 @@ config LTR501
223 This driver can also be built as a module. If so, the module 234 This driver can also be built as a module. If so, the module
224 will be called ltr501. 235 will be called ltr501.
225 236
237config MAX44000
238 tristate "MAX44000 Ambient and Infrared Proximity Sensor"
239 depends on I2C
240 select REGMAP_I2C
241 help
242 Say Y here if you want to build support for Maxim Integrated's
243 MAX44000 ambient and infrared proximity sensor device.
244
245 To compile this driver as a module, choose M here:
246 the module will be called max44000.
247
226config OPT3001 248config OPT3001
227 tristate "Texas Instruments OPT3001 Light Sensor" 249 tristate "Texas Instruments OPT3001 Light Sensor"
228 depends on I2C 250 depends on I2C
@@ -320,4 +342,14 @@ config VCNL4000
320 To compile this driver as a module, choose M here: the 342 To compile this driver as a module, choose M here: the
321 module will be called vcnl4000. 343 module will be called vcnl4000.
322 344
345config VEML6070
346 tristate "VEML6070 UV A light sensor"
347 depends on I2C
348 help
349 Say Y here if you want to build a driver for the Vishay VEML6070 UV A
350 light sensor.
351
352 To compile this driver as a module, choose M here: the
353 module will be called veml6070.
354
323endmenu 355endmenu
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index b2c31053db0c..6f2a3c62de27 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_AL3320A) += al3320a.o
9obj-$(CONFIG_APDS9300) += apds9300.o 9obj-$(CONFIG_APDS9300) += apds9300.o
10obj-$(CONFIG_APDS9960) += apds9960.o 10obj-$(CONFIG_APDS9960) += apds9960.o
11obj-$(CONFIG_BH1750) += bh1750.o 11obj-$(CONFIG_BH1750) += bh1750.o
12obj-$(CONFIG_BH1780) += bh1780.o
12obj-$(CONFIG_CM32181) += cm32181.o 13obj-$(CONFIG_CM32181) += cm32181.o
13obj-$(CONFIG_CM3232) += cm3232.o 14obj-$(CONFIG_CM3232) += cm3232.o
14obj-$(CONFIG_CM3323) += cm3323.o 15obj-$(CONFIG_CM3323) += cm3323.o
@@ -20,6 +21,7 @@ obj-$(CONFIG_ISL29125) += isl29125.o
20obj-$(CONFIG_JSA1212) += jsa1212.o 21obj-$(CONFIG_JSA1212) += jsa1212.o
21obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o 22obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
22obj-$(CONFIG_LTR501) += ltr501.o 23obj-$(CONFIG_LTR501) += ltr501.o
24obj-$(CONFIG_MAX44000) += max44000.o
23obj-$(CONFIG_OPT3001) += opt3001.o 25obj-$(CONFIG_OPT3001) += opt3001.o
24obj-$(CONFIG_PA12203001) += pa12203001.o 26obj-$(CONFIG_PA12203001) += pa12203001.o
25obj-$(CONFIG_RPR0521) += rpr0521.o 27obj-$(CONFIG_RPR0521) += rpr0521.o
@@ -30,3 +32,4 @@ obj-$(CONFIG_TCS3472) += tcs3472.o
30obj-$(CONFIG_TSL4531) += tsl4531.o 32obj-$(CONFIG_TSL4531) += tsl4531.o
31obj-$(CONFIG_US5182D) += us5182d.o 33obj-$(CONFIG_US5182D) += us5182d.o
32obj-$(CONFIG_VCNL4000) += vcnl4000.o 34obj-$(CONFIG_VCNL4000) += vcnl4000.o
35obj-$(CONFIG_VEML6070) += veml6070.o
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index a6af56ad10e1..b4dbb3912977 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -321,8 +321,12 @@ static const struct iio_chan_spec apds9960_channels[] = {
321}; 321};
322 322
323/* integration time in us */ 323/* integration time in us */
324static const int apds9960_int_time[][2] = 324static const int apds9960_int_time[][2] = {
325 { {28000, 246}, {100000, 219}, {200000, 182}, {700000, 0} }; 325 { 28000, 246},
326 {100000, 219},
327 {200000, 182},
328 {700000, 0}
329};
326 330
327/* gain mapping */ 331/* gain mapping */
328static const int apds9960_pxs_gain_map[] = {1, 2, 4, 8}; 332static const int apds9960_pxs_gain_map[] = {1, 2, 4, 8};
@@ -491,9 +495,10 @@ static int apds9960_read_raw(struct iio_dev *indio_dev,
491 case IIO_INTENSITY: 495 case IIO_INTENSITY:
492 ret = regmap_bulk_read(data->regmap, chan->address, 496 ret = regmap_bulk_read(data->regmap, chan->address,
493 &buf, 2); 497 &buf, 2);
494 if (!ret) 498 if (!ret) {
495 ret = IIO_VAL_INT; 499 ret = IIO_VAL_INT;
496 *val = le16_to_cpu(buf); 500 *val = le16_to_cpu(buf);
501 }
497 break; 502 break;
498 default: 503 default:
499 ret = -EINVAL; 504 ret = -EINVAL;
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
new file mode 100644
index 000000000000..72b364e4aa72
--- /dev/null
+++ b/drivers/iio/light/bh1780.c
@@ -0,0 +1,297 @@
1/*
2 * ROHM 1780GLI Ambient Light Sensor Driver
3 *
4 * Copyright (C) 2016 Linaro Ltd.
5 * Author: Linus Walleij <linus.walleij@linaro.org>
6 * Loosely based on the previous BH1780 ALS misc driver
7 * Copyright (C) 2010 Texas Instruments
8 * Author: Hemanth V <hemanthv@ti.com>
9 */
10#include <linux/i2c.h>
11#include <linux/slab.h>
12#include <linux/platform_device.h>
13#include <linux/delay.h>
14#include <linux/module.h>
15#include <linux/of.h>
16#include <linux/pm_runtime.h>
17#include <linux/iio/iio.h>
18#include <linux/iio/sysfs.h>
19#include <linux/bitops.h>
20
21#define BH1780_CMD_BIT BIT(7)
22#define BH1780_REG_CONTROL 0x00
23#define BH1780_REG_PARTID 0x0A
24#define BH1780_REG_MANFID 0x0B
25#define BH1780_REG_DLOW 0x0C
26#define BH1780_REG_DHIGH 0x0D
27
28#define BH1780_REVMASK GENMASK(3,0)
29#define BH1780_POWMASK GENMASK(1,0)
30#define BH1780_POFF (0x0)
31#define BH1780_PON (0x3)
32
33/* power on settling time in ms */
34#define BH1780_PON_DELAY 2
35/* max time before value available in ms */
36#define BH1780_INTERVAL 250
37
38struct bh1780_data {
39 struct i2c_client *client;
40};
41
42static int bh1780_write(struct bh1780_data *bh1780, u8 reg, u8 val)
43{
44 int ret = i2c_smbus_write_byte_data(bh1780->client,
45 BH1780_CMD_BIT | reg,
46 val);
47 if (ret < 0)
48 dev_err(&bh1780->client->dev,
49 "i2c_smbus_write_byte_data failed error "
50 "%d, register %01x\n",
51 ret, reg);
52 return ret;
53}
54
55static int bh1780_read(struct bh1780_data *bh1780, u8 reg)
56{
57 int ret = i2c_smbus_read_byte_data(bh1780->client,
58 BH1780_CMD_BIT | reg);
59 if (ret < 0)
60 dev_err(&bh1780->client->dev,
61 "i2c_smbus_read_byte_data failed error "
62 "%d, register %01x\n",
63 ret, reg);
64 return ret;
65}
66
67static int bh1780_read_word(struct bh1780_data *bh1780, u8 reg)
68{
69 int ret = i2c_smbus_read_word_data(bh1780->client,
70 BH1780_CMD_BIT | reg);
71 if (ret < 0)
72 dev_err(&bh1780->client->dev,
73 "i2c_smbus_read_word_data failed error "
74 "%d, register %01x\n",
75 ret, reg);
76 return ret;
77}
78
79static int bh1780_debugfs_reg_access(struct iio_dev *indio_dev,
80 unsigned int reg, unsigned int writeval,
81 unsigned int *readval)
82{
83 struct bh1780_data *bh1780 = iio_priv(indio_dev);
84 int ret;
85
86 if (!readval)
87 bh1780_write(bh1780, (u8)reg, (u8)writeval);
88
89 ret = bh1780_read(bh1780, (u8)reg);
90 if (ret < 0)
91 return ret;
92
93 *readval = ret;
94
95 return 0;
96}
97
98static int bh1780_read_raw(struct iio_dev *indio_dev,
99 struct iio_chan_spec const *chan,
100 int *val, int *val2, long mask)
101{
102 struct bh1780_data *bh1780 = iio_priv(indio_dev);
103 int value;
104
105 switch (mask) {
106 case IIO_CHAN_INFO_RAW:
107 switch (chan->type) {
108 case IIO_LIGHT:
109 pm_runtime_get_sync(&bh1780->client->dev);
110 value = bh1780_read_word(bh1780, BH1780_REG_DLOW);
111 if (value < 0)
112 return value;
113 pm_runtime_mark_last_busy(&bh1780->client->dev);
114 pm_runtime_put_autosuspend(&bh1780->client->dev);
115 *val = value;
116
117 return IIO_VAL_INT;
118 default:
119 return -EINVAL;
120 }
121 case IIO_CHAN_INFO_INT_TIME:
122 *val = 0;
123 *val2 = BH1780_INTERVAL * 1000;
124 return IIO_VAL_INT_PLUS_MICRO;
125 default:
126 return -EINVAL;
127 }
128}
129
130static const struct iio_info bh1780_info = {
131 .driver_module = THIS_MODULE,
132 .read_raw = bh1780_read_raw,
133 .debugfs_reg_access = bh1780_debugfs_reg_access,
134};
135
136static const struct iio_chan_spec bh1780_channels[] = {
137 {
138 .type = IIO_LIGHT,
139 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
140 BIT(IIO_CHAN_INFO_INT_TIME)
141 }
142};
143
144static int bh1780_probe(struct i2c_client *client,
145 const struct i2c_device_id *id)
146{
147 int ret;
148 struct bh1780_data *bh1780;
149 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
150 struct iio_dev *indio_dev;
151
152 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
153 return -EIO;
154
155 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*bh1780));
156 if (!indio_dev)
157 return -ENOMEM;
158
159 bh1780 = iio_priv(indio_dev);
160 bh1780->client = client;
161 i2c_set_clientdata(client, indio_dev);
162
163 /* Power up the device */
164 ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON);
165 if (ret < 0)
166 return ret;
167 msleep(BH1780_PON_DELAY);
168 pm_runtime_get_noresume(&client->dev);
169 pm_runtime_set_active(&client->dev);
170 pm_runtime_enable(&client->dev);
171
172 ret = bh1780_read(bh1780, BH1780_REG_PARTID);
173 if (ret < 0)
174 goto out_disable_pm;
175 dev_info(&client->dev,
176 "Ambient Light Sensor, Rev : %lu\n",
177 (ret & BH1780_REVMASK));
178
179 /*
180 * As the device takes 250 ms to even come up with a fresh
181 * measurement after power-on, do not shut it down unnecessarily.
182 * Set autosuspend to a five seconds.
183 */
184 pm_runtime_set_autosuspend_delay(&client->dev, 5000);
185 pm_runtime_use_autosuspend(&client->dev);
186 pm_runtime_put(&client->dev);
187
188 indio_dev->dev.parent = &client->dev;
189 indio_dev->info = &bh1780_info;
190 indio_dev->name = id->name;
191 indio_dev->channels = bh1780_channels;
192 indio_dev->num_channels = ARRAY_SIZE(bh1780_channels);
193 indio_dev->modes = INDIO_DIRECT_MODE;
194
195 ret = iio_device_register(indio_dev);
196 if (ret)
197 goto out_disable_pm;
198 return 0;
199
200out_disable_pm:
201 pm_runtime_put_noidle(&client->dev);
202 pm_runtime_disable(&client->dev);
203 return ret;
204}
205
206static int bh1780_remove(struct i2c_client *client)
207{
208 struct iio_dev *indio_dev = i2c_get_clientdata(client);
209 struct bh1780_data *bh1780 = iio_priv(indio_dev);
210 int ret;
211
212 iio_device_unregister(indio_dev);
213 pm_runtime_get_sync(&client->dev);
214 pm_runtime_put_noidle(&client->dev);
215 pm_runtime_disable(&client->dev);
216 ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
217 if (ret < 0) {
218 dev_err(&client->dev, "failed to power off\n");
219 return ret;
220 }
221
222 return 0;
223}
224
225#ifdef CONFIG_PM
226static int bh1780_runtime_suspend(struct device *dev)
227{
228 struct i2c_client *client = to_i2c_client(dev);
229 struct bh1780_data *bh1780 = i2c_get_clientdata(client);
230 int ret;
231
232 ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
233 if (ret < 0) {
234 dev_err(dev, "failed to runtime suspend\n");
235 return ret;
236 }
237
238 return 0;
239}
240
241static int bh1780_runtime_resume(struct device *dev)
242{
243 struct i2c_client *client = to_i2c_client(dev);
244 struct bh1780_data *bh1780 = i2c_get_clientdata(client);
245 int ret;
246
247 ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON);
248 if (ret < 0) {
249 dev_err(dev, "failed to runtime resume\n");
250 return ret;
251 }
252
253 /* Wait for power on, then for a value to be available */
254 msleep(BH1780_PON_DELAY + BH1780_INTERVAL);
255
256 return 0;
257}
258#endif /* CONFIG_PM */
259
260static const struct dev_pm_ops bh1780_dev_pm_ops = {
261 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
262 pm_runtime_force_resume)
263 SET_RUNTIME_PM_OPS(bh1780_runtime_suspend,
264 bh1780_runtime_resume, NULL)
265};
266
267static const struct i2c_device_id bh1780_id[] = {
268 { "bh1780", 0 },
269 { },
270};
271
272MODULE_DEVICE_TABLE(i2c, bh1780_id);
273
274#ifdef CONFIG_OF
275static const struct of_device_id of_bh1780_match[] = {
276 { .compatible = "rohm,bh1780gli", },
277 {},
278};
279MODULE_DEVICE_TABLE(of, of_bh1780_match);
280#endif
281
282static struct i2c_driver bh1780_driver = {
283 .probe = bh1780_probe,
284 .remove = bh1780_remove,
285 .id_table = bh1780_id,
286 .driver = {
287 .name = "bh1780",
288 .pm = &bh1780_dev_pm_ops,
289 .of_match_table = of_match_ptr(of_bh1780_match),
290 },
291};
292
293module_i2c_driver(bh1780_driver);
294
295MODULE_DESCRIPTION("ROHM BH1780GLI Ambient Light Sensor Driver");
296MODULE_LICENSE("GPL");
297MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
new file mode 100644
index 000000000000..e01e58a9bd14
--- /dev/null
+++ b/drivers/iio/light/max44000.c
@@ -0,0 +1,639 @@
1/*
2 * MAX44000 Ambient and Infrared Proximity Sensor
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This file is subject to the terms and conditions of version 2 of
7 * the GNU General Public License. See the file COPYING in the main
8 * directory of this archive for more details.
9 *
10 * Data sheet: https://datasheets.maximintegrated.com/en/ds/MAX44000.pdf
11 *
12 * 7-bit I2C slave address 0x4a
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/i2c.h>
18#include <linux/regmap.h>
19#include <linux/util_macros.h>
20#include <linux/iio/iio.h>
21#include <linux/iio/sysfs.h>
22#include <linux/iio/buffer.h>
23#include <linux/iio/trigger_consumer.h>
24#include <linux/iio/triggered_buffer.h>
25#include <linux/acpi.h>
26
27#define MAX44000_DRV_NAME "max44000"
28
29/* Registers in datasheet order */
30#define MAX44000_REG_STATUS 0x00
31#define MAX44000_REG_CFG_MAIN 0x01
32#define MAX44000_REG_CFG_RX 0x02
33#define MAX44000_REG_CFG_TX 0x03
34#define MAX44000_REG_ALS_DATA_HI 0x04
35#define MAX44000_REG_ALS_DATA_LO 0x05
36#define MAX44000_REG_PRX_DATA 0x16
37#define MAX44000_REG_ALS_UPTHR_HI 0x06
38#define MAX44000_REG_ALS_UPTHR_LO 0x07
39#define MAX44000_REG_ALS_LOTHR_HI 0x08
40#define MAX44000_REG_ALS_LOTHR_LO 0x09
41#define MAX44000_REG_PST 0x0a
42#define MAX44000_REG_PRX_IND 0x0b
43#define MAX44000_REG_PRX_THR 0x0c
44#define MAX44000_REG_TRIM_GAIN_GREEN 0x0f
45#define MAX44000_REG_TRIM_GAIN_IR 0x10
46
47/* REG_CFG bits */
48#define MAX44000_CFG_ALSINTE 0x01
49#define MAX44000_CFG_PRXINTE 0x02
50#define MAX44000_CFG_MASK 0x1c
51#define MAX44000_CFG_MODE_SHUTDOWN 0x00
52#define MAX44000_CFG_MODE_ALS_GIR 0x04
53#define MAX44000_CFG_MODE_ALS_G 0x08
54#define MAX44000_CFG_MODE_ALS_IR 0x0c
55#define MAX44000_CFG_MODE_ALS_PRX 0x10
56#define MAX44000_CFG_MODE_PRX 0x14
57#define MAX44000_CFG_TRIM 0x20
58
59/*
60 * Upper 4 bits are not documented but start as 1 on powerup
61 * Setting them to 0 causes proximity to misbehave so set them to 1
62 */
63#define MAX44000_REG_CFG_RX_DEFAULT 0xf0
64
65/* REG_RX bits */
66#define MAX44000_CFG_RX_ALSTIM_MASK 0x0c
67#define MAX44000_CFG_RX_ALSTIM_SHIFT 2
68#define MAX44000_CFG_RX_ALSPGA_MASK 0x03
69#define MAX44000_CFG_RX_ALSPGA_SHIFT 0
70
71/* REG_TX bits */
72#define MAX44000_LED_CURRENT_MASK 0xf
73#define MAX44000_LED_CURRENT_MAX 11
74#define MAX44000_LED_CURRENT_DEFAULT 6
75
76#define MAX44000_ALSDATA_OVERFLOW 0x4000
77
78struct max44000_data {
79 struct mutex lock;
80 struct regmap *regmap;
81};
82
83/* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */
84#define MAX44000_ALS_TO_LUX_DEFAULT_FRACTION_LOG2 5
85
86/* Scale can be multiplied by up to 128x via ALSPGA for measurement gain */
87static const int max44000_alspga_shift[] = {0, 2, 4, 7};
88#define MAX44000_ALSPGA_MAX_SHIFT 7
89
90/*
91 * Scale can be multiplied by up to 64x via ALSTIM because of lost resolution
92 *
93 * This scaling factor is hidden from userspace and instead accounted for when
94 * reading raw values from the device.
95 *
96 * This makes it possible to cleanly expose ALSPGA as IIO_CHAN_INFO_SCALE and
97 * ALSTIM as IIO_CHAN_INFO_INT_TIME without the values affecting each other.
98 *
99 * Handling this internally is also required for buffer support because the
100 * channel's scan_type can't be modified dynamically.
101 */
102static const int max44000_alstim_shift[] = {0, 2, 4, 6};
103#define MAX44000_ALSTIM_SHIFT(alstim) (2 * (alstim))
104
105/* Available integration times with pretty manual alignment: */
106static const int max44000_int_time_avail_ns_array[] = {
107 100000000,
108 25000000,
109 6250000,
110 1562500,
111};
112static const char max44000_int_time_avail_str[] =
113 "0.100 "
114 "0.025 "
115 "0.00625 "
116 "0.001625";
117
118/* Available scales (internal to ulux) with pretty manual alignment: */
119static const int max44000_scale_avail_ulux_array[] = {
120 31250,
121 125000,
122 500000,
123 4000000,
124};
125static const char max44000_scale_avail_str[] =
126 "0.03125 "
127 "0.125 "
128 "0.5 "
129 "4";
130
131#define MAX44000_SCAN_INDEX_ALS 0
132#define MAX44000_SCAN_INDEX_PRX 1
133
134static const struct iio_chan_spec max44000_channels[] = {
135 {
136 .type = IIO_LIGHT,
137 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
138 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
139 BIT(IIO_CHAN_INFO_INT_TIME),
140 .scan_index = MAX44000_SCAN_INDEX_ALS,
141 .scan_type = {
142 .sign = 'u',
143 .realbits = 14,
144 .storagebits = 16,
145 }
146 },
147 {
148 .type = IIO_PROXIMITY,
149 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
150 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
151 .scan_index = MAX44000_SCAN_INDEX_PRX,
152 .scan_type = {
153 .sign = 'u',
154 .realbits = 8,
155 .storagebits = 16,
156 }
157 },
158 IIO_CHAN_SOFT_TIMESTAMP(2),
159 {
160 .type = IIO_CURRENT,
161 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
162 BIT(IIO_CHAN_INFO_SCALE),
163 .extend_name = "led",
164 .output = 1,
165 .scan_index = -1,
166 },
167};
168
169static int max44000_read_alstim(struct max44000_data *data)
170{
171 unsigned int val;
172 int ret;
173
174 ret = regmap_read(data->regmap, MAX44000_REG_CFG_RX, &val);
175 if (ret < 0)
176 return ret;
177 return (val & MAX44000_CFG_RX_ALSTIM_MASK) >> MAX44000_CFG_RX_ALSTIM_SHIFT;
178}
179
180static int max44000_write_alstim(struct max44000_data *data, int val)
181{
182 return regmap_write_bits(data->regmap, MAX44000_REG_CFG_RX,
183 MAX44000_CFG_RX_ALSTIM_MASK,
184 val << MAX44000_CFG_RX_ALSTIM_SHIFT);
185}
186
187static int max44000_read_alspga(struct max44000_data *data)
188{
189 unsigned int val;
190 int ret;
191
192 ret = regmap_read(data->regmap, MAX44000_REG_CFG_RX, &val);
193 if (ret < 0)
194 return ret;
195 return (val & MAX44000_CFG_RX_ALSPGA_MASK) >> MAX44000_CFG_RX_ALSPGA_SHIFT;
196}
197
198static int max44000_write_alspga(struct max44000_data *data, int val)
199{
200 return regmap_write_bits(data->regmap, MAX44000_REG_CFG_RX,
201 MAX44000_CFG_RX_ALSPGA_MASK,
202 val << MAX44000_CFG_RX_ALSPGA_SHIFT);
203}
204
205static int max44000_read_alsval(struct max44000_data *data)
206{
207 u16 regval;
208 int alstim, ret;
209
210 ret = regmap_bulk_read(data->regmap, MAX44000_REG_ALS_DATA_HI,
211 &regval, sizeof(regval));
212 if (ret < 0)
213 return ret;
214 alstim = ret = max44000_read_alstim(data);
215 if (ret < 0)
216 return ret;
217
218 regval = be16_to_cpu(regval);
219
220 /*
221 * Overflow is explained on datasheet page 17.
222 *
223 * It's a warning that either the G or IR channel has become saturated
224 * and that the value in the register is likely incorrect.
225 *
226 * The recommendation is to change the scale (ALSPGA).
227 * The driver just returns the max representable value.
228 */
229 if (regval & MAX44000_ALSDATA_OVERFLOW)
230 return 0x3FFF;
231
232 return regval << MAX44000_ALSTIM_SHIFT(alstim);
233}
234
235static int max44000_write_led_current_raw(struct max44000_data *data, int val)
236{
237 /* Maybe we should clamp the value instead? */
238 if (val < 0 || val > MAX44000_LED_CURRENT_MAX)
239 return -ERANGE;
240 if (val >= 8)
241 val += 4;
242 return regmap_write_bits(data->regmap, MAX44000_REG_CFG_TX,
243 MAX44000_LED_CURRENT_MASK, val);
244}
245
246static int max44000_read_led_current_raw(struct max44000_data *data)
247{
248 unsigned int regval;
249 int ret;
250
251 ret = regmap_read(data->regmap, MAX44000_REG_CFG_TX, &regval);
252 if (ret < 0)
253 return ret;
254 regval &= MAX44000_LED_CURRENT_MASK;
255 if (regval >= 8)
256 regval -= 4;
257 return regval;
258}
259
260static int max44000_read_raw(struct iio_dev *indio_dev,
261 struct iio_chan_spec const *chan,
262 int *val, int *val2, long mask)
263{
264 struct max44000_data *data = iio_priv(indio_dev);
265 int alstim, alspga;
266 unsigned int regval;
267 int ret;
268
269 switch (mask) {
270 case IIO_CHAN_INFO_RAW:
271 switch (chan->type) {
272 case IIO_LIGHT:
273 mutex_lock(&data->lock);
274 ret = max44000_read_alsval(data);
275 mutex_unlock(&data->lock);
276 if (ret < 0)
277 return ret;
278 *val = ret;
279 return IIO_VAL_INT;
280
281 case IIO_PROXIMITY:
282 mutex_lock(&data->lock);
283 ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &regval);
284 mutex_unlock(&data->lock);
285 if (ret < 0)
286 return ret;
287 *val = regval;
288 return IIO_VAL_INT;
289
290 case IIO_CURRENT:
291 mutex_lock(&data->lock);
292 ret = max44000_read_led_current_raw(data);
293 mutex_unlock(&data->lock);
294 if (ret < 0)
295 return ret;
296 *val = ret;
297 return IIO_VAL_INT;
298
299 default:
300 return -EINVAL;
301 }
302
303 case IIO_CHAN_INFO_SCALE:
304 switch (chan->type) {
305 case IIO_CURRENT:
306 /* Output register is in 10s of miliamps */
307 *val = 10;
308 return IIO_VAL_INT;
309
310 case IIO_LIGHT:
311 mutex_lock(&data->lock);
312 alspga = ret = max44000_read_alspga(data);
313 mutex_unlock(&data->lock);
314 if (ret < 0)
315 return ret;
316
317 /* Avoid negative shifts */
318 *val = (1 << MAX44000_ALSPGA_MAX_SHIFT);
319 *val2 = MAX44000_ALS_TO_LUX_DEFAULT_FRACTION_LOG2
320 + MAX44000_ALSPGA_MAX_SHIFT
321 - max44000_alspga_shift[alspga];
322 return IIO_VAL_FRACTIONAL_LOG2;
323
324 default:
325 return -EINVAL;
326 }
327
328 case IIO_CHAN_INFO_INT_TIME:
329 mutex_lock(&data->lock);
330 alstim = ret = max44000_read_alstim(data);
331 mutex_unlock(&data->lock);
332
333 if (ret < 0)
334 return ret;
335 *val = 0;
336 *val2 = max44000_int_time_avail_ns_array[alstim];
337 return IIO_VAL_INT_PLUS_NANO;
338
339 default:
340 return -EINVAL;
341 }
342}
343
344static int max44000_write_raw(struct iio_dev *indio_dev,
345 struct iio_chan_spec const *chan,
346 int val, int val2, long mask)
347{
348 struct max44000_data *data = iio_priv(indio_dev);
349 int ret;
350
351 if (mask == IIO_CHAN_INFO_RAW && chan->type == IIO_CURRENT) {
352 mutex_lock(&data->lock);
353 ret = max44000_write_led_current_raw(data, val);
354 mutex_unlock(&data->lock);
355 return ret;
356 } else if (mask == IIO_CHAN_INFO_INT_TIME && chan->type == IIO_LIGHT) {
357 s64 valns = val * NSEC_PER_SEC + val2;
358 int alstim = find_closest_descending(valns,
359 max44000_int_time_avail_ns_array,
360 ARRAY_SIZE(max44000_int_time_avail_ns_array));
361 mutex_lock(&data->lock);
362 ret = max44000_write_alstim(data, alstim);
363 mutex_unlock(&data->lock);
364 return ret;
365 } else if (mask == IIO_CHAN_INFO_SCALE && chan->type == IIO_LIGHT) {
366 s64 valus = val * USEC_PER_SEC + val2;
367 int alspga = find_closest(valus,
368 max44000_scale_avail_ulux_array,
369 ARRAY_SIZE(max44000_scale_avail_ulux_array));
370 mutex_lock(&data->lock);
371 ret = max44000_write_alspga(data, alspga);
372 mutex_unlock(&data->lock);
373 return ret;
374 }
375
376 return -EINVAL;
377}
378
379static int max44000_write_raw_get_fmt(struct iio_dev *indio_dev,
380 struct iio_chan_spec const *chan,
381 long mask)
382{
383 if (mask == IIO_CHAN_INFO_INT_TIME && chan->type == IIO_LIGHT)
384 return IIO_VAL_INT_PLUS_NANO;
385 else if (mask == IIO_CHAN_INFO_SCALE && chan->type == IIO_LIGHT)
386 return IIO_VAL_INT_PLUS_MICRO;
387 else
388 return IIO_VAL_INT;
389}
390
391static IIO_CONST_ATTR(illuminance_integration_time_available, max44000_int_time_avail_str);
392static IIO_CONST_ATTR(illuminance_scale_available, max44000_scale_avail_str);
393
394static struct attribute *max44000_attributes[] = {
395 &iio_const_attr_illuminance_integration_time_available.dev_attr.attr,
396 &iio_const_attr_illuminance_scale_available.dev_attr.attr,
397 NULL
398};
399
400static const struct attribute_group max44000_attribute_group = {
401 .attrs = max44000_attributes,
402};
403
404static const struct iio_info max44000_info = {
405 .driver_module = THIS_MODULE,
406 .read_raw = max44000_read_raw,
407 .write_raw = max44000_write_raw,
408 .write_raw_get_fmt = max44000_write_raw_get_fmt,
409 .attrs = &max44000_attribute_group,
410};
411
412static bool max44000_readable_reg(struct device *dev, unsigned int reg)
413{
414 switch (reg) {
415 case MAX44000_REG_STATUS:
416 case MAX44000_REG_CFG_MAIN:
417 case MAX44000_REG_CFG_RX:
418 case MAX44000_REG_CFG_TX:
419 case MAX44000_REG_ALS_DATA_HI:
420 case MAX44000_REG_ALS_DATA_LO:
421 case MAX44000_REG_PRX_DATA:
422 case MAX44000_REG_ALS_UPTHR_HI:
423 case MAX44000_REG_ALS_UPTHR_LO:
424 case MAX44000_REG_ALS_LOTHR_HI:
425 case MAX44000_REG_ALS_LOTHR_LO:
426 case MAX44000_REG_PST:
427 case MAX44000_REG_PRX_IND:
428 case MAX44000_REG_PRX_THR:
429 case MAX44000_REG_TRIM_GAIN_GREEN:
430 case MAX44000_REG_TRIM_GAIN_IR:
431 return true;
432 default:
433 return false;
434 }
435}
436
437static bool max44000_writeable_reg(struct device *dev, unsigned int reg)
438{
439 switch (reg) {
440 case MAX44000_REG_CFG_MAIN:
441 case MAX44000_REG_CFG_RX:
442 case MAX44000_REG_CFG_TX:
443 case MAX44000_REG_ALS_UPTHR_HI:
444 case MAX44000_REG_ALS_UPTHR_LO:
445 case MAX44000_REG_ALS_LOTHR_HI:
446 case MAX44000_REG_ALS_LOTHR_LO:
447 case MAX44000_REG_PST:
448 case MAX44000_REG_PRX_IND:
449 case MAX44000_REG_PRX_THR:
450 case MAX44000_REG_TRIM_GAIN_GREEN:
451 case MAX44000_REG_TRIM_GAIN_IR:
452 return true;
453 default:
454 return false;
455 }
456}
457
458static bool max44000_volatile_reg(struct device *dev, unsigned int reg)
459{
460 switch (reg) {
461 case MAX44000_REG_STATUS:
462 case MAX44000_REG_ALS_DATA_HI:
463 case MAX44000_REG_ALS_DATA_LO:
464 case MAX44000_REG_PRX_DATA:
465 return true;
466 default:
467 return false;
468 }
469}
470
471static bool max44000_precious_reg(struct device *dev, unsigned int reg)
472{
473 return reg == MAX44000_REG_STATUS;
474}
475
476static const struct regmap_config max44000_regmap_config = {
477 .reg_bits = 8,
478 .val_bits = 8,
479
480 .max_register = MAX44000_REG_PRX_DATA,
481 .readable_reg = max44000_readable_reg,
482 .writeable_reg = max44000_writeable_reg,
483 .volatile_reg = max44000_volatile_reg,
484 .precious_reg = max44000_precious_reg,
485
486 .use_single_rw = 1,
487 .cache_type = REGCACHE_RBTREE,
488};
489
490static irqreturn_t max44000_trigger_handler(int irq, void *p)
491{
492 struct iio_poll_func *pf = p;
493 struct iio_dev *indio_dev = pf->indio_dev;
494 struct max44000_data *data = iio_priv(indio_dev);
495 u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */
496 int index = 0;
497 unsigned int regval;
498 int ret;
499
500 mutex_lock(&data->lock);
501 if (test_bit(MAX44000_SCAN_INDEX_ALS, indio_dev->active_scan_mask)) {
502 ret = max44000_read_alsval(data);
503 if (ret < 0)
504 goto out_unlock;
505 buf[index++] = ret;
506 }
507 if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) {
508 ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &regval);
509 if (ret < 0)
510 goto out_unlock;
511 buf[index] = regval;
512 }
513 mutex_unlock(&data->lock);
514
515 iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
516 iio_trigger_notify_done(indio_dev->trig);
517 return IRQ_HANDLED;
518
519out_unlock:
520 mutex_unlock(&data->lock);
521 iio_trigger_notify_done(indio_dev->trig);
522 return IRQ_HANDLED;
523}
524
525static int max44000_probe(struct i2c_client *client,
526 const struct i2c_device_id *id)
527{
528 struct max44000_data *data;
529 struct iio_dev *indio_dev;
530 int ret, reg;
531
532 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
533 if (!indio_dev)
534 return -ENOMEM;
535 data = iio_priv(indio_dev);
536 data->regmap = devm_regmap_init_i2c(client, &max44000_regmap_config);
537 if (IS_ERR(data->regmap)) {
538 dev_err(&client->dev, "regmap_init failed!\n");
539 return PTR_ERR(data->regmap);
540 }
541
542 i2c_set_clientdata(client, indio_dev);
543 mutex_init(&data->lock);
544 indio_dev->dev.parent = &client->dev;
545 indio_dev->info = &max44000_info;
546 indio_dev->name = MAX44000_DRV_NAME;
547 indio_dev->channels = max44000_channels;
548 indio_dev->num_channels = ARRAY_SIZE(max44000_channels);
549
550 /*
551 * The device doesn't have a reset function so we just clear some
552 * important bits at probe time to ensure sane operation.
553 *
554 * Since we don't support interrupts/events the threshold values are
555 * not important. We also don't touch trim values.
556 */
557
558 /* Reset ALS scaling bits */
559 ret = regmap_write(data->regmap, MAX44000_REG_CFG_RX,
560 MAX44000_REG_CFG_RX_DEFAULT);
561 if (ret < 0) {
562 dev_err(&client->dev, "failed to write default CFG_RX: %d\n",
563 ret);
564 return ret;
565 }
566
567 /*
568 * By default the LED pulse used for the proximity sensor is disabled.
569 * Set a middle value so that we get some sort of valid data by default.
570 */
571 ret = max44000_write_led_current_raw(data, MAX44000_LED_CURRENT_DEFAULT);
572 if (ret < 0) {
573 dev_err(&client->dev, "failed to write init config: %d\n", ret);
574 return ret;
575 }
576
577 /* Reset CFG bits to ALS_PRX mode which allows easy reading of both values. */
578 reg = MAX44000_CFG_TRIM | MAX44000_CFG_MODE_ALS_PRX;
579 ret = regmap_write(data->regmap, MAX44000_REG_CFG_MAIN, reg);
580 if (ret < 0) {
581 dev_err(&client->dev, "failed to write init config: %d\n", ret);
582 return ret;
583 }
584
585 /* Read status at least once to clear any stale interrupt bits. */
586 ret = regmap_read(data->regmap, MAX44000_REG_STATUS, &reg);
587 if (ret < 0) {
588 dev_err(&client->dev, "failed to read init status: %d\n", ret);
589 return ret;
590 }
591
592 ret = iio_triggered_buffer_setup(indio_dev, NULL, max44000_trigger_handler, NULL);
593 if (ret < 0) {
594 dev_err(&client->dev, "iio triggered buffer setup failed\n");
595 return ret;
596 }
597
598 return iio_device_register(indio_dev);
599}
600
static int max44000_remove(struct i2c_client *client)
{
	struct iio_dev *indio_dev = i2c_get_clientdata(client);

	/* Tear down in the reverse order of probe. */
	iio_device_unregister(indio_dev);
	iio_triggered_buffer_cleanup(indio_dev);

	return 0;
}
610
611static const struct i2c_device_id max44000_id[] = {
612 {"max44000", 0},
613 { }
614};
615MODULE_DEVICE_TABLE(i2c, max44000_id);
616
617#ifdef CONFIG_ACPI
618static const struct acpi_device_id max44000_acpi_match[] = {
619 {"MAX44000", 0},
620 { }
621};
622MODULE_DEVICE_TABLE(acpi, max44000_acpi_match);
623#endif
624
625static struct i2c_driver max44000_driver = {
626 .driver = {
627 .name = MAX44000_DRV_NAME,
628 .acpi_match_table = ACPI_PTR(max44000_acpi_match),
629 },
630 .probe = max44000_probe,
631 .remove = max44000_remove,
632 .id_table = max44000_id,
633};
634
635module_i2c_driver(max44000_driver);
636
637MODULE_AUTHOR("Crestez Dan Leonard <leonard.crestez@intel.com>");
638MODULE_DESCRIPTION("MAX44000 Ambient and Infrared Proximity Sensor");
639MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 42d334ba612e..9e847f8f4f0c 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -16,7 +16,6 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/regmap.h> 18#include <linux/regmap.h>
19#include <linux/gpio/consumer.h>
20#include <linux/iio/events.h> 19#include <linux/iio/events.h>
21#include <linux/iio/iio.h> 20#include <linux/iio/iio.h>
22#include <linux/iio/sysfs.h> 21#include <linux/iio/sysfs.h>
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 12731d6b89ec..57b108c30e98 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -806,8 +806,7 @@ static int tsl2563_probe(struct i2c_client *client,
806 return 0; 806 return 0;
807 807
808fail: 808fail:
809 cancel_delayed_work(&chip->poweroff_work); 809 cancel_delayed_work_sync(&chip->poweroff_work);
810 flush_scheduled_work();
811 return err; 810 return err;
812} 811}
813 812
diff --git a/drivers/iio/light/veml6070.c b/drivers/iio/light/veml6070.c
new file mode 100644
index 000000000000..bc1c4cb782cd
--- /dev/null
+++ b/drivers/iio/light/veml6070.c
@@ -0,0 +1,218 @@
1/*
2 * veml6070.c - Support for Vishay VEML6070 UV A light sensor
3 *
4 * Copyright 2016 Peter Meerwald-Stadler <pmeerw@pmeerw.net>
5 *
6 * This file is subject to the terms and conditions of version 2 of
7 * the GNU General Public License. See the file COPYING in the main
8 * directory of this archive for more details.
9 *
10 * IIO driver for VEML6070 (7-bit I2C slave addresses 0x38 and 0x39)
11 *
12 * TODO: integration time, ACK signal
13 */
14
15#include <linux/module.h>
16#include <linux/i2c.h>
17#include <linux/mutex.h>
18#include <linux/err.h>
19#include <linux/delay.h>
20
21#include <linux/iio/iio.h>
22#include <linux/iio/sysfs.h>
23
24#define VEML6070_DRV_NAME "veml6070"
25
26#define VEML6070_ADDR_CONFIG_DATA_MSB 0x38 /* read: MSB data, write: config */
27#define VEML6070_ADDR_DATA_LSB 0x39 /* LSB data */
28
29#define VEML6070_COMMAND_ACK BIT(5) /* raise interrupt when over threshold */
30#define VEML6070_COMMAND_IT GENMASK(3, 2) /* bit mask integration time */
31#define VEML6070_COMMAND_RSRVD BIT(1) /* reserved, set to 1 */
32#define VEML6070_COMMAND_SD BIT(0) /* shutdown mode when set */
33
34#define VEML6070_IT_10 0x04 /* integration time 1x */
35
36struct veml6070_data {
37 struct i2c_client *client1;
38 struct i2c_client *client2;
39 u8 config;
40 struct mutex lock;
41};
42
43static int veml6070_read(struct veml6070_data *data)
44{
45 int ret;
46 u8 msb, lsb;
47
48 mutex_lock(&data->lock);
49
50 /* disable shutdown */
51 ret = i2c_smbus_write_byte(data->client1,
52 data->config & ~VEML6070_COMMAND_SD);
53 if (ret < 0)
54 goto out;
55
56 msleep(125 + 10); /* measurement takes up to 125 ms for IT 1x */
57
58 ret = i2c_smbus_read_byte(data->client2); /* read MSB, address 0x39 */
59 if (ret < 0)
60 goto out;
61 msb = ret;
62
63 ret = i2c_smbus_read_byte(data->client1); /* read LSB, address 0x38 */
64 if (ret < 0)
65 goto out;
66 lsb = ret;
67
68 /* shutdown again */
69 ret = i2c_smbus_write_byte(data->client1, data->config);
70 if (ret < 0)
71 goto out;
72
73 ret = (msb << 8) | lsb;
74
75out:
76 mutex_unlock(&data->lock);
77 return ret;
78}
79
80static const struct iio_chan_spec veml6070_channels[] = {
81 {
82 .type = IIO_INTENSITY,
83 .modified = 1,
84 .channel2 = IIO_MOD_LIGHT_UV,
85 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
86 },
87 {
88 .type = IIO_UVINDEX,
89 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
90 }
91};
92
/*
 * Map a raw UV intensity reading onto the UV index scale (0..11).
 *
 * The conversion depends on the integration time (IT) and the value of
 * the resistor connected to the RSET pin (default: 270 KOhm); these
 * thresholds assume the defaults.  Indices: 0-2 low, 3-5 moderate,
 * 6-7 high, 8-10 very high, 11 extreme.
 */
static int veml6070_to_uv_index(unsigned val)
{
	/* static const: build the threshold table once, in rodata. */
	static const unsigned uvi[] = {
		187, 373, 560,		/* low */
		746, 933, 1120,		/* moderate */
		1308, 1494,		/* high */
		1681, 1868, 2054	/* very high */
	};
	int i;

	for (i = 0; i < (int)(sizeof(uvi) / sizeof(uvi[0])); i++)
		if (val <= uvi[i])
			return i;

	return 11; /* extreme */
}
113
114static int veml6070_read_raw(struct iio_dev *indio_dev,
115 struct iio_chan_spec const *chan,
116 int *val, int *val2, long mask)
117{
118 struct veml6070_data *data = iio_priv(indio_dev);
119 int ret;
120
121 switch (mask) {
122 case IIO_CHAN_INFO_RAW:
123 case IIO_CHAN_INFO_PROCESSED:
124 ret = veml6070_read(data);
125 if (ret < 0)
126 return ret;
127 if (mask == IIO_CHAN_INFO_PROCESSED)
128 *val = veml6070_to_uv_index(ret);
129 else
130 *val = ret;
131 return IIO_VAL_INT;
132 default:
133 return -EINVAL;
134 }
135}
136
137static const struct iio_info veml6070_info = {
138 .read_raw = veml6070_read_raw,
139 .driver_module = THIS_MODULE,
140};
141
142static int veml6070_probe(struct i2c_client *client,
143 const struct i2c_device_id *id)
144{
145 struct veml6070_data *data;
146 struct iio_dev *indio_dev;
147 int ret;
148
149 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
150 if (!indio_dev)
151 return -ENOMEM;
152
153 data = iio_priv(indio_dev);
154 i2c_set_clientdata(client, indio_dev);
155 data->client1 = client;
156 mutex_init(&data->lock);
157
158 indio_dev->dev.parent = &client->dev;
159 indio_dev->info = &veml6070_info;
160 indio_dev->channels = veml6070_channels;
161 indio_dev->num_channels = ARRAY_SIZE(veml6070_channels);
162 indio_dev->name = VEML6070_DRV_NAME;
163 indio_dev->modes = INDIO_DIRECT_MODE;
164
165 data->client2 = i2c_new_dummy(client->adapter, VEML6070_ADDR_DATA_LSB);
166 if (!data->client2) {
167 dev_err(&client->dev, "i2c device for second chip address failed\n");
168 return -ENODEV;
169 }
170
171 data->config = VEML6070_IT_10 | VEML6070_COMMAND_RSRVD |
172 VEML6070_COMMAND_SD;
173 ret = i2c_smbus_write_byte(data->client1, data->config);
174 if (ret < 0)
175 goto fail;
176
177 ret = iio_device_register(indio_dev);
178 if (ret < 0)
179 goto fail;
180
181 return ret;
182
183fail:
184 i2c_unregister_device(data->client2);
185 return ret;
186}
187
188static int veml6070_remove(struct i2c_client *client)
189{
190 struct iio_dev *indio_dev = i2c_get_clientdata(client);
191 struct veml6070_data *data = iio_priv(indio_dev);
192
193 iio_device_unregister(indio_dev);
194 i2c_unregister_device(data->client2);
195
196 return 0;
197}
198
199static const struct i2c_device_id veml6070_id[] = {
200 { "veml6070", 0 },
201 { }
202};
203MODULE_DEVICE_TABLE(i2c, veml6070_id);
204
205static struct i2c_driver veml6070_driver = {
206 .driver = {
207 .name = VEML6070_DRV_NAME,
208 },
209 .probe = veml6070_probe,
210 .remove = veml6070_remove,
211 .id_table = veml6070_id,
212};
213
214module_i2c_driver(veml6070_driver);
215
216MODULE_AUTHOR("Peter Meerwald-Stadler <pmeerw@pmeerw.net>");
217MODULE_DESCRIPTION("Vishay VEML6070 UV A light sensor driver");
218MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 021dc5361f53..84e6559ccc65 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -9,6 +9,8 @@ config AK8975
9 tristate "Asahi Kasei AK 3-Axis Magnetometer" 9 tristate "Asahi Kasei AK 3-Axis Magnetometer"
10 depends on I2C 10 depends on I2C
11 depends on GPIOLIB || COMPILE_TEST 11 depends on GPIOLIB || COMPILE_TEST
12 select IIO_BUFFER
13 select IIO_TRIGGERED_BUFFER
12 help 14 help
13 Say yes here to build support for Asahi Kasei AK8975, AK8963, 15 Say yes here to build support for Asahi Kasei AK8975, AK8963,
14 AK09911 or AK09912 3-Axis Magnetometer. 16 AK09911 or AK09912 3-Axis Magnetometer.
@@ -25,22 +27,41 @@ config AK09911
25 Deprecated: AK09911 is now supported by AK8975 driver. 27 Deprecated: AK09911 is now supported by AK8975 driver.
26 28
27config BMC150_MAGN 29config BMC150_MAGN
28 tristate "Bosch BMC150 Magnetometer Driver" 30 tristate
29 depends on I2C
30 select REGMAP_I2C
31 select IIO_BUFFER 31 select IIO_BUFFER
32 select IIO_TRIGGERED_BUFFER 32 select IIO_TRIGGERED_BUFFER
33
34config BMC150_MAGN_I2C
35 tristate "Bosch BMC150 I2C Magnetometer Driver"
36 depends on I2C
37 select BMC150_MAGN
38 select REGMAP_I2C
33 help 39 help
34 Say yes here to build support for the BMC150 magnetometer. 40 Say yes here to build support for the BMC150 magnetometer with
41 I2C interface.
35 42
36 Currently this only supports the device via an i2c interface. 43 This is a combo module with both accelerometer and magnetometer.
44 This driver is only implementing magnetometer part, which has
45 its own address and register map.
46
47 To compile this driver as a module, choose M here: the module will be
48 called bmc150_magn_i2c.
49
50config BMC150_MAGN_SPI
51 tristate "Bosch BMC150 SPI Magnetometer Driver"
52 depends on SPI
53 select BMC150_MAGN
54 select REGMAP_SPI
55 help
56 Say yes here to build support for the BMC150 magnetometer with
57 SPI interface.
37 58
38 This is a combo module with both accelerometer and magnetometer. 59 This is a combo module with both accelerometer and magnetometer.
39 This driver is only implementing magnetometer part, which has 60 This driver is only implementing magnetometer part, which has
40 its own address and register map. 61 its own address and register map.
41 62
42 To compile this driver as a module, choose M here: the module will be 63 To compile this driver as a module, choose M here: the module will be
43 called bmc150_magn. 64 called bmc150_magn_spi.
44 65
45config MAG3110 66config MAG3110
46 tristate "Freescale MAG3110 3-Axis Magnetometer" 67 tristate "Freescale MAG3110 3-Axis Magnetometer"
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index dd03fe524481..92a745c9a6e8 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -5,6 +5,9 @@
5# When adding new entries keep the list in alphabetical order 5# When adding new entries keep the list in alphabetical order
6obj-$(CONFIG_AK8975) += ak8975.o 6obj-$(CONFIG_AK8975) += ak8975.o
7obj-$(CONFIG_BMC150_MAGN) += bmc150_magn.o 7obj-$(CONFIG_BMC150_MAGN) += bmc150_magn.o
8obj-$(CONFIG_BMC150_MAGN_I2C) += bmc150_magn_i2c.o
9obj-$(CONFIG_BMC150_MAGN_SPI) += bmc150_magn_spi.o
10
8obj-$(CONFIG_MAG3110) += mag3110.o 11obj-$(CONFIG_MAG3110) += mag3110.o
9obj-$(CONFIG_HID_SENSOR_MAGNETOMETER_3D) += hid-sensor-magn-3d.o 12obj-$(CONFIG_HID_SENSOR_MAGNETOMETER_3D) += hid-sensor-magn-3d.o
10obj-$(CONFIG_MMC35240) += mmc35240.o 13obj-$(CONFIG_MMC35240) += mmc35240.o
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 0e931a9a1669..609a2c401b5d 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -32,9 +32,17 @@
32#include <linux/gpio.h> 32#include <linux/gpio.h>
33#include <linux/of_gpio.h> 33#include <linux/of_gpio.h>
34#include <linux/acpi.h> 34#include <linux/acpi.h>
35#include <linux/regulator/consumer.h>
35 36
36#include <linux/iio/iio.h> 37#include <linux/iio/iio.h>
37#include <linux/iio/sysfs.h> 38#include <linux/iio/sysfs.h>
39#include <linux/iio/buffer.h>
40#include <linux/iio/trigger.h>
41#include <linux/iio/trigger_consumer.h>
42#include <linux/iio/triggered_buffer.h>
43
44#include <linux/iio/magnetometer/ak8975.h>
45
38/* 46/*
39 * Register definitions, as well as various shifts and masks to get at the 47 * Register definitions, as well as various shifts and masks to get at the
40 * individual fields of the registers. 48 * individual fields of the registers.
@@ -361,7 +369,6 @@ static const struct ak_def ak_def_array[AK_MAX_TYPE] = {
361struct ak8975_data { 369struct ak8975_data {
362 struct i2c_client *client; 370 struct i2c_client *client;
363 const struct ak_def *def; 371 const struct ak_def *def;
364 struct attribute_group attrs;
365 struct mutex lock; 372 struct mutex lock;
366 u8 asa[3]; 373 u8 asa[3];
367 long raw_to_gauss[3]; 374 long raw_to_gauss[3];
@@ -370,8 +377,41 @@ struct ak8975_data {
370 wait_queue_head_t data_ready_queue; 377 wait_queue_head_t data_ready_queue;
371 unsigned long flags; 378 unsigned long flags;
372 u8 cntl_cache; 379 u8 cntl_cache;
380 struct iio_mount_matrix orientation;
381 struct regulator *vdd;
373}; 382};
374 383
384/* Enable attached power regulator if any. */
385static int ak8975_power_on(struct i2c_client *client)
386{
387 const struct iio_dev *indio_dev = i2c_get_clientdata(client);
388 struct ak8975_data *data = iio_priv(indio_dev);
389 int ret;
390
391 data->vdd = devm_regulator_get(&client->dev, "vdd");
392 if (IS_ERR_OR_NULL(data->vdd)) {
393 ret = PTR_ERR(data->vdd);
394 if (ret == -ENODEV)
395 ret = 0;
396 } else {
397 ret = regulator_enable(data->vdd);
398 }
399
400 if (ret)
401 dev_err(&client->dev, "failed to enable Vdd supply: %d\n", ret);
402 return ret;
403}
404
405/* Disable attached power regulator if any. */
406static void ak8975_power_off(const struct i2c_client *client)
407{
408 const struct iio_dev *indio_dev = i2c_get_clientdata(client);
409 const struct ak8975_data *data = iio_priv(indio_dev);
410
411 if (!IS_ERR_OR_NULL(data->vdd))
412 regulator_disable(data->vdd);
413}
414
375/* 415/*
376 * Return 0 if the i2c device is the one we expect. 416 * Return 0 if the i2c device is the one we expect.
377 * return a negative error number otherwise 417 * return a negative error number otherwise
@@ -601,22 +641,15 @@ static int wait_conversion_complete_interrupt(struct ak8975_data *data)
601 return ret > 0 ? 0 : -ETIME; 641 return ret > 0 ? 0 : -ETIME;
602} 642}
603 643
604/* 644static int ak8975_start_read_axis(struct ak8975_data *data,
605 * Emits the raw flux value for the x, y, or z axis. 645 const struct i2c_client *client)
606 */
607static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
608{ 646{
609 struct ak8975_data *data = iio_priv(indio_dev);
610 struct i2c_client *client = data->client;
611 int ret;
612
613 mutex_lock(&data->lock);
614
615 /* Set up the device for taking a sample. */ 647 /* Set up the device for taking a sample. */
616 ret = ak8975_set_mode(data, MODE_ONCE); 648 int ret = ak8975_set_mode(data, MODE_ONCE);
649
617 if (ret < 0) { 650 if (ret < 0) {
618 dev_err(&client->dev, "Error in setting operating mode\n"); 651 dev_err(&client->dev, "Error in setting operating mode\n");
619 goto exit; 652 return ret;
620 } 653 }
621 654
622 /* Wait for the conversion to complete. */ 655 /* Wait for the conversion to complete. */
@@ -627,7 +660,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
627 else 660 else
628 ret = wait_conversion_complete_polled(data); 661 ret = wait_conversion_complete_polled(data);
629 if (ret < 0) 662 if (ret < 0)
630 goto exit; 663 return ret;
631 664
632 /* This will be executed only for non-interrupt based waiting case */ 665 /* This will be executed only for non-interrupt based waiting case */
633 if (ret & data->def->ctrl_masks[ST1_DRDY]) { 666 if (ret & data->def->ctrl_masks[ST1_DRDY]) {
@@ -635,32 +668,45 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
635 data->def->ctrl_regs[ST2]); 668 data->def->ctrl_regs[ST2]);
636 if (ret < 0) { 669 if (ret < 0) {
637 dev_err(&client->dev, "Error in reading ST2\n"); 670 dev_err(&client->dev, "Error in reading ST2\n");
638 goto exit; 671 return ret;
639 } 672 }
640 if (ret & (data->def->ctrl_masks[ST2_DERR] | 673 if (ret & (data->def->ctrl_masks[ST2_DERR] |
641 data->def->ctrl_masks[ST2_HOFL])) { 674 data->def->ctrl_masks[ST2_HOFL])) {
642 dev_err(&client->dev, "ST2 status error 0x%x\n", ret); 675 dev_err(&client->dev, "ST2 status error 0x%x\n", ret);
643 ret = -EINVAL; 676 return -EINVAL;
644 goto exit;
645 } 677 }
646 } 678 }
647 679
648 /* Read the flux value from the appropriate register 680 return 0;
649 (the register is specified in the iio device attributes). */ 681}
650 ret = i2c_smbus_read_word_data(client, data->def->data_regs[index]); 682
651 if (ret < 0) { 683/* Retrieve raw flux value for one of the x, y, or z axis. */
652 dev_err(&client->dev, "Read axis data fails\n"); 684static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
685{
686 struct ak8975_data *data = iio_priv(indio_dev);
687 const struct i2c_client *client = data->client;
688 const struct ak_def *def = data->def;
689 int ret;
690
691 mutex_lock(&data->lock);
692
693 ret = ak8975_start_read_axis(data, client);
694 if (ret)
695 goto exit;
696
697 ret = i2c_smbus_read_word_data(client, def->data_regs[index]);
698 if (ret < 0)
653 goto exit; 699 goto exit;
654 }
655 700
656 mutex_unlock(&data->lock); 701 mutex_unlock(&data->lock);
657 702
658 /* Clamp to valid range. */ 703 /* Clamp to valid range. */
659 *val = clamp_t(s16, ret, -data->def->range, data->def->range); 704 *val = clamp_t(s16, ret, -def->range, def->range);
660 return IIO_VAL_INT; 705 return IIO_VAL_INT;
661 706
662exit: 707exit:
663 mutex_unlock(&data->lock); 708 mutex_unlock(&data->lock);
709 dev_err(&client->dev, "Error in reading axis\n");
664 return ret; 710 return ret;
665} 711}
666 712
@@ -682,6 +728,18 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
682 return -EINVAL; 728 return -EINVAL;
683} 729}
684 730
731static const struct iio_mount_matrix *
732ak8975_get_mount_matrix(const struct iio_dev *indio_dev,
733 const struct iio_chan_spec *chan)
734{
735 return &((struct ak8975_data *)iio_priv(indio_dev))->orientation;
736}
737
738static const struct iio_chan_spec_ext_info ak8975_ext_info[] = {
739 IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, ak8975_get_mount_matrix),
740 { },
741};
742
685#define AK8975_CHANNEL(axis, index) \ 743#define AK8975_CHANNEL(axis, index) \
686 { \ 744 { \
687 .type = IIO_MAGN, \ 745 .type = IIO_MAGN, \
@@ -690,12 +748,23 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
690 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ 748 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
691 BIT(IIO_CHAN_INFO_SCALE), \ 749 BIT(IIO_CHAN_INFO_SCALE), \
692 .address = index, \ 750 .address = index, \
751 .scan_index = index, \
752 .scan_type = { \
753 .sign = 's', \
754 .realbits = 16, \
755 .storagebits = 16, \
756 .endianness = IIO_CPU \
757 }, \
758 .ext_info = ak8975_ext_info, \
693 } 759 }
694 760
695static const struct iio_chan_spec ak8975_channels[] = { 761static const struct iio_chan_spec ak8975_channels[] = {
696 AK8975_CHANNEL(X, 0), AK8975_CHANNEL(Y, 1), AK8975_CHANNEL(Z, 2), 762 AK8975_CHANNEL(X, 0), AK8975_CHANNEL(Y, 1), AK8975_CHANNEL(Z, 2),
763 IIO_CHAN_SOFT_TIMESTAMP(3),
697}; 764};
698 765
766static const unsigned long ak8975_scan_masks[] = { 0x7, 0 };
767
699static const struct iio_info ak8975_info = { 768static const struct iio_info ak8975_info = {
700 .read_raw = &ak8975_read_raw, 769 .read_raw = &ak8975_read_raw,
701 .driver_module = THIS_MODULE, 770 .driver_module = THIS_MODULE,
@@ -724,6 +793,56 @@ static const char *ak8975_match_acpi_device(struct device *dev,
724 return dev_name(dev); 793 return dev_name(dev);
725} 794}
726 795
796static void ak8975_fill_buffer(struct iio_dev *indio_dev)
797{
798 struct ak8975_data *data = iio_priv(indio_dev);
799 const struct i2c_client *client = data->client;
800 const struct ak_def *def = data->def;
801 int ret;
802 s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */
803
804 mutex_lock(&data->lock);
805
806 ret = ak8975_start_read_axis(data, client);
807 if (ret)
808 goto unlock;
809
810 /*
811 * For each axis, read the flux value from the appropriate register
812 * (the register is specified in the iio device attributes).
813 */
814 ret = i2c_smbus_read_i2c_block_data_or_emulated(client,
815 def->data_regs[0],
816 3 * sizeof(buff[0]),
817 (u8 *)buff);
818 if (ret < 0)
819 goto unlock;
820
821 mutex_unlock(&data->lock);
822
823 /* Clamp to valid range. */
824 buff[0] = clamp_t(s16, le16_to_cpu(buff[0]), -def->range, def->range);
825 buff[1] = clamp_t(s16, le16_to_cpu(buff[1]), -def->range, def->range);
826 buff[2] = clamp_t(s16, le16_to_cpu(buff[2]), -def->range, def->range);
827
828 iio_push_to_buffers_with_timestamp(indio_dev, buff, iio_get_time_ns());
829 return;
830
831unlock:
832 mutex_unlock(&data->lock);
833 dev_err(&client->dev, "Error in reading axes block\n");
834}
835
836static irqreturn_t ak8975_handle_trigger(int irq, void *p)
837{
838 const struct iio_poll_func *pf = p;
839 struct iio_dev *indio_dev = pf->indio_dev;
840
841 ak8975_fill_buffer(indio_dev);
842 iio_trigger_notify_done(indio_dev->trig);
843 return IRQ_HANDLED;
844}
845
727static int ak8975_probe(struct i2c_client *client, 846static int ak8975_probe(struct i2c_client *client,
728 const struct i2c_device_id *id) 847 const struct i2c_device_id *id)
729{ 848{
@@ -733,10 +852,12 @@ static int ak8975_probe(struct i2c_client *client,
733 int err; 852 int err;
734 const char *name = NULL; 853 const char *name = NULL;
735 enum asahi_compass_chipset chipset = AK_MAX_TYPE; 854 enum asahi_compass_chipset chipset = AK_MAX_TYPE;
855 const struct ak8975_platform_data *pdata =
856 dev_get_platdata(&client->dev);
736 857
737 /* Grab and set up the supplied GPIO. */ 858 /* Grab and set up the supplied GPIO. */
738 if (client->dev.platform_data) 859 if (pdata)
739 eoc_gpio = *(int *)(client->dev.platform_data); 860 eoc_gpio = pdata->eoc_gpio;
740 else if (client->dev.of_node) 861 else if (client->dev.of_node)
741 eoc_gpio = of_get_gpio(client->dev.of_node, 0); 862 eoc_gpio = of_get_gpio(client->dev.of_node, 0);
742 else 863 else
@@ -770,13 +891,24 @@ static int ak8975_probe(struct i2c_client *client,
770 data->eoc_gpio = eoc_gpio; 891 data->eoc_gpio = eoc_gpio;
771 data->eoc_irq = 0; 892 data->eoc_irq = 0;
772 893
894 if (!pdata) {
895 err = of_iio_read_mount_matrix(&client->dev,
896 "mount-matrix",
897 &data->orientation);
898 if (err)
899 return err;
900 } else
901 data->orientation = pdata->orientation;
902
773 /* id will be NULL when enumerated via ACPI */ 903 /* id will be NULL when enumerated via ACPI */
774 if (id) { 904 if (id) {
775 chipset = (enum asahi_compass_chipset)(id->driver_data); 905 chipset = (enum asahi_compass_chipset)(id->driver_data);
776 name = id->name; 906 name = id->name;
777 } else if (ACPI_HANDLE(&client->dev)) 907 } else if (ACPI_HANDLE(&client->dev)) {
778 name = ak8975_match_acpi_device(&client->dev, &chipset); 908 name = ak8975_match_acpi_device(&client->dev, &chipset);
779 else 909 if (!name)
910 return -ENODEV;
911 } else
780 return -ENOSYS; 912 return -ENOSYS;
781 913
782 if (chipset >= AK_MAX_TYPE) { 914 if (chipset >= AK_MAX_TYPE) {
@@ -786,10 +918,15 @@ static int ak8975_probe(struct i2c_client *client,
786 } 918 }
787 919
788 data->def = &ak_def_array[chipset]; 920 data->def = &ak_def_array[chipset];
921
922 err = ak8975_power_on(client);
923 if (err)
924 return err;
925
789 err = ak8975_who_i_am(client, data->def->type); 926 err = ak8975_who_i_am(client, data->def->type);
790 if (err < 0) { 927 if (err < 0) {
791 dev_err(&client->dev, "Unexpected device\n"); 928 dev_err(&client->dev, "Unexpected device\n");
792 return err; 929 goto power_off;
793 } 930 }
794 dev_dbg(&client->dev, "Asahi compass chip %s\n", name); 931 dev_dbg(&client->dev, "Asahi compass chip %s\n", name);
795 932
@@ -797,7 +934,7 @@ static int ak8975_probe(struct i2c_client *client,
797 err = ak8975_setup(client); 934 err = ak8975_setup(client);
798 if (err < 0) { 935 if (err < 0) {
799 dev_err(&client->dev, "%s initialization fails\n", name); 936 dev_err(&client->dev, "%s initialization fails\n", name);
800 return err; 937 goto power_off;
801 } 938 }
802 939
803 mutex_init(&data->lock); 940 mutex_init(&data->lock);
@@ -805,9 +942,41 @@ static int ak8975_probe(struct i2c_client *client,
805 indio_dev->channels = ak8975_channels; 942 indio_dev->channels = ak8975_channels;
806 indio_dev->num_channels = ARRAY_SIZE(ak8975_channels); 943 indio_dev->num_channels = ARRAY_SIZE(ak8975_channels);
807 indio_dev->info = &ak8975_info; 944 indio_dev->info = &ak8975_info;
945 indio_dev->available_scan_masks = ak8975_scan_masks;
808 indio_dev->modes = INDIO_DIRECT_MODE; 946 indio_dev->modes = INDIO_DIRECT_MODE;
809 indio_dev->name = name; 947 indio_dev->name = name;
810 return devm_iio_device_register(&client->dev, indio_dev); 948
949 err = iio_triggered_buffer_setup(indio_dev, NULL, ak8975_handle_trigger,
950 NULL);
951 if (err) {
952 dev_err(&client->dev, "triggered buffer setup failed\n");
953 goto power_off;
954 }
955
956 err = iio_device_register(indio_dev);
957 if (err) {
958 dev_err(&client->dev, "device register failed\n");
959 goto cleanup_buffer;
960 }
961
962 return 0;
963
964cleanup_buffer:
965 iio_triggered_buffer_cleanup(indio_dev);
966power_off:
967 ak8975_power_off(client);
968 return err;
969}
970
971static int ak8975_remove(struct i2c_client *client)
972{
973 struct iio_dev *indio_dev = i2c_get_clientdata(client);
974
975 iio_device_unregister(indio_dev);
976 iio_triggered_buffer_cleanup(indio_dev);
977 ak8975_power_off(client);
978
979 return 0;
811} 980}
812 981
813static const struct i2c_device_id ak8975_id[] = { 982static const struct i2c_device_id ak8975_id[] = {
@@ -841,6 +1010,7 @@ static struct i2c_driver ak8975_driver = {
841 .acpi_match_table = ACPI_PTR(ak_acpi_match), 1010 .acpi_match_table = ACPI_PTR(ak_acpi_match),
842 }, 1011 },
843 .probe = ak8975_probe, 1012 .probe = ak8975_probe,
1013 .remove = ak8975_remove,
844 .id_table = ak8975_id, 1014 .id_table = ak8975_id,
845}; 1015};
846module_i2c_driver(ak8975_driver); 1016module_i2c_driver(ak8975_driver);
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index ffcb75ea64fb..d104fb8d9379 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -23,7 +23,6 @@
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/acpi.h> 25#include <linux/acpi.h>
26#include <linux/gpio/consumer.h>
27#include <linux/pm.h> 26#include <linux/pm.h>
28#include <linux/pm_runtime.h> 27#include <linux/pm_runtime.h>
29#include <linux/iio/iio.h> 28#include <linux/iio/iio.h>
@@ -35,6 +34,8 @@
35#include <linux/iio/triggered_buffer.h> 34#include <linux/iio/triggered_buffer.h>
36#include <linux/regmap.h> 35#include <linux/regmap.h>
37 36
37#include "bmc150_magn.h"
38
38#define BMC150_MAGN_DRV_NAME "bmc150_magn" 39#define BMC150_MAGN_DRV_NAME "bmc150_magn"
39#define BMC150_MAGN_IRQ_NAME "bmc150_magn_event" 40#define BMC150_MAGN_IRQ_NAME "bmc150_magn_event"
40 41
@@ -135,7 +136,7 @@ struct bmc150_magn_trim_regs {
135} __packed; 136} __packed;
136 137
137struct bmc150_magn_data { 138struct bmc150_magn_data {
138 struct i2c_client *client; 139 struct device *dev;
139 /* 140 /*
140 * 1. Protect this structure. 141 * 1. Protect this structure.
141 * 2. Serialize sequences that power on/off the device and access HW. 142 * 2. Serialize sequences that power on/off the device and access HW.
@@ -147,6 +148,7 @@ struct bmc150_magn_data {
147 struct iio_trigger *dready_trig; 148 struct iio_trigger *dready_trig;
148 bool dready_trigger_on; 149 bool dready_trigger_on;
149 int max_odr; 150 int max_odr;
151 int irq;
150}; 152};
151 153
152static const struct { 154static const struct {
@@ -216,7 +218,7 @@ static bool bmc150_magn_is_volatile_reg(struct device *dev, unsigned int reg)
216 } 218 }
217} 219}
218 220
219static const struct regmap_config bmc150_magn_regmap_config = { 221const struct regmap_config bmc150_magn_regmap_config = {
220 .reg_bits = 8, 222 .reg_bits = 8,
221 .val_bits = 8, 223 .val_bits = 8,
222 224
@@ -226,6 +228,7 @@ static const struct regmap_config bmc150_magn_regmap_config = {
226 .writeable_reg = bmc150_magn_is_writeable_reg, 228 .writeable_reg = bmc150_magn_is_writeable_reg,
227 .volatile_reg = bmc150_magn_is_volatile_reg, 229 .volatile_reg = bmc150_magn_is_volatile_reg,
228}; 230};
231EXPORT_SYMBOL(bmc150_magn_regmap_config);
229 232
230static int bmc150_magn_set_power_mode(struct bmc150_magn_data *data, 233static int bmc150_magn_set_power_mode(struct bmc150_magn_data *data,
231 enum bmc150_magn_power_modes mode, 234 enum bmc150_magn_power_modes mode,
@@ -264,17 +267,17 @@ static int bmc150_magn_set_power_state(struct bmc150_magn_data *data, bool on)
264 int ret; 267 int ret;
265 268
266 if (on) { 269 if (on) {
267 ret = pm_runtime_get_sync(&data->client->dev); 270 ret = pm_runtime_get_sync(data->dev);
268 } else { 271 } else {
269 pm_runtime_mark_last_busy(&data->client->dev); 272 pm_runtime_mark_last_busy(data->dev);
270 ret = pm_runtime_put_autosuspend(&data->client->dev); 273 ret = pm_runtime_put_autosuspend(data->dev);
271 } 274 }
272 275
273 if (ret < 0) { 276 if (ret < 0) {
274 dev_err(&data->client->dev, 277 dev_err(data->dev,
275 "failed to change power state to %d\n", on); 278 "failed to change power state to %d\n", on);
276 if (on) 279 if (on)
277 pm_runtime_put_noidle(&data->client->dev); 280 pm_runtime_put_noidle(data->dev);
278 281
279 return ret; 282 return ret;
280 } 283 }
@@ -351,7 +354,7 @@ static int bmc150_magn_set_max_odr(struct bmc150_magn_data *data, int rep_xy,
351 /* the maximum selectable read-out frequency from datasheet */ 354 /* the maximum selectable read-out frequency from datasheet */
352 max_odr = 1000000 / (145 * rep_xy + 500 * rep_z + 980); 355 max_odr = 1000000 / (145 * rep_xy + 500 * rep_z + 980);
353 if (odr > max_odr) { 356 if (odr > max_odr) {
354 dev_err(&data->client->dev, 357 dev_err(data->dev,
355 "Can't set oversampling with sampling freq %d\n", 358 "Can't set oversampling with sampling freq %d\n",
356 odr); 359 odr);
357 return -EINVAL; 360 return -EINVAL;
@@ -685,27 +688,27 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
685 ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, 688 ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND,
686 false); 689 false);
687 if (ret < 0) { 690 if (ret < 0) {
688 dev_err(&data->client->dev, 691 dev_err(data->dev,
689 "Failed to bring up device from suspend mode\n"); 692 "Failed to bring up device from suspend mode\n");
690 return ret; 693 return ret;
691 } 694 }
692 695
693 ret = regmap_read(data->regmap, BMC150_MAGN_REG_CHIP_ID, &chip_id); 696 ret = regmap_read(data->regmap, BMC150_MAGN_REG_CHIP_ID, &chip_id);
694 if (ret < 0) { 697 if (ret < 0) {
695 dev_err(&data->client->dev, "Failed reading chip id\n"); 698 dev_err(data->dev, "Failed reading chip id\n");
696 goto err_poweroff; 699 goto err_poweroff;
697 } 700 }
698 if (chip_id != BMC150_MAGN_CHIP_ID_VAL) { 701 if (chip_id != BMC150_MAGN_CHIP_ID_VAL) {
699 dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id); 702 dev_err(data->dev, "Invalid chip id 0x%x\n", chip_id);
700 ret = -ENODEV; 703 ret = -ENODEV;
701 goto err_poweroff; 704 goto err_poweroff;
702 } 705 }
703 dev_dbg(&data->client->dev, "Chip id %x\n", chip_id); 706 dev_dbg(data->dev, "Chip id %x\n", chip_id);
704 707
705 preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET]; 708 preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET];
706 ret = bmc150_magn_set_odr(data, preset.odr); 709 ret = bmc150_magn_set_odr(data, preset.odr);
707 if (ret < 0) { 710 if (ret < 0) {
708 dev_err(&data->client->dev, "Failed to set ODR to %d\n", 711 dev_err(data->dev, "Failed to set ODR to %d\n",
709 preset.odr); 712 preset.odr);
710 goto err_poweroff; 713 goto err_poweroff;
711 } 714 }
@@ -713,7 +716,7 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
713 ret = regmap_write(data->regmap, BMC150_MAGN_REG_REP_XY, 716 ret = regmap_write(data->regmap, BMC150_MAGN_REG_REP_XY,
714 BMC150_MAGN_REPXY_TO_REGVAL(preset.rep_xy)); 717 BMC150_MAGN_REPXY_TO_REGVAL(preset.rep_xy));
715 if (ret < 0) { 718 if (ret < 0) {
716 dev_err(&data->client->dev, "Failed to set REP XY to %d\n", 719 dev_err(data->dev, "Failed to set REP XY to %d\n",
717 preset.rep_xy); 720 preset.rep_xy);
718 goto err_poweroff; 721 goto err_poweroff;
719 } 722 }
@@ -721,7 +724,7 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
721 ret = regmap_write(data->regmap, BMC150_MAGN_REG_REP_Z, 724 ret = regmap_write(data->regmap, BMC150_MAGN_REG_REP_Z,
722 BMC150_MAGN_REPZ_TO_REGVAL(preset.rep_z)); 725 BMC150_MAGN_REPZ_TO_REGVAL(preset.rep_z));
723 if (ret < 0) { 726 if (ret < 0) {
724 dev_err(&data->client->dev, "Failed to set REP Z to %d\n", 727 dev_err(data->dev, "Failed to set REP Z to %d\n",
725 preset.rep_z); 728 preset.rep_z);
726 goto err_poweroff; 729 goto err_poweroff;
727 } 730 }
@@ -734,7 +737,7 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
734 ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL, 737 ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL,
735 true); 738 true);
736 if (ret < 0) { 739 if (ret < 0) {
737 dev_err(&data->client->dev, "Failed to power on device\n"); 740 dev_err(data->dev, "Failed to power on device\n");
738 goto err_poweroff; 741 goto err_poweroff;
739 } 742 }
740 743
@@ -843,41 +846,33 @@ static const char *bmc150_magn_match_acpi_device(struct device *dev)
843 return dev_name(dev); 846 return dev_name(dev);
844} 847}
845 848
846static int bmc150_magn_probe(struct i2c_client *client, 849int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
847 const struct i2c_device_id *id) 850 int irq, const char *name)
848{ 851{
849 struct bmc150_magn_data *data; 852 struct bmc150_magn_data *data;
850 struct iio_dev *indio_dev; 853 struct iio_dev *indio_dev;
851 const char *name = NULL;
852 int ret; 854 int ret;
853 855
854 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); 856 indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
855 if (!indio_dev) 857 if (!indio_dev)
856 return -ENOMEM; 858 return -ENOMEM;
857 859
858 data = iio_priv(indio_dev); 860 data = iio_priv(indio_dev);
859 i2c_set_clientdata(client, indio_dev); 861 dev_set_drvdata(dev, indio_dev);
860 data->client = client; 862 data->regmap = regmap;
863 data->irq = irq;
864 data->dev = dev;
861 865
862 if (id) 866 if (!name && ACPI_HANDLE(dev))
863 name = id->name; 867 name = bmc150_magn_match_acpi_device(dev);
864 else if (ACPI_HANDLE(&client->dev))
865 name = bmc150_magn_match_acpi_device(&client->dev);
866 else
867 return -ENOSYS;
868 868
869 mutex_init(&data->mutex); 869 mutex_init(&data->mutex);
870 data->regmap = devm_regmap_init_i2c(client, &bmc150_magn_regmap_config);
871 if (IS_ERR(data->regmap)) {
872 dev_err(&client->dev, "Failed to allocate register map\n");
873 return PTR_ERR(data->regmap);
874 }
875 870
876 ret = bmc150_magn_init(data); 871 ret = bmc150_magn_init(data);
877 if (ret < 0) 872 if (ret < 0)
878 return ret; 873 return ret;
879 874
880 indio_dev->dev.parent = &client->dev; 875 indio_dev->dev.parent = dev;
881 indio_dev->channels = bmc150_magn_channels; 876 indio_dev->channels = bmc150_magn_channels;
882 indio_dev->num_channels = ARRAY_SIZE(bmc150_magn_channels); 877 indio_dev->num_channels = ARRAY_SIZE(bmc150_magn_channels);
883 indio_dev->available_scan_masks = bmc150_magn_scan_masks; 878 indio_dev->available_scan_masks = bmc150_magn_scan_masks;
@@ -885,35 +880,34 @@ static int bmc150_magn_probe(struct i2c_client *client,
885 indio_dev->modes = INDIO_DIRECT_MODE; 880 indio_dev->modes = INDIO_DIRECT_MODE;
886 indio_dev->info = &bmc150_magn_info; 881 indio_dev->info = &bmc150_magn_info;
887 882
888 if (client->irq > 0) { 883 if (irq > 0) {
889 data->dready_trig = devm_iio_trigger_alloc(&client->dev, 884 data->dready_trig = devm_iio_trigger_alloc(dev,
890 "%s-dev%d", 885 "%s-dev%d",
891 indio_dev->name, 886 indio_dev->name,
892 indio_dev->id); 887 indio_dev->id);
893 if (!data->dready_trig) { 888 if (!data->dready_trig) {
894 ret = -ENOMEM; 889 ret = -ENOMEM;
895 dev_err(&client->dev, "iio trigger alloc failed\n"); 890 dev_err(dev, "iio trigger alloc failed\n");
896 goto err_poweroff; 891 goto err_poweroff;
897 } 892 }
898 893
899 data->dready_trig->dev.parent = &client->dev; 894 data->dready_trig->dev.parent = dev;
900 data->dready_trig->ops = &bmc150_magn_trigger_ops; 895 data->dready_trig->ops = &bmc150_magn_trigger_ops;
901 iio_trigger_set_drvdata(data->dready_trig, indio_dev); 896 iio_trigger_set_drvdata(data->dready_trig, indio_dev);
902 ret = iio_trigger_register(data->dready_trig); 897 ret = iio_trigger_register(data->dready_trig);
903 if (ret) { 898 if (ret) {
904 dev_err(&client->dev, "iio trigger register failed\n"); 899 dev_err(dev, "iio trigger register failed\n");
905 goto err_poweroff; 900 goto err_poweroff;
906 } 901 }
907 902
908 ret = request_threaded_irq(client->irq, 903 ret = request_threaded_irq(irq,
909 iio_trigger_generic_data_rdy_poll, 904 iio_trigger_generic_data_rdy_poll,
910 NULL, 905 NULL,
911 IRQF_TRIGGER_RISING | IRQF_ONESHOT, 906 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
912 BMC150_MAGN_IRQ_NAME, 907 BMC150_MAGN_IRQ_NAME,
913 data->dready_trig); 908 data->dready_trig);
914 if (ret < 0) { 909 if (ret < 0) {
915 dev_err(&client->dev, "request irq %d failed\n", 910 dev_err(dev, "request irq %d failed\n", irq);
916 client->irq);
917 goto err_trigger_unregister; 911 goto err_trigger_unregister;
918 } 912 }
919 } 913 }
@@ -923,34 +917,33 @@ static int bmc150_magn_probe(struct i2c_client *client,
923 bmc150_magn_trigger_handler, 917 bmc150_magn_trigger_handler,
924 &bmc150_magn_buffer_setup_ops); 918 &bmc150_magn_buffer_setup_ops);
925 if (ret < 0) { 919 if (ret < 0) {
926 dev_err(&client->dev, 920 dev_err(dev, "iio triggered buffer setup failed\n");
927 "iio triggered buffer setup failed\n");
928 goto err_free_irq; 921 goto err_free_irq;
929 } 922 }
930 923
931 ret = pm_runtime_set_active(&client->dev); 924 ret = pm_runtime_set_active(dev);
932 if (ret) 925 if (ret)
933 goto err_buffer_cleanup; 926 goto err_buffer_cleanup;
934 927
935 pm_runtime_enable(&client->dev); 928 pm_runtime_enable(dev);
936 pm_runtime_set_autosuspend_delay(&client->dev, 929 pm_runtime_set_autosuspend_delay(dev,
937 BMC150_MAGN_AUTO_SUSPEND_DELAY_MS); 930 BMC150_MAGN_AUTO_SUSPEND_DELAY_MS);
938 pm_runtime_use_autosuspend(&client->dev); 931 pm_runtime_use_autosuspend(dev);
939 932
940 ret = iio_device_register(indio_dev); 933 ret = iio_device_register(indio_dev);
941 if (ret < 0) { 934 if (ret < 0) {
942 dev_err(&client->dev, "unable to register iio device\n"); 935 dev_err(dev, "unable to register iio device\n");
943 goto err_buffer_cleanup; 936 goto err_buffer_cleanup;
944 } 937 }
945 938
946 dev_dbg(&indio_dev->dev, "Registered device %s\n", name); 939 dev_dbg(dev, "Registered device %s\n", name);
947 return 0; 940 return 0;
948 941
949err_buffer_cleanup: 942err_buffer_cleanup:
950 iio_triggered_buffer_cleanup(indio_dev); 943 iio_triggered_buffer_cleanup(indio_dev);
951err_free_irq: 944err_free_irq:
952 if (client->irq > 0) 945 if (irq > 0)
953 free_irq(client->irq, data->dready_trig); 946 free_irq(irq, data->dready_trig);
954err_trigger_unregister: 947err_trigger_unregister:
955 if (data->dready_trig) 948 if (data->dready_trig)
956 iio_trigger_unregister(data->dready_trig); 949 iio_trigger_unregister(data->dready_trig);
@@ -958,22 +951,23 @@ err_poweroff:
958 bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true); 951 bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
959 return ret; 952 return ret;
960} 953}
954EXPORT_SYMBOL(bmc150_magn_probe);
961 955
962static int bmc150_magn_remove(struct i2c_client *client) 956int bmc150_magn_remove(struct device *dev)
963{ 957{
964 struct iio_dev *indio_dev = i2c_get_clientdata(client); 958 struct iio_dev *indio_dev = dev_get_drvdata(dev);
965 struct bmc150_magn_data *data = iio_priv(indio_dev); 959 struct bmc150_magn_data *data = iio_priv(indio_dev);
966 960
967 iio_device_unregister(indio_dev); 961 iio_device_unregister(indio_dev);
968 962
969 pm_runtime_disable(&client->dev); 963 pm_runtime_disable(dev);
970 pm_runtime_set_suspended(&client->dev); 964 pm_runtime_set_suspended(dev);
971 pm_runtime_put_noidle(&client->dev); 965 pm_runtime_put_noidle(dev);
972 966
973 iio_triggered_buffer_cleanup(indio_dev); 967 iio_triggered_buffer_cleanup(indio_dev);
974 968
975 if (client->irq > 0) 969 if (data->irq > 0)
976 free_irq(data->client->irq, data->dready_trig); 970 free_irq(data->irq, data->dready_trig);
977 971
978 if (data->dready_trig) 972 if (data->dready_trig)
979 iio_trigger_unregister(data->dready_trig); 973 iio_trigger_unregister(data->dready_trig);
@@ -984,11 +978,12 @@ static int bmc150_magn_remove(struct i2c_client *client)
984 978
985 return 0; 979 return 0;
986} 980}
981EXPORT_SYMBOL(bmc150_magn_remove);
987 982
988#ifdef CONFIG_PM 983#ifdef CONFIG_PM
989static int bmc150_magn_runtime_suspend(struct device *dev) 984static int bmc150_magn_runtime_suspend(struct device *dev)
990{ 985{
991 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 986 struct iio_dev *indio_dev = dev_get_drvdata(dev);
992 struct bmc150_magn_data *data = iio_priv(indio_dev); 987 struct bmc150_magn_data *data = iio_priv(indio_dev);
993 int ret; 988 int ret;
994 989
@@ -997,7 +992,7 @@ static int bmc150_magn_runtime_suspend(struct device *dev)
997 true); 992 true);
998 mutex_unlock(&data->mutex); 993 mutex_unlock(&data->mutex);
999 if (ret < 0) { 994 if (ret < 0) {
1000 dev_err(&data->client->dev, "powering off device failed\n"); 995 dev_err(dev, "powering off device failed\n");
1001 return ret; 996 return ret;
1002 } 997 }
1003 return 0; 998 return 0;
@@ -1008,7 +1003,7 @@ static int bmc150_magn_runtime_suspend(struct device *dev)
1008 */ 1003 */
1009static int bmc150_magn_runtime_resume(struct device *dev) 1004static int bmc150_magn_runtime_resume(struct device *dev)
1010{ 1005{
1011 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 1006 struct iio_dev *indio_dev = dev_get_drvdata(dev);
1012 struct bmc150_magn_data *data = iio_priv(indio_dev); 1007 struct bmc150_magn_data *data = iio_priv(indio_dev);
1013 1008
1014 return bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL, 1009 return bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL,
@@ -1019,7 +1014,7 @@ static int bmc150_magn_runtime_resume(struct device *dev)
1019#ifdef CONFIG_PM_SLEEP 1014#ifdef CONFIG_PM_SLEEP
1020static int bmc150_magn_suspend(struct device *dev) 1015static int bmc150_magn_suspend(struct device *dev)
1021{ 1016{
1022 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 1017 struct iio_dev *indio_dev = dev_get_drvdata(dev);
1023 struct bmc150_magn_data *data = iio_priv(indio_dev); 1018 struct bmc150_magn_data *data = iio_priv(indio_dev);
1024 int ret; 1019 int ret;
1025 1020
@@ -1033,7 +1028,7 @@ static int bmc150_magn_suspend(struct device *dev)
1033 1028
1034static int bmc150_magn_resume(struct device *dev) 1029static int bmc150_magn_resume(struct device *dev)
1035{ 1030{
1036 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 1031 struct iio_dev *indio_dev = dev_get_drvdata(dev);
1037 struct bmc150_magn_data *data = iio_priv(indio_dev); 1032 struct bmc150_magn_data *data = iio_priv(indio_dev);
1038 int ret; 1033 int ret;
1039 1034
@@ -1046,38 +1041,13 @@ static int bmc150_magn_resume(struct device *dev)
1046} 1041}
1047#endif 1042#endif
1048 1043
1049static const struct dev_pm_ops bmc150_magn_pm_ops = { 1044const struct dev_pm_ops bmc150_magn_pm_ops = {
1050 SET_SYSTEM_SLEEP_PM_OPS(bmc150_magn_suspend, bmc150_magn_resume) 1045 SET_SYSTEM_SLEEP_PM_OPS(bmc150_magn_suspend, bmc150_magn_resume)
1051 SET_RUNTIME_PM_OPS(bmc150_magn_runtime_suspend, 1046 SET_RUNTIME_PM_OPS(bmc150_magn_runtime_suspend,
1052 bmc150_magn_runtime_resume, NULL) 1047 bmc150_magn_runtime_resume, NULL)
1053}; 1048};
1054 1049EXPORT_SYMBOL(bmc150_magn_pm_ops);
1055static const struct acpi_device_id bmc150_magn_acpi_match[] = {
1056 {"BMC150B", 0},
1057 {"BMC156B", 0},
1058 {},
1059};
1060MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
1061
1062static const struct i2c_device_id bmc150_magn_id[] = {
1063 {"bmc150_magn", 0},
1064 {"bmc156_magn", 0},
1065 {},
1066};
1067MODULE_DEVICE_TABLE(i2c, bmc150_magn_id);
1068
1069static struct i2c_driver bmc150_magn_driver = {
1070 .driver = {
1071 .name = BMC150_MAGN_DRV_NAME,
1072 .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
1073 .pm = &bmc150_magn_pm_ops,
1074 },
1075 .probe = bmc150_magn_probe,
1076 .remove = bmc150_magn_remove,
1077 .id_table = bmc150_magn_id,
1078};
1079module_i2c_driver(bmc150_magn_driver);
1080 1050
1081MODULE_AUTHOR("Irina Tirdea <irina.tirdea@intel.com>"); 1051MODULE_AUTHOR("Irina Tirdea <irina.tirdea@intel.com>");
1082MODULE_LICENSE("GPL v2"); 1052MODULE_LICENSE("GPL v2");
1083MODULE_DESCRIPTION("BMC150 magnetometer driver"); 1053MODULE_DESCRIPTION("BMC150 magnetometer core driver");
diff --git a/drivers/iio/magnetometer/bmc150_magn.h b/drivers/iio/magnetometer/bmc150_magn.h
new file mode 100644
index 000000000000..9a8e26812ca8
--- /dev/null
+++ b/drivers/iio/magnetometer/bmc150_magn.h
@@ -0,0 +1,11 @@
1#ifndef _BMC150_MAGN_H_
2#define _BMC150_MAGN_H_
3
4extern const struct regmap_config bmc150_magn_regmap_config;
5extern const struct dev_pm_ops bmc150_magn_pm_ops;
6
7int bmc150_magn_probe(struct device *dev, struct regmap *regmap, int irq,
8 const char *name);
9int bmc150_magn_remove(struct device *dev);
10
11#endif /* _BMC150_MAGN_H_ */
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
new file mode 100644
index 000000000000..eddc7f0d0096
--- /dev/null
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -0,0 +1,77 @@
1/*
2 * 3-axis magnetometer driver supporting following I2C Bosch-Sensortec chips:
3 * - BMC150
4 * - BMC156
5 *
6 * Copyright (c) 2016, Intel Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 */
17#include <linux/device.h>
18#include <linux/mod_devicetable.h>
19#include <linux/i2c.h>
20#include <linux/module.h>
21#include <linux/acpi.h>
22#include <linux/regmap.h>
23
24#include "bmc150_magn.h"
25
26static int bmc150_magn_i2c_probe(struct i2c_client *client,
27 const struct i2c_device_id *id)
28{
29 struct regmap *regmap;
30 const char *name = NULL;
31
32 regmap = devm_regmap_init_i2c(client, &bmc150_magn_regmap_config);
33 if (IS_ERR(regmap)) {
34 dev_err(&client->dev, "Failed to initialize i2c regmap\n");
35 return PTR_ERR(regmap);
36 }
37
38 if (id)
39 name = id->name;
40
41 return bmc150_magn_probe(&client->dev, regmap, client->irq, name);
42}
43
44static int bmc150_magn_i2c_remove(struct i2c_client *client)
45{
46 return bmc150_magn_remove(&client->dev);
47}
48
49static const struct acpi_device_id bmc150_magn_acpi_match[] = {
50 {"BMC150B", 0},
51 {"BMC156B", 0},
52 {},
53};
54MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
55
56static const struct i2c_device_id bmc150_magn_i2c_id[] = {
57 {"bmc150_magn", 0},
58 {"bmc156_magn", 0},
59 {}
60};
61MODULE_DEVICE_TABLE(i2c, bmc150_magn_i2c_id);
62
63static struct i2c_driver bmc150_magn_driver = {
64 .driver = {
65 .name = "bmc150_magn_i2c",
66 .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
67 .pm = &bmc150_magn_pm_ops,
68 },
69 .probe = bmc150_magn_i2c_probe,
70 .remove = bmc150_magn_i2c_remove,
71 .id_table = bmc150_magn_i2c_id,
72};
73module_i2c_driver(bmc150_magn_driver);
74
75MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
76MODULE_LICENSE("GPL v2");
77MODULE_DESCRIPTION("BMC150 I2C magnetometer driver");
diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c
new file mode 100644
index 000000000000..c4c738a07695
--- /dev/null
+++ b/drivers/iio/magnetometer/bmc150_magn_spi.c
@@ -0,0 +1,68 @@
1/*
2 * 3-axis magnetometer driver support following SPI Bosch-Sensortec chips:
3 * - BMC150
4 * - BMC156
5 *
6 * Copyright (c) 2016, Intel Corporation.
7 *
8 * This file is subject to the terms and conditions of version 2 of
9 * the GNU General Public License. See the file COPYING in the main
10 * directory of this archive for more details.
11 */
12#include <linux/module.h>
13#include <linux/mod_devicetable.h>
14#include <linux/spi/spi.h>
15#include <linux/acpi.h>
16#include <linux/regmap.h>
17
18#include "bmc150_magn.h"
19
20static int bmc150_magn_spi_probe(struct spi_device *spi)
21{
22 struct regmap *regmap;
23 const struct spi_device_id *id = spi_get_device_id(spi);
24
25 regmap = devm_regmap_init_spi(spi, &bmc150_magn_regmap_config);
26 if (IS_ERR(regmap)) {
27 dev_err(&spi->dev, "Failed to register spi regmap %d\n",
28 (int)PTR_ERR(regmap));
29 return PTR_ERR(regmap);
30 }
31 return bmc150_magn_probe(&spi->dev, regmap, spi->irq, id->name);
32}
33
34static int bmc150_magn_spi_remove(struct spi_device *spi)
35{
36 bmc150_magn_remove(&spi->dev);
37
38 return 0;
39}
40
41static const struct spi_device_id bmc150_magn_spi_id[] = {
42 {"bmc150_magn", 0},
43 {"bmc156_magn", 0},
44 {}
45};
46MODULE_DEVICE_TABLE(spi, bmc150_magn_spi_id);
47
48static const struct acpi_device_id bmc150_magn_acpi_match[] = {
49 {"BMC150B", 0},
50 {"BMC156B", 0},
51 {},
52};
53MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
54
55static struct spi_driver bmc150_magn_spi_driver = {
56 .probe = bmc150_magn_spi_probe,
57 .remove = bmc150_magn_spi_remove,
58 .id_table = bmc150_magn_spi_id,
59 .driver = {
60 .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
61 .name = "bmc150_magn_spi",
62 },
63};
64module_spi_driver(bmc150_magn_spi_driver);
65
66MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
67MODULE_DESCRIPTION("BMC150 magnetometer SPI driver");
68MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 501f858df413..62036d2a9956 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -484,6 +484,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
484 .mask_int1 = ST_MAGN_3_DRDY_INT_MASK, 484 .mask_int1 = ST_MAGN_3_DRDY_INT_MASK,
485 .addr_ihl = ST_MAGN_3_IHL_IRQ_ADDR, 485 .addr_ihl = ST_MAGN_3_IHL_IRQ_ADDR,
486 .mask_ihl = ST_MAGN_3_IHL_IRQ_MASK, 486 .mask_ihl = ST_MAGN_3_IHL_IRQ_MASK,
487 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
487 }, 488 },
488 .multi_read_bit = ST_MAGN_3_MULTIREAD_BIT, 489 .multi_read_bit = ST_MAGN_3_MULTIREAD_BIT,
489 .bootime = 2, 490 .bootime = 2,
diff --git a/drivers/iio/potentiometer/Kconfig b/drivers/iio/potentiometer/Kconfig
index ffc735c168fb..6acb23810bb4 100644
--- a/drivers/iio/potentiometer/Kconfig
+++ b/drivers/iio/potentiometer/Kconfig
@@ -5,6 +5,34 @@
5 5
6menu "Digital potentiometers" 6menu "Digital potentiometers"
7 7
8config DS1803
9 tristate "Maxim Integrated DS1803 Digital Potentiometer driver"
10 depends on I2C
11 help
12 Say yes here to build support for the Maxim Integrated DS1803
13 digital potentiomenter chip.
14
15 To compile this driver as a module, choose M here: the
16 module will be called ds1803.
17
18config MCP4131
19 tristate "Microchip MCP413X/414X/415X/416X/423X/424X/425X/426X Digital Potentiometer driver"
20 depends on SPI
21 help
22 Say yes here to build support for the Microchip
23 MCP4131, MCP4132,
24 MCP4141, MCP4142,
25 MCP4151, MCP4152,
26 MCP4161, MCP4162,
27 MCP4231, MCP4232,
28 MCP4241, MCP4242,
29 MCP4251, MCP4252,
30 MCP4261, MCP4262,
31 digital potentiomenter chips.
32
33 To compile this driver as a module, choose M here: the
34 module will be called mcp4131.
35
8config MCP4531 36config MCP4531
9 tristate "Microchip MCP45xx/MCP46xx Digital Potentiometer driver" 37 tristate "Microchip MCP45xx/MCP46xx Digital Potentiometer driver"
10 depends on I2C 38 depends on I2C
diff --git a/drivers/iio/potentiometer/Makefile b/drivers/iio/potentiometer/Makefile
index b563b492b486..6007faa2fb02 100644
--- a/drivers/iio/potentiometer/Makefile
+++ b/drivers/iio/potentiometer/Makefile
@@ -3,5 +3,7 @@
3# 3#
4 4
5# When adding new entries keep the list in alphabetical order 5# When adding new entries keep the list in alphabetical order
6obj-$(CONFIG_DS1803) += ds1803.o
7obj-$(CONFIG_MCP4131) += mcp4131.o
6obj-$(CONFIG_MCP4531) += mcp4531.o 8obj-$(CONFIG_MCP4531) += mcp4531.o
7obj-$(CONFIG_TPL0102) += tpl0102.o 9obj-$(CONFIG_TPL0102) += tpl0102.o
diff --git a/drivers/iio/potentiometer/ds1803.c b/drivers/iio/potentiometer/ds1803.c
new file mode 100644
index 000000000000..fb9e2a337dc2
--- /dev/null
+++ b/drivers/iio/potentiometer/ds1803.c
@@ -0,0 +1,173 @@
1/*
2 * Maxim Integrated DS1803 digital potentiometer driver
3 * Copyright (c) 2016 Slawomir Stepien
4 *
5 * Datasheet: https://datasheets.maximintegrated.com/en/ds/DS1803.pdf
6 *
7 * DEVID #Wipers #Positions Resistor Opts (kOhm) i2c address
8 * ds1803 2 256 10, 50, 100 0101xxx
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 */
14
15#include <linux/err.h>
16#include <linux/export.h>
17#include <linux/i2c.h>
18#include <linux/iio/iio.h>
19#include <linux/module.h>
20#include <linux/of.h>
21
22#define DS1803_MAX_POS 255
23#define DS1803_WRITE(chan) (0xa8 | ((chan) + 1))
24
25enum ds1803_type {
26 DS1803_010,
27 DS1803_050,
28 DS1803_100,
29};
30
31struct ds1803_cfg {
32 int kohms;
33};
34
35static const struct ds1803_cfg ds1803_cfg[] = {
36 [DS1803_010] = { .kohms = 10, },
37 [DS1803_050] = { .kohms = 50, },
38 [DS1803_100] = { .kohms = 100, },
39};
40
41struct ds1803_data {
42 struct i2c_client *client;
43 const struct ds1803_cfg *cfg;
44};
45
46#define DS1803_CHANNEL(ch) { \
47 .type = IIO_RESISTANCE, \
48 .indexed = 1, \
49 .output = 1, \
50 .channel = (ch), \
51 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
52 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
53}
54
55static const struct iio_chan_spec ds1803_channels[] = {
56 DS1803_CHANNEL(0),
57 DS1803_CHANNEL(1),
58};
59
60static int ds1803_read_raw(struct iio_dev *indio_dev,
61 struct iio_chan_spec const *chan,
62 int *val, int *val2, long mask)
63{
64 struct ds1803_data *data = iio_priv(indio_dev);
65 int pot = chan->channel;
66 int ret;
67 u8 result[indio_dev->num_channels];
68
69 switch (mask) {
70 case IIO_CHAN_INFO_RAW:
71 ret = i2c_master_recv(data->client, result,
72 indio_dev->num_channels);
73 if (ret < 0)
74 return ret;
75
76 *val = result[pot];
77 return IIO_VAL_INT;
78
79 case IIO_CHAN_INFO_SCALE:
80 *val = 1000 * data->cfg->kohms;
81 *val2 = DS1803_MAX_POS;
82 return IIO_VAL_FRACTIONAL;
83 }
84
85 return -EINVAL;
86}
87
88static int ds1803_write_raw(struct iio_dev *indio_dev,
89 struct iio_chan_spec const *chan,
90 int val, int val2, long mask)
91{
92 struct ds1803_data *data = iio_priv(indio_dev);
93 int pot = chan->channel;
94
95 if (val2 != 0)
96 return -EINVAL;
97
98 switch (mask) {
99 case IIO_CHAN_INFO_RAW:
100 if (val > DS1803_MAX_POS || val < 0)
101 return -EINVAL;
102 break;
103 default:
104 return -EINVAL;
105 }
106
107 return i2c_smbus_write_byte_data(data->client, DS1803_WRITE(pot), val);
108}
109
110static const struct iio_info ds1803_info = {
111 .read_raw = ds1803_read_raw,
112 .write_raw = ds1803_write_raw,
113 .driver_module = THIS_MODULE,
114};
115
116static int ds1803_probe(struct i2c_client *client,
117 const struct i2c_device_id *id)
118{
119 struct device *dev = &client->dev;
120 struct ds1803_data *data;
121 struct iio_dev *indio_dev;
122
123 indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
124 if (!indio_dev)
125 return -ENOMEM;
126
127 i2c_set_clientdata(client, indio_dev);
128
129 data = iio_priv(indio_dev);
130 data->client = client;
131 data->cfg = &ds1803_cfg[id->driver_data];
132
133 indio_dev->dev.parent = dev;
134 indio_dev->info = &ds1803_info;
135 indio_dev->channels = ds1803_channels;
136 indio_dev->num_channels = ARRAY_SIZE(ds1803_channels);
137 indio_dev->name = client->name;
138
139 return devm_iio_device_register(dev, indio_dev);
140}
141
142#if defined(CONFIG_OF)
143static const struct of_device_id ds1803_dt_ids[] = {
144 { .compatible = "maxim,ds1803-010", .data = &ds1803_cfg[DS1803_010] },
145 { .compatible = "maxim,ds1803-050", .data = &ds1803_cfg[DS1803_050] },
146 { .compatible = "maxim,ds1803-100", .data = &ds1803_cfg[DS1803_100] },
147 {}
148};
149MODULE_DEVICE_TABLE(of, ds1803_dt_ids);
150#endif /* CONFIG_OF */
151
152static const struct i2c_device_id ds1803_id[] = {
153 { "ds1803-010", DS1803_010 },
154 { "ds1803-050", DS1803_050 },
155 { "ds1803-100", DS1803_100 },
156 {}
157};
158MODULE_DEVICE_TABLE(i2c, ds1803_id);
159
160static struct i2c_driver ds1803_driver = {
161 .driver = {
162 .name = "ds1803",
163 .of_match_table = of_match_ptr(ds1803_dt_ids),
164 },
165 .probe = ds1803_probe,
166 .id_table = ds1803_id,
167};
168
169module_i2c_driver(ds1803_driver);
170
171MODULE_AUTHOR("Slawomir Stepien <sst@poczta.fm>");
172MODULE_DESCRIPTION("DS1803 digital potentiometer");
173MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/potentiometer/mcp4131.c b/drivers/iio/potentiometer/mcp4131.c
new file mode 100644
index 000000000000..4e7e2c6c522c
--- /dev/null
+++ b/drivers/iio/potentiometer/mcp4131.c
@@ -0,0 +1,494 @@
1/*
2 * Industrial I/O driver for Microchip digital potentiometers
3 *
4 * Copyright (c) 2016 Slawomir Stepien
5 * Based on: Peter Rosin's code from mcp4531.c
6 *
7 * Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/22060b.pdf
8 *
9 * DEVID #Wipers #Positions Resistor Opts (kOhm)
10 * mcp4131 1 129 5, 10, 50, 100
11 * mcp4132 1 129 5, 10, 50, 100
12 * mcp4141 1 129 5, 10, 50, 100
13 * mcp4142 1 129 5, 10, 50, 100
14 * mcp4151 1 257 5, 10, 50, 100
15 * mcp4152 1 257 5, 10, 50, 100
16 * mcp4161 1 257 5, 10, 50, 100
17 * mcp4162 1 257 5, 10, 50, 100
18 * mcp4231 2 129 5, 10, 50, 100
19 * mcp4232 2 129 5, 10, 50, 100
20 * mcp4241 2 129 5, 10, 50, 100
21 * mcp4242 2 129 5, 10, 50, 100
22 * mcp4251 2 257 5, 10, 50, 100
23 * mcp4252 2 257 5, 10, 50, 100
24 * mcp4261 2 257 5, 10, 50, 100
25 * mcp4262 2 257 5, 10, 50, 100
26 *
27 * This program is free software; you can redistribute it and/or modify it
28 * under the terms of the GNU General Public License version 2 as published by
29 * the Free Software Foundation.
30 */
31
32/*
33 * TODO:
34 * 1. Write wiper setting to EEPROM for EEPROM capable models.
35 */
36
37#include <linux/cache.h>
38#include <linux/err.h>
39#include <linux/export.h>
40#include <linux/iio/iio.h>
41#include <linux/iio/types.h>
42#include <linux/module.h>
43#include <linux/mutex.h>
44#include <linux/of.h>
45#include <linux/spi/spi.h>
46
47#define MCP4131_WRITE (0x00 << 2)
48#define MCP4131_READ (0x03 << 2)
49
50#define MCP4131_WIPER_SHIFT 4
51#define MCP4131_CMDERR(r) ((r[0]) & 0x02)
52#define MCP4131_RAW(r) ((r[0]) == 0xff ? 0x100 : (r[1]))
53
54struct mcp4131_cfg {
55 int wipers;
56 int max_pos;
57 int kohms;
58};
59
60enum mcp4131_type {
61 MCP413x_502 = 0,
62 MCP413x_103,
63 MCP413x_503,
64 MCP413x_104,
65 MCP414x_502,
66 MCP414x_103,
67 MCP414x_503,
68 MCP414x_104,
69 MCP415x_502,
70 MCP415x_103,
71 MCP415x_503,
72 MCP415x_104,
73 MCP416x_502,
74 MCP416x_103,
75 MCP416x_503,
76 MCP416x_104,
77 MCP423x_502,
78 MCP423x_103,
79 MCP423x_503,
80 MCP423x_104,
81 MCP424x_502,
82 MCP424x_103,
83 MCP424x_503,
84 MCP424x_104,
85 MCP425x_502,
86 MCP425x_103,
87 MCP425x_503,
88 MCP425x_104,
89 MCP426x_502,
90 MCP426x_103,
91 MCP426x_503,
92 MCP426x_104,
93};
94
95static const struct mcp4131_cfg mcp4131_cfg[] = {
96 [MCP413x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
97 [MCP413x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
98 [MCP413x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
99 [MCP413x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
100 [MCP414x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
101 [MCP414x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
102 [MCP414x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
103 [MCP414x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
104 [MCP415x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
105 [MCP415x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
106 [MCP415x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
107 [MCP415x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
108 [MCP416x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
109 [MCP416x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
110 [MCP416x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
111 [MCP416x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
112 [MCP423x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
113 [MCP423x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
114 [MCP423x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
115 [MCP423x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
116 [MCP424x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
117 [MCP424x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
118 [MCP424x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
119 [MCP424x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
120 [MCP425x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
121 [MCP425x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
122 [MCP425x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
123 [MCP425x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
124 [MCP426x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
125 [MCP426x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
126 [MCP426x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
127 [MCP426x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
128};
129
130struct mcp4131_data {
131 struct spi_device *spi;
132 const struct mcp4131_cfg *cfg;
133 struct mutex lock;
134 u8 buf[2] ____cacheline_aligned;
135};
136
137#define MCP4131_CHANNEL(ch) { \
138 .type = IIO_RESISTANCE, \
139 .indexed = 1, \
140 .output = 1, \
141 .channel = (ch), \
142 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
143 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
144}
145
146static const struct iio_chan_spec mcp4131_channels[] = {
147 MCP4131_CHANNEL(0),
148 MCP4131_CHANNEL(1),
149};
150
151static int mcp4131_read(struct spi_device *spi, void *buf, size_t len)
152{
153 struct spi_transfer t = {
154 .tx_buf = buf, /* We need to send addr, cmd and 12 bits */
155 .rx_buf = buf,
156 .len = len,
157 };
158 struct spi_message m;
159
160 spi_message_init(&m);
161 spi_message_add_tail(&t, &m);
162
163 return spi_sync(spi, &m);
164}
165
166static int mcp4131_read_raw(struct iio_dev *indio_dev,
167 struct iio_chan_spec const *chan,
168 int *val, int *val2, long mask)
169{
170 int err;
171 struct mcp4131_data *data = iio_priv(indio_dev);
172 int address = chan->channel;
173
174 switch (mask) {
175 case IIO_CHAN_INFO_RAW:
176 mutex_lock(&data->lock);
177
178 data->buf[0] = (address << MCP4131_WIPER_SHIFT) | MCP4131_READ;
179 data->buf[1] = 0;
180
181 err = mcp4131_read(data->spi, data->buf, 2);
182 if (err) {
183 mutex_unlock(&data->lock);
184 return err;
185 }
186
187 /* Error, bad address/command combination */
188 if (!MCP4131_CMDERR(data->buf)) {
189 mutex_unlock(&data->lock);
190 return -EIO;
191 }
192
193 *val = MCP4131_RAW(data->buf);
194 mutex_unlock(&data->lock);
195
196 return IIO_VAL_INT;
197
198 case IIO_CHAN_INFO_SCALE:
199 *val = 1000 * data->cfg->kohms;
200 *val2 = data->cfg->max_pos;
201 return IIO_VAL_FRACTIONAL;
202 }
203
204 return -EINVAL;
205}
206
207static int mcp4131_write_raw(struct iio_dev *indio_dev,
208 struct iio_chan_spec const *chan,
209 int val, int val2, long mask)
210{
211 int err;
212 struct mcp4131_data *data = iio_priv(indio_dev);
213 int address = chan->channel << MCP4131_WIPER_SHIFT;
214
215 switch (mask) {
216 case IIO_CHAN_INFO_RAW:
217 if (val > data->cfg->max_pos || val < 0)
218 return -EINVAL;
219 break;
220
221 default:
222 return -EINVAL;
223 }
224
225 mutex_lock(&data->lock);
226
227 data->buf[0] = address << MCP4131_WIPER_SHIFT;
228 data->buf[0] |= MCP4131_WRITE | (val >> 8);
229 data->buf[1] = val & 0xFF; /* 8 bits here */
230
231 err = spi_write(data->spi, data->buf, 2);
232 mutex_unlock(&data->lock);
233
234 return err;
235}
236
237static const struct iio_info mcp4131_info = {
238 .read_raw = mcp4131_read_raw,
239 .write_raw = mcp4131_write_raw,
240 .driver_module = THIS_MODULE,
241};
242
243static int mcp4131_probe(struct spi_device *spi)
244{
245 int err;
246 struct device *dev = &spi->dev;
247 unsigned long devid = spi_get_device_id(spi)->driver_data;
248 struct mcp4131_data *data;
249 struct iio_dev *indio_dev;
250
251 indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
252 if (!indio_dev)
253 return -ENOMEM;
254
255 data = iio_priv(indio_dev);
256 spi_set_drvdata(spi, indio_dev);
257 data->spi = spi;
258 data->cfg = &mcp4131_cfg[devid];
259
260 mutex_init(&data->lock);
261
262 indio_dev->dev.parent = dev;
263 indio_dev->info = &mcp4131_info;
264 indio_dev->channels = mcp4131_channels;
265 indio_dev->num_channels = data->cfg->wipers;
266 indio_dev->name = spi_get_device_id(spi)->name;
267
268 err = devm_iio_device_register(dev, indio_dev);
269 if (err) {
270 dev_info(&spi->dev, "Unable to register %s\n", indio_dev->name);
271 return err;
272 }
273
274 return 0;
275}
276
#if defined(CONFIG_OF)

/*
 * Build one "microchip,<device>-<resistance>" match entry; adjacent
 * string literals concatenate, so the compatible strings are identical
 * to spelling them out in full.
 */
#define MCP4131_COMPAT(device, res, cfg) \
	{ .compatible = "microchip," device "-" res, \
	  .data = &mcp4131_cfg[cfg] }

static const struct of_device_id mcp4131_dt_ids[] = {
	MCP4131_COMPAT("mcp4131", "502", MCP413x_502),
	MCP4131_COMPAT("mcp4131", "103", MCP413x_103),
	MCP4131_COMPAT("mcp4131", "503", MCP413x_503),
	MCP4131_COMPAT("mcp4131", "104", MCP413x_104),
	MCP4131_COMPAT("mcp4132", "502", MCP413x_502),
	MCP4131_COMPAT("mcp4132", "103", MCP413x_103),
	MCP4131_COMPAT("mcp4132", "503", MCP413x_503),
	MCP4131_COMPAT("mcp4132", "104", MCP413x_104),
	MCP4131_COMPAT("mcp4141", "502", MCP414x_502),
	MCP4131_COMPAT("mcp4141", "103", MCP414x_103),
	MCP4131_COMPAT("mcp4141", "503", MCP414x_503),
	MCP4131_COMPAT("mcp4141", "104", MCP414x_104),
	MCP4131_COMPAT("mcp4142", "502", MCP414x_502),
	MCP4131_COMPAT("mcp4142", "103", MCP414x_103),
	MCP4131_COMPAT("mcp4142", "503", MCP414x_503),
	MCP4131_COMPAT("mcp4142", "104", MCP414x_104),
	MCP4131_COMPAT("mcp4151", "502", MCP415x_502),
	MCP4131_COMPAT("mcp4151", "103", MCP415x_103),
	MCP4131_COMPAT("mcp4151", "503", MCP415x_503),
	MCP4131_COMPAT("mcp4151", "104", MCP415x_104),
	MCP4131_COMPAT("mcp4152", "502", MCP415x_502),
	MCP4131_COMPAT("mcp4152", "103", MCP415x_103),
	MCP4131_COMPAT("mcp4152", "503", MCP415x_503),
	MCP4131_COMPAT("mcp4152", "104", MCP415x_104),
	MCP4131_COMPAT("mcp4161", "502", MCP416x_502),
	MCP4131_COMPAT("mcp4161", "103", MCP416x_103),
	MCP4131_COMPAT("mcp4161", "503", MCP416x_503),
	MCP4131_COMPAT("mcp4161", "104", MCP416x_104),
	MCP4131_COMPAT("mcp4162", "502", MCP416x_502),
	MCP4131_COMPAT("mcp4162", "103", MCP416x_103),
	MCP4131_COMPAT("mcp4162", "503", MCP416x_503),
	MCP4131_COMPAT("mcp4162", "104", MCP416x_104),
	MCP4131_COMPAT("mcp4231", "502", MCP423x_502),
	MCP4131_COMPAT("mcp4231", "103", MCP423x_103),
	MCP4131_COMPAT("mcp4231", "503", MCP423x_503),
	MCP4131_COMPAT("mcp4231", "104", MCP423x_104),
	MCP4131_COMPAT("mcp4232", "502", MCP423x_502),
	MCP4131_COMPAT("mcp4232", "103", MCP423x_103),
	MCP4131_COMPAT("mcp4232", "503", MCP423x_503),
	MCP4131_COMPAT("mcp4232", "104", MCP423x_104),
	MCP4131_COMPAT("mcp4241", "502", MCP424x_502),
	MCP4131_COMPAT("mcp4241", "103", MCP424x_103),
	MCP4131_COMPAT("mcp4241", "503", MCP424x_503),
	MCP4131_COMPAT("mcp4241", "104", MCP424x_104),
	MCP4131_COMPAT("mcp4242", "502", MCP424x_502),
	MCP4131_COMPAT("mcp4242", "103", MCP424x_103),
	MCP4131_COMPAT("mcp4242", "503", MCP424x_503),
	MCP4131_COMPAT("mcp4242", "104", MCP424x_104),
	MCP4131_COMPAT("mcp4251", "502", MCP425x_502),
	MCP4131_COMPAT("mcp4251", "103", MCP425x_103),
	MCP4131_COMPAT("mcp4251", "503", MCP425x_503),
	MCP4131_COMPAT("mcp4251", "104", MCP425x_104),
	MCP4131_COMPAT("mcp4252", "502", MCP425x_502),
	MCP4131_COMPAT("mcp4252", "103", MCP425x_103),
	MCP4131_COMPAT("mcp4252", "503", MCP425x_503),
	MCP4131_COMPAT("mcp4252", "104", MCP425x_104),
	MCP4131_COMPAT("mcp4261", "502", MCP426x_502),
	MCP4131_COMPAT("mcp4261", "103", MCP426x_103),
	MCP4131_COMPAT("mcp4261", "503", MCP426x_503),
	MCP4131_COMPAT("mcp4261", "104", MCP426x_104),
	MCP4131_COMPAT("mcp4262", "502", MCP426x_502),
	MCP4131_COMPAT("mcp4262", "103", MCP426x_103),
	MCP4131_COMPAT("mcp4262", "503", MCP426x_503),
	MCP4131_COMPAT("mcp4262", "104", MCP426x_104),
	{}
};
MODULE_DEVICE_TABLE(of, mcp4131_dt_ids);
#endif /* CONFIG_OF */
411
412static const struct spi_device_id mcp4131_id[] = {
413 { "mcp4131-502", MCP413x_502 },
414 { "mcp4131-103", MCP413x_103 },
415 { "mcp4131-503", MCP413x_503 },
416 { "mcp4131-104", MCP413x_104 },
417 { "mcp4132-502", MCP413x_502 },
418 { "mcp4132-103", MCP413x_103 },
419 { "mcp4132-503", MCP413x_503 },
420 { "mcp4132-104", MCP413x_104 },
421 { "mcp4141-502", MCP414x_502 },
422 { "mcp4141-103", MCP414x_103 },
423 { "mcp4141-503", MCP414x_503 },
424 { "mcp4141-104", MCP414x_104 },
425 { "mcp4142-502", MCP414x_502 },
426 { "mcp4142-103", MCP414x_103 },
427 { "mcp4142-503", MCP414x_503 },
428 { "mcp4142-104", MCP414x_104 },
429 { "mcp4151-502", MCP415x_502 },
430 { "mcp4151-103", MCP415x_103 },
431 { "mcp4151-503", MCP415x_503 },
432 { "mcp4151-104", MCP415x_104 },
433 { "mcp4152-502", MCP415x_502 },
434 { "mcp4152-103", MCP415x_103 },
435 { "mcp4152-503", MCP415x_503 },
436 { "mcp4152-104", MCP415x_104 },
437 { "mcp4161-502", MCP416x_502 },
438 { "mcp4161-103", MCP416x_103 },
439 { "mcp4161-503", MCP416x_503 },
440 { "mcp4161-104", MCP416x_104 },
441 { "mcp4162-502", MCP416x_502 },
442 { "mcp4162-103", MCP416x_103 },
443 { "mcp4162-503", MCP416x_503 },
444 { "mcp4162-104", MCP416x_104 },
445 { "mcp4231-502", MCP423x_502 },
446 { "mcp4231-103", MCP423x_103 },
447 { "mcp4231-503", MCP423x_503 },
448 { "mcp4231-104", MCP423x_104 },
449 { "mcp4232-502", MCP423x_502 },
450 { "mcp4232-103", MCP423x_103 },
451 { "mcp4232-503", MCP423x_503 },
452 { "mcp4232-104", MCP423x_104 },
453 { "mcp4241-502", MCP424x_502 },
454 { "mcp4241-103", MCP424x_103 },
455 { "mcp4241-503", MCP424x_503 },
456 { "mcp4241-104", MCP424x_104 },
457 { "mcp4242-502", MCP424x_502 },
458 { "mcp4242-103", MCP424x_103 },
459 { "mcp4242-503", MCP424x_503 },
460 { "mcp4242-104", MCP424x_104 },
461 { "mcp4251-502", MCP425x_502 },
462 { "mcp4251-103", MCP425x_103 },
463 { "mcp4251-503", MCP425x_503 },
464 { "mcp4251-104", MCP425x_104 },
465 { "mcp4252-502", MCP425x_502 },
466 { "mcp4252-103", MCP425x_103 },
467 { "mcp4252-503", MCP425x_503 },
468 { "mcp4252-104", MCP425x_104 },
469 { "mcp4261-502", MCP426x_502 },
470 { "mcp4261-103", MCP426x_103 },
471 { "mcp4261-503", MCP426x_503 },
472 { "mcp4261-104", MCP426x_104 },
473 { "mcp4262-502", MCP426x_502 },
474 { "mcp4262-103", MCP426x_103 },
475 { "mcp4262-503", MCP426x_503 },
476 { "mcp4262-104", MCP426x_104 },
477 {}
478};
479MODULE_DEVICE_TABLE(spi, mcp4131_id);
480
481static struct spi_driver mcp4131_driver = {
482 .driver = {
483 .name = "mcp4131",
484 .of_match_table = of_match_ptr(mcp4131_dt_ids),
485 },
486 .probe = mcp4131_probe,
487 .id_table = mcp4131_id,
488};
489
490module_spi_driver(mcp4131_driver);
491
492MODULE_AUTHOR("Slawomir Stepien <sst@poczta.fm>");
493MODULE_DESCRIPTION("MCP4131 digital potentiometer");
494MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 0db67fe14766..3b72e1a595db 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -79,7 +79,7 @@ static const struct mcp4531_cfg mcp4531_cfg[] = {
79 79
80struct mcp4531_data { 80struct mcp4531_data {
81 struct i2c_client *client; 81 struct i2c_client *client;
82 unsigned long devid; 82 const struct mcp4531_cfg *cfg;
83}; 83};
84 84
85#define MCP4531_CHANNEL(ch) { \ 85#define MCP4531_CHANNEL(ch) { \
@@ -113,8 +113,8 @@ static int mcp4531_read_raw(struct iio_dev *indio_dev,
113 *val = ret; 113 *val = ret;
114 return IIO_VAL_INT; 114 return IIO_VAL_INT;
115 case IIO_CHAN_INFO_SCALE: 115 case IIO_CHAN_INFO_SCALE:
116 *val = 1000 * mcp4531_cfg[data->devid].kohms; 116 *val = 1000 * data->cfg->kohms;
117 *val2 = mcp4531_cfg[data->devid].max_pos; 117 *val2 = data->cfg->max_pos;
118 return IIO_VAL_FRACTIONAL; 118 return IIO_VAL_FRACTIONAL;
119 } 119 }
120 120
@@ -130,7 +130,7 @@ static int mcp4531_write_raw(struct iio_dev *indio_dev,
130 130
131 switch (mask) { 131 switch (mask) {
132 case IIO_CHAN_INFO_RAW: 132 case IIO_CHAN_INFO_RAW:
133 if (val > mcp4531_cfg[data->devid].max_pos || val < 0) 133 if (val > data->cfg->max_pos || val < 0)
134 return -EINVAL; 134 return -EINVAL;
135 break; 135 break;
136 default: 136 default:
@@ -152,7 +152,6 @@ static int mcp4531_probe(struct i2c_client *client,
152 const struct i2c_device_id *id) 152 const struct i2c_device_id *id)
153{ 153{
154 struct device *dev = &client->dev; 154 struct device *dev = &client->dev;
155 unsigned long devid = id->driver_data;
156 struct mcp4531_data *data; 155 struct mcp4531_data *data;
157 struct iio_dev *indio_dev; 156 struct iio_dev *indio_dev;
158 157
@@ -168,12 +167,12 @@ static int mcp4531_probe(struct i2c_client *client,
168 data = iio_priv(indio_dev); 167 data = iio_priv(indio_dev);
169 i2c_set_clientdata(client, indio_dev); 168 i2c_set_clientdata(client, indio_dev);
170 data->client = client; 169 data->client = client;
171 data->devid = devid; 170 data->cfg = &mcp4531_cfg[id->driver_data];
172 171
173 indio_dev->dev.parent = dev; 172 indio_dev->dev.parent = dev;
174 indio_dev->info = &mcp4531_info; 173 indio_dev->info = &mcp4531_info;
175 indio_dev->channels = mcp4531_channels; 174 indio_dev->channels = mcp4531_channels;
176 indio_dev->num_channels = mcp4531_cfg[devid].wipers; 175 indio_dev->num_channels = data->cfg->wipers;
177 indio_dev->name = client->name; 176 indio_dev->name = client->name;
178 177
179 return devm_iio_device_register(dev, indio_dev); 178 return devm_iio_device_register(dev, indio_dev);
diff --git a/drivers/iio/potentiometer/tpl0102.c b/drivers/iio/potentiometer/tpl0102.c
index 313124b6fd59..5c304d42d713 100644
--- a/drivers/iio/potentiometer/tpl0102.c
+++ b/drivers/iio/potentiometer/tpl0102.c
@@ -118,7 +118,7 @@ static int tpl0102_probe(struct i2c_client *client,
118 118
119 if (!i2c_check_functionality(client->adapter, 119 if (!i2c_check_functionality(client->adapter,
120 I2C_FUNC_SMBUS_WORD_DATA)) 120 I2C_FUNC_SMBUS_WORD_DATA))
121 return -ENOTSUPP; 121 return -EOPNOTSUPP;
122 122
123 indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); 123 indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
124 if (!indio_dev) 124 if (!indio_dev)
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 31c0e1fd2202..cda9f128f3a4 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -6,12 +6,13 @@
6menu "Pressure sensors" 6menu "Pressure sensors"
7 7
8config BMP280 8config BMP280
9 tristate "Bosch Sensortec BMP280 pressure sensor driver" 9 tristate "Bosch Sensortec BMP180 and BMP280 pressure sensor driver"
10 depends on I2C 10 depends on I2C
11 depends on !(BMP085_I2C=y || BMP085_I2C=m)
11 select REGMAP_I2C 12 select REGMAP_I2C
12 help 13 help
13 Say yes here to build support for Bosch Sensortec BMP280 14 Say yes here to build support for Bosch Sensortec BMP180 and BMP280
14 pressure and temperature sensor. 15 pressure and temperature sensors.
15 16
16 To compile this driver as a module, choose M here: the module 17 To compile this driver as a module, choose M here: the module
17 will be called bmp280. 18 will be called bmp280.
@@ -30,6 +31,17 @@ config HID_SENSOR_PRESS
30 To compile this driver as a module, choose M here: the module 31 To compile this driver as a module, choose M here: the module
31 will be called hid-sensor-press. 32 will be called hid-sensor-press.
32 33
34config HP03
35 tristate "Hope RF HP03 temperature and pressure sensor driver"
36 depends on I2C
37 select REGMAP_I2C
38 help
39 Say yes here to build support for Hope RF HP03 pressure and
40 temperature sensor.
41
42 To compile this driver as a module, choose M here: the module
43 will be called hp03.
44
33config MPL115 45config MPL115
34 tristate 46 tristate
35 47
@@ -148,4 +160,14 @@ config T5403
148 To compile this driver as a module, choose M here: the module 160 To compile this driver as a module, choose M here: the module
149 will be called t5403. 161 will be called t5403.
150 162
163config HP206C
164 tristate "HOPERF HP206C precision barometer and altimeter sensor"
165 depends on I2C
166 help
 167	  Say yes here to build support for the HOPERF HP206C precision
168 barometer and altimeter sensor.
169
170 This driver can also be built as a module. If so, the module will
171 be called hp206c.
172
151endmenu 173endmenu
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index d336af14f3fe..17d6e7afa1ff 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -5,6 +5,7 @@
5# When adding new entries keep the list in alphabetical order 5# When adding new entries keep the list in alphabetical order
6obj-$(CONFIG_BMP280) += bmp280.o 6obj-$(CONFIG_BMP280) += bmp280.o
7obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o 7obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
8obj-$(CONFIG_HP03) += hp03.o
8obj-$(CONFIG_MPL115) += mpl115.o 9obj-$(CONFIG_MPL115) += mpl115.o
9obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o 10obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o
10obj-$(CONFIG_MPL115_SPI) += mpl115_spi.o 11obj-$(CONFIG_MPL115_SPI) += mpl115_spi.o
@@ -17,6 +18,7 @@ obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o
17st_pressure-y := st_pressure_core.o 18st_pressure-y := st_pressure_core.o
18st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o 19st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o
19obj-$(CONFIG_T5403) += t5403.o 20obj-$(CONFIG_T5403) += t5403.o
21obj-$(CONFIG_HP206C) += hp206c.o
20 22
21obj-$(CONFIG_IIO_ST_PRESS_I2C) += st_pressure_i2c.o 23obj-$(CONFIG_IIO_ST_PRESS_I2C) += st_pressure_i2c.o
22obj-$(CONFIG_IIO_ST_PRESS_SPI) += st_pressure_spi.o 24obj-$(CONFIG_IIO_ST_PRESS_SPI) += st_pressure_spi.o
diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280.c
index a2602d8dd6d5..2f1498e12bb2 100644
--- a/drivers/iio/pressure/bmp280.c
+++ b/drivers/iio/pressure/bmp280.c
@@ -1,12 +1,15 @@
1/* 1/*
2 * Copyright (c) 2014 Intel Corporation 2 * Copyright (c) 2014 Intel Corporation
3 * 3 *
4 * Driver for Bosch Sensortec BMP280 digital pressure sensor. 4 * Driver for Bosch Sensortec BMP180 and BMP280 digital pressure sensor.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 * 9 *
10 * Datasheet:
11 * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP180-DS000-121.pdf
12 * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP280-DS001-12.pdf
10 */ 13 */
11 14
12#define pr_fmt(fmt) "bmp280: " fmt 15#define pr_fmt(fmt) "bmp280: " fmt
@@ -15,9 +18,11 @@
15#include <linux/i2c.h> 18#include <linux/i2c.h>
16#include <linux/acpi.h> 19#include <linux/acpi.h>
17#include <linux/regmap.h> 20#include <linux/regmap.h>
21#include <linux/delay.h>
18#include <linux/iio/iio.h> 22#include <linux/iio/iio.h>
19#include <linux/iio/sysfs.h> 23#include <linux/iio/sysfs.h>
20 24
25/* BMP280 specific registers */
21#define BMP280_REG_TEMP_XLSB 0xFC 26#define BMP280_REG_TEMP_XLSB 0xFC
22#define BMP280_REG_TEMP_LSB 0xFB 27#define BMP280_REG_TEMP_LSB 0xFB
23#define BMP280_REG_TEMP_MSB 0xFA 28#define BMP280_REG_TEMP_MSB 0xFA
@@ -26,10 +31,7 @@
26#define BMP280_REG_PRESS_MSB 0xF7 31#define BMP280_REG_PRESS_MSB 0xF7
27 32
28#define BMP280_REG_CONFIG 0xF5 33#define BMP280_REG_CONFIG 0xF5
29#define BMP280_REG_CTRL_MEAS 0xF4
30#define BMP280_REG_STATUS 0xF3 34#define BMP280_REG_STATUS 0xF3
31#define BMP280_REG_RESET 0xE0
32#define BMP280_REG_ID 0xD0
33 35
34#define BMP280_REG_COMP_TEMP_START 0x88 36#define BMP280_REG_COMP_TEMP_START 0x88
35#define BMP280_COMP_TEMP_REG_COUNT 6 37#define BMP280_COMP_TEMP_REG_COUNT 6
@@ -46,25 +48,49 @@
46 48
47#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5)) 49#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5))
48#define BMP280_OSRS_TEMP_SKIP 0 50#define BMP280_OSRS_TEMP_SKIP 0
49#define BMP280_OSRS_TEMP_1X BIT(5) 51#define BMP280_OSRS_TEMP_X(osrs_t) ((osrs_t) << 5)
50#define BMP280_OSRS_TEMP_2X BIT(6) 52#define BMP280_OSRS_TEMP_1X BMP280_OSRS_TEMP_X(1)
51#define BMP280_OSRS_TEMP_4X (BIT(6) | BIT(5)) 53#define BMP280_OSRS_TEMP_2X BMP280_OSRS_TEMP_X(2)
52#define BMP280_OSRS_TEMP_8X BIT(7) 54#define BMP280_OSRS_TEMP_4X BMP280_OSRS_TEMP_X(3)
53#define BMP280_OSRS_TEMP_16X (BIT(7) | BIT(5)) 55#define BMP280_OSRS_TEMP_8X BMP280_OSRS_TEMP_X(4)
56#define BMP280_OSRS_TEMP_16X BMP280_OSRS_TEMP_X(5)
54 57
55#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2)) 58#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2))
56#define BMP280_OSRS_PRESS_SKIP 0 59#define BMP280_OSRS_PRESS_SKIP 0
57#define BMP280_OSRS_PRESS_1X BIT(2) 60#define BMP280_OSRS_PRESS_X(osrs_p) ((osrs_p) << 2)
58#define BMP280_OSRS_PRESS_2X BIT(3) 61#define BMP280_OSRS_PRESS_1X BMP280_OSRS_PRESS_X(1)
59#define BMP280_OSRS_PRESS_4X (BIT(3) | BIT(2)) 62#define BMP280_OSRS_PRESS_2X BMP280_OSRS_PRESS_X(2)
60#define BMP280_OSRS_PRESS_8X BIT(4) 63#define BMP280_OSRS_PRESS_4X BMP280_OSRS_PRESS_X(3)
61#define BMP280_OSRS_PRESS_16X (BIT(4) | BIT(2)) 64#define BMP280_OSRS_PRESS_8X BMP280_OSRS_PRESS_X(4)
65#define BMP280_OSRS_PRESS_16X BMP280_OSRS_PRESS_X(5)
62 66
63#define BMP280_MODE_MASK (BIT(1) | BIT(0)) 67#define BMP280_MODE_MASK (BIT(1) | BIT(0))
64#define BMP280_MODE_SLEEP 0 68#define BMP280_MODE_SLEEP 0
65#define BMP280_MODE_FORCED BIT(0) 69#define BMP280_MODE_FORCED BIT(0)
66#define BMP280_MODE_NORMAL (BIT(1) | BIT(0)) 70#define BMP280_MODE_NORMAL (BIT(1) | BIT(0))
67 71
72/* BMP180 specific registers */
73#define BMP180_REG_OUT_XLSB 0xF8
74#define BMP180_REG_OUT_LSB 0xF7
75#define BMP180_REG_OUT_MSB 0xF6
76
77#define BMP180_REG_CALIB_START 0xAA
78#define BMP180_REG_CALIB_COUNT 22
79
80#define BMP180_MEAS_SCO BIT(5)
81#define BMP180_MEAS_TEMP (0x0E | BMP180_MEAS_SCO)
82#define BMP180_MEAS_PRESS_X(oss) ((oss) << 6 | 0x14 | BMP180_MEAS_SCO)
83#define BMP180_MEAS_PRESS_1X BMP180_MEAS_PRESS_X(0)
84#define BMP180_MEAS_PRESS_2X BMP180_MEAS_PRESS_X(1)
85#define BMP180_MEAS_PRESS_4X BMP180_MEAS_PRESS_X(2)
86#define BMP180_MEAS_PRESS_8X BMP180_MEAS_PRESS_X(3)
87
88/* BMP180 and BMP280 common registers */
89#define BMP280_REG_CTRL_MEAS 0xF4
90#define BMP280_REG_RESET 0xE0
91#define BMP280_REG_ID 0xD0
92
93#define BMP180_CHIP_ID 0x55
68#define BMP280_CHIP_ID 0x58 94#define BMP280_CHIP_ID 0x58
69#define BMP280_SOFT_RESET_VAL 0xB6 95#define BMP280_SOFT_RESET_VAL 0xB6
70 96
@@ -72,6 +98,11 @@ struct bmp280_data {
72 struct i2c_client *client; 98 struct i2c_client *client;
73 struct mutex lock; 99 struct mutex lock;
74 struct regmap *regmap; 100 struct regmap *regmap;
101 const struct bmp280_chip_info *chip_info;
102
103 /* log of base 2 of oversampling rate */
104 u8 oversampling_press;
105 u8 oversampling_temp;
75 106
76 /* 107 /*
77 * Carryover value from temperature conversion, used in pressure 108 * Carryover value from temperature conversion, used in pressure
@@ -80,9 +111,23 @@ struct bmp280_data {
80 s32 t_fine; 111 s32 t_fine;
81}; 112};
82 113
114struct bmp280_chip_info {
115 const struct regmap_config *regmap_config;
116
117 const int *oversampling_temp_avail;
118 int num_oversampling_temp_avail;
119
120 const int *oversampling_press_avail;
121 int num_oversampling_press_avail;
122
123 int (*chip_config)(struct bmp280_data *);
124 int (*read_temp)(struct bmp280_data *, int *);
125 int (*read_press)(struct bmp280_data *, int *, int *);
126};
127
83/* 128/*
84 * These enums are used for indexing into the array of compensation 129 * These enums are used for indexing into the array of compensation
85 * parameters. 130 * parameters for BMP280.
86 */ 131 */
87enum { T1, T2, T3 }; 132enum { T1, T2, T3 };
88enum { P1, P2, P3, P4, P5, P6, P7, P8, P9 }; 133enum { P1, P2, P3, P4, P5, P6, P7, P8, P9 };
@@ -90,11 +135,13 @@ enum { P1, P2, P3, P4, P5, P6, P7, P8, P9 };
90static const struct iio_chan_spec bmp280_channels[] = { 135static const struct iio_chan_spec bmp280_channels[] = {
91 { 136 {
92 .type = IIO_PRESSURE, 137 .type = IIO_PRESSURE,
93 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), 138 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
139 BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
94 }, 140 },
95 { 141 {
96 .type = IIO_TEMP, 142 .type = IIO_TEMP,
97 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), 143 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
144 BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
98 }, 145 },
99}; 146};
100 147
@@ -290,10 +337,25 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
290 case IIO_CHAN_INFO_PROCESSED: 337 case IIO_CHAN_INFO_PROCESSED:
291 switch (chan->type) { 338 switch (chan->type) {
292 case IIO_PRESSURE: 339 case IIO_PRESSURE:
293 ret = bmp280_read_press(data, val, val2); 340 ret = data->chip_info->read_press(data, val, val2);
294 break; 341 break;
295 case IIO_TEMP: 342 case IIO_TEMP:
296 ret = bmp280_read_temp(data, val); 343 ret = data->chip_info->read_temp(data, val);
344 break;
345 default:
346 ret = -EINVAL;
347 break;
348 }
349 break;
350 case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
351 switch (chan->type) {
352 case IIO_PRESSURE:
353 *val = 1 << data->oversampling_press;
354 ret = IIO_VAL_INT;
355 break;
356 case IIO_TEMP:
357 *val = 1 << data->oversampling_temp;
358 ret = IIO_VAL_INT;
297 break; 359 break;
298 default: 360 default:
299 ret = -EINVAL; 361 ret = -EINVAL;
@@ -310,22 +372,135 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
310 return ret; 372 return ret;
311} 373}
312 374
375static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
376 int val)
377{
378 int i;
379 const int *avail = data->chip_info->oversampling_temp_avail;
380 const int n = data->chip_info->num_oversampling_temp_avail;
381
382 for (i = 0; i < n; i++) {
383 if (avail[i] == val) {
384 data->oversampling_temp = ilog2(val);
385
386 return data->chip_info->chip_config(data);
387 }
388 }
389 return -EINVAL;
390}
391
392static int bmp280_write_oversampling_ratio_press(struct bmp280_data *data,
393 int val)
394{
395 int i;
396 const int *avail = data->chip_info->oversampling_press_avail;
397 const int n = data->chip_info->num_oversampling_press_avail;
398
399 for (i = 0; i < n; i++) {
400 if (avail[i] == val) {
401 data->oversampling_press = ilog2(val);
402
403 return data->chip_info->chip_config(data);
404 }
405 }
406 return -EINVAL;
407}
408
409static int bmp280_write_raw(struct iio_dev *indio_dev,
410 struct iio_chan_spec const *chan,
411 int val, int val2, long mask)
412{
413 int ret = 0;
414 struct bmp280_data *data = iio_priv(indio_dev);
415
416 switch (mask) {
417 case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
418 mutex_lock(&data->lock);
419 switch (chan->type) {
420 case IIO_PRESSURE:
421 ret = bmp280_write_oversampling_ratio_press(data, val);
422 break;
423 case IIO_TEMP:
424 ret = bmp280_write_oversampling_ratio_temp(data, val);
425 break;
426 default:
427 ret = -EINVAL;
428 break;
429 }
430 mutex_unlock(&data->lock);
431 break;
432 default:
433 return -EINVAL;
434 }
435
436 return ret;
437}
438
439static ssize_t bmp280_show_avail(char *buf, const int *vals, const int n)
440{
441 size_t len = 0;
442 int i;
443
444 for (i = 0; i < n; i++)
445 len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", vals[i]);
446
447 buf[len - 1] = '\n';
448
449 return len;
450}
451
452static ssize_t bmp280_show_temp_oversampling_avail(struct device *dev,
453 struct device_attribute *attr, char *buf)
454{
455 struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
456
457 return bmp280_show_avail(buf, data->chip_info->oversampling_temp_avail,
458 data->chip_info->num_oversampling_temp_avail);
459}
460
461static ssize_t bmp280_show_press_oversampling_avail(struct device *dev,
462 struct device_attribute *attr, char *buf)
463{
464 struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
465
466 return bmp280_show_avail(buf, data->chip_info->oversampling_press_avail,
467 data->chip_info->num_oversampling_press_avail);
468}
469
/* sysfs attributes advertising the supported oversampling ratios. */
static IIO_DEVICE_ATTR(in_temp_oversampling_ratio_available,
	S_IRUGO, bmp280_show_temp_oversampling_avail, NULL, 0);

static IIO_DEVICE_ATTR(in_pressure_oversampling_ratio_available,
	S_IRUGO, bmp280_show_press_oversampling_avail, NULL, 0);

static struct attribute *bmp280_attributes[] = {
	&iio_dev_attr_in_temp_oversampling_ratio_available.dev_attr.attr,
	&iio_dev_attr_in_pressure_oversampling_ratio_available.dev_attr.attr,
	NULL,
};

static const struct attribute_group bmp280_attrs_group = {
	.attrs = bmp280_attributes,
};
485
313static const struct iio_info bmp280_info = { 486static const struct iio_info bmp280_info = {
314 .driver_module = THIS_MODULE, 487 .driver_module = THIS_MODULE,
315 .read_raw = &bmp280_read_raw, 488 .read_raw = &bmp280_read_raw,
489 .write_raw = &bmp280_write_raw,
490 .attrs = &bmp280_attrs_group,
316}; 491};
317 492
318static int bmp280_chip_init(struct bmp280_data *data) 493static int bmp280_chip_config(struct bmp280_data *data)
319{ 494{
320 int ret; 495 int ret;
496 u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) |
497 BMP280_OSRS_PRESS_X(data->oversampling_press + 1);
321 498
322 ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS, 499 ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS,
323 BMP280_OSRS_TEMP_MASK | 500 BMP280_OSRS_TEMP_MASK |
324 BMP280_OSRS_PRESS_MASK | 501 BMP280_OSRS_PRESS_MASK |
325 BMP280_MODE_MASK, 502 BMP280_MODE_MASK,
326 BMP280_OSRS_TEMP_2X | 503 osrs | BMP280_MODE_NORMAL);
327 BMP280_OSRS_PRESS_16X |
328 BMP280_MODE_NORMAL);
329 if (ret < 0) { 504 if (ret < 0) {
330 dev_err(&data->client->dev, 505 dev_err(&data->client->dev,
331 "failed to write ctrl_meas register\n"); 506 "failed to write ctrl_meas register\n");
@@ -344,6 +519,317 @@ static int bmp280_chip_init(struct bmp280_data *data)
344 return ret; 519 return ret;
345} 520}
346 521
/* Oversampling ratios supported by the BMP280, same set for both channels. */
static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 };

/* BMP280 per-chip operations and parameter tables. */
static const struct bmp280_chip_info bmp280_chip_info = {
	.regmap_config = &bmp280_regmap_config,

	.oversampling_temp_avail = bmp280_oversampling_avail,
	.num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),

	.oversampling_press_avail = bmp280_oversampling_avail,
	.num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail),

	.chip_config = bmp280_chip_config,
	.read_temp = bmp280_read_temp,
	.read_press = bmp280_read_press,
};
537
538static bool bmp180_is_writeable_reg(struct device *dev, unsigned int reg)
539{
540 switch (reg) {
541 case BMP280_REG_CTRL_MEAS:
542 case BMP280_REG_RESET:
543 return true;
544 default:
545 return false;
546 };
547}
548
549static bool bmp180_is_volatile_reg(struct device *dev, unsigned int reg)
550{
551 switch (reg) {
552 case BMP180_REG_OUT_XLSB:
553 case BMP180_REG_OUT_LSB:
554 case BMP180_REG_OUT_MSB:
555 case BMP280_REG_CTRL_MEAS:
556 return true;
557 default:
558 return false;
559 }
560}
561
/*
 * regmap description for the BMP180: 8-bit registers.  Only the output
 * and ctrl_meas registers are volatile, so calibration-EEPROM reads
 * after the first one are served from the regcache.
 */
static const struct regmap_config bmp180_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,

	.max_register = BMP180_REG_OUT_XLSB,
	.cache_type = REGCACHE_RBTREE,

	.writeable_reg = bmp180_is_writeable_reg,
	.volatile_reg = bmp180_is_volatile_reg,
};
572
573static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
574{
575 int ret;
576 const int conversion_time_max[] = { 4500, 7500, 13500, 25500 };
577 unsigned int delay_us;
578 unsigned int ctrl;
579
580 ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas);
581 if (ret)
582 return ret;
583
584 if (ctrl_meas == BMP180_MEAS_TEMP)
585 delay_us = 4500;
586 else
587 delay_us = conversion_time_max[data->oversampling_press];
588
589 usleep_range(delay_us, delay_us + 1000);
590
591 ret = regmap_read(data->regmap, BMP280_REG_CTRL_MEAS, &ctrl);
592 if (ret)
593 return ret;
594
595 /* The value of this bit reset to "0" after conversion is complete */
596 if (ctrl & BMP180_MEAS_SCO)
597 return -EIO;
598
599 return 0;
600}
601
602static int bmp180_read_adc_temp(struct bmp280_data *data, int *val)
603{
604 int ret;
605 __be16 tmp = 0;
606
607 ret = bmp180_measure(data, BMP180_MEAS_TEMP);
608 if (ret)
609 return ret;
610
611 ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, (u8 *)&tmp, 2);
612 if (ret)
613 return ret;
614
615 *val = be16_to_cpu(tmp);
616
617 return 0;
618}
619
/*
 * These enums are used for indexing into the array of calibration
 * coefficients for BMP180.
 */
enum { AC1, AC2, AC3, AC4, AC5, AC6, B1, B2, MB, MC, MD };

/*
 * Calibration coefficients from the BMP180 EEPROM.  Field order matches
 * the register layout starting at BMP180_REG_CALIB_START, which is how
 * bmp180_read_calib() indexes its bulk-read buffer.
 */
struct bmp180_calib {
	s16 AC1;
	s16 AC2;
	s16 AC3;
	u16 AC4;
	u16 AC5;
	u16 AC6;
	s16 B1;
	s16 B2;
	s16 MB;
	s16 MC;
	s16 MD;
};
639
640static int bmp180_read_calib(struct bmp280_data *data,
641 struct bmp180_calib *calib)
642{
643 int ret;
644 int i;
645 __be16 buf[BMP180_REG_CALIB_COUNT / 2];
646
647 ret = regmap_bulk_read(data->regmap, BMP180_REG_CALIB_START, buf,
648 sizeof(buf));
649
650 if (ret < 0)
651 return ret;
652
653 /* None of the words has the value 0 or 0xFFFF */
654 for (i = 0; i < ARRAY_SIZE(buf); i++) {
655 if (buf[i] == cpu_to_be16(0) || buf[i] == cpu_to_be16(0xffff))
656 return -EIO;
657 }
658
659 calib->AC1 = be16_to_cpu(buf[AC1]);
660 calib->AC2 = be16_to_cpu(buf[AC2]);
661 calib->AC3 = be16_to_cpu(buf[AC3]);
662 calib->AC4 = be16_to_cpu(buf[AC4]);
663 calib->AC5 = be16_to_cpu(buf[AC5]);
664 calib->AC6 = be16_to_cpu(buf[AC6]);
665 calib->B1 = be16_to_cpu(buf[B1]);
666 calib->B2 = be16_to_cpu(buf[B2]);
667 calib->MB = be16_to_cpu(buf[MB]);
668 calib->MC = be16_to_cpu(buf[MC]);
669 calib->MD = be16_to_cpu(buf[MD]);
670
671 return 0;
672}
673
/*
 * Returns temperature in DegC, resolution is 0.1 DegC.
 * t_fine carries fine temperature as global value.
 *
 * Taken from datasheet, Section 3.5, "Calculating pressure and temperature".
 */
static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
{
	int ret;
	s32 x1, x2;
	struct bmp180_calib calib;

	/*
	 * Calibration is re-read on every conversion; regmap caches the
	 * non-volatile EEPROM range, so this normally hits the regcache.
	 */
	ret = bmp180_read_calib(data, &calib);
	if (ret < 0) {
		dev_err(&data->client->dev,
			"failed to read calibration coefficients\n");
		/*
		 * NOTE(review): the negative errno is returned as if it were
		 * a temperature; callers cannot distinguish the two.
		 * Reading the calibration once at probe would avoid this.
		 */
		return ret;
	}

	x1 = ((adc_temp - calib.AC6) * calib.AC5) >> 15;
	x2 = (calib.MC << 11) / (x1 + calib.MD);
	data->t_fine = x1 + x2;

	return (data->t_fine + 8) >> 4;
}
699
700static int bmp180_read_temp(struct bmp280_data *data, int *val)
701{
702 int ret;
703 s32 adc_temp, comp_temp;
704
705 ret = bmp180_read_adc_temp(data, &adc_temp);
706 if (ret)
707 return ret;
708
709 comp_temp = bmp180_compensate_temp(data, adc_temp);
710
711 /*
712 * val might be NULL if we're called by the read_press routine,
713 * who only cares about the carry over t_fine value.
714 */
715 if (val) {
716 *val = comp_temp * 100;
717 return IIO_VAL_INT;
718 }
719
720 return 0;
721}
722
/*
 * Trigger a pressure conversion at the current oversampling setting and
 * read back the raw ADC value into @val.  Returns 0 or a negative errno.
 */
static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
{
	int ret;
	__be32 tmp = 0;
	u8 oss = data->oversampling_press;

	ret = bmp180_measure(data, BMP180_MEAS_PRESS_X(oss));
	if (ret)
		return ret;

	/* MSB-first: 3 bytes land in the top 24 bits of the __be32. */
	ret = regmap_bulk_read(data->regmap, BMP280_REG_OUT_MSB, (u8 *)&tmp, 3);
	if (ret)
		return ret;

	/*
	 * Drop the unread low byte, then right-align the (16 + oss)-bit
	 * result as the datasheet's "UP = (MSB<<16 | LSB<<8 | XLSB) >> (8-oss)".
	 */
	*val = (be32_to_cpu(tmp) >> 8) >> (8 - oss);

	return 0;
}
741
/*
 * Returns pressure in Pa, resolution is 1 Pa.
 *
 * Taken from datasheet, Section 3.5, "Calculating pressure and temperature".
 * Requires data->t_fine to have been refreshed by a preceding temperature
 * compensation (see bmp180_read_press()).
 */
static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
{
	int ret;
	s32 x1, x2, x3, p;
	s32 b3, b6;
	u32 b4, b7;
	s32 oss = data->oversampling_press;
	struct bmp180_calib calib;

	ret = bmp180_read_calib(data, &calib);
	if (ret < 0) {
		dev_err(&data->client->dev,
			"failed to read calibration coefficients\n");
		/*
		 * NOTE(review): a negative errno returned through a u32
		 * becomes a huge bogus pressure; callers cannot detect the
		 * failure.
		 */
		return ret;
	}

	/* Intermediate names (b3..b7, x1..x3) follow the datasheet. */
	b6 = data->t_fine - 4000;
	x1 = (calib.B2 * (b6 * b6 >> 12)) >> 11;
	x2 = calib.AC2 * b6 >> 11;
	x3 = x1 + x2;
	b3 = ((((s32)calib.AC1 * 4 + x3) << oss) + 2) / 4;
	x1 = calib.AC3 * b6 >> 13;
	x2 = (calib.B1 * ((b6 * b6) >> 12)) >> 16;
	x3 = (x1 + x2 + 2) >> 2;
	b4 = calib.AC4 * (u32)(x3 + 32768) >> 15;
	b7 = ((u32)adc_press - b3) * (50000 >> oss);
	if (b7 < 0x80000000)
		p = (b7 * 2) / b4;
	else
		p = (b7 / b4) * 2;

	x1 = (p >> 8) * (p >> 8);
	x1 = (x1 * 3038) >> 16;
	x2 = (-7357 * p) >> 16;

	return p + ((x1 + x2 + 3791) >> 4);
}
784
785static int bmp180_read_press(struct bmp280_data *data,
786 int *val, int *val2)
787{
788 int ret;
789 s32 adc_press;
790 u32 comp_press;
791
792 /* Read and compensate temperature so we get a reading of t_fine. */
793 ret = bmp180_read_temp(data, NULL);
794 if (ret)
795 return ret;
796
797 ret = bmp180_read_adc_press(data, &adc_press);
798 if (ret)
799 return ret;
800
801 comp_press = bmp180_compensate_press(data, adc_press);
802
803 *val = comp_press;
804 *val2 = 1000;
805
806 return IIO_VAL_FRACTIONAL;
807}
808
/*
 * The BMP180 has no persistent configuration to program: oversampling
 * is encoded into each conversion command by bmp180_measure().
 */
static int bmp180_chip_config(struct bmp280_data *data)
{
	return 0;
}
813
/* BMP180 supports no temperature oversampling and 1x-8x for pressure. */
static const int bmp180_oversampling_temp_avail[] = { 1 };
static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 };

/* BMP180 per-chip operations and parameter tables. */
static const struct bmp280_chip_info bmp180_chip_info = {
	.regmap_config = &bmp180_regmap_config,

	.oversampling_temp_avail = bmp180_oversampling_temp_avail,
	.num_oversampling_temp_avail =
		ARRAY_SIZE(bmp180_oversampling_temp_avail),

	.oversampling_press_avail = bmp180_oversampling_press_avail,
	.num_oversampling_press_avail =
		ARRAY_SIZE(bmp180_oversampling_press_avail),

	.chip_config = bmp180_chip_config,
	.read_temp = bmp180_read_temp,
	.read_press = bmp180_read_press,
};
832
347static int bmp280_probe(struct i2c_client *client, 833static int bmp280_probe(struct i2c_client *client,
348 const struct i2c_device_id *id) 834 const struct i2c_device_id *id)
349{ 835{
@@ -367,7 +853,23 @@ static int bmp280_probe(struct i2c_client *client,
367 indio_dev->info = &bmp280_info; 853 indio_dev->info = &bmp280_info;
368 indio_dev->modes = INDIO_DIRECT_MODE; 854 indio_dev->modes = INDIO_DIRECT_MODE;
369 855
370 data->regmap = devm_regmap_init_i2c(client, &bmp280_regmap_config); 856 switch (id->driver_data) {
857 case BMP180_CHIP_ID:
858 data->chip_info = &bmp180_chip_info;
859 data->oversampling_press = ilog2(8);
860 data->oversampling_temp = ilog2(1);
861 break;
862 case BMP280_CHIP_ID:
863 data->chip_info = &bmp280_chip_info;
864 data->oversampling_press = ilog2(16);
865 data->oversampling_temp = ilog2(2);
866 break;
867 default:
868 return -EINVAL;
869 }
870
871 data->regmap = devm_regmap_init_i2c(client,
872 data->chip_info->regmap_config);
371 if (IS_ERR(data->regmap)) { 873 if (IS_ERR(data->regmap)) {
372 dev_err(&client->dev, "failed to allocate register map\n"); 874 dev_err(&client->dev, "failed to allocate register map\n");
373 return PTR_ERR(data->regmap); 875 return PTR_ERR(data->regmap);
@@ -376,13 +878,13 @@ static int bmp280_probe(struct i2c_client *client,
376 ret = regmap_read(data->regmap, BMP280_REG_ID, &chip_id); 878 ret = regmap_read(data->regmap, BMP280_REG_ID, &chip_id);
377 if (ret < 0) 879 if (ret < 0)
378 return ret; 880 return ret;
379 if (chip_id != BMP280_CHIP_ID) { 881 if (chip_id != id->driver_data) {
380 dev_err(&client->dev, "bad chip id. expected %x got %x\n", 882 dev_err(&client->dev, "bad chip id. expected %x got %x\n",
381 BMP280_CHIP_ID, chip_id); 883 BMP280_CHIP_ID, chip_id);
382 return -EINVAL; 884 return -EINVAL;
383 } 885 }
384 886
385 ret = bmp280_chip_init(data); 887 ret = data->chip_info->chip_config(data);
386 if (ret < 0) 888 if (ret < 0)
387 return ret; 889 return ret;
388 890
@@ -390,13 +892,17 @@ static int bmp280_probe(struct i2c_client *client,
390} 892}
391 893
392static const struct acpi_device_id bmp280_acpi_match[] = { 894static const struct acpi_device_id bmp280_acpi_match[] = {
393 {"BMP0280", 0}, 895 {"BMP0280", BMP280_CHIP_ID },
896 {"BMP0180", BMP180_CHIP_ID },
897 {"BMP0085", BMP180_CHIP_ID },
394 { }, 898 { },
395}; 899};
396MODULE_DEVICE_TABLE(acpi, bmp280_acpi_match); 900MODULE_DEVICE_TABLE(acpi, bmp280_acpi_match);
397 901
398static const struct i2c_device_id bmp280_id[] = { 902static const struct i2c_device_id bmp280_id[] = {
399 {"bmp280", 0}, 903 {"bmp280", BMP280_CHIP_ID },
904 {"bmp180", BMP180_CHIP_ID },
905 {"bmp085", BMP180_CHIP_ID },
400 { }, 906 { },
401}; 907};
402MODULE_DEVICE_TABLE(i2c, bmp280_id); 908MODULE_DEVICE_TABLE(i2c, bmp280_id);
@@ -412,5 +918,5 @@ static struct i2c_driver bmp280_driver = {
412module_i2c_driver(bmp280_driver); 918module_i2c_driver(bmp280_driver);
413 919
414MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>"); 920MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
415MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP280 pressure and temperature sensor"); 921MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor");
416MODULE_LICENSE("GPL v2"); 922MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/hp03.c b/drivers/iio/pressure/hp03.c
new file mode 100644
index 000000000000..ac76515d5d49
--- /dev/null
+++ b/drivers/iio/pressure/hp03.c
@@ -0,0 +1,312 @@
1/*
2 * Copyright (c) 2016 Marek Vasut <marex@denx.de>
3 *
4 * Driver for Hope RF HP03 digital temperature and pressure sensor.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#define pr_fmt(fmt) "hp03: " fmt
12
13#include <linux/module.h>
14#include <linux/delay.h>
15#include <linux/gpio/consumer.h>
16#include <linux/i2c.h>
17#include <linux/regmap.h>
18#include <linux/iio/iio.h>
19#include <linux/iio/sysfs.h>
20
21/*
22 * The HP03 sensor occupies two fixed I2C addresses:
23 * 0x50 ... read-only EEPROM with calibration data
24 * 0x77 ... read-write ADC for pressure and temperature
25 */
26#define HP03_EEPROM_ADDR 0x50
27#define HP03_ADC_ADDR 0x77
28
29#define HP03_EEPROM_CX_OFFSET 0x10
30#define HP03_EEPROM_AB_OFFSET 0x1e
31#define HP03_EEPROM_CD_OFFSET 0x20
32
33#define HP03_ADC_WRITE_REG 0xff
34#define HP03_ADC_READ_REG 0xfd
35#define HP03_ADC_READ_PRESSURE 0xf0 /* D1 in datasheet */
36#define HP03_ADC_READ_TEMP 0xe8 /* D2 in datasheet */
37
/* Per-device state; pressure/temp hold the last completed measurement. */
struct hp03_priv {
	struct i2c_client *client;	/* ADC at HP03_ADC_ADDR */
	struct mutex lock;		/* serializes measurement cycles */
	struct gpio_desc *xclr_gpio;	/* gates the ADC during sampling */

	struct i2c_client *eeprom_client;	/* dummy client at HP03_EEPROM_ADDR */
	struct regmap *eeprom_regmap;

	s32 pressure;	/* kPa */
	s32 temp;	/* Deg. C */
};
49
/*
 * Raw pressure and temperature channels, each with a type-wide scale so
 * userspace can convert the raw readings to physical units.
 */
static const struct iio_chan_spec hp03_channels[] = {
	{
		.type = IIO_PRESSURE,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
	},
	{
		.type = IIO_TEMP,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
	},
};
62
/* The calibration EEPROM is read-only; reject every register write. */
static bool hp03_is_writeable_reg(struct device *dev, unsigned int reg)
{
	return false;
}
67
/* EEPROM contents never change, so every register may be regcached. */
static bool hp03_is_volatile_reg(struct device *dev, unsigned int reg)
{
	return false;
}
72
/*
 * regmap over the read-only calibration EEPROM; with nothing volatile,
 * coefficient reads after the first are served from the regcache.
 */
static const struct regmap_config hp03_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,

	.max_register = HP03_EEPROM_CD_OFFSET + 1,
	.cache_type = REGCACHE_RBTREE,

	.writeable_reg = hp03_is_writeable_reg,
	.volatile_reg = hp03_is_volatile_reg,
};
83
84static int hp03_get_temp_pressure(struct hp03_priv *priv, const u8 reg)
85{
86 int ret;
87
88 ret = i2c_smbus_write_byte_data(priv->client, HP03_ADC_WRITE_REG, reg);
89 if (ret < 0)
90 return ret;
91
92 msleep(50); /* Wait for conversion to finish */
93
94 return i2c_smbus_read_word_data(priv->client, HP03_ADC_READ_REG);
95}
96
/*
 * Run one full measurement cycle: fetch the calibration coefficients,
 * sample pressure and temperature with XCLR asserted, then apply the
 * compensation algorithm from the HP03 datasheet, updating
 * priv->pressure and priv->temp.  Caller must hold priv->lock.
 */
static int hp03_update_temp_pressure(struct hp03_priv *priv)
{
	struct device *dev = &priv->client->dev;
	u8 coefs[18];
	u16 cx_val[7];
	int ab_val, d1_val, d2_val, diff_val, dut, off, sens, x;
	int i, ret;

	/* Sample coefficients from EEPROM (regcached after the first read). */
	ret = regmap_bulk_read(priv->eeprom_regmap, HP03_EEPROM_CX_OFFSET,
			       coefs, sizeof(coefs));
	if (ret < 0) {
		dev_err(dev, "Failed to read EEPROM (reg=%02x)\n",
			HP03_EEPROM_CX_OFFSET);
		return ret;
	}

	/* Sample Temperature and Pressure with the ADC enabled via XCLR. */
	gpiod_set_value_cansleep(priv->xclr_gpio, 1);

	ret = hp03_get_temp_pressure(priv, HP03_ADC_READ_PRESSURE);
	if (ret < 0) {
		dev_err(dev, "Failed to read pressure\n");
		goto err_adc;
	}
	d1_val = ret;

	ret = hp03_get_temp_pressure(priv, HP03_ADC_READ_TEMP);
	if (ret < 0) {
		dev_err(dev, "Failed to read temperature\n");
		goto err_adc;
	}
	d2_val = ret;

	gpiod_set_value_cansleep(priv->xclr_gpio, 0);

	/*
	 * The Cx coefficients and Temp/Pressure values are MSB first,
	 * while smbus word reads are LSB first - hence the byte swaps.
	 */
	for (i = 0; i < 7; i++)
		cx_val[i] = (coefs[2 * i] << 8) | (coefs[(2 * i) + 1] << 0);
	d1_val = ((d1_val >> 8) & 0xff) | ((d1_val & 0xff) << 8);
	d2_val = ((d2_val >> 8) & 0xff) | ((d2_val & 0xff) << 8);

	/* Coefficient voodoo from the HP03 datasheet. */
	if (d2_val >= cx_val[4])
		ab_val = coefs[14];	/* A-value */
	else
		ab_val = coefs[15];	/* B-value */

	diff_val = d2_val - cx_val[4];
	dut = (ab_val * (diff_val >> 7) * (diff_val >> 7)) >> coefs[16];
	dut = diff_val - dut;

	off = (cx_val[1] + (((cx_val[3] - 1024) * dut) >> 14)) * 4;
	sens = cx_val[0] + ((cx_val[2] * dut) >> 10);
	x = ((sens * (d1_val - 7168)) >> 14) - off;

	priv->pressure = ((x * 100) >> 5) + (cx_val[6] * 10);
	priv->temp = 250 + ((dut * cx_val[5]) >> 16) - (dut >> coefs[17]);

	return 0;

err_adc:
	/* Always de-assert XCLR, even when a conversion failed. */
	gpiod_set_value_cansleep(priv->xclr_gpio, 0);
	return ret;
}
162
163static int hp03_read_raw(struct iio_dev *indio_dev,
164 struct iio_chan_spec const *chan,
165 int *val, int *val2, long mask)
166{
167 struct hp03_priv *priv = iio_priv(indio_dev);
168 int ret;
169
170 mutex_lock(&priv->lock);
171 ret = hp03_update_temp_pressure(priv);
172 mutex_unlock(&priv->lock);
173
174 if (ret)
175 return ret;
176
177 switch (mask) {
178 case IIO_CHAN_INFO_RAW:
179 switch (chan->type) {
180 case IIO_PRESSURE:
181 *val = priv->pressure;
182 return IIO_VAL_INT;
183 case IIO_TEMP:
184 *val = priv->temp;
185 return IIO_VAL_INT;
186 default:
187 return -EINVAL;
188 }
189 break;
190 case IIO_CHAN_INFO_SCALE:
191 switch (chan->type) {
192 case IIO_PRESSURE:
193 *val = 0;
194 *val2 = 1000;
195 return IIO_VAL_INT_PLUS_MICRO;
196 case IIO_TEMP:
197 *val = 10;
198 return IIO_VAL_INT;
199 default:
200 return -EINVAL;
201 }
202 break;
203 default:
204 return -EINVAL;
205 }
206
207 return -EINVAL;
208}
209
210static const struct iio_info hp03_info = {
211 .driver_module = THIS_MODULE,
212 .read_raw = &hp03_read_raw,
213};
214
/*
 * Probe: set up the IIO device, claim the XCLR GPIO, create a second
 * (dummy) I2C client for the fixed-address calibration EEPROM, and
 * register with the IIO core.
 *
 * NOTE(review): eeprom_client and eeprom_regmap are NOT devm-managed;
 * they are released manually here on error and in hp03_remove().
 */
static int hp03_probe(struct i2c_client *client,
		      const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct iio_dev *indio_dev;
	struct hp03_priv *priv;
	int ret;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
	if (!indio_dev)
		return -ENOMEM;

	priv = iio_priv(indio_dev);
	priv->client = client;
	mutex_init(&priv->lock);

	indio_dev->dev.parent = dev;
	indio_dev->name = id->name;
	indio_dev->channels = hp03_channels;
	indio_dev->num_channels = ARRAY_SIZE(hp03_channels);
	indio_dev->info = &hp03_info;
	indio_dev->modes = INDIO_DIRECT_MODE;

	priv->xclr_gpio = devm_gpiod_get_index(dev, "xclr", 0, GPIOD_OUT_HIGH);
	if (IS_ERR(priv->xclr_gpio)) {
		dev_err(dev, "Failed to claim XCLR GPIO\n");
		ret = PTR_ERR(priv->xclr_gpio);
		return ret;
	}

	/*
	 * Allocate another device for the on-sensor EEPROM,
	 * which has its dedicated I2C address and contains
	 * the calibration constants for the sensor.
	 */
	priv->eeprom_client = i2c_new_dummy(client->adapter, HP03_EEPROM_ADDR);
	if (!priv->eeprom_client) {
		dev_err(dev, "New EEPROM I2C device failed\n");
		return -ENODEV;
	}

	priv->eeprom_regmap = regmap_init_i2c(priv->eeprom_client,
					      &hp03_regmap_config);
	if (IS_ERR(priv->eeprom_regmap)) {
		dev_err(dev, "Failed to allocate EEPROM regmap\n");
		ret = PTR_ERR(priv->eeprom_regmap);
		goto err_cleanup_eeprom_client;
	}

	ret = iio_device_register(indio_dev);
	if (ret) {
		dev_err(dev, "Failed to register IIO device\n");
		goto err_cleanup_eeprom_regmap;
	}

	i2c_set_clientdata(client, indio_dev);

	return 0;

err_cleanup_eeprom_regmap:
	regmap_exit(priv->eeprom_regmap);

err_cleanup_eeprom_client:
	i2c_unregister_device(priv->eeprom_client);
	return ret;
}
281
/*
 * Tear down in reverse order of probe: unregister the IIO device, then
 * release the manually-managed EEPROM regmap and dummy I2C client
 * (devm handles the IIO device allocation and the GPIO).
 */
static int hp03_remove(struct i2c_client *client)
{
	struct iio_dev *indio_dev = i2c_get_clientdata(client);
	struct hp03_priv *priv = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);
	regmap_exit(priv->eeprom_regmap);
	i2c_unregister_device(priv->eeprom_client);

	return 0;
}
293
294static const struct i2c_device_id hp03_id[] = {
295 { "hp03", 0 },
296 { },
297};
298MODULE_DEVICE_TABLE(i2c, hp03_id);
299
300static struct i2c_driver hp03_driver = {
301 .driver = {
302 .name = "hp03",
303 },
304 .probe = hp03_probe,
305 .remove = hp03_remove,
306 .id_table = hp03_id,
307};
308module_i2c_driver(hp03_driver);
309
310MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
311MODULE_DESCRIPTION("Driver for Hope RF HP03 pressure and temperature sensor");
312MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
new file mode 100644
index 000000000000..90f2b6e4a920
--- /dev/null
+++ b/drivers/iio/pressure/hp206c.c
@@ -0,0 +1,426 @@
1/*
2 * hp206c.c - HOPERF HP206C precision barometer and altimeter sensor
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This file is subject to the terms and conditions of version 2 of
7 * the GNU General Public License. See the file COPYING in the main
8 * directory of this archive for more details.
9 *
10 * (7-bit I2C slave address 0x76)
11 *
12 * Datasheet:
13 * http://www.hoperf.com/upload/sensor/HP206C_DataSheet_EN_V2.0.pdf
14 */
15
16#include <linux/module.h>
17#include <linux/i2c.h>
18#include <linux/iio/iio.h>
19#include <linux/iio/sysfs.h>
20#include <linux/delay.h>
21#include <linux/util_macros.h>
22#include <linux/acpi.h>
23
24/* I2C commands: */
25#define HP206C_CMD_SOFT_RST 0x06
26
27#define HP206C_CMD_ADC_CVT 0x40
28
29#define HP206C_CMD_ADC_CVT_OSR_4096 0x00
30#define HP206C_CMD_ADC_CVT_OSR_2048 0x04
31#define HP206C_CMD_ADC_CVT_OSR_1024 0x08
32#define HP206C_CMD_ADC_CVT_OSR_512 0x0c
33#define HP206C_CMD_ADC_CVT_OSR_256 0x10
34#define HP206C_CMD_ADC_CVT_OSR_128 0x14
35
36#define HP206C_CMD_ADC_CVT_CHNL_PT 0x00
37#define HP206C_CMD_ADC_CVT_CHNL_T 0x02
38
39#define HP206C_CMD_READ_P 0x30
40#define HP206C_CMD_READ_T 0x32
41
42#define HP206C_CMD_READ_REG 0x80
43#define HP206C_CMD_WRITE_REG 0xc0
44
45#define HP206C_REG_INT_EN 0x0b
46#define HP206C_REG_INT_CFG 0x0c
47
48#define HP206C_REG_INT_SRC 0x0d
49#define HP206C_FLAG_DEV_RDY 0x40
50
51#define HP206C_REG_PARA 0x0f
52#define HP206C_FLAG_CMPS_EN 0x80
53
54/* Maximum spin for DEV_RDY */
55#define HP206C_MAX_DEV_RDY_WAIT_COUNT 20
56#define HP206C_DEV_RDY_WAIT_US 20000
57
/* Per-device driver state. */
struct hp206c_data {
	struct mutex mutex;	/* serializes conversions and OSR updates */
	struct i2c_client *client;
	int temp_osr_index;	/* index into hp206c_osr_settings for temperature */
	int pres_osr_index;	/* index into hp206c_osr_settings for pressure */
};
64
/* One oversampling-rate option: ADC_CVT command bits plus conversion times. */
struct hp206c_osr_setting {
	u8 osr_mask;			/* OSR bits OR-ed into the ADC_CVT command */
	unsigned int temp_conv_time_us;	/* temperature conversion time, in us */
	unsigned int pres_conv_time_us;	/* pressure conversion time, in us */
};
70
/*
 * Data from Table 5 in datasheet.
 * The three arrays below are indexed by the same osr_index and must stay
 * in the same (descending-rate) order.
 */
static const struct hp206c_osr_setting hp206c_osr_settings[] = {
	{ HP206C_CMD_ADC_CVT_OSR_4096, 65600, 131100 },
	{ HP206C_CMD_ADC_CVT_OSR_2048, 32800, 65600 },
	{ HP206C_CMD_ADC_CVT_OSR_1024, 16400, 32800 },
	{ HP206C_CMD_ADC_CVT_OSR_512, 8200, 16400 },
	{ HP206C_CMD_ADC_CVT_OSR_256, 4100, 8200 },
	{ HP206C_CMD_ADC_CVT_OSR_128, 2100, 4100 },
};
static const int hp206c_osr_rates[] = { 4096, 2048, 1024, 512, 256, 128 };
static const char hp206c_osr_rates_str[] = "4096 2048 1024 512 256 128";
82
/* Read one register; the register index is OR-ed into the READ_REG command. */
static inline int hp206c_read_reg(struct i2c_client *client, u8 reg)
{
	return i2c_smbus_read_byte_data(client, HP206C_CMD_READ_REG | reg);
}
87
/* Write one register; the register index is OR-ed into the WRITE_REG command. */
static inline int hp206c_write_reg(struct i2c_client *client, u8 reg, u8 val)
{
	return i2c_smbus_write_byte_data(client,
			HP206C_CMD_WRITE_REG | reg, val);
}
93
94static int hp206c_read_20bit(struct i2c_client *client, u8 cmd)
95{
96 int ret;
97 u8 values[3];
98
99 ret = i2c_smbus_read_i2c_block_data(client, cmd, 3, values);
100 if (ret < 0)
101 return ret;
102 if (ret != 3)
103 return -EIO;
104 return ((values[0] & 0xF) << 16) | (values[1] << 8) | (values[2]);
105}
106
107/* Spin for max 160ms until DEV_RDY is 1, or return error. */
108static int hp206c_wait_dev_rdy(struct iio_dev *indio_dev)
109{
110 int ret;
111 int count = 0;
112 struct hp206c_data *data = iio_priv(indio_dev);
113 struct i2c_client *client = data->client;
114
115 while (++count <= HP206C_MAX_DEV_RDY_WAIT_COUNT) {
116 ret = hp206c_read_reg(client, HP206C_REG_INT_SRC);
117 if (ret < 0) {
118 dev_err(&indio_dev->dev, "Failed READ_REG INT_SRC: %d\n", ret);
119 return ret;
120 }
121 if (ret & HP206C_FLAG_DEV_RDY)
122 return 0;
123 usleep_range(HP206C_DEV_RDY_WAIT_US, HP206C_DEV_RDY_WAIT_US * 3 / 2);
124 }
125 return -ETIMEDOUT;
126}
127
128static int hp206c_set_compensation(struct i2c_client *client, bool enabled)
129{
130 int val;
131
132 val = hp206c_read_reg(client, HP206C_REG_PARA);
133 if (val < 0)
134 return val;
135 if (enabled)
136 val |= HP206C_FLAG_CMPS_EN;
137 else
138 val &= ~HP206C_FLAG_CMPS_EN;
139
140 return hp206c_write_reg(client, HP206C_REG_PARA, val);
141}
142
143/* Do a soft reset */
144static int hp206c_soft_reset(struct iio_dev *indio_dev)
145{
146 int ret;
147 struct hp206c_data *data = iio_priv(indio_dev);
148 struct i2c_client *client = data->client;
149
150 ret = i2c_smbus_write_byte(client, HP206C_CMD_SOFT_RST);
151 if (ret) {
152 dev_err(&client->dev, "Failed to reset device: %d\n", ret);
153 return ret;
154 }
155
156 usleep_range(400, 600);
157
158 ret = hp206c_wait_dev_rdy(indio_dev);
159 if (ret) {
160 dev_err(&client->dev, "Device not ready after soft reset: %d\n", ret);
161 return ret;
162 }
163
164 ret = hp206c_set_compensation(client, true);
165 if (ret)
166 dev_err(&client->dev, "Failed to enable compensation: %d\n", ret);
167 return ret;
168}
169
170static int hp206c_conv_and_read(struct iio_dev *indio_dev,
171 u8 conv_cmd, u8 read_cmd,
172 unsigned int sleep_us)
173{
174 int ret;
175 struct hp206c_data *data = iio_priv(indio_dev);
176 struct i2c_client *client = data->client;
177
178 ret = hp206c_wait_dev_rdy(indio_dev);
179 if (ret < 0) {
180 dev_err(&indio_dev->dev, "Device not ready: %d\n", ret);
181 return ret;
182 }
183
184 ret = i2c_smbus_write_byte(client, conv_cmd);
185 if (ret < 0) {
186 dev_err(&indio_dev->dev, "Failed convert: %d\n", ret);
187 return ret;
188 }
189
190 usleep_range(sleep_us, sleep_us * 3 / 2);
191
192 ret = hp206c_wait_dev_rdy(indio_dev);
193 if (ret < 0) {
194 dev_err(&indio_dev->dev, "Device not ready: %d\n", ret);
195 return ret;
196 }
197
198 ret = hp206c_read_20bit(client, read_cmd);
199 if (ret < 0)
200 dev_err(&indio_dev->dev, "Failed read: %d\n", ret);
201
202 return ret;
203}
204
205static int hp206c_read_raw(struct iio_dev *indio_dev,
206 struct iio_chan_spec const *chan, int *val,
207 int *val2, long mask)
208{
209 int ret;
210 struct hp206c_data *data = iio_priv(indio_dev);
211 const struct hp206c_osr_setting *osr_setting;
212 u8 conv_cmd;
213
214 mutex_lock(&data->mutex);
215
216 switch (mask) {
217 case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
218 switch (chan->type) {
219 case IIO_TEMP:
220 *val = hp206c_osr_rates[data->temp_osr_index];
221 ret = IIO_VAL_INT;
222 break;
223
224 case IIO_PRESSURE:
225 *val = hp206c_osr_rates[data->pres_osr_index];
226 ret = IIO_VAL_INT;
227 break;
228 default:
229 ret = -EINVAL;
230 }
231 break;
232
233 case IIO_CHAN_INFO_RAW:
234 switch (chan->type) {
235 case IIO_TEMP:
236 osr_setting = &hp206c_osr_settings[data->temp_osr_index];
237 conv_cmd = HP206C_CMD_ADC_CVT |
238 osr_setting->osr_mask |
239 HP206C_CMD_ADC_CVT_CHNL_T;
240 ret = hp206c_conv_and_read(indio_dev,
241 conv_cmd,
242 HP206C_CMD_READ_T,
243 osr_setting->temp_conv_time_us);
244 if (ret >= 0) {
245 /* 20 significant bits are provided.
246 * Extend sign over the rest.
247 */
248 *val = sign_extend32(ret, 19);
249 ret = IIO_VAL_INT;
250 }
251 break;
252
253 case IIO_PRESSURE:
254 osr_setting = &hp206c_osr_settings[data->pres_osr_index];
255 conv_cmd = HP206C_CMD_ADC_CVT |
256 osr_setting->osr_mask |
257 HP206C_CMD_ADC_CVT_CHNL_PT;
258 ret = hp206c_conv_and_read(indio_dev,
259 conv_cmd,
260 HP206C_CMD_READ_P,
261 osr_setting->pres_conv_time_us);
262 if (ret >= 0) {
263 *val = ret;
264 ret = IIO_VAL_INT;
265 }
266 break;
267 default:
268 ret = -EINVAL;
269 }
270 break;
271
272 case IIO_CHAN_INFO_SCALE:
273 switch (chan->type) {
274 case IIO_TEMP:
275 *val = 0;
276 *val2 = 10000;
277 ret = IIO_VAL_INT_PLUS_MICRO;
278 break;
279
280 case IIO_PRESSURE:
281 *val = 0;
282 *val2 = 1000;
283 ret = IIO_VAL_INT_PLUS_MICRO;
284 break;
285 default:
286 ret = -EINVAL;
287 }
288 break;
289
290 default:
291 ret = -EINVAL;
292 }
293
294 mutex_unlock(&data->mutex);
295 return ret;
296}
297
298static int hp206c_write_raw(struct iio_dev *indio_dev,
299 struct iio_chan_spec const *chan,
300 int val, int val2, long mask)
301{
302 int ret = 0;
303 struct hp206c_data *data = iio_priv(indio_dev);
304
305 if (mask != IIO_CHAN_INFO_OVERSAMPLING_RATIO)
306 return -EINVAL;
307 mutex_lock(&data->mutex);
308 switch (chan->type) {
309 case IIO_TEMP:
310 data->temp_osr_index = find_closest_descending(val,
311 hp206c_osr_rates, ARRAY_SIZE(hp206c_osr_rates));
312 break;
313 case IIO_PRESSURE:
314 data->pres_osr_index = find_closest_descending(val,
315 hp206c_osr_rates, ARRAY_SIZE(hp206c_osr_rates));
316 break;
317 default:
318 ret = -EINVAL;
319 }
320 mutex_unlock(&data->mutex);
321 return ret;
322}
323
/*
 * Two channels — temperature and pressure — each exposing raw value,
 * scale, and a per-channel oversampling ratio.
 */
static const struct iio_chan_spec hp206c_channels[] = {
	{
		.type = IIO_TEMP,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
			BIT(IIO_CHAN_INFO_SCALE) |
			BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
	},
	{
		.type = IIO_PRESSURE,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
			BIT(IIO_CHAN_INFO_SCALE) |
			BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
	}
};
338
/*
 * NOTE(review): the available OSR values are exposed through the
 * sampling_frequency_available attribute — confirm this is intended
 * rather than an oversampling_ratio_available attribute.
 */
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(hp206c_osr_rates_str);

static struct attribute *hp206c_attributes[] = {
	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
	NULL,
};

static const struct attribute_group hp206c_attribute_group = {
	.attrs = hp206c_attributes,
};
349
/* IIO callbacks and sysfs attributes for the HP206C. */
static const struct iio_info hp206c_info = {
	.attrs = &hp206c_attribute_group,
	.read_raw = hp206c_read_raw,
	.write_raw = hp206c_write_raw,
	.driver_module = THIS_MODULE,
};
356
357static int hp206c_probe(struct i2c_client *client,
358 const struct i2c_device_id *id)
359{
360 struct iio_dev *indio_dev;
361 struct hp206c_data *data;
362 int ret;
363
364 if (!i2c_check_functionality(client->adapter,
365 I2C_FUNC_SMBUS_BYTE |
366 I2C_FUNC_SMBUS_BYTE_DATA |
367 I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
368 dev_err(&client->dev, "Adapter does not support "
369 "all required i2c functionality\n");
370 return -ENODEV;
371 }
372
373 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
374 if (!indio_dev)
375 return -ENOMEM;
376
377 data = iio_priv(indio_dev);
378 data->client = client;
379 mutex_init(&data->mutex);
380
381 indio_dev->info = &hp206c_info;
382 indio_dev->name = id->name;
383 indio_dev->dev.parent = &client->dev;
384 indio_dev->modes = INDIO_DIRECT_MODE;
385 indio_dev->channels = hp206c_channels;
386 indio_dev->num_channels = ARRAY_SIZE(hp206c_channels);
387
388 i2c_set_clientdata(client, indio_dev);
389
390 /* Do a soft reset on probe */
391 ret = hp206c_soft_reset(indio_dev);
392 if (ret) {
393 dev_err(&client->dev, "Failed to reset on startup: %d\n", ret);
394 return -ENODEV;
395 }
396
397 return devm_iio_device_register(&client->dev, indio_dev);
398}
399
400static const struct i2c_device_id hp206c_id[] = {
401 {"hp206c"},
402 {}
403};
404
#ifdef CONFIG_ACPI
/* ACPI ID so the sensor can be enumerated from ACPI tables. */
static const struct acpi_device_id hp206c_acpi_match[] = {
	{"HOP206C", 0},
	{ },
};
MODULE_DEVICE_TABLE(acpi, hp206c_acpi_match);
#endif
412
/* Driver registration; ACPI_PTR() compiles the match table out when !ACPI. */
static struct i2c_driver hp206c_driver = {
	.probe = hp206c_probe,
	.id_table = hp206c_id,
	.driver = {
		.name = "hp206c",
		.acpi_match_table = ACPI_PTR(hp206c_acpi_match),
	},
};

module_i2c_driver(hp206c_driver);
423
424MODULE_DESCRIPTION("HOPERF HP206C precision barometer and altimeter sensor");
425MODULE_AUTHOR("Leonard Crestez <leonard.crestez@intel.com>");
426MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index 8b08e4b7e3a9..ccda63c5b3c3 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -16,15 +16,11 @@
16#include <linux/iio/iio.h> 16#include <linux/iio/iio.h>
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18 18
19struct regulator;
20
19#define MS5611_RESET 0x1e 21#define MS5611_RESET 0x1e
20#define MS5611_READ_ADC 0x00 22#define MS5611_READ_ADC 0x00
21#define MS5611_READ_PROM_WORD 0xA0 23#define MS5611_READ_PROM_WORD 0xA0
22#define MS5611_START_TEMP_CONV 0x58
23#define MS5611_START_PRESSURE_CONV 0x48
24
25#define MS5611_CONV_TIME_MIN 9040
26#define MS5611_CONV_TIME_MAX 10000
27
28#define MS5611_PROM_WORDS_NB 8 24#define MS5611_PROM_WORDS_NB 8
29 25
30enum { 26enum {
@@ -39,16 +35,31 @@ struct ms5611_chip_info {
39 s32 *temp, s32 *pressure); 35 s32 *temp, s32 *pressure);
40}; 36};
41 37
38/*
39 * OverSampling Rate descriptor.
40 * Warning: cmd MUST be kept aligned on a word boundary (see
41 * m5611_spi_read_adc_temp_and_pressure in ms5611_spi.c).
42 */
43struct ms5611_osr {
44 unsigned long conv_usec;
45 u8 cmd;
46 unsigned short rate;
47};
48
42struct ms5611_state { 49struct ms5611_state {
43 void *client; 50 void *client;
44 struct mutex lock; 51 struct mutex lock;
45 52
53 const struct ms5611_osr *pressure_osr;
54 const struct ms5611_osr *temp_osr;
55
46 int (*reset)(struct device *dev); 56 int (*reset)(struct device *dev);
47 int (*read_prom_word)(struct device *dev, int index, u16 *word); 57 int (*read_prom_word)(struct device *dev, int index, u16 *word);
48 int (*read_adc_temp_and_pressure)(struct device *dev, 58 int (*read_adc_temp_and_pressure)(struct device *dev,
49 s32 *temp, s32 *pressure); 59 s32 *temp, s32 *pressure);
50 60
51 struct ms5611_chip_info *chip_info; 61 struct ms5611_chip_info *chip_info;
62 struct regulator *vdd;
52}; 63};
53 64
54int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, 65int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 992ad8d3b67a..76578b07bb6e 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -18,11 +18,44 @@
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/regulator/consumer.h> 19#include <linux/regulator/consumer.h>
20 20
21#include <linux/iio/sysfs.h>
21#include <linux/iio/buffer.h> 22#include <linux/iio/buffer.h>
22#include <linux/iio/triggered_buffer.h> 23#include <linux/iio/triggered_buffer.h>
23#include <linux/iio/trigger_consumer.h> 24#include <linux/iio/trigger_consumer.h>
24#include "ms5611.h" 25#include "ms5611.h"
25 26
27#define MS5611_INIT_OSR(_cmd, _conv_usec, _rate) \
28 { .cmd = _cmd, .conv_usec = _conv_usec, .rate = _rate }
29
30static const struct ms5611_osr ms5611_avail_pressure_osr[] = {
31 MS5611_INIT_OSR(0x40, 600, 256),
32 MS5611_INIT_OSR(0x42, 1170, 512),
33 MS5611_INIT_OSR(0x44, 2280, 1024),
34 MS5611_INIT_OSR(0x46, 4540, 2048),
35 MS5611_INIT_OSR(0x48, 9040, 4096)
36};
37
38static const struct ms5611_osr ms5611_avail_temp_osr[] = {
39 MS5611_INIT_OSR(0x50, 600, 256),
40 MS5611_INIT_OSR(0x52, 1170, 512),
41 MS5611_INIT_OSR(0x54, 2280, 1024),
42 MS5611_INIT_OSR(0x56, 4540, 2048),
43 MS5611_INIT_OSR(0x58, 9040, 4096)
44};
45
46static const char ms5611_show_osr[] = "256 512 1024 2048 4096";
47
48static IIO_CONST_ATTR(oversampling_ratio_available, ms5611_show_osr);
49
50static struct attribute *ms5611_attributes[] = {
51 &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
52 NULL,
53};
54
55static const struct attribute_group ms5611_attribute_group = {
56 .attrs = ms5611_attributes,
57};
58
26static bool ms5611_prom_is_valid(u16 *prom, size_t len) 59static bool ms5611_prom_is_valid(u16 *prom, size_t len)
27{ 60{
28 int i, j; 61 int i, j;
@@ -239,11 +272,70 @@ static int ms5611_read_raw(struct iio_dev *indio_dev,
239 default: 272 default:
240 return -EINVAL; 273 return -EINVAL;
241 } 274 }
275 case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
276 if (chan->type != IIO_TEMP && chan->type != IIO_PRESSURE)
277 break;
278 mutex_lock(&st->lock);
279 if (chan->type == IIO_TEMP)
280 *val = (int)st->temp_osr->rate;
281 else
282 *val = (int)st->pressure_osr->rate;
283 mutex_unlock(&st->lock);
284 return IIO_VAL_INT;
242 } 285 }
243 286
244 return -EINVAL; 287 return -EINVAL;
245} 288}
246 289
290static const struct ms5611_osr *ms5611_find_osr(int rate,
291 const struct ms5611_osr *osr,
292 size_t count)
293{
294 unsigned int r;
295
296 for (r = 0; r < count; r++)
297 if ((unsigned short)rate == osr[r].rate)
298 break;
299 if (r >= count)
300 return NULL;
301 return &osr[r];
302}
303
304static int ms5611_write_raw(struct iio_dev *indio_dev,
305 struct iio_chan_spec const *chan,
306 int val, int val2, long mask)
307{
308 struct ms5611_state *st = iio_priv(indio_dev);
309 const struct ms5611_osr *osr = NULL;
310
311 if (mask != IIO_CHAN_INFO_OVERSAMPLING_RATIO)
312 return -EINVAL;
313
314 if (chan->type == IIO_TEMP)
315 osr = ms5611_find_osr(val, ms5611_avail_temp_osr,
316 ARRAY_SIZE(ms5611_avail_temp_osr));
317 else if (chan->type == IIO_PRESSURE)
318 osr = ms5611_find_osr(val, ms5611_avail_pressure_osr,
319 ARRAY_SIZE(ms5611_avail_pressure_osr));
320 if (!osr)
321 return -EINVAL;
322
323 mutex_lock(&st->lock);
324
325 if (iio_buffer_enabled(indio_dev)) {
326 mutex_unlock(&st->lock);
327 return -EBUSY;
328 }
329
330 if (chan->type == IIO_TEMP)
331 st->temp_osr = osr;
332 else
333 st->pressure_osr = osr;
334
335 mutex_unlock(&st->lock);
336 return 0;
337}
338
247static const unsigned long ms5611_scan_masks[] = {0x3, 0}; 339static const unsigned long ms5611_scan_masks[] = {0x3, 0};
248 340
249static struct ms5611_chip_info chip_info_tbl[] = { 341static struct ms5611_chip_info chip_info_tbl[] = {
@@ -259,7 +351,8 @@ static const struct iio_chan_spec ms5611_channels[] = {
259 { 351 {
260 .type = IIO_PRESSURE, 352 .type = IIO_PRESSURE,
261 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | 353 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
262 BIT(IIO_CHAN_INFO_SCALE), 354 BIT(IIO_CHAN_INFO_SCALE) |
355 BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
263 .scan_index = 0, 356 .scan_index = 0,
264 .scan_type = { 357 .scan_type = {
265 .sign = 's', 358 .sign = 's',
@@ -271,7 +364,8 @@ static const struct iio_chan_spec ms5611_channels[] = {
271 { 364 {
272 .type = IIO_TEMP, 365 .type = IIO_TEMP,
273 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | 366 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
274 BIT(IIO_CHAN_INFO_SCALE), 367 BIT(IIO_CHAN_INFO_SCALE) |
368 BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
275 .scan_index = 1, 369 .scan_index = 1,
276 .scan_type = { 370 .scan_type = {
277 .sign = 's', 371 .sign = 's',
@@ -285,40 +379,68 @@ static const struct iio_chan_spec ms5611_channels[] = {
285 379
286static const struct iio_info ms5611_info = { 380static const struct iio_info ms5611_info = {
287 .read_raw = &ms5611_read_raw, 381 .read_raw = &ms5611_read_raw,
382 .write_raw = &ms5611_write_raw,
383 .attrs = &ms5611_attribute_group,
288 .driver_module = THIS_MODULE, 384 .driver_module = THIS_MODULE,
289}; 385};
290 386
291static int ms5611_init(struct iio_dev *indio_dev) 387static int ms5611_init(struct iio_dev *indio_dev)
292{ 388{
293 int ret; 389 int ret;
294 struct regulator *vdd = devm_regulator_get(indio_dev->dev.parent, 390 struct ms5611_state *st = iio_priv(indio_dev);
295 "vdd");
296 391
297 /* Enable attached regulator if any. */ 392 /* Enable attached regulator if any. */
298 if (!IS_ERR(vdd)) { 393 st->vdd = devm_regulator_get(indio_dev->dev.parent, "vdd");
299 ret = regulator_enable(vdd); 394 if (!IS_ERR(st->vdd)) {
395 ret = regulator_enable(st->vdd);
300 if (ret) { 396 if (ret) {
301 dev_err(indio_dev->dev.parent, 397 dev_err(indio_dev->dev.parent,
302 "failed to enable Vdd supply: %d\n", ret); 398 "failed to enable Vdd supply: %d\n", ret);
303 return ret; 399 return ret;
304 } 400 }
401 } else {
402 ret = PTR_ERR(st->vdd);
403 if (ret != -ENODEV)
404 return ret;
305 } 405 }
306 406
307 ret = ms5611_reset(indio_dev); 407 ret = ms5611_reset(indio_dev);
308 if (ret < 0) 408 if (ret < 0)
309 return ret; 409 goto err_regulator_disable;
310 410
311 return ms5611_read_prom(indio_dev); 411 ret = ms5611_read_prom(indio_dev);
412 if (ret < 0)
413 goto err_regulator_disable;
414
415 return 0;
416
417err_regulator_disable:
418 if (!IS_ERR_OR_NULL(st->vdd))
419 regulator_disable(st->vdd);
420 return ret;
421}
422
423static void ms5611_fini(const struct iio_dev *indio_dev)
424{
425 const struct ms5611_state *st = iio_priv(indio_dev);
426
427 if (!IS_ERR_OR_NULL(st->vdd))
428 regulator_disable(st->vdd);
312} 429}
313 430
314int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, 431int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
315 const char *name, int type) 432 const char *name, int type)
316{ 433{
317 int ret; 434 int ret;
318 struct ms5611_state *st = iio_priv(indio_dev); 435 struct ms5611_state *st = iio_priv(indio_dev);
319 436
320 mutex_init(&st->lock); 437 mutex_init(&st->lock);
321 st->chip_info = &chip_info_tbl[type]; 438 st->chip_info = &chip_info_tbl[type];
439 st->temp_osr =
440 &ms5611_avail_temp_osr[ARRAY_SIZE(ms5611_avail_temp_osr) - 1];
441 st->pressure_osr =
442 &ms5611_avail_pressure_osr[ARRAY_SIZE(ms5611_avail_pressure_osr)
443 - 1];
322 indio_dev->dev.parent = dev; 444 indio_dev->dev.parent = dev;
323 indio_dev->name = name; 445 indio_dev->name = name;
324 indio_dev->info = &ms5611_info; 446 indio_dev->info = &ms5611_info;
@@ -335,7 +457,7 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
335 ms5611_trigger_handler, NULL); 457 ms5611_trigger_handler, NULL);
336 if (ret < 0) { 458 if (ret < 0) {
337 dev_err(dev, "iio triggered buffer setup failed\n"); 459 dev_err(dev, "iio triggered buffer setup failed\n");
338 return ret; 460 goto err_fini;
339 } 461 }
340 462
341 ret = iio_device_register(indio_dev); 463 ret = iio_device_register(indio_dev);
@@ -348,7 +470,8 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
348 470
349err_buffer_cleanup: 471err_buffer_cleanup:
350 iio_triggered_buffer_cleanup(indio_dev); 472 iio_triggered_buffer_cleanup(indio_dev);
351 473err_fini:
474 ms5611_fini(indio_dev);
352 return ret; 475 return ret;
353} 476}
354EXPORT_SYMBOL(ms5611_probe); 477EXPORT_SYMBOL(ms5611_probe);
@@ -357,6 +480,7 @@ int ms5611_remove(struct iio_dev *indio_dev)
357{ 480{
358 iio_device_unregister(indio_dev); 481 iio_device_unregister(indio_dev);
359 iio_triggered_buffer_cleanup(indio_dev); 482 iio_triggered_buffer_cleanup(indio_dev);
483 ms5611_fini(indio_dev);
360 484
361 return 0; 485 return 0;
362} 486}
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 7f6fc8eee922..55fb5fc0b6ea 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -17,6 +17,7 @@
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/of_device.h>
20 21
21#include "ms5611.h" 22#include "ms5611.h"
22 23
@@ -62,23 +63,23 @@ static int ms5611_i2c_read_adc_temp_and_pressure(struct device *dev,
62{ 63{
63 int ret; 64 int ret;
64 struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); 65 struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
66 const struct ms5611_osr *osr = st->temp_osr;
65 67
66 ret = i2c_smbus_write_byte(st->client, MS5611_START_TEMP_CONV); 68 ret = i2c_smbus_write_byte(st->client, osr->cmd);
67 if (ret < 0) 69 if (ret < 0)
68 return ret; 70 return ret;
69 71
70 usleep_range(MS5611_CONV_TIME_MIN, MS5611_CONV_TIME_MAX); 72 usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
71
72 ret = ms5611_i2c_read_adc(st, temp); 73 ret = ms5611_i2c_read_adc(st, temp);
73 if (ret < 0) 74 if (ret < 0)
74 return ret; 75 return ret;
75 76
76 ret = i2c_smbus_write_byte(st->client, MS5611_START_PRESSURE_CONV); 77 osr = st->pressure_osr;
78 ret = i2c_smbus_write_byte(st->client, osr->cmd);
77 if (ret < 0) 79 if (ret < 0)
78 return ret; 80 return ret;
79 81
80 usleep_range(MS5611_CONV_TIME_MIN, MS5611_CONV_TIME_MAX); 82 usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
81
82 return ms5611_i2c_read_adc(st, pressure); 83 return ms5611_i2c_read_adc(st, pressure);
83} 84}
84 85
@@ -113,6 +114,17 @@ static int ms5611_i2c_remove(struct i2c_client *client)
113 return ms5611_remove(i2c_get_clientdata(client)); 114 return ms5611_remove(i2c_get_clientdata(client));
114} 115}
115 116
117#if defined(CONFIG_OF)
118static const struct of_device_id ms5611_i2c_matches[] = {
119 { .compatible = "meas,ms5611" },
120 { .compatible = "ms5611" },
121 { .compatible = "meas,ms5607" },
122 { .compatible = "ms5607" },
123 { }
124};
125MODULE_DEVICE_TABLE(of, ms5611_i2c_matches);
126#endif
127
116static const struct i2c_device_id ms5611_id[] = { 128static const struct i2c_device_id ms5611_id[] = {
117 { "ms5611", MS5611 }, 129 { "ms5611", MS5611 },
118 { "ms5607", MS5607 }, 130 { "ms5607", MS5607 },
@@ -123,6 +135,7 @@ MODULE_DEVICE_TABLE(i2c, ms5611_id);
123static struct i2c_driver ms5611_driver = { 135static struct i2c_driver ms5611_driver = {
124 .driver = { 136 .driver = {
125 .name = "ms5611", 137 .name = "ms5611",
138 .of_match_table = of_match_ptr(ms5611_i2c_matches)
126 }, 139 },
127 .id_table = ms5611_id, 140 .id_table = ms5611_id,
128 .probe = ms5611_i2c_probe, 141 .probe = ms5611_i2c_probe,
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 5cc009e85f0e..932e05001e1a 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -12,6 +12,7 @@
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/spi/spi.h> 14#include <linux/spi/spi.h>
15#include <linux/of_device.h>
15 16
16#include "ms5611.h" 17#include "ms5611.h"
17 18
@@ -55,28 +56,29 @@ static int ms5611_spi_read_adc(struct device *dev, s32 *val)
55static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev, 56static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
56 s32 *temp, s32 *pressure) 57 s32 *temp, s32 *pressure)
57{ 58{
58 u8 cmd;
59 int ret; 59 int ret;
60 struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); 60 struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
61 const struct ms5611_osr *osr = st->temp_osr;
61 62
62 cmd = MS5611_START_TEMP_CONV; 63 /*
63 ret = spi_write_then_read(st->client, &cmd, 1, NULL, 0); 64 * Warning: &osr->cmd MUST be aligned on a word boundary since used as
65 * 2nd argument (void*) of spi_write_then_read.
66 */
67 ret = spi_write_then_read(st->client, &osr->cmd, 1, NULL, 0);
64 if (ret < 0) 68 if (ret < 0)
65 return ret; 69 return ret;
66 70
67 usleep_range(MS5611_CONV_TIME_MIN, MS5611_CONV_TIME_MAX); 71 usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
68
69 ret = ms5611_spi_read_adc(dev, temp); 72 ret = ms5611_spi_read_adc(dev, temp);
70 if (ret < 0) 73 if (ret < 0)
71 return ret; 74 return ret;
72 75
73 cmd = MS5611_START_PRESSURE_CONV; 76 osr = st->pressure_osr;
74 ret = spi_write_then_read(st->client, &cmd, 1, NULL, 0); 77 ret = spi_write_then_read(st->client, &osr->cmd, 1, NULL, 0);
75 if (ret < 0) 78 if (ret < 0)
76 return ret; 79 return ret;
77 80
78 usleep_range(MS5611_CONV_TIME_MIN, MS5611_CONV_TIME_MAX); 81 usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
79
80 return ms5611_spi_read_adc(dev, pressure); 82 return ms5611_spi_read_adc(dev, pressure);
81} 83}
82 84
@@ -106,7 +108,7 @@ static int ms5611_spi_probe(struct spi_device *spi)
106 st->client = spi; 108 st->client = spi;
107 109
108 return ms5611_probe(indio_dev, &spi->dev, spi_get_device_id(spi)->name, 110 return ms5611_probe(indio_dev, &spi->dev, spi_get_device_id(spi)->name,
109 spi_get_device_id(spi)->driver_data); 111 spi_get_device_id(spi)->driver_data);
110} 112}
111 113
112static int ms5611_spi_remove(struct spi_device *spi) 114static int ms5611_spi_remove(struct spi_device *spi)
@@ -114,6 +116,17 @@ static int ms5611_spi_remove(struct spi_device *spi)
114 return ms5611_remove(spi_get_drvdata(spi)); 116 return ms5611_remove(spi_get_drvdata(spi));
115} 117}
116 118
119#if defined(CONFIG_OF)
120static const struct of_device_id ms5611_spi_matches[] = {
121 { .compatible = "meas,ms5611" },
122 { .compatible = "ms5611" },
123 { .compatible = "meas,ms5607" },
124 { .compatible = "ms5607" },
125 { }
126};
127MODULE_DEVICE_TABLE(of, ms5611_spi_matches);
128#endif
129
117static const struct spi_device_id ms5611_id[] = { 130static const struct spi_device_id ms5611_id[] = {
118 { "ms5611", MS5611 }, 131 { "ms5611", MS5611 },
119 { "ms5607", MS5607 }, 132 { "ms5607", MS5607 },
@@ -124,6 +137,7 @@ MODULE_DEVICE_TABLE(spi, ms5611_id);
124static struct spi_driver ms5611_driver = { 137static struct spi_driver ms5611_driver = {
125 .driver = { 138 .driver = {
126 .name = "ms5611", 139 .name = "ms5611",
140 .of_match_table = of_match_ptr(ms5611_spi_matches)
127 }, 141 },
128 .id_table = ms5611_id, 142 .id_table = ms5611_id,
129 .probe = ms5611_spi_probe, 143 .probe = ms5611_spi_probe,
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 172393ad34af..9e9b72a8f18f 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -64,6 +64,8 @@
64#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20 64#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
65#define ST_PRESS_LPS331AP_IHL_IRQ_ADDR 0x22 65#define ST_PRESS_LPS331AP_IHL_IRQ_ADDR 0x22
66#define ST_PRESS_LPS331AP_IHL_IRQ_MASK 0x80 66#define ST_PRESS_LPS331AP_IHL_IRQ_MASK 0x80
67#define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22
68#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40
67#define ST_PRESS_LPS331AP_MULTIREAD_BIT true 69#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
68#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500 70#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
69 71
@@ -104,6 +106,8 @@
104#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10 106#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
105#define ST_PRESS_LPS25H_IHL_IRQ_ADDR 0x22 107#define ST_PRESS_LPS25H_IHL_IRQ_ADDR 0x22
106#define ST_PRESS_LPS25H_IHL_IRQ_MASK 0x80 108#define ST_PRESS_LPS25H_IHL_IRQ_MASK 0x80
109#define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22
110#define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40
107#define ST_PRESS_LPS25H_MULTIREAD_BIT true 111#define ST_PRESS_LPS25H_MULTIREAD_BIT true
108#define ST_PRESS_LPS25H_TEMP_OFFSET 42500 112#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
109#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28 113#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
@@ -226,6 +230,9 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
226 .mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK, 230 .mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK,
227 .addr_ihl = ST_PRESS_LPS331AP_IHL_IRQ_ADDR, 231 .addr_ihl = ST_PRESS_LPS331AP_IHL_IRQ_ADDR,
228 .mask_ihl = ST_PRESS_LPS331AP_IHL_IRQ_MASK, 232 .mask_ihl = ST_PRESS_LPS331AP_IHL_IRQ_MASK,
233 .addr_od = ST_PRESS_LPS331AP_OD_IRQ_ADDR,
234 .mask_od = ST_PRESS_LPS331AP_OD_IRQ_MASK,
235 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
229 }, 236 },
230 .multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT, 237 .multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT,
231 .bootime = 2, 238 .bootime = 2,
@@ -312,6 +319,9 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
312 .mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK, 319 .mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK,
313 .addr_ihl = ST_PRESS_LPS25H_IHL_IRQ_ADDR, 320 .addr_ihl = ST_PRESS_LPS25H_IHL_IRQ_ADDR,
314 .mask_ihl = ST_PRESS_LPS25H_IHL_IRQ_MASK, 321 .mask_ihl = ST_PRESS_LPS25H_IHL_IRQ_MASK,
322 .addr_od = ST_PRESS_LPS25H_OD_IRQ_ADDR,
323 .mask_od = ST_PRESS_LPS25H_OD_IRQ_MASK,
324 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
315 }, 325 },
316 .multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT, 326 .multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
317 .bootime = 2, 327 .bootime = 2,
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index bd90d2002afb..6480f60ebf6c 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -14,27 +14,13 @@ config ASHMEM
14 It is, in theory, a good memory allocator for low-memory devices, 14 It is, in theory, a good memory allocator for low-memory devices,
15 because it can discard shared memory units when under memory pressure. 15 because it can discard shared memory units when under memory pressure.
16 16
17config ANDROID_TIMED_OUTPUT
18 bool "Timed output class driver"
19 default y
20
21config ANDROID_TIMED_GPIO
22 tristate "Android timed gpio driver"
23 depends on GPIOLIB || COMPILE_TEST
24 depends on ANDROID_TIMED_OUTPUT
25 default n
26 ---help---
27 Unlike generic gpio is to allow programs to access and manipulate gpio
28 registers from user space, timed output/gpio is a system to allow changing
29 a gpio pin and restore it automatically after a specified timeout.
30
31config ANDROID_LOW_MEMORY_KILLER 17config ANDROID_LOW_MEMORY_KILLER
32 bool "Android Low Memory Killer" 18 bool "Android Low Memory Killer"
33 ---help--- 19 ---help---
34 Registers processes to be killed when low memory conditions, this is useful 20 Registers processes to be killed when low memory conditions, this is useful
35 as there is no particular swap space on android. 21 as there is no particular swap space on android.
36 22
37 The registered process will kills according to the priorities in android init 23 The registered process will kill according to the priorities in android init
38 scripts (/init.rc), and it defines priority values with minimum free memory size 24 scripts (/init.rc), and it defines priority values with minimum free memory size
39 for each priority. 25 for each priority.
40 26
@@ -52,6 +38,7 @@ config SW_SYNC
52 bool "Software synchronization objects" 38 bool "Software synchronization objects"
53 default n 39 default n
54 depends on SYNC 40 depends on SYNC
41 depends on SYNC_FILE
55 ---help--- 42 ---help---
56 A sync object driver that uses a 32bit counter to coordinate 43 A sync object driver that uses a 32bit counter to coordinate
57 synchronization. Useful when there is no hardware primitive backing 44 synchronization. Useful when there is no hardware primitive backing
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index c7b6c99cc5ce..980d6dc4b265 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -3,8 +3,6 @@ ccflags-y += -I$(src) # needed for trace events
3obj-y += ion/ 3obj-y += ion/
4 4
5obj-$(CONFIG_ASHMEM) += ashmem.o 5obj-$(CONFIG_ASHMEM) += ashmem.o
6obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
7obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
8obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o 6obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
9obj-$(CONFIG_SYNC) += sync.o sync_debug.o 7obj-$(CONFIG_SYNC) += sync.o sync_debug.o
10obj-$(CONFIG_SW_SYNC) += sw_sync.o 8obj-$(CONFIG_SW_SYNC) += sw_sync.o
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 85365672c931..a2cf93b59016 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -184,7 +184,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
184 struct scatterlist *sg; 184 struct scatterlist *sg;
185 int i, ret; 185 int i, ret;
186 186
187 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); 187 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
188 if (!buffer) 188 if (!buffer)
189 return ERR_PTR(-ENOMEM); 189 return ERR_PTR(-ENOMEM);
190 190
@@ -341,7 +341,7 @@ static struct ion_handle *ion_handle_create(struct ion_client *client,
341{ 341{
342 struct ion_handle *handle; 342 struct ion_handle *handle;
343 343
344 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); 344 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
345 if (!handle) 345 if (!handle)
346 return ERR_PTR(-ENOMEM); 346 return ERR_PTR(-ENOMEM);
347 kref_init(&handle->ref); 347 kref_init(&handle->ref);
@@ -396,7 +396,7 @@ static int ion_handle_put_nolock(struct ion_handle *handle)
396 return ret; 396 return ret;
397} 397}
398 398
399int ion_handle_put(struct ion_handle *handle) 399static int ion_handle_put(struct ion_handle *handle)
400{ 400{
401 struct ion_client *client = handle->client; 401 struct ion_client *client = handle->client;
402 int ret; 402 int ret;
@@ -438,8 +438,8 @@ static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
438 return handle ? handle : ERR_PTR(-EINVAL); 438 return handle ? handle : ERR_PTR(-EINVAL);
439} 439}
440 440
441struct ion_handle *ion_handle_get_by_id(struct ion_client *client, 441static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
442 int id) 442 int id)
443{ 443{
444 struct ion_handle *handle; 444 struct ion_handle *handle;
445 445
@@ -827,7 +827,7 @@ struct ion_client *ion_client_create(struct ion_device *dev,
827 } 827 }
828 task_unlock(current->group_leader); 828 task_unlock(current->group_leader);
829 829
830 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); 830 client = kzalloc(sizeof(*client), GFP_KERNEL);
831 if (!client) 831 if (!client)
832 goto err_put_task_struct; 832 goto err_put_task_struct;
833 833
@@ -1035,7 +1035,7 @@ static void ion_vm_open(struct vm_area_struct *vma)
1035 struct ion_buffer *buffer = vma->vm_private_data; 1035 struct ion_buffer *buffer = vma->vm_private_data;
1036 struct ion_vma_list *vma_list; 1036 struct ion_vma_list *vma_list;
1037 1037
1038 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL); 1038 vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
1039 if (!vma_list) 1039 if (!vma_list)
1040 return; 1040 return;
1041 vma_list->vma = vma; 1041 vma_list->vma = vma;
@@ -1650,7 +1650,7 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
1650 struct ion_device *idev; 1650 struct ion_device *idev;
1651 int ret; 1651 int ret;
1652 1652
1653 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); 1653 idev = kzalloc(sizeof(*idev), GFP_KERNEL);
1654 if (!idev) 1654 if (!idev)
1655 return ERR_PTR(-ENOMEM); 1655 return ERR_PTR(-ENOMEM);
1656 1656
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 0813163f962f..e0553fee9b8a 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -55,7 +55,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
55 if (allocated_size > chunk_heap->size - chunk_heap->allocated) 55 if (allocated_size > chunk_heap->size - chunk_heap->allocated)
56 return -ENOMEM; 56 return -ENOMEM;
57 57
58 table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 58 table = kmalloc(sizeof(*table), GFP_KERNEL);
59 if (!table) 59 if (!table)
60 return -ENOMEM; 60 return -ENOMEM;
61 ret = sg_alloc_table(table, num_chunks, GFP_KERNEL); 61 ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
@@ -154,7 +154,7 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
154 if (ret) 154 if (ret)
155 return ERR_PTR(ret); 155 return ERR_PTR(ret);
156 156
157 chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL); 157 chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
158 if (!chunk_heap) 158 if (!chunk_heap)
159 return ERR_PTR(-ENOMEM); 159 return ERR_PTR(-ENOMEM);
160 160
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 5678870bff48..814a3c92a56e 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -68,6 +68,8 @@ static int __init ion_dummy_init(void)
68 int i, err; 68 int i, err;
69 69
70 idev = ion_device_create(NULL); 70 idev = ion_device_create(NULL);
71 if (IS_ERR(idev))
72 return PTR_ERR(idev);
71 heaps = kcalloc(dummy_ion_pdata.nr, sizeof(struct ion_heap *), 73 heaps = kcalloc(dummy_ion_pdata.nr, sizeof(struct ion_heap *),
72 GFP_KERNEL); 74 GFP_KERNEL);
73 if (!heaps) 75 if (!heaps)
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index 83a3af06d01c..5a396a1a8238 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -208,7 +208,7 @@ static int ion_test_open(struct inode *inode, struct file *file)
208 struct ion_test_data *data; 208 struct ion_test_data *data;
209 struct miscdevice *miscdev = file->private_data; 209 struct miscdevice *miscdev = file->private_data;
210 210
211 data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL); 211 data = kzalloc(sizeof(*data), GFP_KERNEL);
212 if (!data) 212 if (!data)
213 return -ENOMEM; 213 return -ENOMEM;
214 214
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 2509e5df7244..24d2745e9437 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -131,7 +131,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
131 if (!p) 131 if (!p)
132 continue; 132 continue;
133 133
134 if (test_tsk_thread_flag(p, TIF_MEMDIE) && 134 if (task_lmk_waiting(p) &&
135 time_before_eq(jiffies, lowmem_deathpending_timeout)) { 135 time_before_eq(jiffies, lowmem_deathpending_timeout)) {
136 task_unlock(p); 136 task_unlock(p);
137 rcu_read_unlock(); 137 rcu_read_unlock();
@@ -162,13 +162,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
162 if (selected) { 162 if (selected) {
163 task_lock(selected); 163 task_lock(selected);
164 send_sig(SIGKILL, selected, 0); 164 send_sig(SIGKILL, selected, 0);
165 /*
166 * FIXME: lowmemorykiller shouldn't abuse global OOM killer
167 * infrastructure. There is no real reason why the selected
168 * task should have access to the memory reserves.
169 */
170 if (selected->mm) 165 if (selected->mm)
171 mark_oom_victim(selected); 166 task_set_lmk_waiting(selected);
172 task_unlock(selected); 167 task_unlock(selected);
173 lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" 168 lowmem_print(1, "Killing '%s' (%d), adj %hd,\n"
174 " to free %ldkB on behalf of '%s' (%d) because\n" 169 " to free %ldkB on behalf of '%s' (%d) because\n"
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 3a8f21031440..1d14c83c7f7c 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -16,10 +16,7 @@
16 16
17#include <linux/debugfs.h> 17#include <linux/debugfs.h>
18#include <linux/export.h> 18#include <linux/export.h>
19#include <linux/file.h>
20#include <linux/fs.h>
21#include <linux/kernel.h> 19#include <linux/kernel.h>
22#include <linux/poll.h>
23#include <linux/sched.h> 20#include <linux/sched.h>
24#include <linux/seq_file.h> 21#include <linux/seq_file.h>
25#include <linux/slab.h> 22#include <linux/slab.h>
@@ -32,7 +29,6 @@
32#include "trace/sync.h" 29#include "trace/sync.h"
33 30
34static const struct fence_ops android_fence_ops; 31static const struct fence_ops android_fence_ops;
35static const struct file_operations sync_file_fops;
36 32
37struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, 33struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
38 int size, const char *name) 34 int size, const char *name)
@@ -136,170 +132,6 @@ struct fence *sync_pt_create(struct sync_timeline *obj, int size)
136} 132}
137EXPORT_SYMBOL(sync_pt_create); 133EXPORT_SYMBOL(sync_pt_create);
138 134
139static struct sync_file *sync_file_alloc(int size, const char *name)
140{
141 struct sync_file *sync_file;
142
143 sync_file = kzalloc(size, GFP_KERNEL);
144 if (!sync_file)
145 return NULL;
146
147 sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops,
148 sync_file, 0);
149 if (IS_ERR(sync_file->file))
150 goto err;
151
152 kref_init(&sync_file->kref);
153 strlcpy(sync_file->name, name, sizeof(sync_file->name));
154
155 init_waitqueue_head(&sync_file->wq);
156
157 return sync_file;
158
159err:
160 kfree(sync_file);
161 return NULL;
162}
163
164static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
165{
166 struct sync_file_cb *check;
167 struct sync_file *sync_file;
168
169 check = container_of(cb, struct sync_file_cb, cb);
170 sync_file = check->sync_file;
171
172 if (atomic_dec_and_test(&sync_file->status))
173 wake_up_all(&sync_file->wq);
174}
175
176/* TODO: implement a create which takes more that one fence */
177struct sync_file *sync_file_create(const char *name, struct fence *fence)
178{
179 struct sync_file *sync_file;
180
181 sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]),
182 name);
183 if (!sync_file)
184 return NULL;
185
186 sync_file->num_fences = 1;
187 atomic_set(&sync_file->status, 1);
188
189 sync_file->cbs[0].fence = fence;
190 sync_file->cbs[0].sync_file = sync_file;
191 if (fence_add_callback(fence, &sync_file->cbs[0].cb,
192 fence_check_cb_func))
193 atomic_dec(&sync_file->status);
194
195 sync_file_debug_add(sync_file);
196
197 return sync_file;
198}
199EXPORT_SYMBOL(sync_file_create);
200
201struct sync_file *sync_file_fdget(int fd)
202{
203 struct file *file = fget(fd);
204
205 if (!file)
206 return NULL;
207
208 if (file->f_op != &sync_file_fops)
209 goto err;
210
211 return file->private_data;
212
213err:
214 fput(file);
215 return NULL;
216}
217EXPORT_SYMBOL(sync_file_fdget);
218
219void sync_file_put(struct sync_file *sync_file)
220{
221 fput(sync_file->file);
222}
223EXPORT_SYMBOL(sync_file_put);
224
225void sync_file_install(struct sync_file *sync_file, int fd)
226{
227 fd_install(fd, sync_file->file);
228}
229EXPORT_SYMBOL(sync_file_install);
230
231static void sync_file_add_pt(struct sync_file *sync_file, int *i,
232 struct fence *fence)
233{
234 sync_file->cbs[*i].fence = fence;
235 sync_file->cbs[*i].sync_file = sync_file;
236
237 if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
238 fence_check_cb_func)) {
239 fence_get(fence);
240 (*i)++;
241 }
242}
243
244struct sync_file *sync_file_merge(const char *name,
245 struct sync_file *a, struct sync_file *b)
246{
247 int num_fences = a->num_fences + b->num_fences;
248 struct sync_file *sync_file;
249 int i, i_a, i_b;
250 unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
251
252 sync_file = sync_file_alloc(size, name);
253 if (!sync_file)
254 return NULL;
255
256 atomic_set(&sync_file->status, num_fences);
257
258 /*
259 * Assume sync_file a and b are both ordered and have no
260 * duplicates with the same context.
261 *
262 * If a sync_file can only be created with sync_file_merge
263 * and sync_file_create, this is a reasonable assumption.
264 */
265 for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
266 struct fence *pt_a = a->cbs[i_a].fence;
267 struct fence *pt_b = b->cbs[i_b].fence;
268
269 if (pt_a->context < pt_b->context) {
270 sync_file_add_pt(sync_file, &i, pt_a);
271
272 i_a++;
273 } else if (pt_a->context > pt_b->context) {
274 sync_file_add_pt(sync_file, &i, pt_b);
275
276 i_b++;
277 } else {
278 if (pt_a->seqno - pt_b->seqno <= INT_MAX)
279 sync_file_add_pt(sync_file, &i, pt_a);
280 else
281 sync_file_add_pt(sync_file, &i, pt_b);
282
283 i_a++;
284 i_b++;
285 }
286 }
287
288 for (; i_a < a->num_fences; i_a++)
289 sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
290
291 for (; i_b < b->num_fences; i_b++)
292 sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
293
294 if (num_fences > i)
295 atomic_sub(num_fences - i, &sync_file->status);
296 sync_file->num_fences = i;
297
298 sync_file_debug_add(sync_file);
299 return sync_file;
300}
301EXPORT_SYMBOL(sync_file_merge);
302
303static const char *android_fence_get_driver_name(struct fence *fence) 135static const char *android_fence_get_driver_name(struct fence *fence)
304{ 136{
305 struct sync_timeline *parent = fence_parent(fence); 137 struct sync_timeline *parent = fence_parent(fence);
@@ -387,191 +219,3 @@ static const struct fence_ops android_fence_ops = {
387 .fence_value_str = android_fence_value_str, 219 .fence_value_str = android_fence_value_str,
388 .timeline_value_str = android_fence_timeline_value_str, 220 .timeline_value_str = android_fence_timeline_value_str,
389}; 221};
390
391static void sync_file_free(struct kref *kref)
392{
393 struct sync_file *sync_file = container_of(kref, struct sync_file,
394 kref);
395 int i;
396
397 for (i = 0; i < sync_file->num_fences; ++i) {
398 fence_remove_callback(sync_file->cbs[i].fence,
399 &sync_file->cbs[i].cb);
400 fence_put(sync_file->cbs[i].fence);
401 }
402
403 kfree(sync_file);
404}
405
406static int sync_file_release(struct inode *inode, struct file *file)
407{
408 struct sync_file *sync_file = file->private_data;
409
410 sync_file_debug_remove(sync_file);
411
412 kref_put(&sync_file->kref, sync_file_free);
413 return 0;
414}
415
416static unsigned int sync_file_poll(struct file *file, poll_table *wait)
417{
418 struct sync_file *sync_file = file->private_data;
419 int status;
420
421 poll_wait(file, &sync_file->wq, wait);
422
423 status = atomic_read(&sync_file->status);
424
425 if (!status)
426 return POLLIN;
427 if (status < 0)
428 return POLLERR;
429 return 0;
430}
431
432static long sync_file_ioctl_merge(struct sync_file *sync_file,
433 unsigned long arg)
434{
435 int fd = get_unused_fd_flags(O_CLOEXEC);
436 int err;
437 struct sync_file *fence2, *fence3;
438 struct sync_merge_data data;
439
440 if (fd < 0)
441 return fd;
442
443 if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
444 err = -EFAULT;
445 goto err_put_fd;
446 }
447
448 fence2 = sync_file_fdget(data.fd2);
449 if (!fence2) {
450 err = -ENOENT;
451 goto err_put_fd;
452 }
453
454 data.name[sizeof(data.name) - 1] = '\0';
455 fence3 = sync_file_merge(data.name, sync_file, fence2);
456 if (!fence3) {
457 err = -ENOMEM;
458 goto err_put_fence2;
459 }
460
461 data.fence = fd;
462 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
463 err = -EFAULT;
464 goto err_put_fence3;
465 }
466
467 sync_file_install(fence3, fd);
468 sync_file_put(fence2);
469 return 0;
470
471err_put_fence3:
472 sync_file_put(fence3);
473
474err_put_fence2:
475 sync_file_put(fence2);
476
477err_put_fd:
478 put_unused_fd(fd);
479 return err;
480}
481
482static int sync_fill_fence_info(struct fence *fence, void *data, int size)
483{
484 struct sync_fence_info *info = data;
485
486 if (size < sizeof(*info))
487 return -ENOMEM;
488
489 strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
490 sizeof(info->obj_name));
491 strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
492 sizeof(info->driver_name));
493 if (fence_is_signaled(fence))
494 info->status = fence->status >= 0 ? 1 : fence->status;
495 else
496 info->status = 0;
497 info->timestamp_ns = ktime_to_ns(fence->timestamp);
498
499 return sizeof(*info);
500}
501
502static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
503 unsigned long arg)
504{
505 struct sync_file_info *info;
506 __u32 size;
507 __u32 len = 0;
508 int ret, i;
509
510 if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
511 return -EFAULT;
512
513 if (size < sizeof(struct sync_file_info))
514 return -EINVAL;
515
516 if (size > 4096)
517 size = 4096;
518
519 info = kzalloc(size, GFP_KERNEL);
520 if (!info)
521 return -ENOMEM;
522
523 strlcpy(info->name, sync_file->name, sizeof(info->name));
524 info->status = atomic_read(&sync_file->status);
525 if (info->status >= 0)
526 info->status = !info->status;
527
528 len = sizeof(struct sync_file_info);
529
530 for (i = 0; i < sync_file->num_fences; ++i) {
531 struct fence *fence = sync_file->cbs[i].fence;
532
533 ret = sync_fill_fence_info(fence, (u8 *)info + len, size - len);
534
535 if (ret < 0)
536 goto out;
537
538 len += ret;
539 }
540
541 info->len = len;
542
543 if (copy_to_user((void __user *)arg, info, len))
544 ret = -EFAULT;
545 else
546 ret = 0;
547
548out:
549 kfree(info);
550
551 return ret;
552}
553
554static long sync_file_ioctl(struct file *file, unsigned int cmd,
555 unsigned long arg)
556{
557 struct sync_file *sync_file = file->private_data;
558
559 switch (cmd) {
560 case SYNC_IOC_MERGE:
561 return sync_file_ioctl_merge(sync_file, arg);
562
563 case SYNC_IOC_FENCE_INFO:
564 return sync_file_ioctl_fence_info(sync_file, arg);
565
566 default:
567 return -ENOTTY;
568 }
569}
570
571static const struct file_operations sync_file_fops = {
572 .release = sync_file_release,
573 .poll = sync_file_poll,
574 .unlocked_ioctl = sync_file_ioctl,
575 .compat_ioctl = sync_file_ioctl,
576};
577
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index d2a173433a7d..b56885c14839 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -20,10 +20,10 @@
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/fence.h> 21#include <linux/fence.h>
22 22
23#include "uapi/sync.h" 23#include <linux/sync_file.h>
24#include <uapi/linux/sync_file.h>
24 25
25struct sync_timeline; 26struct sync_timeline;
26struct sync_file;
27 27
28/** 28/**
29 * struct sync_timeline_ops - sync object implementation ops 29 * struct sync_timeline_ops - sync object implementation ops
@@ -86,38 +86,6 @@ static inline struct sync_timeline *fence_parent(struct fence *fence)
86 child_list_lock); 86 child_list_lock);
87} 87}
88 88
89struct sync_file_cb {
90 struct fence_cb cb;
91 struct fence *fence;
92 struct sync_file *sync_file;
93};
94
95/**
96 * struct sync_file - sync file to export to the userspace
97 * @file: file representing this fence
98 * @kref: reference count on fence.
99 * @name: name of sync_file. Useful for debugging
100 * @sync_file_list: membership in global file list
101 * @num_fences number of sync_pts in the fence
102 * @wq: wait queue for fence signaling
103 * @status: 0: signaled, >0:active, <0: error
104 * @cbs: sync_pts callback information
105 */
106struct sync_file {
107 struct file *file;
108 struct kref kref;
109 char name[32];
110#ifdef CONFIG_DEBUG_FS
111 struct list_head sync_file_list;
112#endif
113 int num_fences;
114
115 wait_queue_head_t wq;
116 atomic_t status;
117
118 struct sync_file_cb cbs[];
119};
120
121/* 89/*
122 * API for sync_timeline implementers 90 * API for sync_timeline implementers
123 */ 91 */
@@ -167,61 +135,6 @@ void sync_timeline_signal(struct sync_timeline *obj);
167 */ 135 */
168struct fence *sync_pt_create(struct sync_timeline *parent, int size); 136struct fence *sync_pt_create(struct sync_timeline *parent, int size);
169 137
170/**
171 * sync_fence_create() - creates a sync fence
172 * @name: name of fence to create
173 * @fence: fence to add to the sync_fence
174 *
175 * Creates a sync_file containg @fence. Once this is called, the sync_file
176 * takes ownership of @fence.
177 */
178struct sync_file *sync_file_create(const char *name, struct fence *fence);
179
180/*
181 * API for sync_file consumers
182 */
183
184/**
185 * sync_file_merge() - merge two sync_files
186 * @name: name of new fence
187 * @a: sync_file a
188 * @b: sync_file b
189 *
190 * Creates a new sync_file which contains copies of all the fences in both
191 * @a and @b. @a and @b remain valid, independent sync_file. Returns the
192 * new merged sync_file or NULL in case of error.
193 */
194struct sync_file *sync_file_merge(const char *name,
195 struct sync_file *a, struct sync_file *b);
196
197/**
198 * sync_file_fdget() - get a sync_file from an fd
199 * @fd: fd referencing a fence
200 *
201 * Ensures @fd references a valid sync_file, increments the refcount of the
202 * backing file. Returns the sync_file or NULL in case of error.
203 */
204struct sync_file *sync_file_fdget(int fd);
205
206/**
207 * sync_file_put() - puts a reference of a sync_file
208 * @sync_file: sync_file to put
209 *
210 * Puts a reference on @sync_fence. If this is the last reference, the
211 * sync_fil and all it's sync_pts will be freed
212 */
213void sync_file_put(struct sync_file *sync_file);
214
215/**
216 * sync_file_install() - installs a sync_file into a file descriptor
217 * @sync_file: sync_file to install
218 * @fd: file descriptor in which to install the fence
219 *
220 * Installs @sync_file into @fd. @fd's should be acquired through
221 * get_unused_fd_flags(O_CLOEXEC).
222 */
223void sync_file_install(struct sync_file *sync_file, int fd);
224
225#ifdef CONFIG_DEBUG_FS 138#ifdef CONFIG_DEBUG_FS
226 139
227void sync_timeline_debug_add(struct sync_timeline *obj); 140void sync_timeline_debug_add(struct sync_timeline *obj);
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
index 5a7ec58fbc09..5f57499c98bf 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/staging/android/sync_debug.c
@@ -26,6 +26,7 @@
26#include <linux/uaccess.h> 26#include <linux/uaccess.h>
27#include <linux/anon_inodes.h> 27#include <linux/anon_inodes.h>
28#include <linux/time64.h> 28#include <linux/time64.h>
29#include <linux/sync_file.h>
29#include "sw_sync.h" 30#include "sw_sync.h"
30 31
31#ifdef CONFIG_DEBUG_FS 32#ifdef CONFIG_DEBUG_FS
@@ -262,8 +263,7 @@ static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
262 goto err; 263 goto err;
263 } 264 }
264 265
265 data.name[sizeof(data.name) - 1] = '\0'; 266 sync_file = sync_file_create(fence);
266 sync_file = sync_file_create(data.name, fence);
267 if (!sync_file) { 267 if (!sync_file) {
268 fence_put(fence); 268 fence_put(fence);
269 err = -ENOMEM; 269 err = -ENOMEM;
@@ -272,12 +272,12 @@ static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
272 272
273 data.fence = fd; 273 data.fence = fd;
274 if (copy_to_user((void __user *)arg, &data, sizeof(data))) { 274 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
275 sync_file_put(sync_file); 275 fput(sync_file->file);
276 err = -EFAULT; 276 err = -EFAULT;
277 goto err; 277 goto err;
278 } 278 }
279 279
280 sync_file_install(sync_file, fd); 280 fd_install(fd, sync_file->file);
281 281
282 return 0; 282 return 0;
283 283
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
deleted file mode 100644
index 914fd1005467..000000000000
--- a/drivers/staging/android/timed_gpio.c
+++ /dev/null
@@ -1,166 +0,0 @@
1/* drivers/misc/timed_gpio.c
2 *
3 * Copyright (C) 2008 Google, Inc.
4 * Author: Mike Lockwood <lockwood@android.com>
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <linux/slab.h>
20#include <linux/hrtimer.h>
21#include <linux/err.h>
22#include <linux/gpio.h>
23#include <linux/ktime.h>
24
25#include "timed_output.h"
26#include "timed_gpio.h"
27
28struct timed_gpio_data {
29 struct timed_output_dev dev;
30 struct hrtimer timer;
31 spinlock_t lock;
32 unsigned gpio;
33 int max_timeout;
34 u8 active_low;
35};
36
37static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
38{
39 struct timed_gpio_data *data =
40 container_of(timer, struct timed_gpio_data, timer);
41
42 gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
43 return HRTIMER_NORESTART;
44}
45
46static int gpio_get_time(struct timed_output_dev *dev)
47{
48 struct timed_gpio_data *data;
49 ktime_t t;
50
51 data = container_of(dev, struct timed_gpio_data, dev);
52
53 if (!hrtimer_active(&data->timer))
54 return 0;
55
56 t = hrtimer_get_remaining(&data->timer);
57
58 return ktime_to_ms(t);
59}
60
61static void gpio_enable(struct timed_output_dev *dev, int value)
62{
63 struct timed_gpio_data *data =
64 container_of(dev, struct timed_gpio_data, dev);
65 unsigned long flags;
66
67 spin_lock_irqsave(&data->lock, flags);
68
69 /* cancel previous timer and set GPIO according to value */
70 hrtimer_cancel(&data->timer);
71 gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
72
73 if (value > 0) {
74 if (value > data->max_timeout)
75 value = data->max_timeout;
76
77 hrtimer_start(&data->timer,
78 ktime_set(value / 1000, (value % 1000) * 1000000),
79 HRTIMER_MODE_REL);
80 }
81
82 spin_unlock_irqrestore(&data->lock, flags);
83}
84
85static int timed_gpio_probe(struct platform_device *pdev)
86{
87 struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
88 struct timed_gpio *cur_gpio;
89 struct timed_gpio_data *gpio_data, *gpio_dat;
90 int i, ret;
91
92 if (!pdata)
93 return -EBUSY;
94
95 gpio_data = devm_kcalloc(&pdev->dev, pdata->num_gpios,
96 sizeof(*gpio_data), GFP_KERNEL);
97 if (!gpio_data)
98 return -ENOMEM;
99
100 for (i = 0; i < pdata->num_gpios; i++) {
101 cur_gpio = &pdata->gpios[i];
102 gpio_dat = &gpio_data[i];
103
104 hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
105 HRTIMER_MODE_REL);
106 gpio_dat->timer.function = gpio_timer_func;
107 spin_lock_init(&gpio_dat->lock);
108
109 gpio_dat->dev.name = cur_gpio->name;
110 gpio_dat->dev.get_time = gpio_get_time;
111 gpio_dat->dev.enable = gpio_enable;
112 ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
113 if (ret < 0)
114 goto err_out;
115 ret = timed_output_dev_register(&gpio_dat->dev);
116 if (ret < 0) {
117 gpio_free(cur_gpio->gpio);
118 goto err_out;
119 }
120
121 gpio_dat->gpio = cur_gpio->gpio;
122 gpio_dat->max_timeout = cur_gpio->max_timeout;
123 gpio_dat->active_low = cur_gpio->active_low;
124 gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
125 }
126
127 platform_set_drvdata(pdev, gpio_data);
128
129 return 0;
130
131err_out:
132 while (--i >= 0) {
133 timed_output_dev_unregister(&gpio_data[i].dev);
134 gpio_free(gpio_data[i].gpio);
135 }
136
137 return ret;
138}
139
140static int timed_gpio_remove(struct platform_device *pdev)
141{
142 struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
143 struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
144 int i;
145
146 for (i = 0; i < pdata->num_gpios; i++) {
147 timed_output_dev_unregister(&gpio_data[i].dev);
148 gpio_free(gpio_data[i].gpio);
149 }
150
151 return 0;
152}
153
154static struct platform_driver timed_gpio_driver = {
155 .probe = timed_gpio_probe,
156 .remove = timed_gpio_remove,
157 .driver = {
158 .name = TIMED_GPIO_NAME,
159 },
160};
161
162module_platform_driver(timed_gpio_driver);
163
164MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
165MODULE_DESCRIPTION("timed gpio driver");
166MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
deleted file mode 100644
index d29e169d7ebe..000000000000
--- a/drivers/staging/android/timed_gpio.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/* include/linux/timed_gpio.h
2 *
3 * Copyright (C) 2008 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14*/
15
16#ifndef _LINUX_TIMED_GPIO_H
17#define _LINUX_TIMED_GPIO_H
18
19#define TIMED_GPIO_NAME "timed-gpio"
20
21struct timed_gpio {
22 const char *name;
23 unsigned gpio;
24 int max_timeout;
25 u8 active_low;
26};
27
28struct timed_gpio_platform_data {
29 int num_gpios;
30 struct timed_gpio *gpios;
31};
32
33#endif
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
deleted file mode 100644
index aff9cdb007e5..000000000000
--- a/drivers/staging/android/timed_output.c
+++ /dev/null
@@ -1,110 +0,0 @@
1/* drivers/misc/timed_output.c
2 *
3 * Copyright (C) 2009 Google, Inc.
4 * Author: Mike Lockwood <lockwood@android.com>
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#define pr_fmt(fmt) "timed_output: " fmt
18
19#include <linux/init.h>
20#include <linux/export.h>
21#include <linux/types.h>
22#include <linux/device.h>
23#include <linux/fs.h>
24#include <linux/err.h>
25
26#include "timed_output.h"
27
28static struct class *timed_output_class;
29static atomic_t device_count;
30
31static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
32 char *buf)
33{
34 struct timed_output_dev *tdev = dev_get_drvdata(dev);
35 int remaining = tdev->get_time(tdev);
36
37 return sprintf(buf, "%d\n", remaining);
38}
39
40static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
41 const char *buf, size_t size)
42{
43 struct timed_output_dev *tdev = dev_get_drvdata(dev);
44 int value;
45 int rc;
46
47 rc = kstrtoint(buf, 0, &value);
48 if (rc != 0)
49 return -EINVAL;
50
51 tdev->enable(tdev, value);
52
53 return size;
54}
55static DEVICE_ATTR_RW(enable);
56
57static struct attribute *timed_output_attrs[] = {
58 &dev_attr_enable.attr,
59 NULL,
60};
61ATTRIBUTE_GROUPS(timed_output);
62
63static int create_timed_output_class(void)
64{
65 if (!timed_output_class) {
66 timed_output_class = class_create(THIS_MODULE, "timed_output");
67 if (IS_ERR(timed_output_class))
68 return PTR_ERR(timed_output_class);
69 atomic_set(&device_count, 0);
70 timed_output_class->dev_groups = timed_output_groups;
71 }
72
73 return 0;
74}
75
76int timed_output_dev_register(struct timed_output_dev *tdev)
77{
78 int ret;
79
80 if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
81 return -EINVAL;
82
83 ret = create_timed_output_class();
84 if (ret < 0)
85 return ret;
86
87 tdev->index = atomic_inc_return(&device_count);
88 tdev->dev = device_create(timed_output_class, NULL,
89 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
90 if (IS_ERR(tdev->dev))
91 return PTR_ERR(tdev->dev);
92
93 dev_set_drvdata(tdev->dev, tdev);
94 tdev->state = 0;
95 return 0;
96}
97EXPORT_SYMBOL_GPL(timed_output_dev_register);
98
99void timed_output_dev_unregister(struct timed_output_dev *tdev)
100{
101 tdev->enable(tdev, 0);
102 device_destroy(timed_output_class, MKDEV(0, tdev->index));
103}
104EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
105
106static int __init timed_output_init(void)
107{
108 return create_timed_output_class();
109}
110device_initcall(timed_output_init);
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
deleted file mode 100644
index 13d2ca51cbe8..000000000000
--- a/drivers/staging/android/timed_output.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/* include/linux/timed_output.h
2 *
3 * Copyright (C) 2008 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14*/
15
16#ifndef _LINUX_TIMED_OUTPUT_H
17#define _LINUX_TIMED_OUTPUT_H
18
19struct timed_output_dev {
20 const char *name;
21
22 /* enable the output and set the timer */
23 void (*enable)(struct timed_output_dev *sdev, int timeout);
24
25 /* returns the current number of milliseconds remaining on the timer */
26 int (*get_time)(struct timed_output_dev *sdev);
27
28 /* private data */
29 struct device *dev;
30 int index;
31 int state;
32};
33
34int timed_output_dev_register(struct timed_output_dev *dev);
35void timed_output_dev_unregister(struct timed_output_dev *dev);
36
37#endif
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index bb63ece4d766..4de4fd06eebc 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -87,10 +87,10 @@ static const struct board_staging_clk lcdc0_clocks[] __initconst = {
87 87
88static const struct board_staging_dev armadillo800eva_devices[] __initconst = { 88static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
89 { 89 {
90 .pdev = &lcdc0_device, 90 .pdev = &lcdc0_device,
91 .clocks = lcdc0_clocks, 91 .clocks = lcdc0_clocks,
92 .nclocks = ARRAY_SIZE(lcdc0_clocks), 92 .nclocks = ARRAY_SIZE(lcdc0_clocks),
93 .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1" 93 .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
94 }, 94 },
95}; 95};
96 96
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index 90c28016c6c1..c7d7682b1412 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -80,14 +80,14 @@ static void __comedi_buf_free(struct comedi_device *dev,
80 80
81static void __comedi_buf_alloc(struct comedi_device *dev, 81static void __comedi_buf_alloc(struct comedi_device *dev,
82 struct comedi_subdevice *s, 82 struct comedi_subdevice *s,
83 unsigned n_pages) 83 unsigned int n_pages)
84{ 84{
85 struct comedi_async *async = s->async; 85 struct comedi_async *async = s->async;
86 struct page **pages = NULL; 86 struct page **pages = NULL;
87 struct comedi_buf_map *bm; 87 struct comedi_buf_map *bm;
88 struct comedi_buf_page *buf; 88 struct comedi_buf_page *buf;
89 unsigned long flags; 89 unsigned long flags;
90 unsigned i; 90 unsigned int i;
91 91
92 if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) { 92 if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
93 dev_err(dev->class_dev, 93 dev_err(dev->class_dev,
@@ -208,7 +208,7 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
208 208
209 /* allocate new buffer */ 209 /* allocate new buffer */
210 if (new_size) { 210 if (new_size) {
211 unsigned n_pages = new_size >> PAGE_SHIFT; 211 unsigned int n_pages = new_size >> PAGE_SHIFT;
212 212
213 __comedi_buf_alloc(dev, s, n_pages); 213 __comedi_buf_alloc(dev, s, n_pages);
214 214
@@ -302,7 +302,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
302{ 302{
303 struct comedi_async *async = s->async; 303 struct comedi_async *async = s->async;
304 unsigned int count = 0; 304 unsigned int count = 0;
305 const unsigned num_sample_bytes = comedi_bytes_per_sample(s); 305 const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
306 306
307 if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) { 307 if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
308 async->munge_count += num_bytes; 308 async->munge_count += num_bytes;
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(comedi_buf_write_free);
395unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s) 395unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
396{ 396{
397 struct comedi_async *async = s->async; 397 struct comedi_async *async = s->async;
398 unsigned num_bytes; 398 unsigned int num_bytes;
399 399
400 if (!async) 400 if (!async)
401 return 0; 401 return 0;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 7c7b477b0f28..629080f39db0 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -186,7 +186,7 @@ static bool comedi_clear_board_dev(struct comedi_device *dev)
186 return cleared; 186 return cleared;
187} 187}
188 188
189static struct comedi_device *comedi_clear_board_minor(unsigned minor) 189static struct comedi_device *comedi_clear_board_minor(unsigned int minor)
190{ 190{
191 struct comedi_device *dev; 191 struct comedi_device *dev;
192 192
@@ -209,8 +209,8 @@ static void comedi_free_board_dev(struct comedi_device *dev)
209 } 209 }
210} 210}
211 211
212static struct comedi_subdevice 212static struct comedi_subdevice *
213*comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned minor) 213comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned int minor)
214{ 214{
215 struct comedi_subdevice *s; 215 struct comedi_subdevice *s;
216 unsigned int i = minor - COMEDI_NUM_BOARD_MINORS; 216 unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
@@ -223,7 +223,7 @@ static struct comedi_subdevice
223 return s; 223 return s;
224} 224}
225 225
226static struct comedi_device *comedi_dev_get_from_board_minor(unsigned minor) 226static struct comedi_device *comedi_dev_get_from_board_minor(unsigned int minor)
227{ 227{
228 struct comedi_device *dev; 228 struct comedi_device *dev;
229 229
@@ -233,7 +233,8 @@ static struct comedi_device *comedi_dev_get_from_board_minor(unsigned minor)
233 return dev; 233 return dev;
234} 234}
235 235
236static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor) 236static struct comedi_device *
237comedi_dev_get_from_subdevice_minor(unsigned int minor)
237{ 238{
238 struct comedi_device *dev; 239 struct comedi_device *dev;
239 struct comedi_subdevice *s; 240 struct comedi_subdevice *s;
@@ -258,7 +259,7 @@ static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor)
258 * reference incremented. Return NULL if no COMEDI device exists with the 259 * reference incremented. Return NULL if no COMEDI device exists with the
259 * specified minor device number. 260 * specified minor device number.
260 */ 261 */
261struct comedi_device *comedi_dev_get_from_minor(unsigned minor) 262struct comedi_device *comedi_dev_get_from_minor(unsigned int minor)
262{ 263{
263 if (minor < COMEDI_NUM_BOARD_MINORS) 264 if (minor < COMEDI_NUM_BOARD_MINORS)
264 return comedi_dev_get_from_board_minor(minor); 265 return comedi_dev_get_from_board_minor(minor);
@@ -342,7 +343,8 @@ static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file)
342} 343}
343 344
344static int resize_async_buffer(struct comedi_device *dev, 345static int resize_async_buffer(struct comedi_device *dev,
345 struct comedi_subdevice *s, unsigned new_size) 346 struct comedi_subdevice *s,
347 unsigned int new_size)
346{ 348{
347 struct comedi_async *async = s->async; 349 struct comedi_async *async = s->async;
348 int retval; 350 int retval;
@@ -616,19 +618,20 @@ static struct attribute *comedi_dev_attrs[] = {
616ATTRIBUTE_GROUPS(comedi_dev); 618ATTRIBUTE_GROUPS(comedi_dev);
617 619
618static void __comedi_clear_subdevice_runflags(struct comedi_subdevice *s, 620static void __comedi_clear_subdevice_runflags(struct comedi_subdevice *s,
619 unsigned bits) 621 unsigned int bits)
620{ 622{
621 s->runflags &= ~bits; 623 s->runflags &= ~bits;
622} 624}
623 625
624static void __comedi_set_subdevice_runflags(struct comedi_subdevice *s, 626static void __comedi_set_subdevice_runflags(struct comedi_subdevice *s,
625 unsigned bits) 627 unsigned int bits)
626{ 628{
627 s->runflags |= bits; 629 s->runflags |= bits;
628} 630}
629 631
630static void comedi_update_subdevice_runflags(struct comedi_subdevice *s, 632static void comedi_update_subdevice_runflags(struct comedi_subdevice *s,
631 unsigned mask, unsigned bits) 633 unsigned int mask,
634 unsigned int bits)
632{ 635{
633 unsigned long flags; 636 unsigned long flags;
634 637
@@ -638,15 +641,15 @@ static void comedi_update_subdevice_runflags(struct comedi_subdevice *s,
638 spin_unlock_irqrestore(&s->spin_lock, flags); 641 spin_unlock_irqrestore(&s->spin_lock, flags);
639} 642}
640 643
641static unsigned __comedi_get_subdevice_runflags(struct comedi_subdevice *s) 644static unsigned int __comedi_get_subdevice_runflags(struct comedi_subdevice *s)
642{ 645{
643 return s->runflags; 646 return s->runflags;
644} 647}
645 648
646static unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s) 649static unsigned int comedi_get_subdevice_runflags(struct comedi_subdevice *s)
647{ 650{
648 unsigned long flags; 651 unsigned long flags;
649 unsigned runflags; 652 unsigned int runflags;
650 653
651 spin_lock_irqsave(&s->spin_lock, flags); 654 spin_lock_irqsave(&s->spin_lock, flags);
652 runflags = __comedi_get_subdevice_runflags(s); 655 runflags = __comedi_get_subdevice_runflags(s);
@@ -654,12 +657,12 @@ static unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s)
654 return runflags; 657 return runflags;
655} 658}
656 659
657static bool comedi_is_runflags_running(unsigned runflags) 660static bool comedi_is_runflags_running(unsigned int runflags)
658{ 661{
659 return runflags & COMEDI_SRF_RUNNING; 662 return runflags & COMEDI_SRF_RUNNING;
660} 663}
661 664
662static bool comedi_is_runflags_in_error(unsigned runflags) 665static bool comedi_is_runflags_in_error(unsigned int runflags)
663{ 666{
664 return runflags & COMEDI_SRF_ERROR; 667 return runflags & COMEDI_SRF_ERROR;
665} 668}
@@ -673,7 +676,7 @@ static bool comedi_is_runflags_in_error(unsigned runflags)
673 */ 676 */
674bool comedi_is_subdevice_running(struct comedi_subdevice *s) 677bool comedi_is_subdevice_running(struct comedi_subdevice *s)
675{ 678{
676 unsigned runflags = comedi_get_subdevice_runflags(s); 679 unsigned int runflags = comedi_get_subdevice_runflags(s);
677 680
678 return comedi_is_runflags_running(runflags); 681 return comedi_is_runflags_running(runflags);
679} 682}
@@ -681,14 +684,14 @@ EXPORT_SYMBOL_GPL(comedi_is_subdevice_running);
681 684
682static bool __comedi_is_subdevice_running(struct comedi_subdevice *s) 685static bool __comedi_is_subdevice_running(struct comedi_subdevice *s)
683{ 686{
684 unsigned runflags = __comedi_get_subdevice_runflags(s); 687 unsigned int runflags = __comedi_get_subdevice_runflags(s);
685 688
686 return comedi_is_runflags_running(runflags); 689 return comedi_is_runflags_running(runflags);
687} 690}
688 691
689bool comedi_can_auto_free_spriv(struct comedi_subdevice *s) 692bool comedi_can_auto_free_spriv(struct comedi_subdevice *s)
690{ 693{
691 unsigned runflags = __comedi_get_subdevice_runflags(s); 694 unsigned int runflags = __comedi_get_subdevice_runflags(s);
692 695
693 return runflags & COMEDI_SRF_FREE_SPRIV; 696 return runflags & COMEDI_SRF_FREE_SPRIV;
694} 697}
@@ -2038,7 +2041,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
2038static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd, 2041static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
2039 unsigned long arg) 2042 unsigned long arg)
2040{ 2043{
2041 unsigned minor = iminor(file_inode(file)); 2044 unsigned int minor = iminor(file_inode(file));
2042 struct comedi_file *cfp = file->private_data; 2045 struct comedi_file *cfp = file->private_data;
2043 struct comedi_device *dev = cfp->dev; 2046 struct comedi_device *dev = cfp->dev;
2044 int rc; 2047 int rc;
@@ -2342,7 +2345,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
2342 2345
2343 add_wait_queue(&async->wait_head, &wait); 2346 add_wait_queue(&async->wait_head, &wait);
2344 while (count == 0 && !retval) { 2347 while (count == 0 && !retval) {
2345 unsigned runflags; 2348 unsigned int runflags;
2346 unsigned int wp, n1, n2; 2349 unsigned int wp, n1, n2;
2347 2350
2348 set_current_state(TASK_INTERRUPTIBLE); 2351 set_current_state(TASK_INTERRUPTIBLE);
@@ -2485,7 +2488,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
2485 n = min_t(size_t, m, nbytes); 2488 n = min_t(size_t, m, nbytes);
2486 2489
2487 if (n == 0) { 2490 if (n == 0) {
2488 unsigned runflags = comedi_get_subdevice_runflags(s); 2491 unsigned int runflags =
2492 comedi_get_subdevice_runflags(s);
2489 2493
2490 if (!comedi_is_runflags_running(runflags)) { 2494 if (!comedi_is_runflags_running(runflags)) {
2491 if (comedi_is_runflags_in_error(runflags)) 2495 if (comedi_is_runflags_in_error(runflags))
@@ -2573,7 +2577,7 @@ out:
2573 2577
2574static int comedi_open(struct inode *inode, struct file *file) 2578static int comedi_open(struct inode *inode, struct file *file)
2575{ 2579{
2576 const unsigned minor = iminor(inode); 2580 const unsigned int minor = iminor(inode);
2577 struct comedi_file *cfp; 2581 struct comedi_file *cfp;
2578 struct comedi_device *dev = comedi_dev_get_from_minor(minor); 2582 struct comedi_device *dev = comedi_dev_get_from_minor(minor);
2579 int rc; 2583 int rc;
@@ -2733,7 +2737,7 @@ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device)
2733{ 2737{
2734 struct comedi_device *dev; 2738 struct comedi_device *dev;
2735 struct device *csdev; 2739 struct device *csdev;
2736 unsigned i; 2740 unsigned int i;
2737 2741
2738 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2742 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2739 if (!dev) 2743 if (!dev)
@@ -2791,7 +2795,7 @@ int comedi_alloc_subdevice_minor(struct comedi_subdevice *s)
2791{ 2795{
2792 struct comedi_device *dev = s->device; 2796 struct comedi_device *dev = s->device;
2793 struct device *csdev; 2797 struct device *csdev;
2794 unsigned i; 2798 unsigned int i;
2795 2799
2796 mutex_lock(&comedi_subdevice_minor_table_lock); 2800 mutex_lock(&comedi_subdevice_minor_table_lock);
2797 for (i = 0; i < COMEDI_NUM_SUBDEVICE_MINORS; ++i) { 2801 for (i = 0; i < COMEDI_NUM_SUBDEVICE_MINORS; ++i) {
@@ -2841,7 +2845,7 @@ void comedi_free_subdevice_minor(struct comedi_subdevice *s)
2841static void comedi_cleanup_board_minors(void) 2845static void comedi_cleanup_board_minors(void)
2842{ 2846{
2843 struct comedi_device *dev; 2847 struct comedi_device *dev;
2844 unsigned i; 2848 unsigned int i;
2845 2849
2846 for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) { 2850 for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
2847 dev = comedi_clear_board_minor(i); 2851 dev = comedi_clear_board_minor(i);
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 115807215484..dcb637665eb7 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -173,7 +173,7 @@ struct comedi_subdevice {
173 173
174 void *lock; 174 void *lock;
175 void *busy; 175 void *busy;
176 unsigned runflags; 176 unsigned int runflags;
177 spinlock_t spin_lock; /* generic spin-lock for COMEDI and drivers */ 177 spinlock_t spin_lock; /* generic spin-lock for COMEDI and drivers */
178 178
179 unsigned int io_bits; 179 unsigned int io_bits;
@@ -566,7 +566,7 @@ struct comedi_device {
566 566
567void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s); 567void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s);
568 568
569struct comedi_device *comedi_dev_get_from_minor(unsigned minor); 569struct comedi_device *comedi_dev_get_from_minor(unsigned int minor);
570int comedi_dev_put(struct comedi_device *dev); 570int comedi_dev_put(struct comedi_device *dev);
571 571
572bool comedi_is_subdevice_running(struct comedi_subdevice *s); 572bool comedi_is_subdevice_running(struct comedi_subdevice *s);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index b63dd2ef78b5..44511d729450 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -564,7 +564,7 @@ unsigned int comedi_handle_events(struct comedi_device *dev,
564 if (events == 0) 564 if (events == 0)
565 return events; 565 return events;
566 566
567 if (events & COMEDI_CB_CANCEL_MASK) 567 if ((events & COMEDI_CB_CANCEL_MASK) && s->cancel)
568 s->cancel(dev, s); 568 s->cancel(dev, s);
569 569
570 comedi_event(dev, s); 570 comedi_event(dev, s);
@@ -575,38 +575,35 @@ EXPORT_SYMBOL_GPL(comedi_handle_events);
575 575
576static int insn_rw_emulate_bits(struct comedi_device *dev, 576static int insn_rw_emulate_bits(struct comedi_device *dev,
577 struct comedi_subdevice *s, 577 struct comedi_subdevice *s,
578 struct comedi_insn *insn, unsigned int *data) 578 struct comedi_insn *insn,
579 unsigned int *data)
579{ 580{
580 struct comedi_insn new_insn; 581 struct comedi_insn _insn;
582 unsigned int chan = CR_CHAN(insn->chanspec);
583 unsigned int base_chan = (chan < 32) ? 0 : chan;
584 unsigned int _data[2];
581 int ret; 585 int ret;
582 static const unsigned channels_per_bitfield = 32;
583
584 unsigned chan = CR_CHAN(insn->chanspec);
585 const unsigned base_bitfield_channel =
586 (chan < channels_per_bitfield) ? 0 : chan;
587 unsigned int new_data[2];
588 586
589 memset(new_data, 0, sizeof(new_data)); 587 memset(_data, 0, sizeof(_data));
590 memset(&new_insn, 0, sizeof(new_insn)); 588 memset(&_insn, 0, sizeof(_insn));
591 new_insn.insn = INSN_BITS; 589 _insn.insn = INSN_BITS;
592 new_insn.chanspec = base_bitfield_channel; 590 _insn.chanspec = base_chan;
593 new_insn.n = 2; 591 _insn.n = 2;
594 new_insn.subdev = insn->subdev; 592 _insn.subdev = insn->subdev;
595 593
596 if (insn->insn == INSN_WRITE) { 594 if (insn->insn == INSN_WRITE) {
597 if (!(s->subdev_flags & SDF_WRITABLE)) 595 if (!(s->subdev_flags & SDF_WRITABLE))
598 return -EINVAL; 596 return -EINVAL;
599 new_data[0] = 1 << (chan - base_bitfield_channel); /* mask */ 597 _data[0] = 1 << (chan - base_chan); /* mask */
600 new_data[1] = data[0] ? (1 << (chan - base_bitfield_channel)) 598 _data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */
601 : 0; /* bits */
602 } 599 }
603 600
604 ret = s->insn_bits(dev, s, &new_insn, new_data); 601 ret = s->insn_bits(dev, s, &_insn, _data);
605 if (ret < 0) 602 if (ret < 0)
606 return ret; 603 return ret;
607 604
608 if (insn->insn == INSN_READ) 605 if (insn->insn == INSN_READ)
609 data[0] = (new_data[1] >> (chan - base_bitfield_channel)) & 1; 606 data[0] = (_data[1] >> (chan - base_chan)) & 1;
610 607
611 return 1; 608 return 1;
612} 609}
@@ -628,6 +625,9 @@ static int __comedi_device_postconfig_async(struct comedi_device *dev,
628 "async subdevices must have a do_cmdtest() function\n"); 625 "async subdevices must have a do_cmdtest() function\n");
629 return -EINVAL; 626 return -EINVAL;
630 } 627 }
628 if (!s->cancel)
629 dev_warn(dev->class_dev,
630 "async subdevices should have a cancel() function\n");
631 631
632 async = kzalloc(sizeof(*async), GFP_KERNEL); 632 async = kzalloc(sizeof(*async), GFP_KERNEL);
633 if (!async) 633 if (!async)
diff --git a/drivers/staging/comedi/drivers/amcc_s5933.h b/drivers/staging/comedi/drivers/amcc_s5933.h
index d4b8c0195bd3..f03e4c8c2021 100644
--- a/drivers/staging/comedi/drivers/amcc_s5933.h
+++ b/drivers/staging/comedi/drivers/amcc_s5933.h
@@ -1,16 +1,14 @@
1/* 1/*
2 comedi/drivers/amcc_s5933.h 2 * Stuff for AMCC S5933 PCI Controller
3 3 *
4 Stuff for AMCC S5933 PCI Controller 4 * Author: Michal Dobes <dobes@tesnet.cz>
5 5 *
6 Author: Michal Dobes <dobes@tesnet.cz> 6 * Inspirated from general-purpose AMCC S5933 PCI Matchmaker driver
7 7 * made by Andrea Cisternino <acister@pcape1.pi.infn.it>
8 Inspirated from general-purpose AMCC S5933 PCI Matchmaker driver 8 * and as result of espionage from MITE code made by David A. Schleef.
9 made by Andrea Cisternino <acister@pcape1.pi.infn.it> 9 * Thanks to AMCC for their on-line documentation and bus master DMA
10 and as result of espionage from MITE code made by David A. Schleef. 10 * example.
11 Thanks to AMCC for their on-line documentation and bus master DMA 11 */
12 example.
13*/
14 12
15#ifndef _AMCC_S5933_H_ 13#ifndef _AMCC_S5933_H_
16#define _AMCC_S5933_H_ 14#define _AMCC_S5933_H_
@@ -58,7 +56,7 @@
58#define INTCSR_INTR_ASSERTED 0x800000 56#define INTCSR_INTR_ASSERTED 0x800000
59 57
60/****************************************************************************/ 58/****************************************************************************/
61/* AMCC - PCI non-volatile ram command register (byte 3 of master control/status register) */ 59/* AMCC - PCI non-volatile ram command register (byte 3 of AMCC_OP_REG_MCSR) */
62/****************************************************************************/ 60/****************************************************************************/
63#define MCSR_NV_LOAD_LOW_ADDR 0x0 61#define MCSR_NV_LOAD_LOW_ADDR 0x0
64#define MCSR_NV_LOAD_HIGH_ADDR 0x20 62#define MCSR_NV_LOAD_HIGH_ADDR 0x20
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index d1539e798ffd..f6e4e984235d 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -101,7 +101,7 @@ struct dio200_subdev_8255 {
101}; 101};
102 102
103struct dio200_subdev_intr { 103struct dio200_subdev_intr {
104 spinlock_t spinlock; 104 spinlock_t spinlock; /* protects the 'active' flag */
105 unsigned int ofs; 105 unsigned int ofs;
106 unsigned int valid_isns; 106 unsigned int valid_isns;
107 unsigned int enabled_isns; 107 unsigned int enabled_isns;
@@ -221,7 +221,7 @@ static void dio200_start_intr(struct comedi_device *dev,
221 struct dio200_subdev_intr *subpriv = s->private; 221 struct dio200_subdev_intr *subpriv = s->private;
222 struct comedi_cmd *cmd = &s->async->cmd; 222 struct comedi_cmd *cmd = &s->async->cmd;
223 unsigned int n; 223 unsigned int n;
224 unsigned isn_bits; 224 unsigned int isn_bits;
225 225
226 /* Determine interrupt sources to enable. */ 226 /* Determine interrupt sources to enable. */
227 isn_bits = 0; 227 isn_bits = 0;
@@ -284,9 +284,9 @@ static int dio200_handle_read_intr(struct comedi_device *dev,
284{ 284{
285 const struct dio200_board *board = dev->board_ptr; 285 const struct dio200_board *board = dev->board_ptr;
286 struct dio200_subdev_intr *subpriv = s->private; 286 struct dio200_subdev_intr *subpriv = s->private;
287 unsigned triggered; 287 unsigned int triggered;
288 unsigned intstat; 288 unsigned int intstat;
289 unsigned cur_enabled; 289 unsigned int cur_enabled;
290 unsigned long flags; 290 unsigned long flags;
291 291
292 triggered = 0; 292 triggered = 0;
@@ -439,7 +439,7 @@ static int dio200_subdev_intr_cmd(struct comedi_device *dev,
439static int dio200_subdev_intr_init(struct comedi_device *dev, 439static int dio200_subdev_intr_init(struct comedi_device *dev,
440 struct comedi_subdevice *s, 440 struct comedi_subdevice *s,
441 unsigned int offset, 441 unsigned int offset,
442 unsigned valid_isns) 442 unsigned int valid_isns)
443{ 443{
444 const struct dio200_board *board = dev->board_ptr; 444 const struct dio200_board *board = dev->board_ptr;
445 struct dio200_subdev_intr *subpriv; 445 struct dio200_subdev_intr *subpriv;
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index b1946ce6ecc1..58b0b6b1a693 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -1,46 +1,44 @@
1/* 1/*
2 comedi/drivers/amplc_pc263.c 2 * Driver for Amplicon PC263 relay board.
3 Driver for Amplicon PC263 and PCI263 relay boards. 3 *
4 4 * Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
5 Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/> 5 *
6 6 * COMEDI - Linux Control and Measurement Device Interface
7 COMEDI - Linux Control and Measurement Device Interface 7 * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
8 Copyright (C) 2000 David A. Schleef <ds@schleef.org> 8 *
9 9 * This program is free software; you can redistribute it and/or modify
10 This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by
11 it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation; either version 2 of the License, or
12 the Free Software Foundation; either version 2 of the License, or 12 * (at your option) any later version.
13 (at your option) any later version. 13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
14 19
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19*/
20/* 20/*
21Driver: amplc_pc263 21 * Driver: amplc_pc263
22Description: Amplicon PC263 22 * Description: Amplicon PC263
23Author: Ian Abbott <abbotti@mev.co.uk> 23 * Author: Ian Abbott <abbotti@mev.co.uk>
24Devices: [Amplicon] PC263 (pc263) 24 * Devices: [Amplicon] PC263 (pc263)
25Updated: Fri, 12 Apr 2013 15:19:36 +0100 25 * Updated: Fri, 12 Apr 2013 15:19:36 +0100
26Status: works 26 * Status: works
27 27 *
28Configuration options: 28 * Configuration options:
29 [0] - I/O port base address 29 * [0] - I/O port base address
30 30 *
31The board appears as one subdevice, with 16 digital outputs, each 31 * The board appears as one subdevice, with 16 digital outputs, each
32connected to a reed-relay. Relay contacts are closed when output is 1. 32 * connected to a reed-relay. Relay contacts are closed when output is 1.
33The state of the outputs can be read. 33 * The state of the outputs can be read.
34*/ 34 */
35 35
36#include <linux/module.h> 36#include <linux/module.h>
37#include "../comedidev.h" 37#include "../comedidev.h"
38 38
39/* PC263 registers */ 39/* PC263 registers */
40 40#define PC263_DO_0_7_REG 0x00
41/* 41#define PC263_DO_8_15_REG 0x01
42 * Board descriptions for Amplicon PC263.
43 */
44 42
45struct pc263_board { 43struct pc263_board {
46 const char *name; 44 const char *name;
@@ -58,8 +56,8 @@ static int pc263_do_insn_bits(struct comedi_device *dev,
58 unsigned int *data) 56 unsigned int *data)
59{ 57{
60 if (comedi_dio_update_state(s, data)) { 58 if (comedi_dio_update_state(s, data)) {
61 outb(s->state & 0xff, dev->iobase); 59 outb(s->state & 0xff, dev->iobase + PC263_DO_0_7_REG);
62 outb((s->state >> 8) & 0xff, dev->iobase + 1); 60 outb((s->state >> 8) & 0xff, dev->iobase + PC263_DO_8_15_REG);
63 } 61 }
64 62
65 data[1] = s->state; 63 data[1] = s->state;
@@ -80,28 +78,30 @@ static int pc263_attach(struct comedi_device *dev, struct comedi_devconfig *it)
80 if (ret) 78 if (ret)
81 return ret; 79 return ret;
82 80
81 /* Digital Output subdevice */
83 s = &dev->subdevices[0]; 82 s = &dev->subdevices[0];
84 /* digital output subdevice */ 83 s->type = COMEDI_SUBD_DO;
85 s->type = COMEDI_SUBD_DO; 84 s->subdev_flags = SDF_WRITABLE;
86 s->subdev_flags = SDF_WRITABLE; 85 s->n_chan = 16;
87 s->n_chan = 16; 86 s->maxdata = 1;
88 s->maxdata = 1; 87 s->range_table = &range_digital;
89 s->range_table = &range_digital; 88 s->insn_bits = pc263_do_insn_bits;
90 s->insn_bits = pc263_do_insn_bits; 89
91 /* read initial relay state */ 90 /* read initial relay state */
92 s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8); 91 s->state = inb(dev->iobase + PC263_DO_0_7_REG) |
92 (inb(dev->iobase + PC263_DO_8_15_REG) << 8);
93 93
94 return 0; 94 return 0;
95} 95}
96 96
97static struct comedi_driver amplc_pc263_driver = { 97static struct comedi_driver amplc_pc263_driver = {
98 .driver_name = "amplc_pc263", 98 .driver_name = "amplc_pc263",
99 .module = THIS_MODULE, 99 .module = THIS_MODULE,
100 .attach = pc263_attach, 100 .attach = pc263_attach,
101 .detach = comedi_legacy_detach, 101 .detach = comedi_legacy_detach,
102 .board_name = &pc263_boards[0].name, 102 .board_name = &pc263_boards[0].name,
103 .offset = sizeof(struct pc263_board), 103 .offset = sizeof(struct pc263_board),
104 .num_names = ARRAY_SIZE(pc263_boards), 104 .num_names = ARRAY_SIZE(pc263_boards),
105}; 105};
106 106
107module_comedi_driver(amplc_pc263_driver); 107module_comedi_driver(amplc_pc263_driver);
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index cac011fdd375..2e6decf1b69d 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -132,48 +132,53 @@
132 * DACCON values. 132 * DACCON values.
133 */ 133 */
134/* (r/w) Scan trigger. */ 134/* (r/w) Scan trigger. */
135#define PCI224_DACCON_TRIG_MASK (7 << 0) 135#define PCI224_DACCON_TRIG(x) (((x) & 0x7) << 0)
136#define PCI224_DACCON_TRIG_NONE (0 << 0) /* none */ 136#define PCI224_DACCON_TRIG_MASK PCI224_DACCON_TRIG(7)
137#define PCI224_DACCON_TRIG_SW (1 << 0) /* software trig */ 137#define PCI224_DACCON_TRIG_NONE PCI224_DACCON_TRIG(0) /* none */
138#define PCI224_DACCON_TRIG_EXTP (2 << 0) /* ext +ve edge */ 138#define PCI224_DACCON_TRIG_SW PCI224_DACCON_TRIG(1) /* soft trig */
139#define PCI224_DACCON_TRIG_EXTN (3 << 0) /* ext -ve edge */ 139#define PCI224_DACCON_TRIG_EXTP PCI224_DACCON_TRIG(2) /* ext + edge */
140#define PCI224_DACCON_TRIG_Z2CT0 (4 << 0) /* Z2 CT0 out */ 140#define PCI224_DACCON_TRIG_EXTN PCI224_DACCON_TRIG(3) /* ext - edge */
141#define PCI224_DACCON_TRIG_Z2CT1 (5 << 0) /* Z2 CT1 out */ 141#define PCI224_DACCON_TRIG_Z2CT0 PCI224_DACCON_TRIG(4) /* Z2 CT0 out */
142#define PCI224_DACCON_TRIG_Z2CT2 (6 << 0) /* Z2 CT2 out */ 142#define PCI224_DACCON_TRIG_Z2CT1 PCI224_DACCON_TRIG(5) /* Z2 CT1 out */
143#define PCI224_DACCON_TRIG_Z2CT2 PCI224_DACCON_TRIG(6) /* Z2 CT2 out */
143/* (r/w) Polarity (PCI224 only, PCI234 always bipolar!). */ 144/* (r/w) Polarity (PCI224 only, PCI234 always bipolar!). */
144#define PCI224_DACCON_POLAR_MASK (1 << 3) 145#define PCI224_DACCON_POLAR(x) (((x) & 0x1) << 3)
145#define PCI224_DACCON_POLAR_UNI (0 << 3) /* range [0,Vref] */ 146#define PCI224_DACCON_POLAR_MASK PCI224_DACCON_POLAR(1)
146#define PCI224_DACCON_POLAR_BI (1 << 3) /* range [-Vref,Vref] */ 147#define PCI224_DACCON_POLAR_UNI PCI224_DACCON_POLAR(0) /* [0,+V] */
148#define PCI224_DACCON_POLAR_BI PCI224_DACCON_POLAR(1) /* [-V,+V] */
147/* (r/w) Internal Vref (PCI224 only, when LK1 in position 1-2). */ 149/* (r/w) Internal Vref (PCI224 only, when LK1 in position 1-2). */
148#define PCI224_DACCON_VREF_MASK (3 << 4) 150#define PCI224_DACCON_VREF(x) (((x) & 0x3) << 4)
149#define PCI224_DACCON_VREF_1_25 (0 << 4) /* Vref = 1.25V */ 151#define PCI224_DACCON_VREF_MASK PCI224_DACCON_VREF(3)
150#define PCI224_DACCON_VREF_2_5 (1 << 4) /* Vref = 2.5V */ 152#define PCI224_DACCON_VREF_1_25 PCI224_DACCON_VREF(0) /* 1.25V */
151#define PCI224_DACCON_VREF_5 (2 << 4) /* Vref = 5V */ 153#define PCI224_DACCON_VREF_2_5 PCI224_DACCON_VREF(1) /* 2.5V */
152#define PCI224_DACCON_VREF_10 (3 << 4) /* Vref = 10V */ 154#define PCI224_DACCON_VREF_5 PCI224_DACCON_VREF(2) /* 5V */
155#define PCI224_DACCON_VREF_10 PCI224_DACCON_VREF(3) /* 10V */
153/* (r/w) Wraparound mode enable (to play back stored waveform). */ 156/* (r/w) Wraparound mode enable (to play back stored waveform). */
154#define PCI224_DACCON_FIFOWRAP (1 << 7) 157#define PCI224_DACCON_FIFOWRAP BIT(7)
155/* (r/w) FIFO enable. It MUST be set! */ 158/* (r/w) FIFO enable. It MUST be set! */
156#define PCI224_DACCON_FIFOENAB (1 << 8) 159#define PCI224_DACCON_FIFOENAB BIT(8)
157/* (r/w) FIFO interrupt trigger level (most values are not very useful). */ 160/* (r/w) FIFO interrupt trigger level (most values are not very useful). */
158#define PCI224_DACCON_FIFOINTR_MASK (7 << 9) 161#define PCI224_DACCON_FIFOINTR(x) (((x) & 0x7) << 9)
159#define PCI224_DACCON_FIFOINTR_EMPTY (0 << 9) /* when empty */ 162#define PCI224_DACCON_FIFOINTR_MASK PCI224_DACCON_FIFOINTR(7)
160#define PCI224_DACCON_FIFOINTR_NEMPTY (1 << 9) /* when not empty */ 163#define PCI224_DACCON_FIFOINTR_EMPTY PCI224_DACCON_FIFOINTR(0) /* empty */
161#define PCI224_DACCON_FIFOINTR_NHALF (2 << 9) /* when not half full */ 164#define PCI224_DACCON_FIFOINTR_NEMPTY PCI224_DACCON_FIFOINTR(1) /* !empty */
162#define PCI224_DACCON_FIFOINTR_HALF (3 << 9) /* when half full */ 165#define PCI224_DACCON_FIFOINTR_NHALF PCI224_DACCON_FIFOINTR(2) /* !half */
163#define PCI224_DACCON_FIFOINTR_NFULL (4 << 9) /* when not full */ 166#define PCI224_DACCON_FIFOINTR_HALF PCI224_DACCON_FIFOINTR(3) /* half */
164#define PCI224_DACCON_FIFOINTR_FULL (5 << 9) /* when full */ 167#define PCI224_DACCON_FIFOINTR_NFULL PCI224_DACCON_FIFOINTR(4) /* !full */
168#define PCI224_DACCON_FIFOINTR_FULL PCI224_DACCON_FIFOINTR(5) /* full */
165/* (r-o) FIFO fill level. */ 169/* (r-o) FIFO fill level. */
166#define PCI224_DACCON_FIFOFL_MASK (7 << 12) 170#define PCI224_DACCON_FIFOFL(x) (((x) & 0x7) << 12)
167#define PCI224_DACCON_FIFOFL_EMPTY (1 << 12) /* 0 */ 171#define PCI224_DACCON_FIFOFL_MASK PCI224_DACCON_FIFOFL(7)
168#define PCI224_DACCON_FIFOFL_ONETOHALF (0 << 12) /* [1,2048] */ 172#define PCI224_DACCON_FIFOFL_EMPTY PCI224_DACCON_FIFOFL(1) /* 0 */
169#define PCI224_DACCON_FIFOFL_HALFTOFULL (4 << 12) /* [2049,4095] */ 173#define PCI224_DACCON_FIFOFL_ONETOHALF PCI224_DACCON_FIFOFL(0) /* 1-2048 */
170#define PCI224_DACCON_FIFOFL_FULL (6 << 12) /* 4096 */ 174#define PCI224_DACCON_FIFOFL_HALFTOFULL PCI224_DACCON_FIFOFL(4) /* 2049-4095 */
175#define PCI224_DACCON_FIFOFL_FULL PCI224_DACCON_FIFOFL(6) /* 4096 */
171/* (r-o) DAC busy flag. */ 176/* (r-o) DAC busy flag. */
172#define PCI224_DACCON_BUSY (1 << 15) 177#define PCI224_DACCON_BUSY BIT(15)
173/* (w-o) FIFO reset. */ 178/* (w-o) FIFO reset. */
174#define PCI224_DACCON_FIFORESET (1 << 12) 179#define PCI224_DACCON_FIFORESET BIT(12)
175/* (w-o) Global reset (not sure what it does). */ 180/* (w-o) Global reset (not sure what it does). */
176#define PCI224_DACCON_GLOBALRESET (1 << 13) 181#define PCI224_DACCON_GLOBALRESET BIT(13)
177 182
178/* 183/*
179 * DAC FIFO size. 184 * DAC FIFO size.
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 907c39cc89d7..42945de31fe2 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -237,47 +237,50 @@
237/* 237/*
238 * DACCON read-write values. 238 * DACCON read-write values.
239 */ 239 */
240#define PCI230_DAC_OR_UNI (0 << 0) /* Output range unipolar */ 240#define PCI230_DAC_OR(x) (((x) & 0x1) << 0)
241#define PCI230_DAC_OR_BIP (1 << 0) /* Output range bipolar */ 241#define PCI230_DAC_OR_UNI PCI230_DAC_OR(0) /* Output unipolar */
242#define PCI230_DAC_OR_MASK (1 << 0) 242#define PCI230_DAC_OR_BIP PCI230_DAC_OR(1) /* Output bipolar */
243#define PCI230_DAC_OR_MASK PCI230_DAC_OR(1)
243/* 244/*
244 * The following applies only if DAC FIFO support is enabled in the EXTFUNC 245 * The following applies only if DAC FIFO support is enabled in the EXTFUNC
245 * register (and only for PCI230+ hardware version 2 onwards). 246 * register (and only for PCI230+ hardware version 2 onwards).
246 */ 247 */
247#define PCI230P2_DAC_FIFO_EN (1 << 8) /* FIFO enable */ 248#define PCI230P2_DAC_FIFO_EN BIT(8) /* FIFO enable */
248/* 249/*
249 * The following apply only if the DAC FIFO is enabled (and only for PCI230+ 250 * The following apply only if the DAC FIFO is enabled (and only for PCI230+
250 * hardware version 2 onwards). 251 * hardware version 2 onwards).
251 */ 252 */
252#define PCI230P2_DAC_TRIG_NONE (0 << 2) /* No trigger */ 253#define PCI230P2_DAC_TRIG(x) (((x) & 0x7) << 2)
253#define PCI230P2_DAC_TRIG_SW (1 << 2) /* Software trigger trigger */ 254#define PCI230P2_DAC_TRIG_NONE PCI230P2_DAC_TRIG(0) /* none */
254#define PCI230P2_DAC_TRIG_EXTP (2 << 2) /* EXTTRIG +ve edge trigger */ 255#define PCI230P2_DAC_TRIG_SW PCI230P2_DAC_TRIG(1) /* soft trig */
255#define PCI230P2_DAC_TRIG_EXTN (3 << 2) /* EXTTRIG -ve edge trigger */ 256#define PCI230P2_DAC_TRIG_EXTP PCI230P2_DAC_TRIG(2) /* ext + edge */
256#define PCI230P2_DAC_TRIG_Z2CT0 (4 << 2) /* CT0-OUT +ve edge trigger */ 257#define PCI230P2_DAC_TRIG_EXTN PCI230P2_DAC_TRIG(3) /* ext - edge */
257#define PCI230P2_DAC_TRIG_Z2CT1 (5 << 2) /* CT1-OUT +ve edge trigger */ 258#define PCI230P2_DAC_TRIG_Z2CT0 PCI230P2_DAC_TRIG(4) /* Z2 CT0 out */
258#define PCI230P2_DAC_TRIG_Z2CT2 (6 << 2) /* CT2-OUT +ve edge trigger */ 259#define PCI230P2_DAC_TRIG_Z2CT1 PCI230P2_DAC_TRIG(5) /* Z2 CT1 out */
259#define PCI230P2_DAC_TRIG_MASK (7 << 2) 260#define PCI230P2_DAC_TRIG_Z2CT2 PCI230P2_DAC_TRIG(6) /* Z2 CT2 out */
260#define PCI230P2_DAC_FIFO_WRAP (1 << 7) /* FIFO wraparound mode */ 261#define PCI230P2_DAC_TRIG_MASK PCI230P2_DAC_TRIG(7)
261#define PCI230P2_DAC_INT_FIFO_EMPTY (0 << 9) /* FIFO interrupt empty */ 262#define PCI230P2_DAC_FIFO_WRAP BIT(7) /* FIFO wraparound mode */
262#define PCI230P2_DAC_INT_FIFO_NEMPTY (1 << 9) 263#define PCI230P2_DAC_INT_FIFO(x) (((x) & 7) << 9)
263#define PCI230P2_DAC_INT_FIFO_NHALF (2 << 9) /* FIFO intr not half full */ 264#define PCI230P2_DAC_INT_FIFO_EMPTY PCI230P2_DAC_INT_FIFO(0) /* empty */
264#define PCI230P2_DAC_INT_FIFO_HALF (3 << 9) 265#define PCI230P2_DAC_INT_FIFO_NEMPTY PCI230P2_DAC_INT_FIFO(1) /* !empty */
265#define PCI230P2_DAC_INT_FIFO_NFULL (4 << 9) /* FIFO interrupt not full */ 266#define PCI230P2_DAC_INT_FIFO_NHALF PCI230P2_DAC_INT_FIFO(2) /* !half */
266#define PCI230P2_DAC_INT_FIFO_FULL (5 << 9) 267#define PCI230P2_DAC_INT_FIFO_HALF PCI230P2_DAC_INT_FIFO(3) /* half */
267#define PCI230P2_DAC_INT_FIFO_MASK (7 << 9) 268#define PCI230P2_DAC_INT_FIFO_NFULL PCI230P2_DAC_INT_FIFO(4) /* !full */
269#define PCI230P2_DAC_INT_FIFO_FULL PCI230P2_DAC_INT_FIFO(5) /* full */
270#define PCI230P2_DAC_INT_FIFO_MASK PCI230P2_DAC_INT_FIFO(7)
268 271
269/* 272/*
270 * DACCON read-only values. 273 * DACCON read-only values.
271 */ 274 */
272#define PCI230_DAC_BUSY (1 << 1) /* DAC busy. */ 275#define PCI230_DAC_BUSY BIT(1) /* DAC busy. */
273/* 276/*
274 * The following apply only if the DAC FIFO is enabled (and only for PCI230+ 277 * The following apply only if the DAC FIFO is enabled (and only for PCI230+
275 * hardware version 2 onwards). 278 * hardware version 2 onwards).
276 */ 279 */
277#define PCI230P2_DAC_FIFO_UNDERRUN_LATCHED (1 << 5) /* Underrun error */ 280#define PCI230P2_DAC_FIFO_UNDERRUN_LATCHED BIT(5) /* Underrun error */
278#define PCI230P2_DAC_FIFO_EMPTY (1 << 13) /* FIFO empty */ 281#define PCI230P2_DAC_FIFO_EMPTY BIT(13) /* FIFO empty */
279#define PCI230P2_DAC_FIFO_FULL (1 << 14) /* FIFO full */ 282#define PCI230P2_DAC_FIFO_FULL BIT(14) /* FIFO full */
280#define PCI230P2_DAC_FIFO_HALF (1 << 15) /* FIFO half full */ 283#define PCI230P2_DAC_FIFO_HALF BIT(15) /* FIFO half full */
281 284
282/* 285/*
283 * DACCON write-only, transient values. 286 * DACCON write-only, transient values.
@@ -286,8 +289,8 @@
286 * The following apply only if the DAC FIFO is enabled (and only for PCI230+ 289 * The following apply only if the DAC FIFO is enabled (and only for PCI230+
287 * hardware version 2 onwards). 290 * hardware version 2 onwards).
288 */ 291 */
289#define PCI230P2_DAC_FIFO_UNDERRUN_CLEAR (1 << 5) /* Clear underrun */ 292#define PCI230P2_DAC_FIFO_UNDERRUN_CLEAR BIT(5) /* Clear underrun */
290#define PCI230P2_DAC_FIFO_RESET (1 << 12) /* FIFO reset */ 293#define PCI230P2_DAC_FIFO_RESET BIT(12) /* FIFO reset */
291 294
292/* 295/*
293 * PCI230+ hardware version 2 DAC FIFO levels. 296 * PCI230+ hardware version 2 DAC FIFO levels.
@@ -304,44 +307,48 @@
304/* 307/*
305 * ADCCON read/write values. 308 * ADCCON read/write values.
306 */ 309 */
307#define PCI230_ADC_TRIG_NONE (0 << 0) /* No trigger */ 310#define PCI230_ADC_TRIG(x) (((x) & 0x7) << 0)
308#define PCI230_ADC_TRIG_SW (1 << 0) /* Software trigger trigger */ 311#define PCI230_ADC_TRIG_NONE PCI230_ADC_TRIG(0) /* none */
309#define PCI230_ADC_TRIG_EXTP (2 << 0) /* EXTTRIG +ve edge trigger */ 312#define PCI230_ADC_TRIG_SW PCI230_ADC_TRIG(1) /* soft trig */
310#define PCI230_ADC_TRIG_EXTN (3 << 0) /* EXTTRIG -ve edge trigger */ 313#define PCI230_ADC_TRIG_EXTP PCI230_ADC_TRIG(2) /* ext + edge */
311#define PCI230_ADC_TRIG_Z2CT0 (4 << 0) /* CT0-OUT +ve edge trigger */ 314#define PCI230_ADC_TRIG_EXTN PCI230_ADC_TRIG(3) /* ext - edge */
312#define PCI230_ADC_TRIG_Z2CT1 (5 << 0) /* CT1-OUT +ve edge trigger */ 315#define PCI230_ADC_TRIG_Z2CT0 PCI230_ADC_TRIG(4) /* Z2 CT0 out*/
313#define PCI230_ADC_TRIG_Z2CT2 (6 << 0) /* CT2-OUT +ve edge trigger */ 316#define PCI230_ADC_TRIG_Z2CT1 PCI230_ADC_TRIG(5) /* Z2 CT1 out */
314#define PCI230_ADC_TRIG_MASK (7 << 0) 317#define PCI230_ADC_TRIG_Z2CT2 PCI230_ADC_TRIG(6) /* Z2 CT2 out */
315#define PCI230_ADC_IR_UNI (0 << 3) /* Input range unipolar */ 318#define PCI230_ADC_TRIG_MASK PCI230_ADC_TRIG(7)
316#define PCI230_ADC_IR_BIP (1 << 3) /* Input range bipolar */ 319#define PCI230_ADC_IR(x) (((x) & 0x1) << 3)
317#define PCI230_ADC_IR_MASK (1 << 3) 320#define PCI230_ADC_IR_UNI PCI230_ADC_IR(0) /* Input unipolar */
318#define PCI230_ADC_IM_SE (0 << 4) /* Input mode single ended */ 321#define PCI230_ADC_IR_BIP PCI230_ADC_IR(1) /* Input bipolar */
319#define PCI230_ADC_IM_DIF (1 << 4) /* Input mode differential */ 322#define PCI230_ADC_IR_MASK PCI230_ADC_IR(1)
320#define PCI230_ADC_IM_MASK (1 << 4) 323#define PCI230_ADC_IM(x) (((x) & 0x1) << 4)
321#define PCI230_ADC_FIFO_EN (1 << 8) /* FIFO enable */ 324#define PCI230_ADC_IM_SE PCI230_ADC_IM(0) /* single ended */
322#define PCI230_ADC_INT_FIFO_EMPTY (0 << 9) 325#define PCI230_ADC_IM_DIF PCI230_ADC_IM(1) /* differential */
323#define PCI230_ADC_INT_FIFO_NEMPTY (1 << 9) /* FIFO interrupt not empty */ 326#define PCI230_ADC_IM_MASK PCI230_ADC_IM(1)
324#define PCI230_ADC_INT_FIFO_NHALF (2 << 9) 327#define PCI230_ADC_FIFO_EN BIT(8) /* FIFO enable */
325#define PCI230_ADC_INT_FIFO_HALF (3 << 9) /* FIFO interrupt half full */ 328#define PCI230_ADC_INT_FIFO(x) (((x) & 0x7) << 9)
326#define PCI230_ADC_INT_FIFO_NFULL (4 << 9) 329#define PCI230_ADC_INT_FIFO_EMPTY PCI230_ADC_INT_FIFO(0) /* empty */
327#define PCI230_ADC_INT_FIFO_FULL (5 << 9) /* FIFO interrupt full */ 330#define PCI230_ADC_INT_FIFO_NEMPTY PCI230_ADC_INT_FIFO(1) /* !empty */
328#define PCI230P_ADC_INT_FIFO_THRESH (7 << 9) /* FIFO interrupt threshold */ 331#define PCI230_ADC_INT_FIFO_NHALF PCI230_ADC_INT_FIFO(2) /* !half */
329#define PCI230_ADC_INT_FIFO_MASK (7 << 9) 332#define PCI230_ADC_INT_FIFO_HALF PCI230_ADC_INT_FIFO(3) /* half */
333#define PCI230_ADC_INT_FIFO_NFULL PCI230_ADC_INT_FIFO(4) /* !full */
334#define PCI230_ADC_INT_FIFO_FULL PCI230_ADC_INT_FIFO(5) /* full */
335#define PCI230P_ADC_INT_FIFO_THRESH PCI230_ADC_INT_FIFO(7) /* threshold */
336#define PCI230_ADC_INT_FIFO_MASK PCI230_ADC_INT_FIFO(7)
330 337
331/* 338/*
332 * ADCCON write-only, transient values. 339 * ADCCON write-only, transient values.
333 */ 340 */
334#define PCI230_ADC_FIFO_RESET (1 << 12) /* FIFO reset */ 341#define PCI230_ADC_FIFO_RESET BIT(12) /* FIFO reset */
335#define PCI230_ADC_GLOB_RESET (1 << 13) /* Global reset */ 342#define PCI230_ADC_GLOB_RESET BIT(13) /* Global reset */
336 343
337/* 344/*
338 * ADCCON read-only values. 345 * ADCCON read-only values.
339 */ 346 */
340#define PCI230_ADC_BUSY (1 << 15) /* ADC busy */ 347#define PCI230_ADC_BUSY BIT(15) /* ADC busy */
341#define PCI230_ADC_FIFO_EMPTY (1 << 12) /* FIFO empty */ 348#define PCI230_ADC_FIFO_EMPTY BIT(12) /* FIFO empty */
342#define PCI230_ADC_FIFO_FULL (1 << 13) /* FIFO full */ 349#define PCI230_ADC_FIFO_FULL BIT(13) /* FIFO full */
343#define PCI230_ADC_FIFO_HALF (1 << 14) /* FIFO half full */ 350#define PCI230_ADC_FIFO_HALF BIT(14) /* FIFO half full */
344#define PCI230_ADC_FIFO_FULL_LATCHED (1 << 5) /* FIFO overrun occurred */ 351#define PCI230_ADC_FIFO_FULL_LATCHED BIT(5) /* FIFO overrun occurred */
345 352
346/* 353/*
347 * PCI230 ADC FIFO levels. 354 * PCI230 ADC FIFO levels.
@@ -353,10 +360,10 @@
353 * PCI230+ EXTFUNC values. 360 * PCI230+ EXTFUNC values.
354 */ 361 */
355/* Route EXTTRIG pin to external gate inputs. */ 362/* Route EXTTRIG pin to external gate inputs. */
356#define PCI230P_EXTFUNC_GAT_EXTTRIG (1 << 0) 363#define PCI230P_EXTFUNC_GAT_EXTTRIG BIT(0)
357/* PCI230+ hardware version 2 values. */ 364/* PCI230+ hardware version 2 values. */
358/* Allow DAC FIFO to be enabled. */ 365/* Allow DAC FIFO to be enabled. */
359#define PCI230P2_EXTFUNC_DACFIFO (1 << 1) 366#define PCI230P2_EXTFUNC_DACFIFO BIT(1)
360 367
361/* 368/*
362 * Counter/timer clock input configuration sources. 369 * Counter/timer clock input configuration sources.
@@ -379,8 +386,12 @@
379#define GAT_GND 1 /* GND (i.e. disabled) */ 386#define GAT_GND 1 /* GND (i.e. disabled) */
380#define GAT_EXT 2 /* external gate input (PPCn on PCI230) */ 387#define GAT_EXT 2 /* external gate input (PPCn on PCI230) */
381#define GAT_NOUTNM2 3 /* inverted output of channel-2 modulo total */ 388#define GAT_NOUTNM2 3 /* inverted output of channel-2 modulo total */
382/* Macro to construct gate input configuration register value. */ 389
383#define GAT_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7)) 390static inline unsigned int pci230_gat_config(unsigned int chan,
391 unsigned int src)
392{
393 return ((chan & 3) << 3) | (src & 7);
394}
384 395
385/* 396/*
386 * Summary of CLK_OUTNM1 and GAT_NOUTNM2 connections for PCI230 and PCI260: 397 * Summary of CLK_OUTNM1 and GAT_NOUTNM2 connections for PCI230 and PCI260:
@@ -398,20 +409,20 @@
398 * Interrupt enables/status register values. 409 * Interrupt enables/status register values.
399 */ 410 */
400#define PCI230_INT_DISABLE 0 411#define PCI230_INT_DISABLE 0
401#define PCI230_INT_PPI_C0 (1 << 0) 412#define PCI230_INT_PPI_C0 BIT(0)
402#define PCI230_INT_PPI_C3 (1 << 1) 413#define PCI230_INT_PPI_C3 BIT(1)
403#define PCI230_INT_ADC (1 << 2) 414#define PCI230_INT_ADC BIT(2)
404#define PCI230_INT_ZCLK_CT1 (1 << 5) 415#define PCI230_INT_ZCLK_CT1 BIT(5)
405/* For PCI230+ hardware version 2 when DAC FIFO enabled. */ 416/* For PCI230+ hardware version 2 when DAC FIFO enabled. */
406#define PCI230P2_INT_DAC (1 << 4) 417#define PCI230P2_INT_DAC BIT(4)
407 418
408/* 419/*
409 * (Potentially) shared resources and their owners 420 * (Potentially) shared resources and their owners
410 */ 421 */
411enum { 422enum {
412 RES_Z2CT0 = (1U << 0), /* Z2-CT0 */ 423 RES_Z2CT0 = BIT(0), /* Z2-CT0 */
413 RES_Z2CT1 = (1U << 1), /* Z2-CT1 */ 424 RES_Z2CT1 = BIT(1), /* Z2-CT1 */
414 RES_Z2CT2 = (1U << 2) /* Z2-CT2 */ 425 RES_Z2CT2 = BIT(2) /* Z2-CT2 */
415}; 426};
416 427
417enum { 428enum {
@@ -626,10 +637,10 @@ static void pci230_release_all_resources(struct comedi_device *dev,
626 pci230_release_shared(dev, (unsigned char)~0, owner); 637 pci230_release_shared(dev, (unsigned char)~0, owner);
627} 638}
628 639
629static unsigned int pci230_divide_ns(uint64_t ns, unsigned int timebase, 640static unsigned int pci230_divide_ns(u64 ns, unsigned int timebase,
630 unsigned int flags) 641 unsigned int flags)
631{ 642{
632 uint64_t div; 643 u64 div;
633 unsigned int rem; 644 unsigned int rem;
634 645
635 div = ns; 646 div = ns;
@@ -652,7 +663,7 @@ static unsigned int pci230_divide_ns(uint64_t ns, unsigned int timebase,
652 * Given desired period in ns, returns the required internal clock source 663 * Given desired period in ns, returns the required internal clock source
653 * and gets the initial count. 664 * and gets the initial count.
654 */ 665 */
655static unsigned int pci230_choose_clk_count(uint64_t ns, unsigned int *count, 666static unsigned int pci230_choose_clk_count(u64 ns, unsigned int *count,
656 unsigned int flags) 667 unsigned int flags)
657{ 668{
658 unsigned int clk_src, cnt; 669 unsigned int clk_src, cnt;
@@ -676,7 +687,7 @@ static void pci230_ns_to_single_timer(unsigned int *ns, unsigned int flags)
676} 687}
677 688
678static void pci230_ct_setup_ns_mode(struct comedi_device *dev, unsigned int ct, 689static void pci230_ct_setup_ns_mode(struct comedi_device *dev, unsigned int ct,
679 unsigned int mode, uint64_t ns, 690 unsigned int mode, u64 ns,
680 unsigned int flags) 691 unsigned int flags)
681{ 692{
682 unsigned int clk_src; 693 unsigned int clk_src;
@@ -1263,7 +1274,8 @@ static void pci230_ao_start(struct comedi_device *dev,
1263 irqflags); 1274 irqflags);
1264 } 1275 }
1265 /* Set CT1 gate high to start counting. */ 1276 /* Set CT1 gate high to start counting. */
1266 outb(GAT_CONFIG(1, GAT_VCC), dev->iobase + PCI230_ZGAT_SCE); 1277 outb(pci230_gat_config(1, GAT_VCC),
1278 dev->iobase + PCI230_ZGAT_SCE);
1267 break; 1279 break;
1268 case TRIG_INT: 1280 case TRIG_INT:
1269 async->inttrig = pci230_ao_inttrig_scan_begin; 1281 async->inttrig = pci230_ao_inttrig_scan_begin;
@@ -1351,7 +1363,8 @@ static int pci230_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
1351 * cmd->scan_begin_arg is sampling period in ns. 1363 * cmd->scan_begin_arg is sampling period in ns.
1352 * Gate it off for now. 1364 * Gate it off for now.
1353 */ 1365 */
1354 outb(GAT_CONFIG(1, GAT_GND), dev->iobase + PCI230_ZGAT_SCE); 1366 outb(pci230_gat_config(1, GAT_GND),
1367 dev->iobase + PCI230_ZGAT_SCE);
1355 pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3, 1368 pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3,
1356 cmd->scan_begin_arg, 1369 cmd->scan_begin_arg,
1357 cmd->flags); 1370 cmd->flags);
@@ -1792,9 +1805,9 @@ static int pci230_ai_inttrig_scan_begin(struct comedi_device *dev,
1792 spin_lock_irqsave(&devpriv->ai_stop_spinlock, irqflags); 1805 spin_lock_irqsave(&devpriv->ai_stop_spinlock, irqflags);
1793 if (devpriv->ai_cmd_started) { 1806 if (devpriv->ai_cmd_started) {
1794 /* Trigger scan by waggling CT0 gate source. */ 1807 /* Trigger scan by waggling CT0 gate source. */
1795 zgat = GAT_CONFIG(0, GAT_GND); 1808 zgat = pci230_gat_config(0, GAT_GND);
1796 outb(zgat, dev->iobase + PCI230_ZGAT_SCE); 1809 outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
1797 zgat = GAT_CONFIG(0, GAT_VCC); 1810 zgat = pci230_gat_config(0, GAT_VCC);
1798 outb(zgat, dev->iobase + PCI230_ZGAT_SCE); 1811 outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
1799 } 1812 }
1800 spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags); 1813 spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags);
@@ -1926,20 +1939,20 @@ static void pci230_ai_start(struct comedi_device *dev,
1926 * Conversion timer CT2 needs to be gated by 1939 * Conversion timer CT2 needs to be gated by
1927 * inverted output of monostable CT2. 1940 * inverted output of monostable CT2.
1928 */ 1941 */
1929 zgat = GAT_CONFIG(2, GAT_NOUTNM2); 1942 zgat = pci230_gat_config(2, GAT_NOUTNM2);
1930 } else { 1943 } else {
1931 /* 1944 /*
1932 * Conversion timer CT2 needs to be gated on 1945 * Conversion timer CT2 needs to be gated on
1933 * continuously. 1946 * continuously.
1934 */ 1947 */
1935 zgat = GAT_CONFIG(2, GAT_VCC); 1948 zgat = pci230_gat_config(2, GAT_VCC);
1936 } 1949 }
1937 outb(zgat, dev->iobase + PCI230_ZGAT_SCE); 1950 outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
1938 if (cmd->scan_begin_src != TRIG_FOLLOW) { 1951 if (cmd->scan_begin_src != TRIG_FOLLOW) {
1939 /* Set monostable CT0 trigger source. */ 1952 /* Set monostable CT0 trigger source. */
1940 switch (cmd->scan_begin_src) { 1953 switch (cmd->scan_begin_src) {
1941 default: 1954 default:
1942 zgat = GAT_CONFIG(0, GAT_VCC); 1955 zgat = pci230_gat_config(0, GAT_VCC);
1943 break; 1956 break;
1944 case TRIG_EXT: 1957 case TRIG_EXT:
1945 /* 1958 /*
@@ -1950,21 +1963,21 @@ static void pci230_ai_start(struct comedi_device *dev,
1950 * input in order to use it as an external scan 1963 * input in order to use it as an external scan
1951 * trigger. 1964 * trigger.
1952 */ 1965 */
1953 zgat = GAT_CONFIG(0, GAT_EXT); 1966 zgat = pci230_gat_config(0, GAT_EXT);
1954 break; 1967 break;
1955 case TRIG_TIMER: 1968 case TRIG_TIMER:
1956 /* 1969 /*
1957 * Monostable CT0 triggered by rising edge on 1970 * Monostable CT0 triggered by rising edge on
1958 * inverted output of CT1 (falling edge on CT1). 1971 * inverted output of CT1 (falling edge on CT1).
1959 */ 1972 */
1960 zgat = GAT_CONFIG(0, GAT_NOUTNM2); 1973 zgat = pci230_gat_config(0, GAT_NOUTNM2);
1961 break; 1974 break;
1962 case TRIG_INT: 1975 case TRIG_INT:
1963 /* 1976 /*
1964 * Monostable CT0 is triggered by inttrig 1977 * Monostable CT0 is triggered by inttrig
1965 * function waggling the CT0 gate source. 1978 * function waggling the CT0 gate source.
1966 */ 1979 */
1967 zgat = GAT_CONFIG(0, GAT_VCC); 1980 zgat = pci230_gat_config(0, GAT_VCC);
1968 break; 1981 break;
1969 } 1982 }
1970 outb(zgat, dev->iobase + PCI230_ZGAT_SCE); 1983 outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
@@ -1974,7 +1987,7 @@ static void pci230_ai_start(struct comedi_device *dev,
1974 * Scan period timer CT1 needs to be 1987 * Scan period timer CT1 needs to be
1975 * gated on to start counting. 1988 * gated on to start counting.
1976 */ 1989 */
1977 zgat = GAT_CONFIG(1, GAT_VCC); 1990 zgat = pci230_gat_config(1, GAT_VCC);
1978 outb(zgat, dev->iobase + PCI230_ZGAT_SCE); 1991 outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
1979 break; 1992 break;
1980 case TRIG_INT: 1993 case TRIG_INT:
@@ -2216,7 +2229,7 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
2216 * Note, counter/timer output 2 can be monitored on the 2229 * Note, counter/timer output 2 can be monitored on the
2217 * connector: PCI230 pin 21, PCI260 pin 18. 2230 * connector: PCI230 pin 21, PCI260 pin 18.
2218 */ 2231 */
2219 zgat = GAT_CONFIG(2, GAT_GND); 2232 zgat = pci230_gat_config(2, GAT_GND);
2220 outb(zgat, dev->iobase + PCI230_ZGAT_SCE); 2233 outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
2221 /* Set counter/timer 2 to the specified conversion period. */ 2234 /* Set counter/timer 2 to the specified conversion period. */
2222 pci230_ct_setup_ns_mode(dev, 2, I8254_MODE3, cmd->convert_arg, 2235 pci230_ct_setup_ns_mode(dev, 2, I8254_MODE3, cmd->convert_arg,
@@ -2234,10 +2247,10 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
2234 * monostable to stop it triggering. The trigger 2247 * monostable to stop it triggering. The trigger
2235 * source will be changed later. 2248 * source will be changed later.
2236 */ 2249 */
2237 zgat = GAT_CONFIG(0, GAT_VCC); 2250 zgat = pci230_gat_config(0, GAT_VCC);
2238 outb(zgat, dev->iobase + PCI230_ZGAT_SCE); 2251 outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
2239 pci230_ct_setup_ns_mode(dev, 0, I8254_MODE1, 2252 pci230_ct_setup_ns_mode(dev, 0, I8254_MODE1,
2240 ((uint64_t)cmd->convert_arg * 2253 ((u64)cmd->convert_arg *
2241 cmd->scan_end_arg), 2254 cmd->scan_end_arg),
2242 CMDF_ROUND_UP); 2255 CMDF_ROUND_UP);
2243 if (cmd->scan_begin_src == TRIG_TIMER) { 2256 if (cmd->scan_begin_src == TRIG_TIMER) {
@@ -2247,7 +2260,7 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
2247 * 2260 *
2248 * Set up CT1 but gate it off for now. 2261 * Set up CT1 but gate it off for now.
2249 */ 2262 */
2250 zgat = GAT_CONFIG(1, GAT_GND); 2263 zgat = pci230_gat_config(1, GAT_GND);
2251 outb(zgat, dev->iobase + PCI230_ZGAT_SCE); 2264 outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
2252 pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3, 2265 pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3,
2253 cmd->scan_begin_arg, 2266 cmd->scan_begin_arg,
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index b6768aa90547..8d4069bc5716 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -1,49 +1,53 @@
1/* 1/*
2 comedi/drivers/amplc_pci263.c 2 * Driver for Amplicon PCI263 relay board.
3 Driver for Amplicon PCI263 relay board. 3 *
4 * Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
5 *
6 * COMEDI - Linux Control and Measurement Device Interface
7 * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
4 19
5 Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
6
7 COMEDI - Linux Control and Measurement Device Interface
8 Copyright (C) 2000 David A. Schleef <ds@schleef.org>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19*/
20/* 20/*
21Driver: amplc_pci263 21 * Driver: amplc_pci263
22Description: Amplicon PCI263 22 * Description: Amplicon PCI263
23Author: Ian Abbott <abbotti@mev.co.uk> 23 * Author: Ian Abbott <abbotti@mev.co.uk>
24Devices: [Amplicon] PCI263 (amplc_pci263) 24 * Devices: [Amplicon] PCI263 (amplc_pci263)
25Updated: Fri, 12 Apr 2013 15:19:36 +0100 25 * Updated: Fri, 12 Apr 2013 15:19:36 +0100
26Status: works 26 * Status: works
27 27 *
28Configuration options: not applicable, uses PCI auto config 28 * Configuration options: not applicable, uses PCI auto config
29 29 *
30The board appears as one subdevice, with 16 digital outputs, each 30 * The board appears as one subdevice, with 16 digital outputs, each
31connected to a reed-relay. Relay contacts are closed when output is 1. 31 * connected to a reed-relay. Relay contacts are closed when output is 1.
32The state of the outputs can be read. 32 * The state of the outputs can be read.
33*/ 33 */
34 34
35#include <linux/module.h> 35#include <linux/module.h>
36 36
37#include "../comedi_pci.h" 37#include "../comedi_pci.h"
38 38
39/* PCI263 registers */
40#define PCI263_DO_0_7_REG 0x00
41#define PCI263_DO_8_15_REG 0x01
42
39static int pci263_do_insn_bits(struct comedi_device *dev, 43static int pci263_do_insn_bits(struct comedi_device *dev,
40 struct comedi_subdevice *s, 44 struct comedi_subdevice *s,
41 struct comedi_insn *insn, 45 struct comedi_insn *insn,
42 unsigned int *data) 46 unsigned int *data)
43{ 47{
44 if (comedi_dio_update_state(s, data)) { 48 if (comedi_dio_update_state(s, data)) {
45 outb(s->state & 0xff, dev->iobase); 49 outb(s->state & 0xff, dev->iobase + PCI263_DO_0_7_REG);
46 outb((s->state >> 8) & 0xff, dev->iobase + 1); 50 outb((s->state >> 8) & 0xff, dev->iobase + PCI263_DO_8_15_REG);
47 } 51 }
48 52
49 data[1] = s->state; 53 data[1] = s->state;
@@ -67,16 +71,18 @@ static int pci263_auto_attach(struct comedi_device *dev,
67 if (ret) 71 if (ret)
68 return ret; 72 return ret;
69 73
74 /* Digital Output subdevice */
70 s = &dev->subdevices[0]; 75 s = &dev->subdevices[0];
71 /* digital output subdevice */ 76 s->type = COMEDI_SUBD_DO;
72 s->type = COMEDI_SUBD_DO; 77 s->subdev_flags = SDF_WRITABLE;
73 s->subdev_flags = SDF_WRITABLE; 78 s->n_chan = 16;
74 s->n_chan = 16; 79 s->maxdata = 1;
75 s->maxdata = 1; 80 s->range_table = &range_digital;
76 s->range_table = &range_digital; 81 s->insn_bits = pci263_do_insn_bits;
77 s->insn_bits = pci263_do_insn_bits; 82
78 /* read initial relay state */ 83 /* read initial relay state */
79 s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8); 84 s->state = inb(dev->iobase + PCI263_DO_0_7_REG) |
85 (inb(dev->iobase + PCI263_DO_8_15_REG) << 8);
80 86
81 return 0; 87 return 0;
82} 88}
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index 1a109e30d8ff..8ee732571588 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -47,8 +47,8 @@
47 */ 47 */
48#define C6XDIGIO_DATA_REG 0x00 48#define C6XDIGIO_DATA_REG 0x00
49#define C6XDIGIO_DATA_CHAN(x) (((x) + 1) << 4) 49#define C6XDIGIO_DATA_CHAN(x) (((x) + 1) << 4)
50#define C6XDIGIO_DATA_PWM (1 << 5) 50#define C6XDIGIO_DATA_PWM BIT(5)
51#define C6XDIGIO_DATA_ENCODER (1 << 6) 51#define C6XDIGIO_DATA_ENCODER BIT(6)
52#define C6XDIGIO_STATUS_REG 0x01 52#define C6XDIGIO_STATUS_REG 0x01
53#define C6XDIGIO_CTRL_REG 0x02 53#define C6XDIGIO_CTRL_REG 0x02
54 54
diff --git a/drivers/staging/comedi/drivers/comedi_8254.h b/drivers/staging/comedi/drivers/comedi_8254.h
index f4610ead6172..a12c29455d9d 100644
--- a/drivers/staging/comedi/drivers/comedi_8254.h
+++ b/drivers/staging/comedi/drivers/comedi_8254.h
@@ -53,13 +53,15 @@ struct comedi_subdevice;
53#define I8254_COUNTER2_REG 0x02 53#define I8254_COUNTER2_REG 0x02
54#define I8254_CTRL_REG 0x03 54#define I8254_CTRL_REG 0x03
55#define I8254_CTRL_SEL_CTR(x) ((x) << 6) 55#define I8254_CTRL_SEL_CTR(x) ((x) << 6)
56#define I8254_CTRL_READBACK_COUNT ((3 << 6) | (1 << 4)) 56#define I8254_CTRL_READBACK(x) (I8254_CTRL_SEL_CTR(3) | BIT(x))
57#define I8254_CTRL_READBACK_STATUS ((3 << 6) | (1 << 5)) 57#define I8254_CTRL_READBACK_COUNT I8254_CTRL_READBACK(4)
58#define I8254_CTRL_READBACK_STATUS I8254_CTRL_READBACK(5)
58#define I8254_CTRL_READBACK_SEL_CTR(x) (2 << (x)) 59#define I8254_CTRL_READBACK_SEL_CTR(x) (2 << (x))
59#define I8254_CTRL_LATCH (0 << 4) 60#define I8254_CTRL_RW(x) (((x) & 0x3) << 4)
60#define I8254_CTRL_LSB_ONLY (1 << 4) 61#define I8254_CTRL_LATCH I8254_CTRL_RW(0)
61#define I8254_CTRL_MSB_ONLY (2 << 4) 62#define I8254_CTRL_LSB_ONLY I8254_CTRL_RW(1)
62#define I8254_CTRL_LSB_MSB (3 << 4) 63#define I8254_CTRL_MSB_ONLY I8254_CTRL_RW(2)
64#define I8254_CTRL_LSB_MSB I8254_CTRL_RW(3)
63 65
64/* counter maps zero to 0x10000 */ 66/* counter maps zero to 0x10000 */
65#define I8254_MAX_COUNT 0x10000 67#define I8254_MAX_COUNT 0x10000
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index 940781183fac..e0a34c2687a8 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -1,98 +1,82 @@
1/* 1/*
2 comedi/drivers/das1800.c 2 * Comedi driver for Keithley DAS-1700/DAS-1800 series boards
3 Driver for Keitley das1700/das1800 series boards 3 * Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
4 Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net> 4 *
5 5 * COMEDI - Linux Control and Measurement Device Interface
6 COMEDI - Linux Control and Measurement Device Interface 6 * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
7 Copyright (C) 2000 David A. Schleef <ds@schleef.org> 7 *
8 8 * This program is free software; you can redistribute it and/or modify
9 This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by
10 it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or
11 the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version.
12 (at your option) any later version. 12 *
13 13 * This program is distributed in the hope that it will be useful,
14 This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details.
17 GNU General Public License for more details. 17 */
18*/
19/*
20Driver: das1800
21Description: Keithley Metrabyte DAS1800 (& compatibles)
22Author: Frank Mori Hess <fmhess@users.sourceforge.net>
23Devices: [Keithley Metrabyte] DAS-1701ST (das-1701st),
24 DAS-1701ST-DA (das-1701st-da), DAS-1701/AO (das-1701ao),
25 DAS-1702ST (das-1702st), DAS-1702ST-DA (das-1702st-da),
26 DAS-1702HR (das-1702hr), DAS-1702HR-DA (das-1702hr-da),
27 DAS-1702/AO (das-1702ao), DAS-1801ST (das-1801st),
28 DAS-1801ST-DA (das-1801st-da), DAS-1801HC (das-1801hc),
29 DAS-1801AO (das-1801ao), DAS-1802ST (das-1802st),
30 DAS-1802ST-DA (das-1802st-da), DAS-1802HR (das-1802hr),
31 DAS-1802HR-DA (das-1802hr-da), DAS-1802HC (das-1802hc),
32 DAS-1802AO (das-1802ao)
33Status: works
34
35The waveform analog output on the 'ao' cards is not supported.
36If you need it, send me (Frank Hess) an email.
37
38Configuration options:
39 [0] - I/O port base address
40 [1] - IRQ (optional, required for timed or externally triggered conversions)
41 [2] - DMA0 (optional, requires irq)
42 [3] - DMA1 (optional, requires irq and dma0)
43*/
44/*
45 18
46This driver supports the following Keithley boards: 19/*
47 20 * Driver: das1800
48das-1701st 21 * Description: Keithley Metrabyte DAS1800 (& compatibles)
49das-1701st-da 22 * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
50das-1701ao 23 * Devices: [Keithley Metrabyte] DAS-1701ST (das-1701st),
51das-1702st 24 * DAS-1701ST-DA (das-1701st-da), DAS-1701/AO (das-1701ao),
52das-1702st-da 25 * DAS-1702ST (das-1702st), DAS-1702ST-DA (das-1702st-da),
53das-1702hr 26 * DAS-1702HR (das-1702hr), DAS-1702HR-DA (das-1702hr-da),
54das-1702hr-da 27 * DAS-1702/AO (das-1702ao), DAS-1801ST (das-1801st),
55das-1702ao 28 * DAS-1801ST-DA (das-1801st-da), DAS-1801HC (das-1801hc),
56das-1801st 29 * DAS-1801AO (das-1801ao), DAS-1802ST (das-1802st),
57das-1801st-da 30 * DAS-1802ST-DA (das-1802st-da), DAS-1802HR (das-1802hr),
58das-1801hc 31 * DAS-1802HR-DA (das-1802hr-da), DAS-1802HC (das-1802hc),
59das-1801ao 32 * DAS-1802AO (das-1802ao)
60das-1802st 33 * Status: works
61das-1802st-da 34 *
62das-1802hr 35 * Configuration options:
63das-1802hr-da 36 * [0] - I/O port base address
64das-1802hc 37 * [1] - IRQ (optional, required for analog input cmd support)
65das-1802ao 38 * [2] - DMA0 (optional, requires irq)
66 39 * [3] - DMA1 (optional, requires irq and dma0)
67Options: 40 *
68 [0] - base io address 41 * analog input cmd triggers supported:
69 [1] - irq (optional, required for timed or externally triggered conversions) 42 *
70 [2] - dma0 (optional, requires irq) 43 * start_src TRIG_NOW command starts immediately
71 [3] - dma1 (optional, requires irq and dma0) 44 * TRIG_EXT command starts on external pin TGIN
72 45 *
73irq can be omitted, although the cmd interface will not work without it. 46 * scan_begin_src TRIG_FOLLOW paced/external scans start immediately
74 47 * TRIG_TIMER burst scans start periodically
75analog input cmd triggers supported: 48 * TRIG_EXT burst scans start on external pin XPCLK
76 start_src: TRIG_NOW | TRIG_EXT 49 *
77 scan_begin_src: TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT 50 * scan_end_src TRIG_COUNT scan ends after last channel
78 scan_end_src: TRIG_COUNT 51 *
79 convert_src: TRIG_TIMER | TRIG_EXT (TRIG_EXT requires scan_begin_src == TRIG_FOLLOW) 52 * convert_src TRIG_TIMER paced/burst conversions are timed
80 stop_src: TRIG_COUNT | TRIG_EXT | TRIG_NONE 53 * TRIG_EXT conversions on external pin XPCLK
81 54 * (requires scan_begin_src == TRIG_FOLLOW)
82scan_begin_src triggers TRIG_TIMER and TRIG_EXT use the card's 55 *
83'burst mode' which limits the valid conversion time to 64 microseconds 56 * stop_src TRIG_COUNT command stops after stop_arg scans
84(convert_arg <= 64000). This limitation does not apply if scan_begin_src 57 * TRIG_EXT command stops on external pin TGIN
85is TRIG_FOLLOW. 58 * TRIG_NONE command runs until canceled
86 59 *
87NOTES: 60 * If TRIG_EXT is used for both the start_src and stop_src, the first TGIN
88Only the DAS-1801ST has been tested by me. 61 * trigger starts the command, and the second trigger will stop it. If only
89Unipolar and bipolar ranges cannot be mixed in the channel/gain list. 62 * one is TRIG_EXT, the first trigger will either stop or start the command.
90 63 * The external pin TGIN is normally set for negative edge triggering. It
91TODO: 64 * can be set to positive edge with the CR_INVERT flag. If TRIG_EXT is used
92 Make it automatically allocate irq and dma channels if they are not specified 65 * for both the start_src and stop_src they must have the same polarity.
93 Add support for analog out on 'ao' cards 66 *
94 read insn for analog out 67 * Minimum conversion speed is limited to 64 microseconds (convert_arg <= 64000)
95*/ 68 * for 'burst' scans. This limitation does not apply for 'paced' scans. The
69 * maximum conversion speed is limited by the board (convert_arg >= ai_speed).
70 * Maximum conversion speeds are not always achievable depending on the
71 * board setup (see user manual).
72 *
73 * NOTES:
74 * Only the DAS-1801ST has been tested by me.
75 * Unipolar and bipolar ranges cannot be mixed in the channel/gain list.
76 *
77 * The waveform analog output on the 'ao' cards is not supported.
78 * If you need it, send me (Frank Hess) an email.
79 */
96 80
97#include <linux/module.h> 81#include <linux/module.h>
98#include <linux/interrupt.h> 82#include <linux/interrupt.h>
@@ -107,7 +91,6 @@ TODO:
107/* misc. defines */ 91/* misc. defines */
108#define DAS1800_SIZE 16 /* uses 16 io addresses */ 92#define DAS1800_SIZE 16 /* uses 16 io addresses */
109#define FIFO_SIZE 1024 /* 1024 sample fifo */ 93#define FIFO_SIZE 1024 /* 1024 sample fifo */
110#define UNIPOLAR 0x4 /* bit that determines whether input range is uni/bipolar */
111#define DMA_BUF_SIZE 0x1ff00 /* size in bytes of dma buffers */ 94#define DMA_BUF_SIZE 0x1ff00 /* size in bytes of dma buffers */
112 95
113/* Registers for the das1800 */ 96/* Registers for the das1800 */
@@ -125,6 +108,7 @@ TODO:
125#define CGSL 0x8 108#define CGSL 0x8
126#define TGEN 0x10 109#define TGEN 0x10
127#define TGSL 0x20 110#define TGSL 0x20
111#define TGPL 0x40
128#define ATEN 0x80 112#define ATEN 0x80
129#define DAS1800_CONTROL_B 0x5 113#define DAS1800_CONTROL_B 0x5
130#define DMA_CH5 0x1 114#define DMA_CH5 0x1
@@ -133,7 +117,7 @@ TODO:
133#define DMA_CH5_CH6 0x5 117#define DMA_CH5_CH6 0x5
134#define DMA_CH6_CH7 0x6 118#define DMA_CH6_CH7 0x6
135#define DMA_CH7_CH5 0x7 119#define DMA_CH7_CH5 0x7
136#define DMA_ENABLED 0x3 /* mask used to determine if dma is enabled */ 120#define DMA_ENABLED 0x3
137#define DMA_DUAL 0x4 121#define DMA_DUAL 0x4
138#define IRQ3 0x8 122#define IRQ3 0x8
139#define IRQ5 0x10 123#define IRQ5 0x10
@@ -151,319 +135,214 @@ TODO:
151#define SD 0x40 135#define SD 0x40
152#define UB 0x80 136#define UB 0x80
153#define DAS1800_STATUS 0x7 137#define DAS1800_STATUS 0x7
154/* bits that prevent interrupt status bits (and CVEN) from being cleared on write */
155#define CLEAR_INTR_MASK (CVEN_MASK | 0x1f)
156#define INT 0x1 138#define INT 0x1
157#define DMATC 0x2 139#define DMATC 0x2
158#define CT0TC 0x8 140#define CT0TC 0x8
159#define OVF 0x10 141#define OVF 0x10
160#define FHF 0x20 142#define FHF 0x20
161#define FNE 0x40 143#define FNE 0x40
162#define CVEN_MASK 0x40 /* masks CVEN on write */
163#define CVEN 0x80 144#define CVEN 0x80
145#define CVEN_MASK 0x40
146#define CLEAR_INTR_MASK (CVEN_MASK | 0x1f)
164#define DAS1800_BURST_LENGTH 0x8 147#define DAS1800_BURST_LENGTH 0x8
165#define DAS1800_BURST_RATE 0x9 148#define DAS1800_BURST_RATE 0x9
166#define DAS1800_QRAM_ADDRESS 0xa 149#define DAS1800_QRAM_ADDRESS 0xa
167#define DAS1800_COUNTER 0xc 150#define DAS1800_COUNTER 0xc
168 151
169#define IOBASE2 0x400 /* offset of additional ioports used on 'ao' cards */ 152#define IOBASE2 0x400
170 153
171enum { 154static const struct comedi_lrange das1801_ai_range = {
172 das1701st, das1701st_da, das1702st, das1702st_da, das1702hr,
173 das1702hr_da,
174 das1701ao, das1702ao, das1801st, das1801st_da, das1802st, das1802st_da,
175 das1802hr, das1802hr_da, das1801hc, das1802hc, das1801ao, das1802ao
176};
177
178/* analog input ranges */
179static const struct comedi_lrange range_ai_das1801 = {
180 8, { 155 8, {
181 BIP_RANGE(5), 156 BIP_RANGE(5), /* bipolar gain = 1 */
182 BIP_RANGE(1), 157 BIP_RANGE(1), /* bipolar gain = 10 */
183 BIP_RANGE(0.1), 158 BIP_RANGE(0.1), /* bipolar gain = 50 */
184 BIP_RANGE(0.02), 159 BIP_RANGE(0.02), /* bipolar gain = 250 */
185 UNI_RANGE(5), 160 UNI_RANGE(5), /* unipolar gain = 1 */
186 UNI_RANGE(1), 161 UNI_RANGE(1), /* unipolar gain = 10 */
187 UNI_RANGE(0.1), 162 UNI_RANGE(0.1), /* unipolar gain = 50 */
188 UNI_RANGE(0.02) 163 UNI_RANGE(0.02) /* unipolar gain = 250 */
189 } 164 }
190}; 165};
191 166
192static const struct comedi_lrange range_ai_das1802 = { 167static const struct comedi_lrange das1802_ai_range = {
193 8, { 168 8, {
194 BIP_RANGE(10), 169 BIP_RANGE(10), /* bipolar gain = 1 */
195 BIP_RANGE(5), 170 BIP_RANGE(5), /* bipolar gain = 2 */
196 BIP_RANGE(2.5), 171 BIP_RANGE(2.5), /* bipolar gain = 4 */
197 BIP_RANGE(1.25), 172 BIP_RANGE(1.25), /* bipolar gain = 8 */
198 UNI_RANGE(10), 173 UNI_RANGE(10), /* unipolar gain = 1 */
199 UNI_RANGE(5), 174 UNI_RANGE(5), /* unipolar gain = 2 */
200 UNI_RANGE(2.5), 175 UNI_RANGE(2.5), /* unipolar gain = 4 */
201 UNI_RANGE(1.25) 176 UNI_RANGE(1.25) /* unipolar gain = 8 */
202 } 177 }
203}; 178};
204 179
180/*
181 * The waveform analog outputs on the 'ao' boards are not currently
182 * supported. They have a comedi_lrange of:
183 * { 2, { BIP_RANGE(10), BIP_RANGE(5) } }
184 */
185
186enum das1800_boardid {
187 BOARD_DAS1701ST,
188 BOARD_DAS1701ST_DA,
189 BOARD_DAS1702ST,
190 BOARD_DAS1702ST_DA,
191 BOARD_DAS1702HR,
192 BOARD_DAS1702HR_DA,
193 BOARD_DAS1701AO,
194 BOARD_DAS1702AO,
195 BOARD_DAS1801ST,
196 BOARD_DAS1801ST_DA,
197 BOARD_DAS1802ST,
198 BOARD_DAS1802ST_DA,
199 BOARD_DAS1802HR,
200 BOARD_DAS1802HR_DA,
201 BOARD_DAS1801HC,
202 BOARD_DAS1802HC,
203 BOARD_DAS1801AO,
204 BOARD_DAS1802AO
205};
206
207/* board probe id values (hi byte of the digital input register) */
208#define DAS1800_ID_ST_DA 0x3
209#define DAS1800_ID_HR_DA 0x4
210#define DAS1800_ID_AO 0x5
211#define DAS1800_ID_HR 0x6
212#define DAS1800_ID_ST 0x7
213#define DAS1800_ID_HC 0x8
214
205struct das1800_board { 215struct das1800_board {
206 const char *name; 216 const char *name;
207 int ai_speed; /* max conversion period in nanoseconds */ 217 unsigned char id;
208 int resolution; /* bits of ai resolution */ 218 unsigned int ai_speed;
209 int qram_len; /* length of card's channel / gain queue */ 219 unsigned int is_01_series:1;
210 int common; /* supports AREF_COMMON flag */
211 int do_n_chan; /* number of digital output channels */
212 int ao_ability; /* 0 == no analog out, 1 == basic analog out, 2 == waveform analog out */
213 int ao_n_chan; /* number of analog out channels */
214 const struct comedi_lrange *range_ai; /* available input ranges */
215}; 220};
216 221
217/* Warning: the maximum conversion speeds listed below are
218 * not always achievable depending on board setup (see
219 * user manual.)
220 */
221static const struct das1800_board das1800_boards[] = { 222static const struct das1800_board das1800_boards[] = {
222 { 223 [BOARD_DAS1701ST] = {
223 .name = "das-1701st", 224 .name = "das-1701st",
224 .ai_speed = 6250, 225 .id = DAS1800_ID_ST,
225 .resolution = 12, 226 .ai_speed = 6250,
226 .qram_len = 256, 227 .is_01_series = 1,
227 .common = 1, 228 },
228 .do_n_chan = 4, 229 [BOARD_DAS1701ST_DA] = {
229 .ao_ability = 0, 230 .name = "das-1701st-da",
230 .ao_n_chan = 0, 231 .id = DAS1800_ID_ST_DA,
231 .range_ai = &range_ai_das1801, 232 .ai_speed = 6250,
232 }, 233 .is_01_series = 1,
233 { 234 },
234 .name = "das-1701st-da", 235 [BOARD_DAS1702ST] = {
235 .ai_speed = 6250, 236 .name = "das-1702st",
236 .resolution = 12, 237 .id = DAS1800_ID_ST,
237 .qram_len = 256, 238 .ai_speed = 6250,
238 .common = 1, 239 },
239 .do_n_chan = 4, 240 [BOARD_DAS1702ST_DA] = {
240 .ao_ability = 1, 241 .name = "das-1702st-da",
241 .ao_n_chan = 4, 242 .id = DAS1800_ID_ST_DA,
242 .range_ai = &range_ai_das1801, 243 .ai_speed = 6250,
243 }, 244 },
244 { 245 [BOARD_DAS1702HR] = {
245 .name = "das-1702st", 246 .name = "das-1702hr",
246 .ai_speed = 6250, 247 .id = DAS1800_ID_HR,
247 .resolution = 12, 248 .ai_speed = 20000,
248 .qram_len = 256, 249 },
249 .common = 1, 250 [BOARD_DAS1702HR_DA] = {
250 .do_n_chan = 4, 251 .name = "das-1702hr-da",
251 .ao_ability = 0, 252 .id = DAS1800_ID_HR_DA,
252 .ao_n_chan = 0, 253 .ai_speed = 20000,
253 .range_ai = &range_ai_das1802, 254 },
254 }, 255 [BOARD_DAS1701AO] = {
255 { 256 .name = "das-1701ao",
256 .name = "das-1702st-da", 257 .id = DAS1800_ID_AO,
257 .ai_speed = 6250, 258 .ai_speed = 6250,
258 .resolution = 12, 259 .is_01_series = 1,
259 .qram_len = 256, 260 },
260 .common = 1, 261 [BOARD_DAS1702AO] = {
261 .do_n_chan = 4, 262 .name = "das-1702ao",
262 .ao_ability = 1, 263 .id = DAS1800_ID_AO,
263 .ao_n_chan = 4, 264 .ai_speed = 6250,
264 .range_ai = &range_ai_das1802, 265 },
265 }, 266 [BOARD_DAS1801ST] = {
266 { 267 .name = "das-1801st",
267 .name = "das-1702hr", 268 .id = DAS1800_ID_ST,
268 .ai_speed = 20000, 269 .ai_speed = 3000,
269 .resolution = 16, 270 .is_01_series = 1,
270 .qram_len = 256, 271 },
271 .common = 1, 272 [BOARD_DAS1801ST_DA] = {
272 .do_n_chan = 4, 273 .name = "das-1801st-da",
273 .ao_ability = 0, 274 .id = DAS1800_ID_ST_DA,
274 .ao_n_chan = 0, 275 .ai_speed = 3000,
275 .range_ai = &range_ai_das1802, 276 .is_01_series = 1,
276 }, 277 },
277 { 278 [BOARD_DAS1802ST] = {
278 .name = "das-1702hr-da", 279 .name = "das-1802st",
279 .ai_speed = 20000, 280 .id = DAS1800_ID_ST,
280 .resolution = 16, 281 .ai_speed = 3000,
281 .qram_len = 256, 282 },
282 .common = 1, 283 [BOARD_DAS1802ST_DA] = {
283 .do_n_chan = 4, 284 .name = "das-1802st-da",
284 .ao_ability = 1, 285 .id = DAS1800_ID_ST_DA,
285 .ao_n_chan = 2, 286 .ai_speed = 3000,
286 .range_ai = &range_ai_das1802, 287 },
287 }, 288 [BOARD_DAS1802HR] = {
288 { 289 .name = "das-1802hr",
289 .name = "das-1701ao", 290 .id = DAS1800_ID_HR,
290 .ai_speed = 6250, 291 .ai_speed = 10000,
291 .resolution = 12, 292 },
292 .qram_len = 256, 293 [BOARD_DAS1802HR_DA] = {
293 .common = 1, 294 .name = "das-1802hr-da",
294 .do_n_chan = 4, 295 .id = DAS1800_ID_HR_DA,
295 .ao_ability = 2, 296 .ai_speed = 10000,
296 .ao_n_chan = 2, 297 },
297 .range_ai = &range_ai_das1801, 298 [BOARD_DAS1801HC] = {
298 }, 299 .name = "das-1801hc",
299 { 300 .id = DAS1800_ID_HC,
300 .name = "das-1702ao", 301 .ai_speed = 3000,
301 .ai_speed = 6250, 302 .is_01_series = 1,
302 .resolution = 12, 303 },
303 .qram_len = 256, 304 [BOARD_DAS1802HC] = {
304 .common = 1, 305 .name = "das-1802hc",
305 .do_n_chan = 4, 306 .id = DAS1800_ID_HC,
306 .ao_ability = 2, 307 .ai_speed = 3000,
307 .ao_n_chan = 2, 308 },
308 .range_ai = &range_ai_das1802, 309 [BOARD_DAS1801AO] = {
309 }, 310 .name = "das-1801ao",
310 { 311 .id = DAS1800_ID_AO,
311 .name = "das-1801st", 312 .ai_speed = 3000,
312 .ai_speed = 3000, 313 .is_01_series = 1,
313 .resolution = 12, 314 },
314 .qram_len = 256, 315 [BOARD_DAS1802AO] = {
315 .common = 1, 316 .name = "das-1802ao",
316 .do_n_chan = 4, 317 .id = DAS1800_ID_AO,
317 .ao_ability = 0, 318 .ai_speed = 3000,
318 .ao_n_chan = 0, 319 },
319 .range_ai = &range_ai_das1801,
320 },
321 {
322 .name = "das-1801st-da",
323 .ai_speed = 3000,
324 .resolution = 12,
325 .qram_len = 256,
326 .common = 1,
327 .do_n_chan = 4,
328 .ao_ability = 0,
329 .ao_n_chan = 4,
330 .range_ai = &range_ai_das1801,
331 },
332 {
333 .name = "das-1802st",
334 .ai_speed = 3000,
335 .resolution = 12,
336 .qram_len = 256,
337 .common = 1,
338 .do_n_chan = 4,
339 .ao_ability = 0,
340 .ao_n_chan = 0,
341 .range_ai = &range_ai_das1802,
342 },
343 {
344 .name = "das-1802st-da",
345 .ai_speed = 3000,
346 .resolution = 12,
347 .qram_len = 256,
348 .common = 1,
349 .do_n_chan = 4,
350 .ao_ability = 1,
351 .ao_n_chan = 4,
352 .range_ai = &range_ai_das1802,
353 },
354 {
355 .name = "das-1802hr",
356 .ai_speed = 10000,
357 .resolution = 16,
358 .qram_len = 256,
359 .common = 1,
360 .do_n_chan = 4,
361 .ao_ability = 0,
362 .ao_n_chan = 0,
363 .range_ai = &range_ai_das1802,
364 },
365 {
366 .name = "das-1802hr-da",
367 .ai_speed = 10000,
368 .resolution = 16,
369 .qram_len = 256,
370 .common = 1,
371 .do_n_chan = 4,
372 .ao_ability = 1,
373 .ao_n_chan = 2,
374 .range_ai = &range_ai_das1802,
375 },
376 {
377 .name = "das-1801hc",
378 .ai_speed = 3000,
379 .resolution = 12,
380 .qram_len = 64,
381 .common = 0,
382 .do_n_chan = 8,
383 .ao_ability = 1,
384 .ao_n_chan = 2,
385 .range_ai = &range_ai_das1801,
386 },
387 {
388 .name = "das-1802hc",
389 .ai_speed = 3000,
390 .resolution = 12,
391 .qram_len = 64,
392 .common = 0,
393 .do_n_chan = 8,
394 .ao_ability = 1,
395 .ao_n_chan = 2,
396 .range_ai = &range_ai_das1802,
397 },
398 {
399 .name = "das-1801ao",
400 .ai_speed = 3000,
401 .resolution = 12,
402 .qram_len = 256,
403 .common = 1,
404 .do_n_chan = 4,
405 .ao_ability = 2,
406 .ao_n_chan = 2,
407 .range_ai = &range_ai_das1801,
408 },
409 {
410 .name = "das-1802ao",
411 .ai_speed = 3000,
412 .resolution = 12,
413 .qram_len = 256,
414 .common = 1,
415 .do_n_chan = 4,
416 .ao_ability = 2,
417 .ao_n_chan = 2,
418 .range_ai = &range_ai_das1802,
419 },
420}; 320};
421 321
422struct das1800_private { 322struct das1800_private {
423 struct comedi_isadma *dma; 323 struct comedi_isadma *dma;
424 int irq_dma_bits; /* bits for control register b */ 324 int irq_dma_bits;
425 /* dma bits for control register b, stored so that dma can be
426 * turned on and off */
427 int dma_bits; 325 int dma_bits;
428 uint16_t *fifo_buf; /* bounce buffer for analog input FIFO */ 326 unsigned short *fifo_buf;
429 unsigned long iobase2; /* secondary io address used for analog out on 'ao' boards */ 327 unsigned long iobase2;
430 unsigned short ao_update_bits; /* remembers the last write to the 328 bool ai_is_unipolar;
431 * 'update' dac */
432};
433
434/* analog out range for 'ao' boards */
435/*
436static const struct comedi_lrange range_ao_2 = {
437 2, {
438 BIP_RANGE(10),
439 BIP_RANGE(5)
440 }
441}; 329};
442*/
443
444static inline uint16_t munge_bipolar_sample(const struct comedi_device *dev,
445 uint16_t sample)
446{
447 const struct das1800_board *board = dev->board_ptr;
448
449 sample += 1 << (board->resolution - 1);
450 return sample;
451}
452 330
453static void munge_data(struct comedi_device *dev, uint16_t *array, 331static void das1800_ai_munge(struct comedi_device *dev,
454 unsigned int num_elements) 332 struct comedi_subdevice *s,
333 void *data, unsigned int num_bytes,
334 unsigned int start_chan_index)
455{ 335{
336 struct das1800_private *devpriv = dev->private;
337 unsigned short *array = data;
338 unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes);
456 unsigned int i; 339 unsigned int i;
457 int unipolar;
458 340
459 /* see if card is using a unipolar or bipolar range so we can munge data correctly */ 341 if (devpriv->ai_is_unipolar)
460 unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB; 342 return;
461 343
462 /* convert to unsigned type if we are in a bipolar mode */ 344 for (i = 0; i < num_samples; i++)
463 if (!unipolar) { 345 array[i] = comedi_offset_munge(s, array[i]);
464 for (i = 0; i < num_elements; i++)
465 array[i] = munge_bipolar_sample(dev, array[i]);
466 }
467} 346}
468 347
469static void das1800_handle_fifo_half_full(struct comedi_device *dev, 348static void das1800_handle_fifo_half_full(struct comedi_device *dev,
@@ -473,7 +352,6 @@ static void das1800_handle_fifo_half_full(struct comedi_device *dev,
473 unsigned int nsamples = comedi_nsamples_left(s, FIFO_SIZE / 2); 352 unsigned int nsamples = comedi_nsamples_left(s, FIFO_SIZE / 2);
474 353
475 insw(dev->iobase + DAS1800_FIFO, devpriv->fifo_buf, nsamples); 354 insw(dev->iobase + DAS1800_FIFO, devpriv->fifo_buf, nsamples);
476 munge_data(dev, devpriv->fifo_buf, nsamples);
477 comedi_buf_write_samples(s, devpriv->fifo_buf, nsamples); 355 comedi_buf_write_samples(s, devpriv->fifo_buf, nsamples);
478} 356}
479 357
@@ -482,14 +360,9 @@ static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
482{ 360{
483 struct comedi_cmd *cmd = &s->async->cmd; 361 struct comedi_cmd *cmd = &s->async->cmd;
484 unsigned short dpnt; 362 unsigned short dpnt;
485 int unipolar;
486
487 unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB;
488 363
489 while (inb(dev->iobase + DAS1800_STATUS) & FNE) { 364 while (inb(dev->iobase + DAS1800_STATUS) & FNE) {
490 dpnt = inw(dev->iobase + DAS1800_FIFO); 365 dpnt = inw(dev->iobase + DAS1800_FIFO);
491 /* convert to unsigned type */
492 dpnt = munge_bipolar_sample(dev, dpnt);
493 comedi_buf_write_samples(s, &dpnt, 1); 366 comedi_buf_write_samples(s, &dpnt, 1);
494 367
495 if (cmd->stop_src == TRIG_COUNT && 368 if (cmd->stop_src == TRIG_COUNT &&
@@ -498,7 +371,6 @@ static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
498 } 371 }
499} 372}
500 373
501/* Utility function used by das1800_flush_dma() and das1800_handle_dma() */
502static void das1800_flush_dma_channel(struct comedi_device *dev, 374static void das1800_flush_dma_channel(struct comedi_device *dev,
503 struct comedi_subdevice *s, 375 struct comedi_subdevice *s,
504 struct comedi_isadma_desc *desc) 376 struct comedi_isadma_desc *desc)
@@ -511,12 +383,9 @@ static void das1800_flush_dma_channel(struct comedi_device *dev,
511 nsamples = comedi_bytes_to_samples(s, nbytes); 383 nsamples = comedi_bytes_to_samples(s, nbytes);
512 nsamples = comedi_nsamples_left(s, nsamples); 384 nsamples = comedi_nsamples_left(s, nsamples);
513 385
514 munge_data(dev, desc->virt_addr, nsamples);
515 comedi_buf_write_samples(s, desc->virt_addr, nsamples); 386 comedi_buf_write_samples(s, desc->virt_addr, nsamples);
516} 387}
517 388
518/* flushes remaining data from board when external trigger has stopped acquisition
519 * and we are using dma transfers */
520static void das1800_flush_dma(struct comedi_device *dev, 389static void das1800_flush_dma(struct comedi_device *dev,
521 struct comedi_subdevice *s) 390 struct comedi_subdevice *s)
522{ 391{
@@ -560,27 +429,30 @@ static void das1800_handle_dma(struct comedi_device *dev,
560 } 429 }
561} 430}
562 431
563static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s) 432static int das1800_ai_cancel(struct comedi_device *dev,
433 struct comedi_subdevice *s)
564{ 434{
565 struct das1800_private *devpriv = dev->private; 435 struct das1800_private *devpriv = dev->private;
566 struct comedi_isadma *dma = devpriv->dma; 436 struct comedi_isadma *dma = devpriv->dma;
567 struct comedi_isadma_desc *desc; 437 struct comedi_isadma_desc *desc;
568 int i; 438 int i;
569 439
570 outb(0x0, dev->iobase + DAS1800_STATUS); /* disable conversions */ 440 /* disable and stop conversions */
571 outb(0x0, dev->iobase + DAS1800_CONTROL_B); /* disable interrupts and dma */ 441 outb(0x0, dev->iobase + DAS1800_STATUS);
572 outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* disable and clear fifo and stop triggering */ 442 outb(0x0, dev->iobase + DAS1800_CONTROL_B);
443 outb(0x0, dev->iobase + DAS1800_CONTROL_A);
573 444
574 for (i = 0; i < 2; i++) { 445 if (dma) {
575 desc = &dma->desc[i]; 446 for (i = 0; i < 2; i++) {
576 if (desc->chan) 447 desc = &dma->desc[i];
577 comedi_isadma_disable(desc->chan); 448 if (desc->chan)
449 comedi_isadma_disable(desc->chan);
450 }
578 } 451 }
579 452
580 return 0; 453 return 0;
581} 454}
582 455
583/* the guts of the interrupt handler, that is shared with das1800_ai_poll */
584static void das1800_ai_handler(struct comedi_device *dev) 456static void das1800_ai_handler(struct comedi_device *dev)
585{ 457{
586 struct das1800_private *devpriv = dev->private; 458 struct das1800_private *devpriv = dev->private;
@@ -589,17 +461,16 @@ static void das1800_ai_handler(struct comedi_device *dev)
589 struct comedi_cmd *cmd = &async->cmd; 461 struct comedi_cmd *cmd = &async->cmd;
590 unsigned int status = inb(dev->iobase + DAS1800_STATUS); 462 unsigned int status = inb(dev->iobase + DAS1800_STATUS);
591 463
592 /* select adc for base address + 0 */ 464 /* select adc register (spinlock is already held) */
593 outb(ADC, dev->iobase + DAS1800_SELECT); 465 outb(ADC, dev->iobase + DAS1800_SELECT);
594 /* dma buffer full */ 466
595 if (devpriv->irq_dma_bits & DMA_ENABLED) { 467 /* get samples with dma, fifo, or polled as necessary */
596 /* look for data from dma transfer even if dma terminal count hasn't happened yet */ 468 if (devpriv->irq_dma_bits & DMA_ENABLED)
597 das1800_handle_dma(dev, s, status); 469 das1800_handle_dma(dev, s, status);
598 } else if (status & FHF) { /* if fifo half full */ 470 else if (status & FHF)
599 das1800_handle_fifo_half_full(dev, s); 471 das1800_handle_fifo_half_full(dev, s);
600 } else if (status & FNE) { /* if fifo not empty */ 472 else if (status & FNE)
601 das1800_handle_fifo_not_empty(dev, s); 473 das1800_handle_fifo_not_empty(dev, s);
602 }
603 474
604 /* if the card's fifo has overflowed */ 475 /* if the card's fifo has overflowed */
605 if (status & OVF) { 476 if (status & OVF) {
@@ -615,7 +486,7 @@ static void das1800_ai_handler(struct comedi_device *dev)
615 if (status & CT0TC) { 486 if (status & CT0TC) {
616 /* clear CT0TC interrupt bit */ 487 /* clear CT0TC interrupt bit */
617 outb(CLEAR_INTR_MASK & ~CT0TC, dev->iobase + DAS1800_STATUS); 488 outb(CLEAR_INTR_MASK & ~CT0TC, dev->iobase + DAS1800_STATUS);
618 /* make sure we get all remaining data from board before quitting */ 489 /* get all remaining samples before quitting */
619 if (devpriv->irq_dma_bits & DMA_ENABLED) 490 if (devpriv->irq_dma_bits & DMA_ENABLED)
620 das1800_flush_dma(dev, s); 491 das1800_flush_dma(dev, s);
621 else 492 else
@@ -634,9 +505,14 @@ static int das1800_ai_poll(struct comedi_device *dev,
634{ 505{
635 unsigned long flags; 506 unsigned long flags;
636 507
637 /* prevent race with interrupt handler */ 508 /*
509 * Protects the indirect addressing selected by DAS1800_SELECT
510 * in das1800_ai_handler() also prevents race with das1800_interrupt().
511 */
638 spin_lock_irqsave(&dev->spinlock, flags); 512 spin_lock_irqsave(&dev->spinlock, flags);
513
639 das1800_ai_handler(dev); 514 das1800_ai_handler(dev);
515
640 spin_unlock_irqrestore(&dev->spinlock, flags); 516 spin_unlock_irqrestore(&dev->spinlock, flags);
641 517
642 return comedi_buf_n_bytes_ready(s); 518 return comedi_buf_n_bytes_ready(s);
@@ -652,9 +528,12 @@ static irqreturn_t das1800_interrupt(int irq, void *d)
652 return IRQ_HANDLED; 528 return IRQ_HANDLED;
653 } 529 }
654 530
655 /* Prevent race with das1800_ai_poll() on multi processor systems. 531 /*
656 * Also protects indirect addressing in das1800_ai_handler */ 532 * Protects the indirect addressing selected by DAS1800_SELECT
533 * in das1800_ai_handler() also prevents race with das1800_ai_poll().
534 */
657 spin_lock(&dev->spinlock); 535 spin_lock(&dev->spinlock);
536
658 status = inb(dev->iobase + DAS1800_STATUS); 537 status = inb(dev->iobase + DAS1800_STATUS);
659 538
660 /* if interrupt was not caused by das-1800 */ 539 /* if interrupt was not caused by das-1800 */
@@ -671,46 +550,87 @@ static irqreturn_t das1800_interrupt(int irq, void *d)
671 return IRQ_HANDLED; 550 return IRQ_HANDLED;
672} 551}
673 552
674/* converts requested conversion timing to timing compatible with 553static int das1800_ai_fixup_paced_timing(struct comedi_device *dev,
675 * hardware, used only when card is in 'burst mode' 554 struct comedi_cmd *cmd)
676 */ 555{
677static unsigned int burst_convert_arg(unsigned int convert_arg, int flags) 556 unsigned int arg = cmd->convert_arg;
557
558 /*
559 * Paced mode:
560 * scan_begin_src is TRIG_FOLLOW
561 * convert_src is TRIG_TIMER
562 *
563 * The convert_arg sets the pacer sample acquisition time.
564 * The max acquisition speed is limited to the boards
565 * 'ai_speed' (this was already verified). The min speed is
566 * limited by the cascaded 8254 timer.
567 */
568 comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
569 return comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
570}
571
572static int das1800_ai_fixup_burst_timing(struct comedi_device *dev,
573 struct comedi_cmd *cmd)
678{ 574{
679 unsigned int micro_sec; 575 unsigned int arg = cmd->convert_arg;
576 int err = 0;
680 577
681 /* in burst mode, the maximum conversion time is 64 microseconds */ 578 /*
682 if (convert_arg > 64000) 579 * Burst mode:
683 convert_arg = 64000; 580 * scan_begin_src is TRIG_TIMER or TRIG_EXT
581 * convert_src is TRIG_TIMER
582 *
583 * The convert_arg sets burst sample acquisition time.
584 * The max acquisition speed is limited to the boards
585 * 'ai_speed' (this was already verified). The min speed is
586 * limiited to 64 microseconds,
587 */
588 err |= comedi_check_trigger_arg_max(&arg, 64000);
684 589
685 /* the conversion time must be an integral number of microseconds */ 590 /* round to microseconds then verify */
686 switch (flags & CMDF_ROUND_MASK) { 591 switch (cmd->flags & CMDF_ROUND_MASK) {
687 case CMDF_ROUND_NEAREST: 592 case CMDF_ROUND_NEAREST:
688 default: 593 default:
689 micro_sec = (convert_arg + 500) / 1000; 594 arg = DIV_ROUND_CLOSEST(arg, 1000);
690 break; 595 break;
691 case CMDF_ROUND_DOWN: 596 case CMDF_ROUND_DOWN:
692 micro_sec = convert_arg / 1000; 597 arg = arg / 1000;
693 break; 598 break;
694 case CMDF_ROUND_UP: 599 case CMDF_ROUND_UP:
695 micro_sec = (convert_arg - 1) / 1000 + 1; 600 arg = DIV_ROUND_UP(arg, 1000);
696 break; 601 break;
697 } 602 }
603 err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg * 1000);
698 604
699 /* return number of nanoseconds */ 605 /*
700 return micro_sec * 1000; 606 * The pacer can be used to set the scan sample rate. The max scan
607 * speed is limited by the conversion speed and the number of channels
608 * to convert. The min speed is limited by the cascaded 8254 timer.
609 */
610 if (cmd->scan_begin_src == TRIG_TIMER) {
611 arg = cmd->convert_arg * cmd->chanlist_len;
612 err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, arg);
613
614 arg = cmd->scan_begin_arg;
615 comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
616 err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
617 }
618
619 return err;
701} 620}
702 621
703static int das1800_ai_check_chanlist(struct comedi_device *dev, 622static int das1800_ai_check_chanlist(struct comedi_device *dev,
704 struct comedi_subdevice *s, 623 struct comedi_subdevice *s,
705 struct comedi_cmd *cmd) 624 struct comedi_cmd *cmd)
706{ 625{
707 unsigned int unipolar0 = CR_RANGE(cmd->chanlist[0]) & UNIPOLAR; 626 unsigned int range = CR_RANGE(cmd->chanlist[0]);
627 bool unipolar0 = comedi_range_is_unipolar(s, range);
708 int i; 628 int i;
709 629
710 for (i = 1; i < cmd->chanlist_len; i++) { 630 for (i = 1; i < cmd->chanlist_len; i++) {
711 unsigned int unipolar = CR_RANGE(cmd->chanlist[i]) & UNIPOLAR; 631 range = CR_RANGE(cmd->chanlist[i]);
712 632
713 if (unipolar != unipolar0) { 633 if (unipolar0 != comedi_range_is_unipolar(s, range)) {
714 dev_dbg(dev->class_dev, 634 dev_dbg(dev->class_dev,
715 "unipolar and bipolar ranges cannot be mixed in the chanlist\n"); 635 "unipolar and bipolar ranges cannot be mixed in the chanlist\n");
716 return -EINVAL; 636 return -EINVAL;
@@ -720,14 +640,12 @@ static int das1800_ai_check_chanlist(struct comedi_device *dev,
720 return 0; 640 return 0;
721} 641}
722 642
723/* test analog input cmd */ 643static int das1800_ai_cmdtest(struct comedi_device *dev,
724static int das1800_ai_do_cmdtest(struct comedi_device *dev, 644 struct comedi_subdevice *s,
725 struct comedi_subdevice *s, 645 struct comedi_cmd *cmd)
726 struct comedi_cmd *cmd)
727{ 646{
728 const struct das1800_board *board = dev->board_ptr; 647 const struct das1800_board *board = dev->board_ptr;
729 int err = 0; 648 int err = 0;
730 unsigned int arg;
731 649
732 /* Step 1 : check if triggers are trivially valid */ 650 /* Step 1 : check if triggers are trivially valid */
733 651
@@ -752,16 +670,23 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
752 670
753 /* Step 2b : and mutually compatible */ 671 /* Step 2b : and mutually compatible */
754 672
673 /* burst scans must use timed conversions */
755 if (cmd->scan_begin_src != TRIG_FOLLOW && 674 if (cmd->scan_begin_src != TRIG_FOLLOW &&
756 cmd->convert_src != TRIG_TIMER) 675 cmd->convert_src != TRIG_TIMER)
757 err |= -EINVAL; 676 err |= -EINVAL;
758 677
678 /* the external pin TGIN must use the same polarity */
679 if (cmd->start_src == TRIG_EXT && cmd->stop_src == TRIG_EXT)
680 err |= comedi_check_trigger_arg_is(&cmd->start_arg,
681 cmd->stop_arg);
682
759 if (err) 683 if (err)
760 return 2; 684 return 2;
761 685
762 /* Step 3: check if arguments are trivially valid */ 686 /* Step 3: check if arguments are trivially valid */
763 687
764 err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0); 688 if (cmd->start_arg == TRIG_NOW)
689 err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
765 690
766 if (cmd->convert_src == TRIG_TIMER) { 691 if (cmd->convert_src == TRIG_TIMER) {
767 err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 692 err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
@@ -786,31 +711,13 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
786 if (err) 711 if (err)
787 return 3; 712 return 3;
788 713
789 /* step 4: fix up any arguments */ 714 /* Step 4: fix up any arguments */
790 715
791 if (cmd->scan_begin_src == TRIG_FOLLOW && 716 if (cmd->convert_src == TRIG_TIMER) {
792 cmd->convert_src == TRIG_TIMER) { 717 if (cmd->scan_begin_src == TRIG_FOLLOW)
793 /* we are not in burst mode */ 718 err |= das1800_ai_fixup_paced_timing(dev, cmd);
794 arg = cmd->convert_arg; 719 else /* TRIG_TIMER or TRIG_EXT */
795 comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags); 720 err |= das1800_ai_fixup_burst_timing(dev, cmd);
796 err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
797 } else if (cmd->convert_src == TRIG_TIMER) {
798 /* we are in burst mode */
799 arg = burst_convert_arg(cmd->convert_arg, cmd->flags);
800 err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
801
802 if (cmd->scan_begin_src == TRIG_TIMER) {
803 arg = cmd->convert_arg * cmd->chanlist_len;
804 err |= comedi_check_trigger_arg_max(&cmd->
805 scan_begin_arg,
806 arg);
807
808 arg = cmd->scan_begin_arg;
809 comedi_8254_cascade_ns_to_timer(dev->pacer, &arg,
810 cmd->flags);
811 err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg,
812 arg);
813 }
814 } 721 }
815 722
816 if (err) 723 if (err)
@@ -826,74 +733,22 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
826 return 0; 733 return 0;
827} 734}
828 735
829/* returns appropriate bits for control register a, depending on command */ 736static unsigned char das1800_ai_chanspec_bits(struct comedi_subdevice *s,
830static int control_a_bits(const struct comedi_cmd *cmd) 737 unsigned int chanspec)
831{
832 int control_a;
833
834 control_a = FFEN; /* enable fifo */
835 if (cmd->stop_src == TRIG_EXT)
836 control_a |= ATEN;
837 switch (cmd->start_src) {
838 case TRIG_EXT:
839 control_a |= TGEN | CGSL;
840 break;
841 case TRIG_NOW:
842 control_a |= CGEN;
843 break;
844 default:
845 break;
846 }
847
848 return control_a;
849}
850
851/* returns appropriate bits for control register c, depending on command */
852static int control_c_bits(const struct comedi_cmd *cmd)
853{ 738{
854 int control_c; 739 unsigned int range = CR_RANGE(chanspec);
855 int aref; 740 unsigned int aref = CR_AREF(chanspec);
741 unsigned char bits;
856 742
857 /* set clock source to internal or external, select analog reference, 743 bits = UQEN;
858 * select unipolar / bipolar
859 */
860 aref = CR_AREF(cmd->chanlist[0]);
861 control_c = UQEN; /* enable upper qram addresses */
862 if (aref != AREF_DIFF) 744 if (aref != AREF_DIFF)
863 control_c |= SD; 745 bits |= SD;
864 if (aref == AREF_COMMON) 746 if (aref == AREF_COMMON)
865 control_c |= CMEN; 747 bits |= CMEN;
866 /* if a unipolar range was selected */ 748 if (comedi_range_is_unipolar(s, range))
867 if (CR_RANGE(cmd->chanlist[0]) & UNIPOLAR) 749 bits |= UB;
868 control_c |= UB;
869 switch (cmd->scan_begin_src) {
870 case TRIG_FOLLOW: /* not in burst mode */
871 switch (cmd->convert_src) {
872 case TRIG_TIMER:
873 /* trig on cascaded counters */
874 control_c |= IPCLK;
875 break;
876 case TRIG_EXT:
877 /* trig on falling edge of external trigger */
878 control_c |= XPCLK;
879 break;
880 default:
881 break;
882 }
883 break;
884 case TRIG_TIMER:
885 /* burst mode with internal pacer clock */
886 control_c |= BMDE | IPCLK;
887 break;
888 case TRIG_EXT:
889 /* burst mode with external trigger */
890 control_c |= BMDE | XPCLK;
891 break;
892 default:
893 break;
894 }
895 750
896 return control_c; 751 return bits;
897} 752}
898 753
899static unsigned int das1800_ai_transfer_size(struct comedi_device *dev, 754static unsigned int das1800_ai_transfer_size(struct comedi_device *dev,
@@ -934,13 +789,14 @@ static void das1800_ai_setup_dma(struct comedi_device *dev,
934{ 789{
935 struct das1800_private *devpriv = dev->private; 790 struct das1800_private *devpriv = dev->private;
936 struct comedi_isadma *dma = devpriv->dma; 791 struct comedi_isadma *dma = devpriv->dma;
937 struct comedi_isadma_desc *desc = &dma->desc[0]; 792 struct comedi_isadma_desc *desc;
938 unsigned int bytes; 793 unsigned int bytes;
939 794
940 if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0) 795 if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0)
941 return; 796 return;
942 797
943 dma->cur_dma = 0; 798 dma->cur_dma = 0;
799 desc = &dma->desc[0];
944 800
945 /* determine a dma transfer size to fill buffer in 0.3 sec */ 801 /* determine a dma transfer size to fill buffer in 0.3 sec */
946 bytes = das1800_ai_transfer_size(dev, s, desc->maxsize, 300000000); 802 bytes = das1800_ai_transfer_size(dev, s, desc->maxsize, 300000000);
@@ -956,43 +812,48 @@ static void das1800_ai_setup_dma(struct comedi_device *dev,
956 } 812 }
957} 813}
958 814
959/* programs channel/gain list into card */ 815static void das1800_ai_set_chanlist(struct comedi_device *dev,
960static void program_chanlist(struct comedi_device *dev, 816 unsigned int *chanlist, unsigned int len)
961 const struct comedi_cmd *cmd)
962{ 817{
963 int i, n, chan_range; 818 unsigned long flags;
964 unsigned long irq_flags; 819 unsigned int i;
965 const int range_mask = 0x3; /* masks unipolar/bipolar bit off range */ 820
966 const int range_bitshift = 8; 821 /* protects the indirect addressing selected by DAS1800_SELECT */
967 822 spin_lock_irqsave(&dev->spinlock, flags);
968 n = cmd->chanlist_len; 823
969 /* spinlock protects indirect addressing */ 824 /* select QRAM register and set start address */
970 spin_lock_irqsave(&dev->spinlock, irq_flags); 825 outb(QRAM, dev->iobase + DAS1800_SELECT);
971 outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */ 826 outb(len - 1, dev->iobase + DAS1800_QRAM_ADDRESS);
972 outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*set QRAM address start */ 827
973 /* make channel / gain list */ 828 /* make channel / gain list */
974 for (i = 0; i < n; i++) { 829 for (i = 0; i < len; i++) {
975 chan_range = 830 unsigned int chan = CR_CHAN(chanlist[i]);
976 CR_CHAN(cmd->chanlist[i]) | 831 unsigned int range = CR_RANGE(chanlist[i]);
977 ((CR_RANGE(cmd->chanlist[i]) & range_mask) << 832 unsigned short val;
978 range_bitshift); 833
979 outw(chan_range, dev->iobase + DAS1800_QRAM); 834 val = chan | ((range & 0x3) << 8);
835 outw(val, dev->iobase + DAS1800_QRAM);
980 } 836 }
981 outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */ 837
982 spin_unlock_irqrestore(&dev->spinlock, irq_flags); 838 /* finish write to QRAM */
839 outb(len - 1, dev->iobase + DAS1800_QRAM_ADDRESS);
840
841 spin_unlock_irqrestore(&dev->spinlock, flags);
983} 842}
984 843
985/* analog input do_cmd */ 844static int das1800_ai_cmd(struct comedi_device *dev,
986static int das1800_ai_do_cmd(struct comedi_device *dev, 845 struct comedi_subdevice *s)
987 struct comedi_subdevice *s)
988{ 846{
989 struct das1800_private *devpriv = dev->private; 847 struct das1800_private *devpriv = dev->private;
990 int control_a, control_c; 848 int control_a, control_c;
991 struct comedi_async *async = s->async; 849 struct comedi_async *async = s->async;
992 const struct comedi_cmd *cmd = &async->cmd; 850 const struct comedi_cmd *cmd = &async->cmd;
851 unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
993 852
994 /* disable dma on CMDF_WAKE_EOS, or CMDF_PRIORITY 853 /*
995 * (because dma in handler is unsafe at hard real-time priority) */ 854 * Disable dma on CMDF_WAKE_EOS, or CMDF_PRIORITY (because dma in
855 * handler is unsafe at hard real-time priority).
856 */
996 if (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY)) 857 if (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY))
997 devpriv->irq_dma_bits &= ~DMA_ENABLED; 858 devpriv->irq_dma_bits &= ~DMA_ENABLED;
998 else 859 else
@@ -1006,14 +867,42 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
1006 devpriv->irq_dma_bits |= FIMD; 867 devpriv->irq_dma_bits |= FIMD;
1007 } 868 }
1008 869
1009 das1800_cancel(dev, s); 870 das1800_ai_cancel(dev, s);
871
872 devpriv->ai_is_unipolar = comedi_range_is_unipolar(s, range0);
1010 873
1011 /* determine proper bits for control registers */ 874 control_a = FFEN;
1012 control_a = control_a_bits(cmd); 875 if (cmd->stop_src == TRIG_EXT)
1013 control_c = control_c_bits(cmd); 876 control_a |= ATEN;
877 if (cmd->start_src == TRIG_EXT)
878 control_a |= TGEN | CGSL;
879 else /* TRIG_NOW */
880 control_a |= CGEN;
881 if (control_a & (ATEN | TGEN)) {
882 if ((cmd->start_arg & CR_INVERT) || (cmd->stop_arg & CR_INVERT))
883 control_a |= TGPL;
884 }
885
886 control_c = das1800_ai_chanspec_bits(s, cmd->chanlist[0]);
887 /* set clock source to internal or external */
888 if (cmd->scan_begin_src == TRIG_FOLLOW) {
889 /* not in burst mode */
890 if (cmd->convert_src == TRIG_TIMER) {
891 /* trig on cascaded counters */
892 control_c |= IPCLK;
893 } else { /* TRIG_EXT */
894 /* trig on falling edge of external trigger */
895 control_c |= XPCLK;
896 }
897 } else if (cmd->scan_begin_src == TRIG_TIMER) {
898 /* burst mode with internal pacer clock */
899 control_c |= BMDE | IPCLK;
900 } else { /* TRIG_EXT */
901 /* burst mode with external trigger */
902 control_c |= BMDE | XPCLK;
903 }
1014 904
1015 /* setup card and start */ 905 das1800_ai_set_chanlist(dev, cmd->chanlist, cmd->chanlist_len);
1016 program_chanlist(dev, cmd);
1017 906
1018 /* setup cascaded counters for conversion/scan frequency */ 907 /* setup cascaded counters for conversion/scan frequency */
1019 if ((cmd->scan_begin_src == TRIG_FOLLOW || 908 if ((cmd->scan_begin_src == TRIG_FOLLOW ||
@@ -1031,118 +920,117 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
1031 outb(control_c, dev->iobase + DAS1800_CONTROL_C); 920 outb(control_c, dev->iobase + DAS1800_CONTROL_C);
1032 /* set conversion rate and length for burst mode */ 921 /* set conversion rate and length for burst mode */
1033 if (control_c & BMDE) { 922 if (control_c & BMDE) {
1034 /* program conversion period with number of microseconds minus 1 */ 923 outb(cmd->convert_arg / 1000 - 1, /* microseconds - 1 */
1035 outb(cmd->convert_arg / 1000 - 1,
1036 dev->iobase + DAS1800_BURST_RATE); 924 dev->iobase + DAS1800_BURST_RATE);
1037 outb(cmd->chanlist_len - 1, dev->iobase + DAS1800_BURST_LENGTH); 925 outb(cmd->chanlist_len - 1, dev->iobase + DAS1800_BURST_LENGTH);
1038 } 926 }
1039 outb(devpriv->irq_dma_bits, dev->iobase + DAS1800_CONTROL_B); /* enable irq/dma */ 927
1040 outb(control_a, dev->iobase + DAS1800_CONTROL_A); /* enable fifo and triggering */ 928 /* enable and start conversions */
1041 outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */ 929 outb(devpriv->irq_dma_bits, dev->iobase + DAS1800_CONTROL_B);
930 outb(control_a, dev->iobase + DAS1800_CONTROL_A);
931 outb(CVEN, dev->iobase + DAS1800_STATUS);
1042 932
1043 return 0; 933 return 0;
1044} 934}
1045 935
1046/* read analog input */ 936static int das1800_ai_eoc(struct comedi_device *dev,
1047static int das1800_ai_rinsn(struct comedi_device *dev, 937 struct comedi_subdevice *s,
1048 struct comedi_subdevice *s, 938 struct comedi_insn *insn,
1049 struct comedi_insn *insn, unsigned int *data) 939 unsigned long context)
1050{ 940{
1051 const struct das1800_board *board = dev->board_ptr; 941 unsigned char status;
1052 int i, n;
1053 int chan, range, aref, chan_range;
1054 int timeout = 1000;
1055 unsigned short dpnt;
1056 int conv_flags = 0;
1057 unsigned long irq_flags;
1058 942
1059 /* set up analog reference and unipolar / bipolar mode */ 943 status = inb(dev->iobase + DAS1800_STATUS);
1060 aref = CR_AREF(insn->chanspec); 944 if (status & FNE)
1061 conv_flags |= UQEN; 945 return 0;
1062 if (aref != AREF_DIFF) 946 return -EBUSY;
1063 conv_flags |= SD; 947}
1064 if (aref == AREF_COMMON) 948
1065 conv_flags |= CMEN; 949static int das1800_ai_insn_read(struct comedi_device *dev,
1066 /* if a unipolar range was selected */ 950 struct comedi_subdevice *s,
1067 if (CR_RANGE(insn->chanspec) & UNIPOLAR) 951 struct comedi_insn *insn,
1068 conv_flags |= UB; 952 unsigned int *data)
953{
954 unsigned int range = CR_RANGE(insn->chanspec);
955 bool is_unipolar = comedi_range_is_unipolar(s, range);
956 int ret = 0;
957 int n;
958 unsigned short dpnt;
959 unsigned long flags;
1069 960
1070 outb(conv_flags, dev->iobase + DAS1800_CONTROL_C); /* software conversion enabled */ 961 outb(das1800_ai_chanspec_bits(s, insn->chanspec),
962 dev->iobase + DAS1800_CONTROL_C); /* software pacer */
1071 outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */ 963 outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */
1072 outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* reset fifo */ 964 outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* reset fifo */
1073 outb(FFEN, dev->iobase + DAS1800_CONTROL_A); 965 outb(FFEN, dev->iobase + DAS1800_CONTROL_A);
1074 966
1075 chan = CR_CHAN(insn->chanspec); 967 das1800_ai_set_chanlist(dev, &insn->chanspec, 1);
1076 /* mask of unipolar/bipolar bit from range */ 968
1077 range = CR_RANGE(insn->chanspec) & 0x3; 969 /* protects the indirect addressing selected by DAS1800_SELECT */
1078 chan_range = chan | (range << 8); 970 spin_lock_irqsave(&dev->spinlock, flags);
1079 spin_lock_irqsave(&dev->spinlock, irq_flags); 971
1080 outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */ 972 /* select ai fifo register */
1081 outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /* set QRAM address start */ 973 outb(ADC, dev->iobase + DAS1800_SELECT);
1082 outw(chan_range, dev->iobase + DAS1800_QRAM);
1083 outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */
1084 outb(ADC, dev->iobase + DAS1800_SELECT); /* select ADC for baseAddress + 0x0 */
1085 974
1086 for (n = 0; n < insn->n; n++) { 975 for (n = 0; n < insn->n; n++) {
1087 /* trigger conversion */ 976 /* trigger conversion */
1088 outb(0, dev->iobase + DAS1800_FIFO); 977 outb(0, dev->iobase + DAS1800_FIFO);
1089 for (i = 0; i < timeout; i++) { 978
1090 if (inb(dev->iobase + DAS1800_STATUS) & FNE) 979 ret = comedi_timeout(dev, s, insn, das1800_ai_eoc, 0);
1091 break; 980 if (ret)
1092 } 981 break;
1093 if (i == timeout) { 982
1094 dev_err(dev->class_dev, "timeout\n");
1095 n = -ETIME;
1096 goto exit;
1097 }
1098 dpnt = inw(dev->iobase + DAS1800_FIFO); 983 dpnt = inw(dev->iobase + DAS1800_FIFO);
1099 /* shift data to offset binary for bipolar ranges */ 984 if (!is_unipolar)
1100 if ((conv_flags & UB) == 0) 985 dpnt = comedi_offset_munge(s, dpnt);
1101 dpnt += 1 << (board->resolution - 1);
1102 data[n] = dpnt; 986 data[n] = dpnt;
1103 } 987 }
1104exit: 988 spin_unlock_irqrestore(&dev->spinlock, flags);
1105 spin_unlock_irqrestore(&dev->spinlock, irq_flags);
1106 989
1107 return n; 990 return ret ? ret : insn->n;
1108} 991}
1109 992
1110/* writes to an analog output channel */ 993static int das1800_ao_insn_write(struct comedi_device *dev,
1111static int das1800_ao_winsn(struct comedi_device *dev, 994 struct comedi_subdevice *s,
1112 struct comedi_subdevice *s, 995 struct comedi_insn *insn,
1113 struct comedi_insn *insn, unsigned int *data) 996 unsigned int *data)
1114{ 997{
1115 const struct das1800_board *board = dev->board_ptr; 998 unsigned int chan = CR_CHAN(insn->chanspec);
1116 struct das1800_private *devpriv = dev->private; 999 unsigned int update_chan = s->n_chan - 1;
1117 int chan = CR_CHAN(insn->chanspec); 1000 unsigned long flags;
1118/* int range = CR_RANGE(insn->chanspec); */ 1001 int i;
1119 int update_chan = board->ao_n_chan - 1; 1002
1120 unsigned short output; 1003 /* protects the indirect addressing selected by DAS1800_SELECT */
1121 unsigned long irq_flags; 1004 spin_lock_irqsave(&dev->spinlock, flags);
1122 1005
1123 /* card expects two's complement data */ 1006 for (i = 0; i < insn->n; i++) {
1124 output = data[0] - (1 << (board->resolution - 1)); 1007 unsigned int val = data[i];
1125 /* if the write is to the 'update' channel, we need to remember its value */ 1008
1126 if (chan == update_chan) 1009 s->readback[chan] = val;
1127 devpriv->ao_update_bits = output; 1010
1128 /* write to channel */ 1011 val = comedi_offset_munge(s, val);
1129 spin_lock_irqsave(&dev->spinlock, irq_flags); 1012
1130 outb(DAC(chan), dev->iobase + DAS1800_SELECT); /* select dac channel for baseAddress + 0x0 */ 1013 /* load this channel (and update if it's the last channel) */
1131 outw(output, dev->iobase + DAS1800_DAC); 1014 outb(DAC(chan), dev->iobase + DAS1800_SELECT);
1132 /* now we need to write to 'update' channel to update all dac channels */ 1015 outw(val, dev->iobase + DAS1800_DAC);
1133 if (chan != update_chan) { 1016
1134 outb(DAC(update_chan), dev->iobase + DAS1800_SELECT); /* select 'update' channel for baseAddress + 0x0 */ 1017 /* update all channels */
1135 outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC); 1018 if (chan != update_chan) {
1019 val = comedi_offset_munge(s, s->readback[update_chan]);
1020
1021 outb(DAC(update_chan), dev->iobase + DAS1800_SELECT);
1022 outw(val, dev->iobase + DAS1800_DAC);
1023 }
1136 } 1024 }
1137 spin_unlock_irqrestore(&dev->spinlock, irq_flags); 1025 spin_unlock_irqrestore(&dev->spinlock, flags);
1138 1026
1139 return 1; 1027 return insn->n;
1140} 1028}
1141 1029
1142/* reads from digital input channels */ 1030static int das1800_di_insn_bits(struct comedi_device *dev,
1143static int das1800_di_rbits(struct comedi_device *dev, 1031 struct comedi_subdevice *s,
1144 struct comedi_subdevice *s, 1032 struct comedi_insn *insn,
1145 struct comedi_insn *insn, unsigned int *data) 1033 unsigned int *data)
1146{ 1034{
1147 data[1] = inb(dev->iobase + DAS1800_DIGITAL) & 0xf; 1035 data[1] = inb(dev->iobase + DAS1800_DIGITAL) & 0xf;
1148 data[0] = 0; 1036 data[0] = 0;
@@ -1150,10 +1038,10 @@ static int das1800_di_rbits(struct comedi_device *dev,
1150 return insn->n; 1038 return insn->n;
1151} 1039}
1152 1040
1153static int das1800_do_wbits(struct comedi_device *dev, 1041static int das1800_do_insn_bits(struct comedi_device *dev,
1154 struct comedi_subdevice *s, 1042 struct comedi_subdevice *s,
1155 struct comedi_insn *insn, 1043 struct comedi_insn *insn,
1156 unsigned int *data) 1044 unsigned int *data)
1157{ 1045{
1158 if (comedi_dio_update_state(s, data)) 1046 if (comedi_dio_update_state(s, data))
1159 outb(s->state, dev->iobase + DAS1800_DIGITAL); 1047 outb(s->state, dev->iobase + DAS1800_DIGITAL);
@@ -1216,68 +1104,68 @@ static void das1800_free_dma(struct comedi_device *dev)
1216 comedi_isadma_free(devpriv->dma); 1104 comedi_isadma_free(devpriv->dma);
1217} 1105}
1218 1106
1219static const struct das1800_board *das1800_probe(struct comedi_device *dev) 1107static int das1800_probe(struct comedi_device *dev)
1220{ 1108{
1221 const struct das1800_board *board = dev->board_ptr; 1109 const struct das1800_board *board = dev->board_ptr;
1222 int index = board ? board - das1800_boards : -EINVAL; 1110 unsigned char id;
1223 int id; 1111
1112 id = (inb(dev->iobase + DAS1800_DIGITAL) >> 4) & 0xf;
1224 1113
1225 /* 1114 /*
1226 * The dev->board_ptr will be set by comedi_device_attach() if the 1115 * The dev->board_ptr will be set by comedi_device_attach() if the
1227 * board name provided by the user matches a board->name in this 1116 * board name provided by the user matches a board->name in this
1228 * driver. If so, this function sanity checks the id to verify that 1117 * driver. If so, this function sanity checks the id to verify that
1229 * the board is correct. 1118 * the board is correct.
1230 *
1231 * If the dev->board_ptr is not set, the user is trying to attach
1232 * an unspecified board to this driver. In this case the id is used
1233 * to 'probe' for the correct dev->board_ptr.
1234 */ 1119 */
1235 id = (inb(dev->iobase + DAS1800_DIGITAL) >> 4) & 0xf; 1120 if (board) {
1121 if (board->id == id)
1122 return 0;
1123 dev_err(dev->class_dev,
1124 "probed id does not match board id (0x%x != 0x%x)\n",
1125 id, board->id);
1126 return -ENODEV;
1127 }
1128
1129 /*
1130 * If the dev->board_ptr is not set, the user is trying to attach
1131 * an unspecified board to this driver. In this case the id is used
1132 * to 'probe' for the dev->board_ptr.
1133 */
1236 switch (id) { 1134 switch (id) {
1237 case 0x3: 1135 case DAS1800_ID_ST_DA:
1238 if (index == das1801st_da || index == das1802st_da || 1136 /* das-1701st-da, das-1702st-da, das-1801st-da, das-1802st-da */
1239 index == das1701st_da || index == das1702st_da) 1137 board = &das1800_boards[BOARD_DAS1801ST_DA];
1240 return board;
1241 index = das1801st;
1242 break; 1138 break;
1243 case 0x4: 1139 case DAS1800_ID_HR_DA:
1244 if (index == das1802hr_da || index == das1702hr_da) 1140 /* das-1702hr-da, das-1802hr-da */
1245 return board; 1141 board = &das1800_boards[BOARD_DAS1802HR_DA];
1246 index = das1802hr;
1247 break; 1142 break;
1248 case 0x5: 1143 case DAS1800_ID_AO:
1249 if (index == das1801ao || index == das1802ao || 1144 /* das-1701ao, das-1702ao, das-1801ao, das-1802ao */
1250 index == das1701ao || index == das1702ao) 1145 board = &das1800_boards[BOARD_DAS1801AO];
1251 return board;
1252 index = das1801ao;
1253 break; 1146 break;
1254 case 0x6: 1147 case DAS1800_ID_HR:
1255 if (index == das1802hr || index == das1702hr) 1148 /* das-1702hr, das-1802hr */
1256 return board; 1149 board = &das1800_boards[BOARD_DAS1802HR];
1257 index = das1802hr;
1258 break; 1150 break;
1259 case 0x7: 1151 case DAS1800_ID_ST:
1260 if (index == das1801st || index == das1802st || 1152 /* das-1701st, das-1702st, das-1801st, das-1802st */
1261 index == das1701st || index == das1702st) 1153 board = &das1800_boards[BOARD_DAS1801ST];
1262 return board;
1263 index = das1801st;
1264 break; 1154 break;
1265 case 0x8: 1155 case DAS1800_ID_HC:
1266 if (index == das1801hc || index == das1802hc) 1156 /* das-1801hc, das-1802hc */
1267 return board; 1157 board = &das1800_boards[BOARD_DAS1801HC];
1268 index = das1801hc;
1269 break; 1158 break;
1270 default: 1159 default:
1271 dev_err(dev->class_dev, 1160 dev_err(dev->class_dev, "invalid probe id 0x%x\n", id);
1272 "Board model: probe returned 0x%x (unknown, please report)\n", 1161 return -ENODEV;
1273 id);
1274 return NULL;
1275 } 1162 }
1276 dev_err(dev->class_dev, 1163 dev->board_ptr = board;
1277 "Board model (probed, not recommended): %s series\n", 1164 dev->board_name = board->name;
1278 das1800_boards[index].name); 1165 dev_warn(dev->class_dev,
1279 1166 "probed id 0x%0x: %s series (not recommended)\n",
1280 return &das1800_boards[index]; 1167 id, board->name);
1168 return 0;
1281} 1169}
1282 1170
1283static int das1800_attach(struct comedi_device *dev, 1171static int das1800_attach(struct comedi_device *dev,
@@ -1287,7 +1175,9 @@ static int das1800_attach(struct comedi_device *dev,
1287 struct das1800_private *devpriv; 1175 struct das1800_private *devpriv;
1288 struct comedi_subdevice *s; 1176 struct comedi_subdevice *s;
1289 unsigned int irq = it->options[1]; 1177 unsigned int irq = it->options[1];
1178 bool is_16bit;
1290 int ret; 1179 int ret;
1180 int i;
1291 1181
1292 devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); 1182 devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
1293 if (!devpriv) 1183 if (!devpriv)
@@ -1297,16 +1187,15 @@ static int das1800_attach(struct comedi_device *dev,
1297 if (ret) 1187 if (ret)
1298 return ret; 1188 return ret;
1299 1189
1300 board = das1800_probe(dev); 1190 ret = das1800_probe(dev);
1301 if (!board) { 1191 if (ret)
1302 dev_err(dev->class_dev, "unable to determine board type\n"); 1192 return ret;
1303 return -ENODEV; 1193 board = dev->board_ptr;
1304 }
1305 dev->board_ptr = board;
1306 dev->board_name = board->name;
1307 1194
1308 /* if it is an 'ao' board with fancy analog out then we need extra io ports */ 1195 is_16bit = board->id == DAS1800_ID_HR || board->id == DAS1800_ID_HR_DA;
1309 if (board->ao_ability == 2) { 1196
1197 /* waveform 'ao' boards have additional io ports */
1198 if (board->id == DAS1800_ID_AO) {
1310 unsigned long iobase2 = dev->iobase + IOBASE2; 1199 unsigned long iobase2 = dev->iobase + IOBASE2;
1311 1200
1312 ret = __comedi_request_region(dev, iobase2, DAS1800_SIZE); 1201 ret = __comedi_request_region(dev, iobase2, DAS1800_SIZE);
@@ -1349,7 +1238,9 @@ static int das1800_attach(struct comedi_device *dev,
1349 if (dev->irq & it->options[2]) 1238 if (dev->irq & it->options[2])
1350 das1800_init_dma(dev, it); 1239 das1800_init_dma(dev, it);
1351 1240
1352 devpriv->fifo_buf = kmalloc_array(FIFO_SIZE, sizeof(uint16_t), GFP_KERNEL); 1241 devpriv->fifo_buf = kmalloc_array(FIFO_SIZE,
1242 sizeof(*devpriv->fifo_buf),
1243 GFP_KERNEL);
1353 if (!devpriv->fifo_buf) 1244 if (!devpriv->fifo_buf)
1354 return -ENOMEM; 1245 return -ENOMEM;
1355 1246
@@ -1362,70 +1253,94 @@ static int das1800_attach(struct comedi_device *dev,
1362 if (ret) 1253 if (ret)
1363 return ret; 1254 return ret;
1364 1255
1365 /* analog input subdevice */ 1256 /*
1257 * Analog Input subdevice
1258 *
1259 * The "hc" type boards have 64 analog input channels and a 64
1260 * entry QRAM fifo.
1261 *
1262 * All the other board types have 16 on-board channels. Each channel
1263 * can be expanded to 16 channels with the addition of an EXP-1800
1264 * expansion board for a total of 256 channels. The QRAM fifo on
1265 * these boards has 256 entries.
1266 *
1267 * From the datasheets it's not clear what the comedi channel to
1268 * actual physical channel mapping is when EXP-1800 boards are used.
1269 */
1366 s = &dev->subdevices[0]; 1270 s = &dev->subdevices[0];
1367 s->type = COMEDI_SUBD_AI; 1271 s->type = COMEDI_SUBD_AI;
1368 s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND; 1272 s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND;
1369 if (board->common) 1273 if (board->id != DAS1800_ID_HC)
1370 s->subdev_flags |= SDF_COMMON; 1274 s->subdev_flags |= SDF_COMMON;
1371 s->n_chan = board->qram_len; 1275 s->n_chan = (board->id == DAS1800_ID_HC) ? 64 : 256;
1372 s->maxdata = (1 << board->resolution) - 1; 1276 s->maxdata = is_16bit ? 0xffff : 0x0fff;
1373 s->range_table = board->range_ai; 1277 s->range_table = board->is_01_series ? &das1801_ai_range
1374 s->insn_read = das1800_ai_rinsn; 1278 : &das1802_ai_range;
1279 s->insn_read = das1800_ai_insn_read;
1375 if (dev->irq) { 1280 if (dev->irq) {
1376 dev->read_subdev = s; 1281 dev->read_subdev = s;
1377 s->subdev_flags |= SDF_CMD_READ; 1282 s->subdev_flags |= SDF_CMD_READ;
1378 s->len_chanlist = s->n_chan; 1283 s->len_chanlist = s->n_chan;
1379 s->do_cmd = das1800_ai_do_cmd; 1284 s->do_cmd = das1800_ai_cmd;
1380 s->do_cmdtest = das1800_ai_do_cmdtest; 1285 s->do_cmdtest = das1800_ai_cmdtest;
1381 s->poll = das1800_ai_poll; 1286 s->poll = das1800_ai_poll;
1382 s->cancel = das1800_cancel; 1287 s->cancel = das1800_ai_cancel;
1288 s->munge = das1800_ai_munge;
1383 } 1289 }
1384 1290
1385 /* analog out */ 1291 /* Analog Output subdevice */
1386 s = &dev->subdevices[1]; 1292 s = &dev->subdevices[1];
1387 if (board->ao_ability == 1) { 1293 if (board->id == DAS1800_ID_ST_DA || board->id == DAS1800_ID_HR_DA) {
1388 s->type = COMEDI_SUBD_AO; 1294 s->type = COMEDI_SUBD_AO;
1389 s->subdev_flags = SDF_WRITABLE; 1295 s->subdev_flags = SDF_WRITABLE;
1390 s->n_chan = board->ao_n_chan; 1296 s->n_chan = (board->id == DAS1800_ID_ST_DA) ? 4 : 2;
1391 s->maxdata = (1 << board->resolution) - 1; 1297 s->maxdata = is_16bit ? 0xffff : 0x0fff;
1392 s->range_table = &range_bipolar10; 1298 s->range_table = &range_bipolar10;
1393 s->insn_write = das1800_ao_winsn; 1299 s->insn_write = das1800_ao_insn_write;
1300
1301 ret = comedi_alloc_subdev_readback(s);
1302 if (ret)
1303 return ret;
1304
1305 /* initialize all channels to 0V */
1306 for (i = 0; i < s->n_chan; i++) {
1307 /* spinlock is not necessary during the attach */
1308 outb(DAC(i), dev->iobase + DAS1800_SELECT);
1309 outw(0, dev->iobase + DAS1800_DAC);
1310 }
1311 } else if (board->id == DAS1800_ID_AO) {
1312 /*
1313 * 'ao' boards have waveform analog outputs that are not
1314 * currently supported.
1315 */
1316 s->type = COMEDI_SUBD_UNUSED;
1394 } else { 1317 } else {
1395 s->type = COMEDI_SUBD_UNUSED; 1318 s->type = COMEDI_SUBD_UNUSED;
1396 } 1319 }
1397 1320
1398 /* di */ 1321 /* Digital Input subdevice */
1399 s = &dev->subdevices[2]; 1322 s = &dev->subdevices[2];
1400 s->type = COMEDI_SUBD_DI; 1323 s->type = COMEDI_SUBD_DI;
1401 s->subdev_flags = SDF_READABLE; 1324 s->subdev_flags = SDF_READABLE;
1402 s->n_chan = 4; 1325 s->n_chan = 4;
1403 s->maxdata = 1; 1326 s->maxdata = 1;
1404 s->range_table = &range_digital; 1327 s->range_table = &range_digital;
1405 s->insn_bits = das1800_di_rbits; 1328 s->insn_bits = das1800_di_insn_bits;
1406 1329
1407 /* do */ 1330 /* Digital Output subdevice */
1408 s = &dev->subdevices[3]; 1331 s = &dev->subdevices[3];
1409 s->type = COMEDI_SUBD_DO; 1332 s->type = COMEDI_SUBD_DO;
1410 s->subdev_flags = SDF_WRITABLE; 1333 s->subdev_flags = SDF_WRITABLE;
1411 s->n_chan = board->do_n_chan; 1334 s->n_chan = (board->id == DAS1800_ID_HC) ? 8 : 4;
1412 s->maxdata = 1; 1335 s->maxdata = 1;
1413 s->range_table = &range_digital; 1336 s->range_table = &range_digital;
1414 s->insn_bits = das1800_do_wbits; 1337 s->insn_bits = das1800_do_insn_bits;
1415 1338
1416 das1800_cancel(dev, dev->read_subdev); 1339 das1800_ai_cancel(dev, dev->read_subdev);
1417 1340
1418 /* initialize digital out channels */ 1341 /* initialize digital out channels */
1419 outb(0, dev->iobase + DAS1800_DIGITAL); 1342 outb(0, dev->iobase + DAS1800_DIGITAL);
1420 1343
1421 /* initialize analog out channels */
1422 if (board->ao_ability == 1) {
1423 /* select 'update' dac channel for baseAddress + 0x0 */
1424 outb(DAC(board->ao_n_chan - 1),
1425 dev->iobase + DAS1800_SELECT);
1426 outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC);
1427 }
1428
1429 return 0; 1344 return 0;
1430}; 1345};
1431 1346
@@ -1454,5 +1369,5 @@ static struct comedi_driver das1800_driver = {
1454module_comedi_driver(das1800_driver); 1369module_comedi_driver(das1800_driver);
1455 1370
1456MODULE_AUTHOR("Comedi http://www.comedi.org"); 1371MODULE_AUTHOR("Comedi http://www.comedi.org");
1457MODULE_DESCRIPTION("Comedi low-level driver"); 1372MODULE_DESCRIPTION("Comedi driver for DAS1800 compatible ISA boards");
1458MODULE_LICENSE("GPL"); 1373MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index 40bf00984fa5..d5295bbdd28c 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -69,49 +69,61 @@
69 * Register map 69 * Register map
70 */ 70 */
71#define DT2821_ADCSR_REG 0x00 71#define DT2821_ADCSR_REG 0x00
72#define DT2821_ADCSR_ADERR (1 << 15) 72#define DT2821_ADCSR_ADERR BIT(15)
73#define DT2821_ADCSR_ADCLK (1 << 9) 73#define DT2821_ADCSR_ADCLK BIT(9)
74#define DT2821_ADCSR_MUXBUSY (1 << 8) 74#define DT2821_ADCSR_MUXBUSY BIT(8)
75#define DT2821_ADCSR_ADDONE (1 << 7) 75#define DT2821_ADCSR_ADDONE BIT(7)
76#define DT2821_ADCSR_IADDONE (1 << 6) 76#define DT2821_ADCSR_IADDONE BIT(6)
77#define DT2821_ADCSR_GS(x) (((x) & 0x3) << 4) 77#define DT2821_ADCSR_GS(x) (((x) & 0x3) << 4)
78#define DT2821_ADCSR_CHAN(x) (((x) & 0xf) << 0) 78#define DT2821_ADCSR_CHAN(x) (((x) & 0xf) << 0)
79#define DT2821_CHANCSR_REG 0x02 79#define DT2821_CHANCSR_REG 0x02
80#define DT2821_CHANCSR_LLE (1 << 15) 80#define DT2821_CHANCSR_LLE BIT(15)
81#define DT2821_CHANCSR_PRESLA(x) (((x) & 0xf) >> 8) 81#define DT2821_CHANCSR_TO_PRESLA(x) (((x) >> 8) & 0xf)
82#define DT2821_CHANCSR_NUMB(x) ((((x) - 1) & 0xf) << 0) 82#define DT2821_CHANCSR_NUMB(x) ((((x) - 1) & 0xf) << 0)
83#define DT2821_ADDAT_REG 0x04 83#define DT2821_ADDAT_REG 0x04
84#define DT2821_DACSR_REG 0x06 84#define DT2821_DACSR_REG 0x06
85#define DT2821_DACSR_DAERR (1 << 15) 85#define DT2821_DACSR_DAERR BIT(15)
86#define DT2821_DACSR_YSEL(x) ((x) << 9) 86#define DT2821_DACSR_YSEL(x) ((x) << 9)
87#define DT2821_DACSR_SSEL (1 << 8) 87#define DT2821_DACSR_SSEL BIT(8)
88#define DT2821_DACSR_DACRDY (1 << 7) 88#define DT2821_DACSR_DACRDY BIT(7)
89#define DT2821_DACSR_IDARDY (1 << 6) 89#define DT2821_DACSR_IDARDY BIT(6)
90#define DT2821_DACSR_DACLK (1 << 5) 90#define DT2821_DACSR_DACLK BIT(5)
91#define DT2821_DACSR_HBOE (1 << 1) 91#define DT2821_DACSR_HBOE BIT(1)
92#define DT2821_DACSR_LBOE (1 << 0) 92#define DT2821_DACSR_LBOE BIT(0)
93#define DT2821_DADAT_REG 0x08 93#define DT2821_DADAT_REG 0x08
94#define DT2821_DIODAT_REG 0x0a 94#define DT2821_DIODAT_REG 0x0a
95#define DT2821_SUPCSR_REG 0x0c 95#define DT2821_SUPCSR_REG 0x0c
96#define DT2821_SUPCSR_DMAD (1 << 15) 96#define DT2821_SUPCSR_DMAD BIT(15)
97#define DT2821_SUPCSR_ERRINTEN (1 << 14) 97#define DT2821_SUPCSR_ERRINTEN BIT(14)
98#define DT2821_SUPCSR_CLRDMADNE (1 << 13) 98#define DT2821_SUPCSR_CLRDMADNE BIT(13)
99#define DT2821_SUPCSR_DDMA (1 << 12) 99#define DT2821_SUPCSR_DDMA BIT(12)
100#define DT2821_SUPCSR_DS_PIO (0 << 10) 100#define DT2821_SUPCSR_DS(x) (((x) & 0x3) << 10)
101#define DT2821_SUPCSR_DS_AD_CLK (1 << 10) 101#define DT2821_SUPCSR_DS_PIO DT2821_SUPCSR_DS(0)
102#define DT2821_SUPCSR_DS_DA_CLK (2 << 10) 102#define DT2821_SUPCSR_DS_AD_CLK DT2821_SUPCSR_DS(1)
103#define DT2821_SUPCSR_DS_AD_TRIG (3 << 10) 103#define DT2821_SUPCSR_DS_DA_CLK DT2821_SUPCSR_DS(2)
104#define DT2821_SUPCSR_BUFFB (1 << 9) 104#define DT2821_SUPCSR_DS_AD_TRIG DT2821_SUPCSR_DS(3)
105#define DT2821_SUPCSR_SCDN (1 << 8) 105#define DT2821_SUPCSR_BUFFB BIT(9)
106#define DT2821_SUPCSR_DACON (1 << 7) 106#define DT2821_SUPCSR_SCDN BIT(8)
107#define DT2821_SUPCSR_ADCINIT (1 << 6) 107#define DT2821_SUPCSR_DACON BIT(7)
108#define DT2821_SUPCSR_DACINIT (1 << 5) 108#define DT2821_SUPCSR_ADCINIT BIT(6)
109#define DT2821_SUPCSR_PRLD (1 << 4) 109#define DT2821_SUPCSR_DACINIT BIT(5)
110#define DT2821_SUPCSR_STRIG (1 << 3) 110#define DT2821_SUPCSR_PRLD BIT(4)
111#define DT2821_SUPCSR_XTRIG (1 << 2) 111#define DT2821_SUPCSR_STRIG BIT(3)
112#define DT2821_SUPCSR_XCLK (1 << 1) 112#define DT2821_SUPCSR_XTRIG BIT(2)
113#define DT2821_SUPCSR_BDINIT (1 << 0) 113#define DT2821_SUPCSR_XCLK BIT(1)
114#define DT2821_SUPCSR_BDINIT BIT(0)
114#define DT2821_TMRCTR_REG 0x0e 115#define DT2821_TMRCTR_REG 0x0e
116#define DT2821_TMRCTR_PRESCALE(x) (((x) & 0xf) << 8)
117#define DT2821_TMRCTR_DIVIDER(x) ((255 - ((x) & 0xff)) << 0)
118
119/* Pacer Clock */
120#define DT2821_OSC_BASE 250 /* 4 MHz (in nanoseconds) */
121#define DT2821_PRESCALE(x) BIT(x)
122#define DT2821_PRESCALE_MAX 15
123#define DT2821_DIVIDER_MAX 255
124#define DT2821_OSC_MAX (DT2821_OSC_BASE * \
125 DT2821_PRESCALE(DT2821_PRESCALE_MAX) * \
126 DT2821_DIVIDER_MAX)
115 127
116static const struct comedi_lrange range_dt282x_ai_lo_bipolar = { 128static const struct comedi_lrange range_dt282x_ai_lo_bipolar = {
117 4, { 129 4, {
@@ -364,10 +376,10 @@ static unsigned int dt282x_ns_to_timer(unsigned int *ns, unsigned int flags)
364{ 376{
365 unsigned int prescale, base, divider; 377 unsigned int prescale, base, divider;
366 378
367 for (prescale = 0; prescale < 16; prescale++) { 379 for (prescale = 0; prescale <= DT2821_PRESCALE_MAX; prescale++) {
368 if (prescale == 1) 380 if (prescale == 1) /* 0 and 1 are both divide by 1 */
369 continue; 381 continue;
370 base = 250 * (1 << prescale); 382 base = DT2821_OSC_BASE * DT2821_PRESCALE(prescale);
371 switch (flags & CMDF_ROUND_MASK) { 383 switch (flags & CMDF_ROUND_MASK) {
372 case CMDF_ROUND_NEAREST: 384 case CMDF_ROUND_NEAREST:
373 default: 385 default:
@@ -380,15 +392,17 @@ static unsigned int dt282x_ns_to_timer(unsigned int *ns, unsigned int flags)
380 divider = DIV_ROUND_UP(*ns, base); 392 divider = DIV_ROUND_UP(*ns, base);
381 break; 393 break;
382 } 394 }
383 if (divider < 256) { 395 if (divider <= DT2821_DIVIDER_MAX)
384 *ns = divider * base; 396 break;
385 return (prescale << 8) | (255 - divider); 397 }
386 } 398 if (divider > DT2821_DIVIDER_MAX) {
399 prescale = DT2821_PRESCALE_MAX;
400 divider = DT2821_DIVIDER_MAX;
401 base = DT2821_OSC_BASE * DT2821_PRESCALE(prescale);
387 } 402 }
388 base = 250 * (1 << 15);
389 divider = 255;
390 *ns = divider * base; 403 *ns = divider * base;
391 return (15 << 8) | (255 - divider); 404 return DT2821_TMRCTR_PRESCALE(prescale) |
405 DT2821_TMRCTR_DIVIDER(divider);
392} 406}
393 407
394static void dt282x_munge(struct comedi_device *dev, 408static void dt282x_munge(struct comedi_device *dev,
@@ -683,13 +697,8 @@ static int dt282x_ai_cmdtest(struct comedi_device *dev,
683 /* Step 3: check if arguments are trivially valid */ 697 /* Step 3: check if arguments are trivially valid */
684 698
685 err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0); 699 err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
686
687 err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0); 700 err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
688 701 err |= comedi_check_trigger_arg_max(&cmd->convert_arg, DT2821_OSC_MAX);
689 err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 4000);
690
691#define SLOWEST_TIMER (250*(1<<15)*255)
692 err |= comedi_check_trigger_arg_max(&cmd->convert_arg, SLOWEST_TIMER);
693 err |= comedi_check_trigger_arg_min(&cmd->convert_arg, board->ai_speed); 702 err |= comedi_check_trigger_arg_min(&cmd->convert_arg, board->ai_speed);
694 err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, 703 err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
695 cmd->chanlist_len); 704 cmd->chanlist_len);
@@ -1084,20 +1093,6 @@ static int dt282x_initialize(struct comedi_device *dev)
1084 return 0; 1093 return 0;
1085} 1094}
1086 1095
1087/*
1088 options:
1089 0 i/o base
1090 1 irq
1091 2 dma1
1092 3 dma2
1093 4 0=single ended, 1=differential
1094 5 ai 0=straight binary, 1=2's comp
1095 6 ao0 0=straight binary, 1=2's comp
1096 7 ao1 0=straight binary, 1=2's comp
1097 8 ai 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V
1098 9 ao0 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V, 4=±2.5 V
1099 10 ao1 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V, 4=±2.5 V
1100 */
1101static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it) 1096static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
1102{ 1097{
1103 const struct dt282x_board *board = dev->board_ptr; 1098 const struct dt282x_board *board = dev->board_ptr;
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 8f24702c3380..b1c0860135d0 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -46,355 +46,451 @@
46 46
47#include <linux/module.h> 47#include <linux/module.h>
48#include <linux/slab.h> 48#include <linux/slab.h>
49#include <linux/log2.h>
49 50
50#include "../comedi_pci.h" 51#include "../comedi_pci.h"
51 52
52#include "mite.h" 53#include "mite.h"
53 54
54#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK))) 55/*
56 * Mite registers
57 */
58#define MITE_UNKNOWN_DMA_BURST_REG 0x28
59#define UNKNOWN_DMA_BURST_ENABLE_BITS 0x600
60
61#define MITE_PCI_CONFIG_OFFSET 0x300
62#define MITE_CSIGR 0x460 /* chip signature */
63#define CSIGR_TO_IOWINS(x) (((x) >> 29) & 0x7)
64#define CSIGR_TO_WINS(x) (((x) >> 24) & 0x1f)
65#define CSIGR_TO_WPDEP(x) (((x) >> 20) & 0x7)
66#define CSIGR_TO_DMAC(x) (((x) >> 16) & 0xf)
67#define CSIGR_TO_IMODE(x) (((x) >> 12) & 0x3) /* pci=0x3 */
68#define CSIGR_TO_MMODE(x) (((x) >> 8) & 0x3) /* minimite=1 */
69#define CSIGR_TO_TYPE(x) (((x) >> 4) & 0xf) /* mite=0, minimite=1 */
70#define CSIGR_TO_VER(x) (((x) >> 0) & 0xf)
71
72#define MITE_CHAN(x) (0x500 + 0x100 * (x))
73#define MITE_CHOR(x) (0x00 + MITE_CHAN(x)) /* channel operation */
74#define CHOR_DMARESET BIT(31)
75#define CHOR_SET_SEND_TC BIT(11)
76#define CHOR_CLR_SEND_TC BIT(10)
77#define CHOR_SET_LPAUSE BIT(9)
78#define CHOR_CLR_LPAUSE BIT(8)
79#define CHOR_CLRDONE BIT(7)
80#define CHOR_CLRRB BIT(6)
81#define CHOR_CLRLC BIT(5)
82#define CHOR_FRESET BIT(4)
83#define CHOR_ABORT BIT(3) /* stop without emptying fifo */
84#define CHOR_STOP BIT(2) /* stop after emptying fifo */
85#define CHOR_CONT BIT(1)
86#define CHOR_START BIT(0)
87#define MITE_CHCR(x) (0x04 + MITE_CHAN(x)) /* channel control */
88#define CHCR_SET_DMA_IE BIT(31)
89#define CHCR_CLR_DMA_IE BIT(30)
90#define CHCR_SET_LINKP_IE BIT(29)
91#define CHCR_CLR_LINKP_IE BIT(28)
92#define CHCR_SET_SAR_IE BIT(27)
93#define CHCR_CLR_SAR_IE BIT(26)
94#define CHCR_SET_DONE_IE BIT(25)
95#define CHCR_CLR_DONE_IE BIT(24)
96#define CHCR_SET_MRDY_IE BIT(23)
97#define CHCR_CLR_MRDY_IE BIT(22)
98#define CHCR_SET_DRDY_IE BIT(21)
99#define CHCR_CLR_DRDY_IE BIT(20)
100#define CHCR_SET_LC_IE BIT(19)
101#define CHCR_CLR_LC_IE BIT(18)
102#define CHCR_SET_CONT_RB_IE BIT(17)
103#define CHCR_CLR_CONT_RB_IE BIT(16)
104#define CHCR_FIFO(x) (((x) & 0x1) << 15)
105#define CHCR_FIFODIS CHCR_FIFO(1)
106#define CHCR_FIFO_ON CHCR_FIFO(0)
107#define CHCR_BURST(x) (((x) & 0x1) << 14)
108#define CHCR_BURSTEN CHCR_BURST(1)
109#define CHCR_NO_BURSTEN CHCR_BURST(0)
110#define CHCR_BYTE_SWAP_DEVICE BIT(6)
111#define CHCR_BYTE_SWAP_MEMORY BIT(4)
112#define CHCR_DIR(x) (((x) & 0x1) << 3)
113#define CHCR_DEV_TO_MEM CHCR_DIR(1)
114#define CHCR_MEM_TO_DEV CHCR_DIR(0)
115#define CHCR_MODE(x) (((x) & 0x7) << 0)
116#define CHCR_NORMAL CHCR_MODE(0)
117#define CHCR_CONTINUE CHCR_MODE(1)
118#define CHCR_RINGBUFF CHCR_MODE(2)
119#define CHCR_LINKSHORT CHCR_MODE(4)
120#define CHCR_LINKLONG CHCR_MODE(5)
121#define MITE_TCR(x) (0x08 + MITE_CHAN(x)) /* transfer count */
122#define MITE_MCR(x) (0x0c + MITE_CHAN(x)) /* memory config */
123#define MITE_MAR(x) (0x10 + MITE_CHAN(x)) /* memory address */
124#define MITE_DCR(x) (0x14 + MITE_CHAN(x)) /* device config */
125#define DCR_NORMAL BIT(29)
126#define MITE_DAR(x) (0x18 + MITE_CHAN(x)) /* device address */
127#define MITE_LKCR(x) (0x1c + MITE_CHAN(x)) /* link config */
128#define MITE_LKAR(x) (0x20 + MITE_CHAN(x)) /* link address */
129#define MITE_LLKAR(x) (0x24 + MITE_CHAN(x)) /* see tnt5002 manual */
130#define MITE_BAR(x) (0x28 + MITE_CHAN(x)) /* base address */
131#define MITE_BCR(x) (0x2c + MITE_CHAN(x)) /* base count */
132#define MITE_SAR(x) (0x30 + MITE_CHAN(x)) /* ? address */
133#define MITE_WSCR(x) (0x34 + MITE_CHAN(x)) /* ? */
134#define MITE_WSER(x) (0x38 + MITE_CHAN(x)) /* ? */
135#define MITE_CHSR(x) (0x3c + MITE_CHAN(x)) /* channel status */
136#define CHSR_INT BIT(31)
137#define CHSR_LPAUSES BIT(29)
138#define CHSR_SARS BIT(27)
139#define CHSR_DONE BIT(25)
140#define CHSR_MRDY BIT(23)
141#define CHSR_DRDY BIT(21)
142#define CHSR_LINKC BIT(19)
143#define CHSR_CONTS_RB BIT(17)
144#define CHSR_ERROR BIT(15)
145#define CHSR_SABORT BIT(14)
146#define CHSR_HABORT BIT(13)
147#define CHSR_STOPS BIT(12)
148#define CHSR_OPERR(x) (((x) & 0x3) << 10)
149#define CHSR_OPERR_MASK CHSR_OPERR(3)
150#define CHSR_OPERR_NOERROR CHSR_OPERR(0)
151#define CHSR_OPERR_FIFOERROR CHSR_OPERR(1)
152#define CHSR_OPERR_LINKERROR CHSR_OPERR(1) /* ??? */
153#define CHSR_XFERR BIT(9)
154#define CHSR_END BIT(8)
155#define CHSR_DRQ1 BIT(7)
156#define CHSR_DRQ0 BIT(6)
157#define CHSR_LERR(x) (((x) & 0x3) << 4)
158#define CHSR_LERR_MASK CHSR_LERR(3)
159#define CHSR_LBERR CHSR_LERR(1)
160#define CHSR_LRERR CHSR_LERR(2)
161#define CHSR_LOERR CHSR_LERR(3)
162#define CHSR_MERR(x) (((x) & 0x3) << 2)
163#define CHSR_MERR_MASK CHSR_MERR(3)
164#define CHSR_MBERR CHSR_MERR(1)
165#define CHSR_MRERR CHSR_MERR(2)
166#define CHSR_MOERR CHSR_MERR(3)
167#define CHSR_DERR(x) (((x) & 0x3) << 0)
168#define CHSR_DERR_MASK CHSR_DERR(3)
169#define CHSR_DBERR CHSR_DERR(1)
170#define CHSR_DRERR CHSR_DERR(2)
171#define CHSR_DOERR CHSR_DERR(3)
172#define MITE_FCR(x) (0x40 + MITE_CHAN(x)) /* fifo count */
173
174/* common bits for the memory/device/link config registers */
175#define CR_RL(x) (((x) & 0x7) << 21)
176#define CR_REQS(x) (((x) & 0x7) << 16)
177#define CR_REQS_MASK CR_REQS(7)
178#define CR_ASEQ(x) (((x) & 0x3) << 10)
179#define CR_ASEQDONT CR_ASEQ(0)
180#define CR_ASEQUP CR_ASEQ(1)
181#define CR_ASEQDOWN CR_ASEQ(2)
182#define CR_ASEQ_MASK CR_ASEQ(3)
183#define CR_PSIZE(x) (((x) & 0x3) << 8)
184#define CR_PSIZE8 CR_PSIZE(1)
185#define CR_PSIZE16 CR_PSIZE(2)
186#define CR_PSIZE32 CR_PSIZE(3)
187#define CR_PORT(x) (((x) & 0x3) << 6)
188#define CR_PORTCPU CR_PORT(0)
189#define CR_PORTIO CR_PORT(1)
190#define CR_PORTVXI CR_PORT(2)
191#define CR_PORTMXI CR_PORT(3)
192#define CR_AMDEVICE BIT(0)
193
194static unsigned int MITE_IODWBSR_1_WSIZE_bits(unsigned int size)
195{
196 return (ilog2(size) - 1) & 0x1f;
197}
55 198
56struct mite_struct *mite_alloc(struct pci_dev *pcidev) 199static unsigned int mite_retry_limit(unsigned int retry_limit)
57{ 200{
58 struct mite_struct *mite; 201 unsigned int value = 0;
59 unsigned int i;
60 202
61 mite = kzalloc(sizeof(*mite), GFP_KERNEL); 203 if (retry_limit)
62 if (mite) { 204 value = 1 + ilog2(retry_limit);
63 spin_lock_init(&mite->lock); 205 if (value > 0x7)
64 mite->pcidev = pcidev; 206 value = 0x7;
65 for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) { 207 return CR_RL(value);
66 mite->channels[i].mite = mite;
67 mite->channels[i].channel = i;
68 mite->channels[i].done = 1;
69 }
70 }
71 return mite;
72} 208}
73EXPORT_SYMBOL_GPL(mite_alloc);
74 209
75static void dump_chip_signature(u32 csigr_bits) 210static unsigned int mite_drq_reqs(unsigned int drq_line)
76{ 211{
77 pr_info("version = %i, type = %i, mite mode = %i, interface mode = %i\n", 212 /* This also works on m-series when using channels (drq_line) 4 or 5. */
78 mite_csigr_version(csigr_bits), mite_csigr_type(csigr_bits), 213 return CR_REQS((drq_line & 0x3) | 0x4);
79 mite_csigr_mmode(csigr_bits), mite_csigr_imode(csigr_bits));
80 pr_info("num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
81 mite_csigr_dmac(csigr_bits), mite_csigr_wpdep(csigr_bits),
82 mite_csigr_wins(csigr_bits), mite_csigr_iowins(csigr_bits));
83} 214}
84 215
85static unsigned mite_fifo_size(struct mite_struct *mite, unsigned channel) 216static unsigned int mite_fifo_size(struct mite *mite, unsigned int channel)
86{ 217{
87 unsigned fcr_bits = readl(mite->mite_io_addr + MITE_FCR(channel)); 218 unsigned int fcr_bits = readl(mite->mmio + MITE_FCR(channel));
88 unsigned empty_count = (fcr_bits >> 16) & 0xff; 219 unsigned int empty_count = (fcr_bits >> 16) & 0xff;
89 unsigned full_count = fcr_bits & 0xff; 220 unsigned int full_count = fcr_bits & 0xff;
90 221
91 return empty_count + full_count; 222 return empty_count + full_count;
92} 223}
93 224
94int mite_setup2(struct comedi_device *dev, 225static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
95 struct mite_struct *mite, bool use_win1)
96{ 226{
97 unsigned long length; 227 struct mite *mite = mite_chan->mite;
98 int i;
99 u32 csigr_bits;
100 unsigned unknown_dma_burst_bits;
101 228
102 pci_set_master(mite->pcidev); 229 return readl(mite->mmio + MITE_DAR(mite_chan->channel));
230}
103 231
104 mite->mite_io_addr = pci_ioremap_bar(mite->pcidev, 0); 232/**
105 if (!mite->mite_io_addr) { 233 * mite_bytes_in_transit() - Returns the number of unread bytes in the fifo.
106 dev_err(dev->class_dev, 234 * @mite_chan: MITE dma channel.
107 "Failed to remap mite io memory address\n"); 235 */
108 return -ENOMEM; 236u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
109 } 237{
110 mite->mite_phys_addr = pci_resource_start(mite->pcidev, 0); 238 struct mite *mite = mite_chan->mite;
111 239
112 dev->mmio = pci_ioremap_bar(mite->pcidev, 1); 240 return readl(mite->mmio + MITE_FCR(mite_chan->channel)) & 0xff;
113 if (!dev->mmio) { 241}
114 dev_err(dev->class_dev, 242EXPORT_SYMBOL_GPL(mite_bytes_in_transit);
115 "Failed to remap daq io memory address\n");
116 return -ENOMEM;
117 }
118 mite->daq_phys_addr = pci_resource_start(mite->pcidev, 1);
119 length = pci_resource_len(mite->pcidev, 1);
120 243
121 if (use_win1) { 244/* returns lower bound for number of bytes transferred from device to memory */
122 writel(0, mite->mite_io_addr + MITE_IODWBSR); 245static u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
123 dev_info(dev->class_dev, 246{
124 "using I/O Window Base Size register 1\n"); 247 u32 device_byte_count;
125 writel(mite->daq_phys_addr | WENAB |
126 MITE_IODWBSR_1_WSIZE_bits(length),
127 mite->mite_io_addr + MITE_IODWBSR_1);
128 writel(0, mite->mite_io_addr + MITE_IODWCR_1);
129 } else {
130 writel(mite->daq_phys_addr | WENAB,
131 mite->mite_io_addr + MITE_IODWBSR);
132 }
133 /*
134 * Make sure dma bursts work. I got this from running a bus analyzer
135 * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
136 * of 0x61f and bursts worked. 6281 powered up with register value of
137 * 0x1f and bursts didn't work. The NI windows driver reads the
138 * register, then does a bitwise-or of 0x600 with it and writes it back.
139 */
140 unknown_dma_burst_bits =
141 readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
142 unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
143 writel(unknown_dma_burst_bits,
144 mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
145 248
146 csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR); 249 device_byte_count = mite_device_bytes_transferred(mite_chan);
147 mite->num_channels = mite_csigr_dmac(csigr_bits); 250 return device_byte_count - mite_bytes_in_transit(mite_chan);
148 if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
149 dev_warn(dev->class_dev,
150 "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
151 mite->num_channels, MAX_MITE_DMA_CHANNELS);
152 mite->num_channels = MAX_MITE_DMA_CHANNELS;
153 }
154 dump_chip_signature(csigr_bits);
155 for (i = 0; i < mite->num_channels; i++) {
156 writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
157 /* disable interrupts */
158 writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
159 CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
160 CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
161 mite->mite_io_addr + MITE_CHCR(i));
162 }
163 mite->fifo_size = mite_fifo_size(mite, 0);
164 dev_info(dev->class_dev, "fifo size is %i.\n", mite->fifo_size);
165 return 0;
166} 251}
167EXPORT_SYMBOL_GPL(mite_setup2);
168 252
169void mite_detach(struct mite_struct *mite) 253/* returns upper bound for number of bytes transferred from device to memory */
254static u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
170{ 255{
171 if (!mite) 256 u32 in_transit_count;
172 return;
173
174 if (mite->mite_io_addr)
175 iounmap(mite->mite_io_addr);
176 257
177 kfree(mite); 258 in_transit_count = mite_bytes_in_transit(mite_chan);
259 return mite_device_bytes_transferred(mite_chan) - in_transit_count;
178} 260}
179EXPORT_SYMBOL_GPL(mite_detach);
180 261
181struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite) 262/* returns lower bound for number of bytes read from memory to device */
263static u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
182{ 264{
183 struct mite_dma_descriptor_ring *ring = 265 u32 device_byte_count;
184 kmalloc(sizeof(struct mite_dma_descriptor_ring), GFP_KERNEL);
185 266
186 if (!ring) 267 device_byte_count = mite_device_bytes_transferred(mite_chan);
187 return NULL; 268 return device_byte_count + mite_bytes_in_transit(mite_chan);
188 ring->hw_dev = get_device(&mite->pcidev->dev); 269}
189 if (!ring->hw_dev) {
190 kfree(ring);
191 return NULL;
192 }
193 ring->n_links = 0;
194 ring->descriptors = NULL;
195 ring->descriptors_dma_addr = 0;
196 return ring;
197};
198EXPORT_SYMBOL_GPL(mite_alloc_ring);
199 270
200void mite_free_ring(struct mite_dma_descriptor_ring *ring) 271/* returns upper bound for number of bytes read from memory to device */
272static u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
201{ 273{
202 if (ring) { 274 u32 in_transit_count;
203 if (ring->descriptors) { 275
204 dma_free_coherent(ring->hw_dev, 276 in_transit_count = mite_bytes_in_transit(mite_chan);
205 ring->n_links * 277 return mite_device_bytes_transferred(mite_chan) + in_transit_count;
206 sizeof(struct mite_dma_descriptor), 278}
207 ring->descriptors,
208 ring->descriptors_dma_addr);
209 }
210 put_device(ring->hw_dev);
211 kfree(ring);
212 }
213};
214EXPORT_SYMBOL_GPL(mite_free_ring);
215 279
216struct mite_channel *mite_request_channel_in_range(struct mite_struct *mite, 280static void mite_sync_input_dma(struct mite_channel *mite_chan,
217 struct 281 struct comedi_subdevice *s)
218 mite_dma_descriptor_ring
219 *ring, unsigned min_channel,
220 unsigned max_channel)
221{ 282{
222 int i; 283 struct comedi_async *async = s->async;
223 unsigned long flags; 284 int count;
224 struct mite_channel *channel = NULL; 285 unsigned int nbytes, old_alloc_count;
286
287 old_alloc_count = async->buf_write_alloc_count;
288 /* write alloc as much as we can */
289 comedi_buf_write_alloc(s, async->prealloc_bufsz);
225 290
291 nbytes = mite_bytes_written_to_memory_lb(mite_chan);
292 if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
293 old_alloc_count) > 0) {
294 dev_warn(s->device->class_dev,
295 "mite: DMA overwrite of free area\n");
296 async->events |= COMEDI_CB_OVERFLOW;
297 return;
298 }
299
300 count = nbytes - async->buf_write_count;
226 /* 301 /*
227 * spin lock so mite_release_channel can be called safely 302 * it's possible count will be negative due to conservative value
228 * from interrupts 303 * returned by mite_bytes_written_to_memory_lb
229 */ 304 */
230 spin_lock_irqsave(&mite->lock, flags); 305 if (count > 0) {
231 for (i = min_channel; i <= max_channel; ++i) { 306 comedi_buf_write_free(s, count);
232 if (mite->channel_allocated[i] == 0) { 307 comedi_inc_scan_progress(s, count);
233 mite->channel_allocated[i] = 1; 308 async->events |= COMEDI_CB_BLOCK;
234 channel = &mite->channels[i];
235 channel->ring = ring;
236 break;
237 }
238 } 309 }
239 spin_unlock_irqrestore(&mite->lock, flags);
240 return channel;
241} 310}
242EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
243 311
244void mite_release_channel(struct mite_channel *mite_chan) 312static void mite_sync_output_dma(struct mite_channel *mite_chan,
313 struct comedi_subdevice *s)
245{ 314{
246 struct mite_struct *mite = mite_chan->mite; 315 struct comedi_async *async = s->async;
247 unsigned long flags; 316 struct comedi_cmd *cmd = &async->cmd;
317 u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
318 unsigned int old_alloc_count = async->buf_read_alloc_count;
319 u32 nbytes_ub, nbytes_lb;
320 int count;
321 bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0);
248 322
249 /* spin lock to prevent races with mite_request_channel */ 323 /* read alloc as much as we can */
250 spin_lock_irqsave(&mite->lock, flags); 324 comedi_buf_read_alloc(s, async->prealloc_bufsz);
251 if (mite->channel_allocated[mite_chan->channel]) { 325 nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
252 mite_dma_disarm(mite_chan); 326 if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
253 mite_dma_reset(mite_chan); 327 nbytes_lb = stop_count;
328 nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
329 if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
330 nbytes_ub = stop_count;
331
332 if ((!finite_regen || stop_count > old_alloc_count) &&
333 ((int)(nbytes_ub - old_alloc_count) > 0)) {
334 dev_warn(s->device->class_dev, "mite: DMA underrun\n");
335 async->events |= COMEDI_CB_OVERFLOW;
336 return;
337 }
338
339 if (finite_regen) {
254 /* 340 /*
255 * disable all channel's interrupts (do it after disarm/reset so 341 * This is a special case where we continuously output a finite
256 * MITE_CHCR reg isn't changed while dma is still active!) 342 * buffer. In this case, we do not free any of the memory,
343 * hence we expect that old_alloc_count will reach a maximum of
344 * stop_count bytes.
257 */ 345 */
258 writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | 346 return;
259 CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE | 347 }
260 CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE | 348
261 CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE, 349 count = nbytes_lb - async->buf_read_count;
262 mite->mite_io_addr + MITE_CHCR(mite_chan->channel)); 350 if (count > 0) {
263 mite->channel_allocated[mite_chan->channel] = 0; 351 comedi_buf_read_free(s, count);
264 mite_chan->ring = NULL; 352 async->events |= COMEDI_CB_BLOCK;
265 mmiowb();
266 } 353 }
267 spin_unlock_irqrestore(&mite->lock, flags);
268} 354}
269EXPORT_SYMBOL_GPL(mite_release_channel);
270 355
271void mite_dma_arm(struct mite_channel *mite_chan) 356/**
357 * mite_sync_dma() - Sync the MITE dma with the COMEDI async buffer.
358 * @mite_chan: MITE dma channel.
359 * @s: COMEDI subdevice.
360 */
361void mite_sync_dma(struct mite_channel *mite_chan, struct comedi_subdevice *s)
362{
363 if (mite_chan->dir == COMEDI_INPUT)
364 mite_sync_input_dma(mite_chan, s);
365 else
366 mite_sync_output_dma(mite_chan, s);
367}
368EXPORT_SYMBOL_GPL(mite_sync_dma);
369
370static unsigned int mite_get_status(struct mite_channel *mite_chan)
272{ 371{
273 struct mite_struct *mite = mite_chan->mite; 372 struct mite *mite = mite_chan->mite;
274 int chor; 373 unsigned int status;
275 unsigned long flags; 374 unsigned long flags;
276 375
277 /*
278 * memory barrier is intended to insure any twiddling with the buffer
279 * is done before writing to the mite to arm dma transfer
280 */
281 smp_mb();
282 /* arm */
283 chor = CHOR_START;
284 spin_lock_irqsave(&mite->lock, flags); 376 spin_lock_irqsave(&mite->lock, flags);
285 mite_chan->done = 0; 377 status = readl(mite->mmio + MITE_CHSR(mite_chan->channel));
286 writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel)); 378 if (status & CHSR_DONE) {
379 mite_chan->done = 1;
380 writel(CHOR_CLRDONE,
381 mite->mmio + MITE_CHOR(mite_chan->channel));
382 }
287 mmiowb(); 383 mmiowb();
288 spin_unlock_irqrestore(&mite->lock, flags); 384 spin_unlock_irqrestore(&mite->lock, flags);
289 /* mite_dma_tcr(mite, channel); */ 385 return status;
290} 386}
291EXPORT_SYMBOL_GPL(mite_dma_arm);
292 387
293/**************************************/ 388/**
294 389 * mite_ack_linkc() - Check and ack the LINKC interrupt,
295int mite_buf_change(struct mite_dma_descriptor_ring *ring, 390 * @mite_chan: MITE dma channel.
296 struct comedi_subdevice *s) 391 * @s: COMEDI subdevice.
392 * @sync: flag to force a mite_sync_dma().
393 *
394 * This will also ack the DONE interrupt if active.
395 */
396void mite_ack_linkc(struct mite_channel *mite_chan,
397 struct comedi_subdevice *s,
398 bool sync)
297{ 399{
298 struct comedi_async *async = s->async; 400 struct mite *mite = mite_chan->mite;
299 unsigned int n_links; 401 unsigned int status;
300 402
301 if (ring->descriptors) { 403 status = mite_get_status(mite_chan);
302 dma_free_coherent(ring->hw_dev, 404 if (status & CHSR_LINKC) {
303 ring->n_links * 405 writel(CHOR_CLRLC, mite->mmio + MITE_CHOR(mite_chan->channel));
304 sizeof(struct mite_dma_descriptor), 406 sync = true;
305 ring->descriptors,
306 ring->descriptors_dma_addr);
307 } 407 }
308 ring->descriptors = NULL; 408 if (sync)
309 ring->descriptors_dma_addr = 0; 409 mite_sync_dma(mite_chan, s);
310 ring->n_links = 0;
311 410
312 if (async->prealloc_bufsz == 0) 411 if (status & CHSR_XFERR) {
313 return 0;
314
315 n_links = async->prealloc_bufsz >> PAGE_SHIFT;
316
317 ring->descriptors =
318 dma_alloc_coherent(ring->hw_dev,
319 n_links * sizeof(struct mite_dma_descriptor),
320 &ring->descriptors_dma_addr, GFP_KERNEL);
321 if (!ring->descriptors) {
322 dev_err(s->device->class_dev, 412 dev_err(s->device->class_dev,
323 "mite: ring buffer allocation failed\n"); 413 "mite: transfer error %08x\n", status);
324 return -ENOMEM; 414 s->async->events |= COMEDI_CB_ERROR;
325 } 415 }
326 ring->n_links = n_links;
327
328 return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
329} 416}
330EXPORT_SYMBOL_GPL(mite_buf_change); 417EXPORT_SYMBOL_GPL(mite_ack_linkc);
331 418
332/* 419/**
333 * initializes the ring buffer descriptors to provide correct DMA transfer links 420 * mite_done() - Check is a MITE dma transfer is complete.
334 * to the exact amount of memory required. When the ring buffer is allocated in 421 * @mite_chan: MITE dma channel.
335 * mite_buf_change, the default is to initialize the ring to refer to the entire 422 *
336 * DMA data buffer. A command may call this function later to re-initialize and 423 * This will also ack the DONE interrupt if active.
337 * shorten the amount of memory that will be transferred.
338 */ 424 */
339int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring, 425int mite_done(struct mite_channel *mite_chan)
340 struct comedi_subdevice *s,
341 unsigned int nbytes)
342{ 426{
343 struct comedi_async *async = s->async; 427 struct mite *mite = mite_chan->mite;
344 unsigned int n_full_links = nbytes >> PAGE_SHIFT; 428 unsigned long flags;
345 unsigned int remainder = nbytes % PAGE_SIZE; 429 int done;
346 int i;
347
348 dev_dbg(s->device->class_dev,
349 "mite: init ring buffer to %u bytes\n", nbytes);
350
351 if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
352 dev_err(s->device->class_dev,
353 "mite: ring buffer too small for requested init\n");
354 return -ENOMEM;
355 }
356 430
357 /* We set the descriptors for all full links. */ 431 mite_get_status(mite_chan);
358 for (i = 0; i < n_full_links; ++i) { 432 spin_lock_irqsave(&mite->lock, flags);
359 ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE); 433 done = mite_chan->done;
360 ring->descriptors[i].addr = 434 spin_unlock_irqrestore(&mite->lock, flags);
361 cpu_to_le32(async->buf_map->page_list[i].dma_addr); 435 return done;
362 ring->descriptors[i].next = 436}
363 cpu_to_le32(ring->descriptors_dma_addr + 437EXPORT_SYMBOL_GPL(mite_done);
364 (i + 1) * sizeof(struct mite_dma_descriptor));
365 }
366 438
367 /* the last link is either a remainder or was a full link. */ 439static void mite_dma_reset(struct mite_channel *mite_chan)
368 if (remainder > 0) { 440{
369 /* set the lesser count for the remainder link */ 441 writel(CHOR_DMARESET | CHOR_FRESET,
370 ring->descriptors[i].count = cpu_to_le32(remainder); 442 mite_chan->mite->mmio + MITE_CHOR(mite_chan->channel));
371 ring->descriptors[i].addr = 443}
372 cpu_to_le32(async->buf_map->page_list[i].dma_addr);
373 /* increment i so that assignment below refs last link */
374 ++i;
375 }
376 444
377 /* Assign the last link->next to point back to the head of the list. */ 445/**
378 ring->descriptors[i - 1].next = cpu_to_le32(ring->descriptors_dma_addr); 446 * mite_dma_arm() - Start a MITE dma transfer.
447 * @mite_chan: MITE dma channel.
448 */
449void mite_dma_arm(struct mite_channel *mite_chan)
450{
451 struct mite *mite = mite_chan->mite;
452 unsigned long flags;
379 453
380 /* 454 /*
381 * barrier is meant to insure that all the writes to the dma descriptors 455 * memory barrier is intended to insure any twiddling with the buffer
382 * have completed before the dma controller is commanded to read them 456 * is done before writing to the mite to arm dma transfer
383 */ 457 */
384 smp_wmb(); 458 smp_mb();
385 return 0; 459 spin_lock_irqsave(&mite->lock, flags);
460 mite_chan->done = 0;
461 /* arm */
462 writel(CHOR_START, mite->mmio + MITE_CHOR(mite_chan->channel));
463 mmiowb();
464 spin_unlock_irqrestore(&mite->lock, flags);
386} 465}
387EXPORT_SYMBOL_GPL(mite_init_ring_descriptors); 466EXPORT_SYMBOL_GPL(mite_dma_arm);
467
468/**
469 * mite_dma_disarm() - Stop a MITE dma transfer.
470 * @mite_chan: MITE dma channel.
471 */
472void mite_dma_disarm(struct mite_channel *mite_chan)
473{
474 struct mite *mite = mite_chan->mite;
475
476 /* disarm */
477 writel(CHOR_ABORT, mite->mmio + MITE_CHOR(mite_chan->channel));
478}
479EXPORT_SYMBOL_GPL(mite_dma_disarm);
388 480
481/**
482 * mite_prep_dma() - Prepare a MITE dma channel for transfers.
483 * @mite_chan: MITE dma channel.
484 * @num_device_bits: device transfer size (8, 16, or 32-bits).
485 * @num_memory_bits: memory transfer size (8, 16, or 32-bits).
486 */
389void mite_prep_dma(struct mite_channel *mite_chan, 487void mite_prep_dma(struct mite_channel *mite_chan,
390 unsigned int num_device_bits, unsigned int num_memory_bits) 488 unsigned int num_device_bits, unsigned int num_memory_bits)
391{ 489{
392 unsigned int chor, chcr, mcr, dcr, lkcr; 490 struct mite *mite = mite_chan->mite;
393 struct mite_struct *mite = mite_chan->mite; 491 unsigned int chcr, mcr, dcr, lkcr;
394 492
395 /* reset DMA and FIFO */ 493 mite_dma_reset(mite_chan);
396 chor = CHOR_DMARESET | CHOR_FRESET;
397 writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
398 494
399 /* short link chaining mode */ 495 /* short link chaining mode */
400 chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE | 496 chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
@@ -421,10 +517,10 @@ void mite_prep_dma(struct mite_channel *mite_chan,
421 if (mite_chan->dir == COMEDI_INPUT) 517 if (mite_chan->dir == COMEDI_INPUT)
422 chcr |= CHCR_DEV_TO_MEM; 518 chcr |= CHCR_DEV_TO_MEM;
423 519
424 writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel)); 520 writel(chcr, mite->mmio + MITE_CHCR(mite_chan->channel));
425 521
426 /* to/from memory */ 522 /* to/from memory */
427 mcr = CR_RL(64) | CR_ASEQUP; 523 mcr = mite_retry_limit(64) | CR_ASEQUP;
428 switch (num_memory_bits) { 524 switch (num_memory_bits) {
429 case 8: 525 case 8:
430 mcr |= CR_PSIZE8; 526 mcr |= CR_PSIZE8;
@@ -439,11 +535,11 @@ void mite_prep_dma(struct mite_channel *mite_chan,
439 pr_warn("bug! invalid mem bit width for dma transfer\n"); 535 pr_warn("bug! invalid mem bit width for dma transfer\n");
440 break; 536 break;
441 } 537 }
442 writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel)); 538 writel(mcr, mite->mmio + MITE_MCR(mite_chan->channel));
443 539
444 /* from/to device */ 540 /* from/to device */
445 dcr = CR_RL(64) | CR_ASEQUP; 541 dcr = mite_retry_limit(64) | CR_ASEQUP;
446 dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel); 542 dcr |= CR_PORTIO | CR_AMDEVICE | mite_drq_reqs(mite_chan->channel);
447 switch (num_device_bits) { 543 switch (num_device_bits) {
448 case 8: 544 case 8:
449 dcr |= CR_PSIZE8; 545 dcr |= CR_PSIZE8;
@@ -458,223 +554,402 @@ void mite_prep_dma(struct mite_channel *mite_chan,
458 pr_warn("bug! invalid dev bit width for dma transfer\n"); 554 pr_warn("bug! invalid dev bit width for dma transfer\n");
459 break; 555 break;
460 } 556 }
461 writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel)); 557 writel(dcr, mite->mmio + MITE_DCR(mite_chan->channel));
462 558
463 /* reset the DAR */ 559 /* reset the DAR */
464 writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel)); 560 writel(0, mite->mmio + MITE_DAR(mite_chan->channel));
465 561
466 /* the link is 32bits */ 562 /* the link is 32bits */
467 lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32; 563 lkcr = mite_retry_limit(64) | CR_ASEQUP | CR_PSIZE32;
468 writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel)); 564 writel(lkcr, mite->mmio + MITE_LKCR(mite_chan->channel));
469 565
470 /* starting address for link chaining */ 566 /* starting address for link chaining */
471 writel(mite_chan->ring->descriptors_dma_addr, 567 writel(mite_chan->ring->dma_addr,
472 mite->mite_io_addr + MITE_LKAR(mite_chan->channel)); 568 mite->mmio + MITE_LKAR(mite_chan->channel));
473} 569}
474EXPORT_SYMBOL_GPL(mite_prep_dma); 570EXPORT_SYMBOL_GPL(mite_prep_dma);
475 571
476static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan) 572static struct mite_channel *__mite_request_channel(struct mite *mite,
573 struct mite_ring *ring,
574 unsigned int min_channel,
575 unsigned int max_channel)
477{ 576{
478 struct mite_struct *mite = mite_chan->mite; 577 struct mite_channel *mite_chan = NULL;
578 unsigned long flags;
579 int i;
479 580
480 return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel)); 581 /*
582 * spin lock so mite_release_channel can be called safely
583 * from interrupts
584 */
585 spin_lock_irqsave(&mite->lock, flags);
586 for (i = min_channel; i <= max_channel; ++i) {
587 mite_chan = &mite->channels[i];
588 if (!mite_chan->ring) {
589 mite_chan->ring = ring;
590 break;
591 }
592 mite_chan = NULL;
593 }
594 spin_unlock_irqrestore(&mite->lock, flags);
595 return mite_chan;
481} 596}
482 597
483u32 mite_bytes_in_transit(struct mite_channel *mite_chan) 598/**
599 * mite_request_channel_in_range() - Request a MITE dma channel.
600 * @mite: MITE device.
601 * @ring: MITE dma ring.
602 * @min_channel: minimum channel index to use.
603 * @max_channel: maximum channel index to use.
604 */
605struct mite_channel *mite_request_channel_in_range(struct mite *mite,
606 struct mite_ring *ring,
607 unsigned int min_channel,
608 unsigned int max_channel)
484{ 609{
485 struct mite_struct *mite = mite_chan->mite; 610 return __mite_request_channel(mite, ring, min_channel, max_channel);
486
487 return readl(mite->mite_io_addr +
488 MITE_FCR(mite_chan->channel)) & 0x000000FF;
489} 611}
490EXPORT_SYMBOL_GPL(mite_bytes_in_transit); 612EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
491 613
492/* returns lower bound for number of bytes transferred from device to memory */ 614/**
493u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan) 615 * mite_request_channel() - Request a MITE dma channel.
616 * @mite: MITE device.
617 * @ring: MITE dma ring.
618 */
619struct mite_channel *mite_request_channel(struct mite *mite,
620 struct mite_ring *ring)
494{ 621{
495 u32 device_byte_count; 622 return __mite_request_channel(mite, ring, 0, mite->num_channels - 1);
496
497 device_byte_count = mite_device_bytes_transferred(mite_chan);
498 return device_byte_count - mite_bytes_in_transit(mite_chan);
499} 623}
500EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_lb); 624EXPORT_SYMBOL_GPL(mite_request_channel);
501 625
502/* returns upper bound for number of bytes transferred from device to memory */ 626/**
503u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan) 627 * mite_release_channel() - Release a MITE dma channel.
628 * @mite_chan: MITE dma channel.
629 */
630void mite_release_channel(struct mite_channel *mite_chan)
504{ 631{
505 u32 in_transit_count; 632 struct mite *mite = mite_chan->mite;
633 unsigned long flags;
506 634
507 in_transit_count = mite_bytes_in_transit(mite_chan); 635 /* spin lock to prevent races with mite_request_channel */
508 return mite_device_bytes_transferred(mite_chan) - in_transit_count; 636 spin_lock_irqsave(&mite->lock, flags);
637 if (mite_chan->ring) {
638 mite_dma_disarm(mite_chan);
639 mite_dma_reset(mite_chan);
640 /*
641 * disable all channel's interrupts (do it after disarm/reset so
642 * MITE_CHCR reg isn't changed while dma is still active!)
643 */
644 writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
645 CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
646 CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
647 CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
648 mite->mmio + MITE_CHCR(mite_chan->channel));
649 mite_chan->ring = NULL;
650 mmiowb();
651 }
652 spin_unlock_irqrestore(&mite->lock, flags);
509} 653}
510EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_ub); 654EXPORT_SYMBOL_GPL(mite_release_channel);
511 655
512/* returns lower bound for number of bytes read from memory to device */ 656/**
513u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan) 657 * mite_init_ring_descriptors() - Initialize a MITE dma ring descriptors.
658 * @ring: MITE dma ring.
659 * @s: COMEDI subdevice.
660 * @nbytes: the size of the dma ring (in bytes).
661 *
662 * Initializes the ring buffer descriptors to provide correct DMA transfer
663 * links to the exact amount of memory required. When the ring buffer is
664 * allocated by mite_buf_change(), the default is to initialize the ring
665 * to refer to the entire DMA data buffer. A command may call this function
666 * later to re-initialize and shorten the amount of memory that will be
667 * transferred.
668 */
669int mite_init_ring_descriptors(struct mite_ring *ring,
670 struct comedi_subdevice *s,
671 unsigned int nbytes)
514{ 672{
515 u32 device_byte_count; 673 struct comedi_async *async = s->async;
674 struct mite_dma_desc *desc = NULL;
675 unsigned int n_full_links = nbytes >> PAGE_SHIFT;
676 unsigned int remainder = nbytes % PAGE_SIZE;
677 int i;
516 678
517 device_byte_count = mite_device_bytes_transferred(mite_chan); 679 dev_dbg(s->device->class_dev,
518 return device_byte_count + mite_bytes_in_transit(mite_chan); 680 "mite: init ring buffer to %u bytes\n", nbytes);
519}
520EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_lb);
521 681
522/* returns upper bound for number of bytes read from memory to device */ 682 if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
523u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan) 683 dev_err(s->device->class_dev,
524{ 684 "mite: ring buffer too small for requested init\n");
525 u32 in_transit_count; 685 return -ENOMEM;
686 }
526 687
527 in_transit_count = mite_bytes_in_transit(mite_chan); 688 /* We set the descriptors for all full links. */
528 return mite_device_bytes_transferred(mite_chan) + in_transit_count; 689 for (i = 0; i < n_full_links; ++i) {
529} 690 desc = &ring->descs[i];
530EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_ub); 691 desc->count = cpu_to_le32(PAGE_SIZE);
692 desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
693 desc->next = cpu_to_le32(ring->dma_addr +
694 (i + 1) * sizeof(*desc));
695 }
531 696
532unsigned mite_dma_tcr(struct mite_channel *mite_chan) 697 /* the last link is either a remainder or was a full link. */
533{ 698 if (remainder > 0) {
534 struct mite_struct *mite = mite_chan->mite; 699 desc = &ring->descs[i];
700 /* set the lesser count for the remainder link */
701 desc->count = cpu_to_le32(remainder);
702 desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
703 }
704
705 /* Assign the last link->next to point back to the head of the list. */
706 desc->next = cpu_to_le32(ring->dma_addr);
535 707
536 return readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel)); 708 /*
709 * barrier is meant to insure that all the writes to the dma descriptors
710 * have completed before the dma controller is commanded to read them
711 */
712 smp_wmb();
713 return 0;
537} 714}
538EXPORT_SYMBOL_GPL(mite_dma_tcr); 715EXPORT_SYMBOL_GPL(mite_init_ring_descriptors);
539 716
540void mite_dma_disarm(struct mite_channel *mite_chan) 717static void mite_free_dma_descs(struct mite_ring *ring)
541{ 718{
542 struct mite_struct *mite = mite_chan->mite; 719 struct mite_dma_desc *descs = ring->descs;
543 unsigned chor;
544 720
545 /* disarm */ 721 if (descs) {
546 chor = CHOR_ABORT; 722 dma_free_coherent(ring->hw_dev,
547 writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel)); 723 ring->n_links * sizeof(*descs),
724 descs, ring->dma_addr);
725 ring->descs = NULL;
726 ring->dma_addr = 0;
727 ring->n_links = 0;
728 }
548} 729}
549EXPORT_SYMBOL_GPL(mite_dma_disarm);
550 730
551int mite_sync_input_dma(struct mite_channel *mite_chan, 731/**
552 struct comedi_subdevice *s) 732 * mite_buf_change() - COMEDI subdevice (*buf_change) for a MITE dma ring.
733 * @ring: MITE dma ring.
734 * @s: COMEDI subdevice.
735 */
736int mite_buf_change(struct mite_ring *ring, struct comedi_subdevice *s)
553{ 737{
554 struct comedi_async *async = s->async; 738 struct comedi_async *async = s->async;
555 int count; 739 struct mite_dma_desc *descs;
556 unsigned int nbytes, old_alloc_count; 740 unsigned int n_links;
557 741
558 old_alloc_count = async->buf_write_alloc_count; 742 mite_free_dma_descs(ring);
559 /* write alloc as much as we can */
560 comedi_buf_write_alloc(s, async->prealloc_bufsz);
561 743
562 nbytes = mite_bytes_written_to_memory_lb(mite_chan); 744 if (async->prealloc_bufsz == 0)
563 if ((int)(mite_bytes_written_to_memory_ub(mite_chan) - 745 return 0;
564 old_alloc_count) > 0) { 746
565 dev_warn(s->device->class_dev, 747 n_links = async->prealloc_bufsz >> PAGE_SHIFT;
566 "mite: DMA overwrite of free area\n"); 748
567 async->events |= COMEDI_CB_OVERFLOW; 749 descs = dma_alloc_coherent(ring->hw_dev,
568 return -1; 750 n_links * sizeof(*descs),
751 &ring->dma_addr, GFP_KERNEL);
752 if (!descs) {
753 dev_err(s->device->class_dev,
754 "mite: ring buffer allocation failed\n");
755 return -ENOMEM;
569 } 756 }
757 ring->descs = descs;
758 ring->n_links = n_links;
570 759
571 count = nbytes - async->buf_write_count; 760 return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
572 /* 761}
573 * it's possible count will be negative due to conservative value 762EXPORT_SYMBOL_GPL(mite_buf_change);
574 * returned by mite_bytes_written_to_memory_lb
575 */
576 if (count <= 0)
577 return 0;
578 763
579 comedi_buf_write_free(s, count); 764/**
580 comedi_inc_scan_progress(s, count); 765 * mite_alloc_ring() - Allocate a MITE dma ring.
581 async->events |= COMEDI_CB_BLOCK; 766 * @mite: MITE device.
582 return 0; 767 */
768struct mite_ring *mite_alloc_ring(struct mite *mite)
769{
770 struct mite_ring *ring;
771
772 ring = kmalloc(sizeof(*ring), GFP_KERNEL);
773 if (!ring)
774 return NULL;
775 ring->hw_dev = get_device(&mite->pcidev->dev);
776 if (!ring->hw_dev) {
777 kfree(ring);
778 return NULL;
779 }
780 ring->n_links = 0;
781 ring->descs = NULL;
782 ring->dma_addr = 0;
783 return ring;
583} 784}
584EXPORT_SYMBOL_GPL(mite_sync_input_dma); 785EXPORT_SYMBOL_GPL(mite_alloc_ring);
585 786
586int mite_sync_output_dma(struct mite_channel *mite_chan, 787/**
587 struct comedi_subdevice *s) 788 * mite_free_ring() - Free a MITE dma ring and its descriptors.
789 * @ring: MITE dma ring.
790 */
791void mite_free_ring(struct mite_ring *ring)
588{ 792{
589 struct comedi_async *async = s->async; 793 if (ring) {
590 struct comedi_cmd *cmd = &async->cmd; 794 mite_free_dma_descs(ring);
591 u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s); 795 put_device(ring->hw_dev);
592 unsigned int old_alloc_count = async->buf_read_alloc_count; 796 kfree(ring);
593 u32 nbytes_ub, nbytes_lb; 797 }
594 int count; 798}
595 bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0); 799EXPORT_SYMBOL_GPL(mite_free_ring);
596 800
597 /* read alloc as much as we can */ 801static int mite_setup(struct comedi_device *dev, struct mite *mite,
598 comedi_buf_read_alloc(s, async->prealloc_bufsz); 802 bool use_win1)
599 nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan); 803{
600 if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0) 804 resource_size_t daq_phys_addr;
601 nbytes_lb = stop_count; 805 unsigned long length;
602 nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan); 806 int i;
603 if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0) 807 u32 csigr_bits;
604 nbytes_ub = stop_count; 808 unsigned int unknown_dma_burst_bits;
809 unsigned int wpdep;
605 810
606 if ((!finite_regen || stop_count > old_alloc_count) && 811 pci_set_master(mite->pcidev);
607 ((int)(nbytes_ub - old_alloc_count) > 0)) { 812
608 dev_warn(s->device->class_dev, "mite: DMA underrun\n"); 813 mite->mmio = pci_ioremap_bar(mite->pcidev, 0);
609 async->events |= COMEDI_CB_OVERFLOW; 814 if (!mite->mmio)
610 return -1; 815 return -ENOMEM;
816
817 dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
818 if (!dev->mmio)
819 return -ENOMEM;
820 daq_phys_addr = pci_resource_start(mite->pcidev, 1);
821 length = pci_resource_len(mite->pcidev, 1);
822
823 if (use_win1) {
824 writel(0, mite->mmio + MITE_IODWBSR);
825 dev_dbg(dev->class_dev,
826 "mite: using I/O Window Base Size register 1\n");
827 writel(daq_phys_addr | WENAB |
828 MITE_IODWBSR_1_WSIZE_bits(length),
829 mite->mmio + MITE_IODWBSR_1);
830 writel(0, mite->mmio + MITE_IODWCR_1);
831 } else {
832 writel(daq_phys_addr | WENAB, mite->mmio + MITE_IODWBSR);
611 } 833 }
834 /*
835 * Make sure dma bursts work. I got this from running a bus analyzer
836 * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
837 * of 0x61f and bursts worked. 6281 powered up with register value of
838 * 0x1f and bursts didn't work. The NI windows driver reads the
839 * register, then does a bitwise-or of 0x600 with it and writes it back.
840 *
841 * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
842 * written and read back. The bits 0x1f always read as 1.
843 * The rest always read as zero.
844 */
845 unknown_dma_burst_bits = readl(mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);
846 unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
847 writel(unknown_dma_burst_bits, mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);
612 848
613 if (finite_regen) { 849 csigr_bits = readl(mite->mmio + MITE_CSIGR);
614 /* 850 mite->num_channels = CSIGR_TO_DMAC(csigr_bits);
615 * This is a special case where we continuously output a finite 851 if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
616 * buffer. In this case, we do not free any of the memory, 852 dev_warn(dev->class_dev,
617 * hence we expect that old_alloc_count will reach a maximum of 853 "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
618 * stop_count bytes. 854 mite->num_channels, MAX_MITE_DMA_CHANNELS);
619 */ 855 mite->num_channels = MAX_MITE_DMA_CHANNELS;
620 return 0;
621 } 856 }
622 857
623 count = nbytes_lb - async->buf_read_count; 858 /* get the wpdep bits and convert it to the write port fifo depth */
624 if (count <= 0) 859 wpdep = CSIGR_TO_WPDEP(csigr_bits);
625 return 0; 860 if (wpdep)
861 wpdep = BIT(wpdep);
626 862
627 if (count) { 863 dev_dbg(dev->class_dev,
628 comedi_buf_read_free(s, count); 864 "mite: version = %i, type = %i, mite mode = %i, interface mode = %i\n",
629 async->events |= COMEDI_CB_BLOCK; 865 CSIGR_TO_VER(csigr_bits), CSIGR_TO_TYPE(csigr_bits),
866 CSIGR_TO_MMODE(csigr_bits), CSIGR_TO_IMODE(csigr_bits));
867 dev_dbg(dev->class_dev,
868 "mite: num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
869 CSIGR_TO_DMAC(csigr_bits), wpdep,
870 CSIGR_TO_WINS(csigr_bits), CSIGR_TO_IOWINS(csigr_bits));
871
872 for (i = 0; i < mite->num_channels; i++) {
873 writel(CHOR_DMARESET, mite->mmio + MITE_CHOR(i));
874 /* disable interrupts */
875 writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
876 CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
877 CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
878 mite->mmio + MITE_CHCR(i));
630 } 879 }
880 mite->fifo_size = mite_fifo_size(mite, 0);
881 dev_dbg(dev->class_dev, "mite: fifo size is %i.\n", mite->fifo_size);
631 return 0; 882 return 0;
632} 883}
633EXPORT_SYMBOL_GPL(mite_sync_output_dma);
634 884
635unsigned mite_get_status(struct mite_channel *mite_chan) 885/**
886 * mite_attach() - Allocate and initialize a MITE device for a comedi driver.
887 * @dev: COMEDI device.
888 * @use_win1: flag to use I/O Window 1 instead of I/O Window 0.
889 *
890 * Called by a COMEDI drivers (*auto_attach).
891 *
892 * Returns a pointer to the MITE device on success, or NULL if the MITE cannot
893 * be allocated or remapped.
894 */
895struct mite *mite_attach(struct comedi_device *dev, bool use_win1)
636{ 896{
637 struct mite_struct *mite = mite_chan->mite; 897 struct pci_dev *pcidev = comedi_to_pci_dev(dev);
638 unsigned status; 898 struct mite *mite;
639 unsigned long flags; 899 unsigned int i;
900 int ret;
640 901
641 spin_lock_irqsave(&mite->lock, flags); 902 mite = kzalloc(sizeof(*mite), GFP_KERNEL);
642 status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel)); 903 if (!mite)
643 if (status & CHSR_DONE) { 904 return NULL;
644 mite_chan->done = 1; 905
645 writel(CHOR_CLRDONE, 906 spin_lock_init(&mite->lock);
646 mite->mite_io_addr + MITE_CHOR(mite_chan->channel)); 907 mite->pcidev = pcidev;
908 for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
909 mite->channels[i].mite = mite;
910 mite->channels[i].channel = i;
911 mite->channels[i].done = 1;
647 } 912 }
648 mmiowb(); 913
649 spin_unlock_irqrestore(&mite->lock, flags); 914 ret = mite_setup(dev, mite, use_win1);
650 return status; 915 if (ret) {
916 if (mite->mmio)
917 iounmap(mite->mmio);
918 kfree(mite);
919 return NULL;
920 }
921
922 return mite;
651} 923}
652EXPORT_SYMBOL_GPL(mite_get_status); 924EXPORT_SYMBOL_GPL(mite_attach);
653 925
654int mite_done(struct mite_channel *mite_chan) 926/**
927 * mite_detach() - Unmap and free a MITE device for a comedi driver.
928 * @mite: MITE device.
929 *
930 * Called by a COMEDI drivers (*detach).
931 */
932void mite_detach(struct mite *mite)
655{ 933{
656 struct mite_struct *mite = mite_chan->mite; 934 if (!mite)
657 unsigned long flags; 935 return;
658 int done;
659 936
660 mite_get_status(mite_chan); 937 if (mite->mmio)
661 spin_lock_irqsave(&mite->lock, flags); 938 iounmap(mite->mmio);
662 done = mite_chan->done; 939
663 spin_unlock_irqrestore(&mite->lock, flags); 940 kfree(mite);
664 return done;
665} 941}
666EXPORT_SYMBOL_GPL(mite_done); 942EXPORT_SYMBOL_GPL(mite_detach);
667 943
668static int __init mite_module_init(void) 944static int __init mite_module_init(void)
669{ 945{
670 return 0; 946 return 0;
671} 947}
948module_init(mite_module_init);
672 949
673static void __exit mite_module_exit(void) 950static void __exit mite_module_exit(void)
674{ 951{
675} 952}
676
677module_init(mite_module_init);
678module_exit(mite_module_exit); 953module_exit(mite_module_exit);
679 954
680MODULE_AUTHOR("Comedi http://www.comedi.org"); 955MODULE_AUTHOR("Comedi http://www.comedi.org");
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index 87534b07ec81..b6349aed97d0 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -19,8 +19,6 @@
19#ifndef _MITE_H_ 19#ifndef _MITE_H_
20#define _MITE_H_ 20#define _MITE_H_
21 21
22#include <linux/io.h>
23#include <linux/log2.h>
24#include <linux/spinlock.h> 22#include <linux/spinlock.h>
25 23
26#define MAX_MITE_DMA_CHANNELS 8 24#define MAX_MITE_DMA_CHANNELS 8
@@ -30,323 +28,74 @@ struct comedi_subdevice;
30struct device; 28struct device;
31struct pci_dev; 29struct pci_dev;
32 30
33struct mite_dma_descriptor { 31struct mite_dma_desc {
34 __le32 count; 32 __le32 count;
35 __le32 addr; 33 __le32 addr;
36 __le32 next; 34 __le32 next;
37 u32 dar; 35 u32 dar;
38}; 36};
39 37
40struct mite_dma_descriptor_ring { 38struct mite_ring {
41 struct device *hw_dev; 39 struct device *hw_dev;
42 unsigned int n_links; 40 unsigned int n_links;
43 struct mite_dma_descriptor *descriptors; 41 struct mite_dma_desc *descs;
44 dma_addr_t descriptors_dma_addr; 42 dma_addr_t dma_addr;
45}; 43};
46 44
47struct mite_channel { 45struct mite_channel {
48 struct mite_struct *mite; 46 struct mite *mite;
49 unsigned channel; 47 unsigned int channel;
50 int dir; 48 int dir;
51 int done; 49 int done;
52 struct mite_dma_descriptor_ring *ring; 50 struct mite_ring *ring;
53}; 51};
54 52
55struct mite_struct { 53struct mite {
56 struct pci_dev *pcidev; 54 struct pci_dev *pcidev;
57 resource_size_t mite_phys_addr; 55 void __iomem *mmio;
58 void __iomem *mite_io_addr;
59 resource_size_t daq_phys_addr;
60 struct mite_channel channels[MAX_MITE_DMA_CHANNELS]; 56 struct mite_channel channels[MAX_MITE_DMA_CHANNELS];
61 short channel_allocated[MAX_MITE_DMA_CHANNELS];
62 int num_channels; 57 int num_channels;
63 unsigned fifo_size; 58 unsigned int fifo_size;
59 /* protects mite_channel from being released by the driver */
64 spinlock_t lock; 60 spinlock_t lock;
65}; 61};
66 62
67struct mite_struct *mite_alloc(struct pci_dev *pcidev); 63u32 mite_bytes_in_transit(struct mite_channel *);
68 64
69int mite_setup2(struct comedi_device *, struct mite_struct *, bool use_win1); 65void mite_sync_dma(struct mite_channel *, struct comedi_subdevice *);
66void mite_ack_linkc(struct mite_channel *, struct comedi_subdevice *s,
67 bool sync);
68int mite_done(struct mite_channel *);
70 69
71static inline int mite_setup(struct comedi_device *dev, 70void mite_dma_arm(struct mite_channel *);
72 struct mite_struct *mite) 71void mite_dma_disarm(struct mite_channel *);
73{
74 return mite_setup2(dev, mite, false);
75}
76 72
77void mite_detach(struct mite_struct *mite); 73void mite_prep_dma(struct mite_channel *,
78struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite);
79void mite_free_ring(struct mite_dma_descriptor_ring *ring);
80struct mite_channel *
81mite_request_channel_in_range(struct mite_struct *mite,
82 struct mite_dma_descriptor_ring *ring,
83 unsigned min_channel, unsigned max_channel);
84static inline struct mite_channel *
85mite_request_channel(struct mite_struct *mite,
86 struct mite_dma_descriptor_ring *ring)
87{
88 return mite_request_channel_in_range(mite, ring, 0,
89 mite->num_channels - 1);
90}
91
92void mite_release_channel(struct mite_channel *mite_chan);
93
94unsigned mite_dma_tcr(struct mite_channel *mite_chan);
95void mite_dma_arm(struct mite_channel *mite_chan);
96void mite_dma_disarm(struct mite_channel *mite_chan);
97int mite_sync_input_dma(struct mite_channel *mite_chan,
98 struct comedi_subdevice *s);
99int mite_sync_output_dma(struct mite_channel *mite_chan,
100 struct comedi_subdevice *s);
101u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan);
102u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan);
103u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan);
104u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan);
105u32 mite_bytes_in_transit(struct mite_channel *mite_chan);
106unsigned mite_get_status(struct mite_channel *mite_chan);
107int mite_done(struct mite_channel *mite_chan);
108
109void mite_prep_dma(struct mite_channel *mite_chan,
110 unsigned int num_device_bits, unsigned int num_memory_bits); 74 unsigned int num_device_bits, unsigned int num_memory_bits);
111int mite_buf_change(struct mite_dma_descriptor_ring *ring,
112 struct comedi_subdevice *s);
113int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring,
114 struct comedi_subdevice *s,
115 unsigned int nbytes);
116
117enum mite_registers {
118 /*
119 * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
120 * written and read back. The bits 0x1f always read as 1.
121 * The rest always read as zero.
122 */
123 MITE_UNKNOWN_DMA_BURST_REG = 0x28,
124 MITE_IODWBSR = 0xc0, /* IO Device Window Base Size Register */
125 MITE_IODWBSR_1 = 0xc4, /* IO Device Window Base Size Register 1 */
126 MITE_IODWCR_1 = 0xf4,
127 MITE_PCI_CONFIG_OFFSET = 0x300,
128 MITE_CSIGR = 0x460 /* chip signature */
129};
130
131#define MITE_CHAN(x) (0x500 + 0x100 * (x))
132#define MITE_CHOR(x) (0x00 + MITE_CHAN(x)) /* channel operation */
133#define MITE_CHCR(x) (0x04 + MITE_CHAN(x)) /* channel control */
134#define MITE_TCR(x) (0x08 + MITE_CHAN(x)) /* transfer count */
135#define MITE_MCR(x) (0x0c + MITE_CHAN(x)) /* memory configuration */
136#define MITE_MAR(x) (0x10 + MITE_CHAN(x)) /* memory address */
137#define MITE_DCR(x) (0x14 + MITE_CHAN(x)) /* device configuration */
138#define MITE_DAR(x) (0x18 + MITE_CHAN(x)) /* device address */
139#define MITE_LKCR(x) (0x1c + MITE_CHAN(x)) /* link configuration */
140#define MITE_LKAR(x) (0x20 + MITE_CHAN(x)) /* link address */
141#define MITE_LLKAR(x) (0x24 + MITE_CHAN(x)) /* see tnt5002 manual */
142#define MITE_BAR(x) (0x28 + MITE_CHAN(x)) /* base address */
143#define MITE_BCR(x) (0x2c + MITE_CHAN(x)) /* base count */
144#define MITE_SAR(x) (0x30 + MITE_CHAN(x)) /* ? address */
145#define MITE_WSCR(x) (0x34 + MITE_CHAN(x)) /* ? */
146#define MITE_WSER(x) (0x38 + MITE_CHAN(x)) /* ? */
147#define MITE_CHSR(x) (0x3c + MITE_CHAN(x)) /* channel status */
148#define MITE_FCR(x) (0x40 + MITE_CHAN(x)) /* fifo count */
149
150enum MITE_IODWBSR_bits {
151 WENAB = 0x80, /* window enable */
152};
153
154static inline unsigned MITE_IODWBSR_1_WSIZE_bits(unsigned size)
155{
156 unsigned order = 0;
157
158 BUG_ON(size == 0);
159 order = ilog2(size);
160 BUG_ON(order < 1);
161 return (order - 1) & 0x1f;
162}
163
164enum MITE_UNKNOWN_DMA_BURST_bits {
165 UNKNOWN_DMA_BURST_ENABLE_BITS = 0x600
166};
167
168static inline int mite_csigr_version(u32 csigr_bits)
169{
170 return csigr_bits & 0xf;
171};
172
173static inline int mite_csigr_type(u32 csigr_bits)
174{ /* original mite = 0, minimite = 1 */
175 return (csigr_bits >> 4) & 0xf;
176};
177
178static inline int mite_csigr_mmode(u32 csigr_bits)
179{ /* mite mode, minimite = 1 */
180 return (csigr_bits >> 8) & 0x3;
181};
182
183static inline int mite_csigr_imode(u32 csigr_bits)
184{ /* cpu port interface mode, pci = 0x3 */
185 return (csigr_bits >> 12) & 0x3;
186};
187
188static inline int mite_csigr_dmac(u32 csigr_bits)
189{ /* number of dma channels */
190 return (csigr_bits >> 16) & 0xf;
191};
192 75
193static inline int mite_csigr_wpdep(u32 csigr_bits) 76struct mite_channel *mite_request_channel_in_range(struct mite *,
194{ /* write post fifo depth */ 77 struct mite_ring *,
195 unsigned int wpdep_bits = (csigr_bits >> 20) & 0x7; 78 unsigned int min_channel,
79 unsigned int max_channel);
80struct mite_channel *mite_request_channel(struct mite *, struct mite_ring *);
81void mite_release_channel(struct mite_channel *);
196 82
197 return (wpdep_bits) ? (1 << (wpdep_bits - 1)) : 0; 83int mite_init_ring_descriptors(struct mite_ring *, struct comedi_subdevice *,
198} 84 unsigned int nbytes);
199 85int mite_buf_change(struct mite_ring *, struct comedi_subdevice *);
200static inline int mite_csigr_wins(u32 csigr_bits)
201{
202 return (csigr_bits >> 24) & 0x1f;
203};
204
205static inline int mite_csigr_iowins(u32 csigr_bits)
206{ /* number of io windows */
207 return (csigr_bits >> 29) & 0x7;
208};
209
210enum MITE_MCR_bits {
211 MCRPON = 0,
212};
213
214enum MITE_DCR_bits {
215 DCR_NORMAL = (1 << 29),
216 DCRPON = 0,
217};
218
219enum MITE_CHOR_bits {
220 CHOR_DMARESET = (1 << 31),
221 CHOR_SET_SEND_TC = (1 << 11),
222 CHOR_CLR_SEND_TC = (1 << 10),
223 CHOR_SET_LPAUSE = (1 << 9),
224 CHOR_CLR_LPAUSE = (1 << 8),
225 CHOR_CLRDONE = (1 << 7),
226 CHOR_CLRRB = (1 << 6),
227 CHOR_CLRLC = (1 << 5),
228 CHOR_FRESET = (1 << 4),
229 CHOR_ABORT = (1 << 3), /* stop without emptying fifo */
230 CHOR_STOP = (1 << 2), /* stop after emptying fifo */
231 CHOR_CONT = (1 << 1),
232 CHOR_START = (1 << 0),
233 CHOR_PON = (CHOR_CLR_SEND_TC | CHOR_CLR_LPAUSE),
234};
235
236enum MITE_CHCR_bits {
237 CHCR_SET_DMA_IE = (1 << 31),
238 CHCR_CLR_DMA_IE = (1 << 30),
239 CHCR_SET_LINKP_IE = (1 << 29),
240 CHCR_CLR_LINKP_IE = (1 << 28),
241 CHCR_SET_SAR_IE = (1 << 27),
242 CHCR_CLR_SAR_IE = (1 << 26),
243 CHCR_SET_DONE_IE = (1 << 25),
244 CHCR_CLR_DONE_IE = (1 << 24),
245 CHCR_SET_MRDY_IE = (1 << 23),
246 CHCR_CLR_MRDY_IE = (1 << 22),
247 CHCR_SET_DRDY_IE = (1 << 21),
248 CHCR_CLR_DRDY_IE = (1 << 20),
249 CHCR_SET_LC_IE = (1 << 19),
250 CHCR_CLR_LC_IE = (1 << 18),
251 CHCR_SET_CONT_RB_IE = (1 << 17),
252 CHCR_CLR_CONT_RB_IE = (1 << 16),
253 CHCR_FIFODIS = (1 << 15),
254 CHCR_FIFO_ON = 0,
255 CHCR_BURSTEN = (1 << 14),
256 CHCR_NO_BURSTEN = 0,
257 CHCR_BYTE_SWAP_DEVICE = (1 << 6),
258 CHCR_BYTE_SWAP_MEMORY = (1 << 4),
259 CHCR_DIR = (1 << 3),
260 CHCR_DEV_TO_MEM = CHCR_DIR,
261 CHCR_MEM_TO_DEV = 0,
262 CHCR_NORMAL = (0 << 0),
263 CHCR_CONTINUE = (1 << 0),
264 CHCR_RINGBUFF = (2 << 0),
265 CHCR_LINKSHORT = (4 << 0),
266 CHCR_LINKLONG = (5 << 0),
267 CHCRPON =
268 (CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
269 CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
270 CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE),
271};
272
273enum ConfigRegister_bits {
274 CR_REQS_MASK = 0x7 << 16,
275 CR_ASEQDONT = 0x0 << 10,
276 CR_ASEQUP = 0x1 << 10,
277 CR_ASEQDOWN = 0x2 << 10,
278 CR_ASEQ_MASK = 0x3 << 10,
279 CR_PSIZE8 = (1 << 8),
280 CR_PSIZE16 = (2 << 8),
281 CR_PSIZE32 = (3 << 8),
282 CR_PORTCPU = (0 << 6),
283 CR_PORTIO = (1 << 6),
284 CR_PORTVXI = (2 << 6),
285 CR_PORTMXI = (3 << 6),
286 CR_AMDEVICE = (1 << 0),
287};
288
289static inline int CR_REQS(int source)
290{
291 return (source & 0x7) << 16;
292};
293
294static inline int CR_REQSDRQ(unsigned drq_line)
295{
296 /* This also works on m-series when using channels (drq_line) 4 or 5. */
297 return CR_REQS((drq_line & 0x3) | 0x4);
298}
299
300static inline int CR_RL(unsigned int retry_limit)
301{
302 int value = 0;
303 86
304 if (retry_limit) 87struct mite_ring *mite_alloc_ring(struct mite *);
305 value = 1 + ilog2(retry_limit); 88void mite_free_ring(struct mite_ring *);
306 if (value > 0x7)
307 value = 0x7;
308 return (value & 0x7) << 21;
309}
310 89
311enum CHSR_bits { 90struct mite *mite_attach(struct comedi_device *, bool use_win1);
312 CHSR_INT = (1 << 31), 91void mite_detach(struct mite *);
313 CHSR_LPAUSES = (1 << 29),
314 CHSR_SARS = (1 << 27),
315 CHSR_DONE = (1 << 25),
316 CHSR_MRDY = (1 << 23),
317 CHSR_DRDY = (1 << 21),
318 CHSR_LINKC = (1 << 19),
319 CHSR_CONTS_RB = (1 << 17),
320 CHSR_ERROR = (1 << 15),
321 CHSR_SABORT = (1 << 14),
322 CHSR_HABORT = (1 << 13),
323 CHSR_STOPS = (1 << 12),
324 CHSR_OPERR_mask = (3 << 10),
325 CHSR_OPERR_NOERROR = (0 << 10),
326 CHSR_OPERR_FIFOERROR = (1 << 10),
327 CHSR_OPERR_LINKERROR = (1 << 10), /* ??? */
328 CHSR_XFERR = (1 << 9),
329 CHSR_END = (1 << 8),
330 CHSR_DRQ1 = (1 << 7),
331 CHSR_DRQ0 = (1 << 6),
332 CHSR_LxERR_mask = (3 << 4),
333 CHSR_LBERR = (1 << 4),
334 CHSR_LRERR = (2 << 4),
335 CHSR_LOERR = (3 << 4),
336 CHSR_MxERR_mask = (3 << 2),
337 CHSR_MBERR = (1 << 2),
338 CHSR_MRERR = (2 << 2),
339 CHSR_MOERR = (3 << 2),
340 CHSR_DxERR_mask = (3 << 0),
341 CHSR_DBERR = (1 << 0),
342 CHSR_DRERR = (2 << 0),
343 CHSR_DOERR = (3 << 0),
344};
345 92
346static inline void mite_dma_reset(struct mite_channel *mite_chan) 93/*
347{ 94 * Mite registers (used outside of the mite driver)
348 writel(CHOR_DMARESET | CHOR_FRESET, 95 */
349 mite_chan->mite->mite_io_addr + MITE_CHOR(mite_chan->channel)); 96#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size */
350}; 97#define MITE_IODWBSR_1 0xc4 /* IO Device Window1 Base Size */
98#define WENAB BIT(7) /* window enable */
99#define MITE_IODWCR_1 0xf4
351 100
352#endif 101#endif
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 46647c64f369..0dcb826a9f1f 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -1,17 +1,16 @@
1/* 1/*
2 comedi/drivers/ni_660x.c 2 * Hardware driver for NI 660x devices
3 Hardware driver for NI 660x devices 3 *
4 4 * This program is free software; you can redistribute it and/or modify
5 This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by
6 it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or
7 the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version.
8 (at your option) any later version. 8 *
9 9 * This program is distributed in the hope that it will be useful,
10 This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details.
13 GNU General Public License for more details. 13 */
14*/
15 14
16/* 15/*
17 * Driver: ni_660x 16 * Driver: ni_660x
@@ -42,91 +41,13 @@
42#include "mite.h" 41#include "mite.h"
43#include "ni_tio.h" 42#include "ni_tio.h"
44 43
45enum ni_660x_constants {
46 min_counter_pfi_chan = 8,
47 max_dio_pfi_chan = 31,
48 counters_per_chip = 4
49};
50
51#define NUM_PFI_CHANNELS 40
52/* really there are only up to 3 dma channels, but the register layout allows
53for 4 */
54#define MAX_DMA_CHANNEL 4
55
56/* See Register-Level Programmer Manual page 3.1 */ 44/* See Register-Level Programmer Manual page 3.1 */
57enum ni_660x_register { 45enum ni_660x_register {
58 NI660X_G0_INT_ACK, 46 /* see enum ni_gpct_register */
59 NI660X_G0_STATUS, 47 NI660X_STC_DIO_PARALLEL_INPUT = NITIO_NUM_REGS,
60 NI660X_G1_INT_ACK,
61 NI660X_G1_STATUS,
62 NI660X_G01_STATUS,
63 NI660X_G0_CMD,
64 NI660X_STC_DIO_PARALLEL_INPUT,
65 NI660X_G1_CMD,
66 NI660X_G0_HW_SAVE,
67 NI660X_G1_HW_SAVE,
68 NI660X_STC_DIO_OUTPUT, 48 NI660X_STC_DIO_OUTPUT,
69 NI660X_STC_DIO_CONTROL, 49 NI660X_STC_DIO_CONTROL,
70 NI660X_G0_SW_SAVE,
71 NI660X_G1_SW_SAVE,
72 NI660X_G0_MODE,
73 NI660X_G01_STATUS1,
74 NI660X_G1_MODE,
75 NI660X_STC_DIO_SERIAL_INPUT, 50 NI660X_STC_DIO_SERIAL_INPUT,
76 NI660X_G0_LOADA,
77 NI660X_G01_STATUS2,
78 NI660X_G0_LOADB,
79 NI660X_G1_LOADA,
80 NI660X_G1_LOADB,
81 NI660X_G0_INPUT_SEL,
82 NI660X_G1_INPUT_SEL,
83 NI660X_G0_AUTO_INC,
84 NI660X_G1_AUTO_INC,
85 NI660X_G01_RESET,
86 NI660X_G0_INT_ENA,
87 NI660X_G1_INT_ENA,
88 NI660X_G0_CNT_MODE,
89 NI660X_G1_CNT_MODE,
90 NI660X_G0_GATE2,
91 NI660X_G1_GATE2,
92 NI660X_G0_DMA_CFG,
93 NI660X_G0_DMA_STATUS,
94 NI660X_G1_DMA_CFG,
95 NI660X_G1_DMA_STATUS,
96 NI660X_G2_INT_ACK,
97 NI660X_G2_STATUS,
98 NI660X_G3_INT_ACK,
99 NI660X_G3_STATUS,
100 NI660X_G23_STATUS,
101 NI660X_G2_CMD,
102 NI660X_G3_CMD,
103 NI660X_G2_HW_SAVE,
104 NI660X_G3_HW_SAVE,
105 NI660X_G2_SW_SAVE,
106 NI660X_G3_SW_SAVE,
107 NI660X_G2_MODE,
108 NI660X_G23_STATUS1,
109 NI660X_G3_MODE,
110 NI660X_G2_LOADA,
111 NI660X_G23_STATUS2,
112 NI660X_G2_LOADB,
113 NI660X_G3_LOADA,
114 NI660X_G3_LOADB,
115 NI660X_G2_INPUT_SEL,
116 NI660X_G3_INPUT_SEL,
117 NI660X_G2_AUTO_INC,
118 NI660X_G3_AUTO_INC,
119 NI660X_G23_RESET,
120 NI660X_G2_INT_ENA,
121 NI660X_G3_INT_ENA,
122 NI660X_G2_CNT_MODE,
123 NI660X_G3_CNT_MODE,
124 NI660X_G3_GATE2,
125 NI660X_G2_GATE2,
126 NI660X_G2_DMA_CFG,
127 NI660X_G2_DMA_STATUS,
128 NI660X_G3_DMA_CFG,
129 NI660X_G3_DMA_STATUS,
130 NI660X_DIO32_INPUT, 51 NI660X_DIO32_INPUT,
131 NI660X_DIO32_OUTPUT, 52 NI660X_DIO32_OUTPUT,
132 NI660X_CLK_CFG, 53 NI660X_CLK_CFG,
@@ -156,224 +77,134 @@ enum ni_660x_register {
156 NI660X_NUM_REGS, 77 NI660X_NUM_REGS,
157}; 78};
158 79
159static inline unsigned IOConfigReg(unsigned pfi_channel) 80#define NI660X_CLK_CFG_COUNTER_SWAP BIT(21)
160{
161 unsigned reg = NI660X_IO_CFG_0_1 + pfi_channel / 2;
162
163 BUG_ON(reg > NI660X_IO_CFG_38_39);
164 return reg;
165}
166
167enum ni_660x_register_width {
168 DATA_1B,
169 DATA_2B,
170 DATA_4B
171};
172 81
173enum ni_660x_register_direction { 82#define NI660X_GLOBAL_INT_COUNTER0 BIT(8)
174 NI_660x_READ, 83#define NI660X_GLOBAL_INT_COUNTER1 BIT(9)
175 NI_660x_WRITE, 84#define NI660X_GLOBAL_INT_COUNTER2 BIT(10)
176 NI_660x_READ_WRITE 85#define NI660X_GLOBAL_INT_COUNTER3 BIT(11)
177}; 86#define NI660X_GLOBAL_INT_CASCADE BIT(29)
87#define NI660X_GLOBAL_INT_GLOBAL_POL BIT(30)
88#define NI660X_GLOBAL_INT_GLOBAL BIT(31)
178 89
179enum ni_660x_pfi_output_select { 90#define NI660X_DMA_CFG_SEL(_c, _s) (((_s) & 0x1f) << (8 * (_c)))
180 pfi_output_select_high_Z = 0, 91#define NI660X_DMA_CFG_SEL_MASK(_c) NI660X_DMA_CFG_SEL((_c), 0x1f)
181 pfi_output_select_counter = 1, 92#define NI660X_DMA_CFG_SEL_NONE(_c) NI660X_DMA_CFG_SEL((_c), 0x1f)
182 pfi_output_select_do = 2, 93#define NI660X_DMA_CFG_RESET(_c) NI660X_DMA_CFG_SEL((_c), 0x80)
183 num_pfi_output_selects
184};
185 94
186enum ni_660x_subdevices { 95#define NI660X_IO_CFG(x) (NI660X_IO_CFG_0_1 + ((x) / 2))
187 NI_660X_DIO_SUBDEV = 1, 96#define NI660X_IO_CFG_OUT_SEL(_c, _s) (((_s) & 0x3) << (((_c) % 2) ? 0 : 8))
188 NI_660X_GPCT_SUBDEV_0 = 2 97#define NI660X_IO_CFG_OUT_SEL_MASK(_c) NI660X_IO_CFG_OUT_SEL((_c), 0x3)
189}; 98#define NI660X_IO_CFG_IN_SEL(_c, _s) (((_s) & 0x7) << (((_c) % 2) ? 4 : 12))
190static inline unsigned NI_660X_GPCT_SUBDEV(unsigned index) 99#define NI660X_IO_CFG_IN_SEL_MASK(_c) NI660X_IO_CFG_IN_SEL((_c), 0x7)
191{
192 return NI_660X_GPCT_SUBDEV_0 + index;
193}
194 100
195struct NI_660xRegisterData { 101struct ni_660x_register_data {
196 const char *name; /* Register Name */
197 int offset; /* Offset from base address from GPCT chip */ 102 int offset; /* Offset from base address from GPCT chip */
198 enum ni_660x_register_direction direction; 103 char size; /* 2 or 4 bytes */
199 enum ni_660x_register_width size; /* 1 byte, 2 bytes, or 4 bytes */
200};
201
202static const struct NI_660xRegisterData registerData[NI660X_NUM_REGS] = {
203 {"G0 Interrupt Acknowledge", 0x004, NI_660x_WRITE, DATA_2B},
204 {"G0 Status Register", 0x004, NI_660x_READ, DATA_2B},
205 {"G1 Interrupt Acknowledge", 0x006, NI_660x_WRITE, DATA_2B},
206 {"G1 Status Register", 0x006, NI_660x_READ, DATA_2B},
207 {"G01 Status Register ", 0x008, NI_660x_READ, DATA_2B},
208 {"G0 Command Register", 0x00C, NI_660x_WRITE, DATA_2B},
209 {"STC DIO Parallel Input", 0x00E, NI_660x_READ, DATA_2B},
210 {"G1 Command Register", 0x00E, NI_660x_WRITE, DATA_2B},
211 {"G0 HW Save Register", 0x010, NI_660x_READ, DATA_4B},
212 {"G1 HW Save Register", 0x014, NI_660x_READ, DATA_4B},
213 {"STC DIO Output", 0x014, NI_660x_WRITE, DATA_2B},
214 {"STC DIO Control", 0x016, NI_660x_WRITE, DATA_2B},
215 {"G0 SW Save Register", 0x018, NI_660x_READ, DATA_4B},
216 {"G1 SW Save Register", 0x01C, NI_660x_READ, DATA_4B},
217 {"G0 Mode Register", 0x034, NI_660x_WRITE, DATA_2B},
218 {"G01 Joint Status 1 Register", 0x036, NI_660x_READ, DATA_2B},
219 {"G1 Mode Register", 0x036, NI_660x_WRITE, DATA_2B},
220 {"STC DIO Serial Input", 0x038, NI_660x_READ, DATA_2B},
221 {"G0 Load A Register", 0x038, NI_660x_WRITE, DATA_4B},
222 {"G01 Joint Status 2 Register", 0x03A, NI_660x_READ, DATA_2B},
223 {"G0 Load B Register", 0x03C, NI_660x_WRITE, DATA_4B},
224 {"G1 Load A Register", 0x040, NI_660x_WRITE, DATA_4B},
225 {"G1 Load B Register", 0x044, NI_660x_WRITE, DATA_4B},
226 {"G0 Input Select Register", 0x048, NI_660x_WRITE, DATA_2B},
227 {"G1 Input Select Register", 0x04A, NI_660x_WRITE, DATA_2B},
228 {"G0 Autoincrement Register", 0x088, NI_660x_WRITE, DATA_2B},
229 {"G1 Autoincrement Register", 0x08A, NI_660x_WRITE, DATA_2B},
230 {"G01 Joint Reset Register", 0x090, NI_660x_WRITE, DATA_2B},
231 {"G0 Interrupt Enable", 0x092, NI_660x_WRITE, DATA_2B},
232 {"G1 Interrupt Enable", 0x096, NI_660x_WRITE, DATA_2B},
233 {"G0 Counting Mode Register", 0x0B0, NI_660x_WRITE, DATA_2B},
234 {"G1 Counting Mode Register", 0x0B2, NI_660x_WRITE, DATA_2B},
235 {"G0 Second Gate Register", 0x0B4, NI_660x_WRITE, DATA_2B},
236 {"G1 Second Gate Register", 0x0B6, NI_660x_WRITE, DATA_2B},
237 {"G0 DMA Config Register", 0x0B8, NI_660x_WRITE, DATA_2B},
238 {"G0 DMA Status Register", 0x0B8, NI_660x_READ, DATA_2B},
239 {"G1 DMA Config Register", 0x0BA, NI_660x_WRITE, DATA_2B},
240 {"G1 DMA Status Register", 0x0BA, NI_660x_READ, DATA_2B},
241 {"G2 Interrupt Acknowledge", 0x104, NI_660x_WRITE, DATA_2B},
242 {"G2 Status Register", 0x104, NI_660x_READ, DATA_2B},
243 {"G3 Interrupt Acknowledge", 0x106, NI_660x_WRITE, DATA_2B},
244 {"G3 Status Register", 0x106, NI_660x_READ, DATA_2B},
245 {"G23 Status Register", 0x108, NI_660x_READ, DATA_2B},
246 {"G2 Command Register", 0x10C, NI_660x_WRITE, DATA_2B},
247 {"G3 Command Register", 0x10E, NI_660x_WRITE, DATA_2B},
248 {"G2 HW Save Register", 0x110, NI_660x_READ, DATA_4B},
249 {"G3 HW Save Register", 0x114, NI_660x_READ, DATA_4B},
250 {"G2 SW Save Register", 0x118, NI_660x_READ, DATA_4B},
251 {"G3 SW Save Register", 0x11C, NI_660x_READ, DATA_4B},
252 {"G2 Mode Register", 0x134, NI_660x_WRITE, DATA_2B},
253 {"G23 Joint Status 1 Register", 0x136, NI_660x_READ, DATA_2B},
254 {"G3 Mode Register", 0x136, NI_660x_WRITE, DATA_2B},
255 {"G2 Load A Register", 0x138, NI_660x_WRITE, DATA_4B},
256 {"G23 Joint Status 2 Register", 0x13A, NI_660x_READ, DATA_2B},
257 {"G2 Load B Register", 0x13C, NI_660x_WRITE, DATA_4B},
258 {"G3 Load A Register", 0x140, NI_660x_WRITE, DATA_4B},
259 {"G3 Load B Register", 0x144, NI_660x_WRITE, DATA_4B},
260 {"G2 Input Select Register", 0x148, NI_660x_WRITE, DATA_2B},
261 {"G3 Input Select Register", 0x14A, NI_660x_WRITE, DATA_2B},
262 {"G2 Autoincrement Register", 0x188, NI_660x_WRITE, DATA_2B},
263 {"G3 Autoincrement Register", 0x18A, NI_660x_WRITE, DATA_2B},
264 {"G23 Joint Reset Register", 0x190, NI_660x_WRITE, DATA_2B},
265 {"G2 Interrupt Enable", 0x192, NI_660x_WRITE, DATA_2B},
266 {"G3 Interrupt Enable", 0x196, NI_660x_WRITE, DATA_2B},
267 {"G2 Counting Mode Register", 0x1B0, NI_660x_WRITE, DATA_2B},
268 {"G3 Counting Mode Register", 0x1B2, NI_660x_WRITE, DATA_2B},
269 {"G3 Second Gate Register", 0x1B6, NI_660x_WRITE, DATA_2B},
270 {"G2 Second Gate Register", 0x1B4, NI_660x_WRITE, DATA_2B},
271 {"G2 DMA Config Register", 0x1B8, NI_660x_WRITE, DATA_2B},
272 {"G2 DMA Status Register", 0x1B8, NI_660x_READ, DATA_2B},
273 {"G3 DMA Config Register", 0x1BA, NI_660x_WRITE, DATA_2B},
274 {"G3 DMA Status Register", 0x1BA, NI_660x_READ, DATA_2B},
275 {"32 bit Digital Input", 0x414, NI_660x_READ, DATA_4B},
276 {"32 bit Digital Output", 0x510, NI_660x_WRITE, DATA_4B},
277 {"Clock Config Register", 0x73C, NI_660x_WRITE, DATA_4B},
278 {"Global Interrupt Status Register", 0x754, NI_660x_READ, DATA_4B},
279 {"DMA Configuration Register", 0x76C, NI_660x_WRITE, DATA_4B},
280 {"Global Interrupt Config Register", 0x770, NI_660x_WRITE, DATA_4B},
281 {"IO Config Register 0-1", 0x77C, NI_660x_READ_WRITE, DATA_2B},
282 {"IO Config Register 2-3", 0x77E, NI_660x_READ_WRITE, DATA_2B},
283 {"IO Config Register 4-5", 0x780, NI_660x_READ_WRITE, DATA_2B},
284 {"IO Config Register 6-7", 0x782, NI_660x_READ_WRITE, DATA_2B},
285 {"IO Config Register 8-9", 0x784, NI_660x_READ_WRITE, DATA_2B},
286 {"IO Config Register 10-11", 0x786, NI_660x_READ_WRITE, DATA_2B},
287 {"IO Config Register 12-13", 0x788, NI_660x_READ_WRITE, DATA_2B},
288 {"IO Config Register 14-15", 0x78A, NI_660x_READ_WRITE, DATA_2B},
289 {"IO Config Register 16-17", 0x78C, NI_660x_READ_WRITE, DATA_2B},
290 {"IO Config Register 18-19", 0x78E, NI_660x_READ_WRITE, DATA_2B},
291 {"IO Config Register 20-21", 0x790, NI_660x_READ_WRITE, DATA_2B},
292 {"IO Config Register 22-23", 0x792, NI_660x_READ_WRITE, DATA_2B},
293 {"IO Config Register 24-25", 0x794, NI_660x_READ_WRITE, DATA_2B},
294 {"IO Config Register 26-27", 0x796, NI_660x_READ_WRITE, DATA_2B},
295 {"IO Config Register 28-29", 0x798, NI_660x_READ_WRITE, DATA_2B},
296 {"IO Config Register 30-31", 0x79A, NI_660x_READ_WRITE, DATA_2B},
297 {"IO Config Register 32-33", 0x79C, NI_660x_READ_WRITE, DATA_2B},
298 {"IO Config Register 34-35", 0x79E, NI_660x_READ_WRITE, DATA_2B},
299 {"IO Config Register 36-37", 0x7A0, NI_660x_READ_WRITE, DATA_2B},
300 {"IO Config Register 38-39", 0x7A2, NI_660x_READ_WRITE, DATA_2B}
301};
302
303/* kind of ENABLE for the second counter */
304enum clock_config_register_bits {
305 CounterSwap = 0x1 << 21
306};
307
308/* ioconfigreg */
309static inline unsigned ioconfig_bitshift(unsigned pfi_channel)
310{
311 return (pfi_channel % 2) ? 0 : 8;
312}
313
314static inline unsigned pfi_output_select_mask(unsigned pfi_channel)
315{
316 return 0x3 << ioconfig_bitshift(pfi_channel);
317}
318
319static inline unsigned pfi_output_select_bits(unsigned pfi_channel,
320 unsigned output_select)
321{
322 return (output_select & 0x3) << ioconfig_bitshift(pfi_channel);
323}
324
325static inline unsigned pfi_input_select_mask(unsigned pfi_channel)
326{
327 return 0x7 << (4 + ioconfig_bitshift(pfi_channel));
328}
329
330static inline unsigned pfi_input_select_bits(unsigned pfi_channel,
331 unsigned input_select)
332{
333 return (input_select & 0x7) << (4 + ioconfig_bitshift(pfi_channel));
334}
335
336/* dma configuration register bits */
337static inline unsigned dma_select_mask(unsigned dma_channel)
338{
339 BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
340 return 0x1f << (8 * dma_channel);
341}
342
343enum dma_selection {
344 dma_selection_none = 0x1f,
345};
346
347static inline unsigned dma_select_bits(unsigned dma_channel, unsigned selection)
348{
349 BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
350 return (selection << (8 * dma_channel)) & dma_select_mask(dma_channel);
351}
352
353static inline unsigned dma_reset_bit(unsigned dma_channel)
354{
355 BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
356 return 0x80 << (8 * dma_channel);
357}
358
359enum global_interrupt_status_register_bits {
360 Counter_0_Int_Bit = 0x100,
361 Counter_1_Int_Bit = 0x200,
362 Counter_2_Int_Bit = 0x400,
363 Counter_3_Int_Bit = 0x800,
364 Cascade_Int_Bit = 0x20000000,
365 Global_Int_Bit = 0x80000000
366}; 104};
367 105
368enum global_interrupt_config_register_bits { 106static const struct ni_660x_register_data ni_660x_reg_data[NI660X_NUM_REGS] = {
369 Cascade_Int_Enable_Bit = 0x20000000, 107 [NITIO_G0_INT_ACK] = { 0x004, 2 }, /* write */
370 Global_Int_Polarity_Bit = 0x40000000, 108 [NITIO_G0_STATUS] = { 0x004, 2 }, /* read */
371 Global_Int_Enable_Bit = 0x80000000 109 [NITIO_G1_INT_ACK] = { 0x006, 2 }, /* write */
110 [NITIO_G1_STATUS] = { 0x006, 2 }, /* read */
111 [NITIO_G01_STATUS] = { 0x008, 2 }, /* read */
112 [NITIO_G0_CMD] = { 0x00c, 2 }, /* write */
113 [NI660X_STC_DIO_PARALLEL_INPUT] = { 0x00e, 2 }, /* read */
114 [NITIO_G1_CMD] = { 0x00e, 2 }, /* write */
115 [NITIO_G0_HW_SAVE] = { 0x010, 4 }, /* read */
116 [NITIO_G1_HW_SAVE] = { 0x014, 4 }, /* read */
117 [NI660X_STC_DIO_OUTPUT] = { 0x014, 2 }, /* write */
118 [NI660X_STC_DIO_CONTROL] = { 0x016, 2 }, /* write */
119 [NITIO_G0_SW_SAVE] = { 0x018, 4 }, /* read */
120 [NITIO_G1_SW_SAVE] = { 0x01c, 4 }, /* read */
121 [NITIO_G0_MODE] = { 0x034, 2 }, /* write */
122 [NITIO_G01_STATUS1] = { 0x036, 2 }, /* read */
123 [NITIO_G1_MODE] = { 0x036, 2 }, /* write */
124 [NI660X_STC_DIO_SERIAL_INPUT] = { 0x038, 2 }, /* read */
125 [NITIO_G0_LOADA] = { 0x038, 4 }, /* write */
126 [NITIO_G01_STATUS2] = { 0x03a, 2 }, /* read */
127 [NITIO_G0_LOADB] = { 0x03c, 4 }, /* write */
128 [NITIO_G1_LOADA] = { 0x040, 4 }, /* write */
129 [NITIO_G1_LOADB] = { 0x044, 4 }, /* write */
130 [NITIO_G0_INPUT_SEL] = { 0x048, 2 }, /* write */
131 [NITIO_G1_INPUT_SEL] = { 0x04a, 2 }, /* write */
132 [NITIO_G0_AUTO_INC] = { 0x088, 2 }, /* write */
133 [NITIO_G1_AUTO_INC] = { 0x08a, 2 }, /* write */
134 [NITIO_G01_RESET] = { 0x090, 2 }, /* write */
135 [NITIO_G0_INT_ENA] = { 0x092, 2 }, /* write */
136 [NITIO_G1_INT_ENA] = { 0x096, 2 }, /* write */
137 [NITIO_G0_CNT_MODE] = { 0x0b0, 2 }, /* write */
138 [NITIO_G1_CNT_MODE] = { 0x0b2, 2 }, /* write */
139 [NITIO_G0_GATE2] = { 0x0b4, 2 }, /* write */
140 [NITIO_G1_GATE2] = { 0x0b6, 2 }, /* write */
141 [NITIO_G0_DMA_CFG] = { 0x0b8, 2 }, /* write */
142 [NITIO_G0_DMA_STATUS] = { 0x0b8, 2 }, /* read */
143 [NITIO_G1_DMA_CFG] = { 0x0ba, 2 }, /* write */
144 [NITIO_G1_DMA_STATUS] = { 0x0ba, 2 }, /* read */
145 [NITIO_G2_INT_ACK] = { 0x104, 2 }, /* write */
146 [NITIO_G2_STATUS] = { 0x104, 2 }, /* read */
147 [NITIO_G3_INT_ACK] = { 0x106, 2 }, /* write */
148 [NITIO_G3_STATUS] = { 0x106, 2 }, /* read */
149 [NITIO_G23_STATUS] = { 0x108, 2 }, /* read */
150 [NITIO_G2_CMD] = { 0x10c, 2 }, /* write */
151 [NITIO_G3_CMD] = { 0x10e, 2 }, /* write */
152 [NITIO_G2_HW_SAVE] = { 0x110, 4 }, /* read */
153 [NITIO_G3_HW_SAVE] = { 0x114, 4 }, /* read */
154 [NITIO_G2_SW_SAVE] = { 0x118, 4 }, /* read */
155 [NITIO_G3_SW_SAVE] = { 0x11c, 4 }, /* read */
156 [NITIO_G2_MODE] = { 0x134, 2 }, /* write */
157 [NITIO_G23_STATUS1] = { 0x136, 2 }, /* read */
158 [NITIO_G3_MODE] = { 0x136, 2 }, /* write */
159 [NITIO_G2_LOADA] = { 0x138, 4 }, /* write */
160 [NITIO_G23_STATUS2] = { 0x13a, 2 }, /* read */
161 [NITIO_G2_LOADB] = { 0x13c, 4 }, /* write */
162 [NITIO_G3_LOADA] = { 0x140, 4 }, /* write */
163 [NITIO_G3_LOADB] = { 0x144, 4 }, /* write */
164 [NITIO_G2_INPUT_SEL] = { 0x148, 2 }, /* write */
165 [NITIO_G3_INPUT_SEL] = { 0x14a, 2 }, /* write */
166 [NITIO_G2_AUTO_INC] = { 0x188, 2 }, /* write */
167 [NITIO_G3_AUTO_INC] = { 0x18a, 2 }, /* write */
168 [NITIO_G23_RESET] = { 0x190, 2 }, /* write */
169 [NITIO_G2_INT_ENA] = { 0x192, 2 }, /* write */
170 [NITIO_G3_INT_ENA] = { 0x196, 2 }, /* write */
171 [NITIO_G2_CNT_MODE] = { 0x1b0, 2 }, /* write */
172 [NITIO_G3_CNT_MODE] = { 0x1b2, 2 }, /* write */
173 [NITIO_G2_GATE2] = { 0x1b4, 2 }, /* write */
174 [NITIO_G3_GATE2] = { 0x1b6, 2 }, /* write */
175 [NITIO_G2_DMA_CFG] = { 0x1b8, 2 }, /* write */
176 [NITIO_G2_DMA_STATUS] = { 0x1b8, 2 }, /* read */
177 [NITIO_G3_DMA_CFG] = { 0x1ba, 2 }, /* write */
178 [NITIO_G3_DMA_STATUS] = { 0x1ba, 2 }, /* read */
179 [NI660X_DIO32_INPUT] = { 0x414, 4 }, /* read */
180 [NI660X_DIO32_OUTPUT] = { 0x510, 4 }, /* write */
181 [NI660X_CLK_CFG] = { 0x73c, 4 }, /* write */
182 [NI660X_GLOBAL_INT_STATUS] = { 0x754, 4 }, /* read */
183 [NI660X_DMA_CFG] = { 0x76c, 4 }, /* write */
184 [NI660X_GLOBAL_INT_CFG] = { 0x770, 4 }, /* write */
185 [NI660X_IO_CFG_0_1] = { 0x77c, 2 }, /* read/write */
186 [NI660X_IO_CFG_2_3] = { 0x77e, 2 }, /* read/write */
187 [NI660X_IO_CFG_4_5] = { 0x780, 2 }, /* read/write */
188 [NI660X_IO_CFG_6_7] = { 0x782, 2 }, /* read/write */
189 [NI660X_IO_CFG_8_9] = { 0x784, 2 }, /* read/write */
190 [NI660X_IO_CFG_10_11] = { 0x786, 2 }, /* read/write */
191 [NI660X_IO_CFG_12_13] = { 0x788, 2 }, /* read/write */
192 [NI660X_IO_CFG_14_15] = { 0x78a, 2 }, /* read/write */
193 [NI660X_IO_CFG_16_17] = { 0x78c, 2 }, /* read/write */
194 [NI660X_IO_CFG_18_19] = { 0x78e, 2 }, /* read/write */
195 [NI660X_IO_CFG_20_21] = { 0x790, 2 }, /* read/write */
196 [NI660X_IO_CFG_22_23] = { 0x792, 2 }, /* read/write */
197 [NI660X_IO_CFG_24_25] = { 0x794, 2 }, /* read/write */
198 [NI660X_IO_CFG_26_27] = { 0x796, 2 }, /* read/write */
199 [NI660X_IO_CFG_28_29] = { 0x798, 2 }, /* read/write */
200 [NI660X_IO_CFG_30_31] = { 0x79a, 2 }, /* read/write */
201 [NI660X_IO_CFG_32_33] = { 0x79c, 2 }, /* read/write */
202 [NI660X_IO_CFG_34_35] = { 0x79e, 2 }, /* read/write */
203 [NI660X_IO_CFG_36_37] = { 0x7a0, 2 }, /* read/write */
204 [NI660X_IO_CFG_38_39] = { 0x7a2, 2 } /* read/write */
372}; 205};
373 206
374/* Offset of the GPCT chips from the base-address of the card */ 207#define NI660X_CHIP_OFFSET 0x800
375/* First chip is at base-address + 0x00, etc. */
376static const unsigned GPCT_OFFSET[2] = { 0x0, 0x800 };
377 208
378enum ni_660x_boardid { 209enum ni_660x_boardid {
379 BOARD_PCI6601, 210 BOARD_PCI6601,
@@ -385,7 +216,7 @@ enum ni_660x_boardid {
385 216
386struct ni_660x_board { 217struct ni_660x_board {
387 const char *name; 218 const char *name;
388 unsigned n_chips; /* total number of TIO chips */ 219 unsigned int n_chips; /* total number of TIO chips */
389}; 220};
390 221
391static const struct ni_660x_board ni_660x_boards[] = { 222static const struct ni_660x_board ni_660x_boards[] = {
@@ -411,280 +242,95 @@ static const struct ni_660x_board ni_660x_boards[] = {
411 }, 242 },
412}; 243};
413 244
414#define NI_660X_MAX_NUM_CHIPS 2 245#define NI660X_NUM_PFI_CHANNELS 40
415#define NI_660X_MAX_NUM_COUNTERS (NI_660X_MAX_NUM_CHIPS * counters_per_chip) 246
247/* there are only up to 3 dma channels, but the register layout allows for 4 */
248#define NI660X_MAX_DMA_CHANNEL 4
249
250#define NI660X_COUNTERS_PER_CHIP 4
251#define NI660X_MAX_CHIPS 2
252#define NI660X_MAX_COUNTERS (NI660X_MAX_CHIPS * \
253 NI660X_COUNTERS_PER_CHIP)
416 254
417struct ni_660x_private { 255struct ni_660x_private {
418 struct mite_struct *mite; 256 struct mite *mite;
419 struct ni_gpct_device *counter_dev; 257 struct ni_gpct_device *counter_dev;
420 uint64_t pfi_direction_bits; 258 struct mite_ring *ring[NI660X_MAX_CHIPS][NI660X_COUNTERS_PER_CHIP];
421 struct mite_dma_descriptor_ring 259 /* protects mite channel request/release */
422 *mite_rings[NI_660X_MAX_NUM_CHIPS][counters_per_chip];
423 spinlock_t mite_channel_lock; 260 spinlock_t mite_channel_lock;
424 /* interrupt_lock prevents races between interrupt and comedi_poll */ 261 /* prevents races between interrupt and comedi_poll */
425 spinlock_t interrupt_lock; 262 spinlock_t interrupt_lock;
426 unsigned dma_configuration_soft_copies[NI_660X_MAX_NUM_CHIPS]; 263 unsigned int dma_cfg[NI660X_MAX_CHIPS];
427 spinlock_t soft_reg_copy_lock; 264 unsigned int io_cfg[NI660X_NUM_PFI_CHANNELS];
428 unsigned short pfi_output_selects[NUM_PFI_CHANNELS]; 265 u64 io_dir;
429}; 266};
430 267
431static inline unsigned ni_660x_num_counters(struct comedi_device *dev) 268static void ni_660x_write(struct comedi_device *dev, unsigned int chip,
432{ 269 unsigned int bits, unsigned int reg)
433 const struct ni_660x_board *board = dev->board_ptr;
434
435 return board->n_chips * counters_per_chip;
436}
437
438static enum ni_660x_register ni_gpct_to_660x_register(enum ni_gpct_register reg)
439{
440 switch (reg) {
441 case NITIO_G0_AUTO_INC:
442 return NI660X_G0_AUTO_INC;
443 case NITIO_G1_AUTO_INC:
444 return NI660X_G1_AUTO_INC;
445 case NITIO_G2_AUTO_INC:
446 return NI660X_G2_AUTO_INC;
447 case NITIO_G3_AUTO_INC:
448 return NI660X_G3_AUTO_INC;
449 case NITIO_G0_CMD:
450 return NI660X_G0_CMD;
451 case NITIO_G1_CMD:
452 return NI660X_G1_CMD;
453 case NITIO_G2_CMD:
454 return NI660X_G2_CMD;
455 case NITIO_G3_CMD:
456 return NI660X_G3_CMD;
457 case NITIO_G0_HW_SAVE:
458 return NI660X_G0_HW_SAVE;
459 case NITIO_G1_HW_SAVE:
460 return NI660X_G1_HW_SAVE;
461 case NITIO_G2_HW_SAVE:
462 return NI660X_G2_HW_SAVE;
463 case NITIO_G3_HW_SAVE:
464 return NI660X_G3_HW_SAVE;
465 case NITIO_G0_SW_SAVE:
466 return NI660X_G0_SW_SAVE;
467 case NITIO_G1_SW_SAVE:
468 return NI660X_G1_SW_SAVE;
469 case NITIO_G2_SW_SAVE:
470 return NI660X_G2_SW_SAVE;
471 case NITIO_G3_SW_SAVE:
472 return NI660X_G3_SW_SAVE;
473 case NITIO_G0_MODE:
474 return NI660X_G0_MODE;
475 case NITIO_G1_MODE:
476 return NI660X_G1_MODE;
477 case NITIO_G2_MODE:
478 return NI660X_G2_MODE;
479 case NITIO_G3_MODE:
480 return NI660X_G3_MODE;
481 case NITIO_G0_LOADA:
482 return NI660X_G0_LOADA;
483 case NITIO_G1_LOADA:
484 return NI660X_G1_LOADA;
485 case NITIO_G2_LOADA:
486 return NI660X_G2_LOADA;
487 case NITIO_G3_LOADA:
488 return NI660X_G3_LOADA;
489 case NITIO_G0_LOADB:
490 return NI660X_G0_LOADB;
491 case NITIO_G1_LOADB:
492 return NI660X_G1_LOADB;
493 case NITIO_G2_LOADB:
494 return NI660X_G2_LOADB;
495 case NITIO_G3_LOADB:
496 return NI660X_G3_LOADB;
497 case NITIO_G0_INPUT_SEL:
498 return NI660X_G0_INPUT_SEL;
499 case NITIO_G1_INPUT_SEL:
500 return NI660X_G1_INPUT_SEL;
501 case NITIO_G2_INPUT_SEL:
502 return NI660X_G2_INPUT_SEL;
503 case NITIO_G3_INPUT_SEL:
504 return NI660X_G3_INPUT_SEL;
505 case NITIO_G01_STATUS:
506 return NI660X_G01_STATUS;
507 case NITIO_G23_STATUS:
508 return NI660X_G23_STATUS;
509 case NITIO_G01_RESET:
510 return NI660X_G01_RESET;
511 case NITIO_G23_RESET:
512 return NI660X_G23_RESET;
513 case NITIO_G01_STATUS1:
514 return NI660X_G01_STATUS1;
515 case NITIO_G23_STATUS1:
516 return NI660X_G23_STATUS1;
517 case NITIO_G01_STATUS2:
518 return NI660X_G01_STATUS2;
519 case NITIO_G23_STATUS2:
520 return NI660X_G23_STATUS2;
521 case NITIO_G0_CNT_MODE:
522 return NI660X_G0_CNT_MODE;
523 case NITIO_G1_CNT_MODE:
524 return NI660X_G1_CNT_MODE;
525 case NITIO_G2_CNT_MODE:
526 return NI660X_G2_CNT_MODE;
527 case NITIO_G3_CNT_MODE:
528 return NI660X_G3_CNT_MODE;
529 case NITIO_G0_GATE2:
530 return NI660X_G0_GATE2;
531 case NITIO_G1_GATE2:
532 return NI660X_G1_GATE2;
533 case NITIO_G2_GATE2:
534 return NI660X_G2_GATE2;
535 case NITIO_G3_GATE2:
536 return NI660X_G3_GATE2;
537 case NITIO_G0_DMA_CFG:
538 return NI660X_G0_DMA_CFG;
539 case NITIO_G0_DMA_STATUS:
540 return NI660X_G0_DMA_STATUS;
541 case NITIO_G1_DMA_CFG:
542 return NI660X_G1_DMA_CFG;
543 case NITIO_G1_DMA_STATUS:
544 return NI660X_G1_DMA_STATUS;
545 case NITIO_G2_DMA_CFG:
546 return NI660X_G2_DMA_CFG;
547 case NITIO_G2_DMA_STATUS:
548 return NI660X_G2_DMA_STATUS;
549 case NITIO_G3_DMA_CFG:
550 return NI660X_G3_DMA_CFG;
551 case NITIO_G3_DMA_STATUS:
552 return NI660X_G3_DMA_STATUS;
553 case NITIO_G0_INT_ACK:
554 return NI660X_G0_INT_ACK;
555 case NITIO_G1_INT_ACK:
556 return NI660X_G1_INT_ACK;
557 case NITIO_G2_INT_ACK:
558 return NI660X_G2_INT_ACK;
559 case NITIO_G3_INT_ACK:
560 return NI660X_G3_INT_ACK;
561 case NITIO_G0_STATUS:
562 return NI660X_G0_STATUS;
563 case NITIO_G1_STATUS:
564 return NI660X_G1_STATUS;
565 case NITIO_G2_STATUS:
566 return NI660X_G2_STATUS;
567 case NITIO_G3_STATUS:
568 return NI660X_G3_STATUS;
569 case NITIO_G0_INT_ENA:
570 return NI660X_G0_INT_ENA;
571 case NITIO_G1_INT_ENA:
572 return NI660X_G1_INT_ENA;
573 case NITIO_G2_INT_ENA:
574 return NI660X_G2_INT_ENA;
575 case NITIO_G3_INT_ENA:
576 return NI660X_G3_INT_ENA;
577 default:
578 BUG();
579 return 0;
580 }
581}
582
583static inline void ni_660x_write_register(struct comedi_device *dev,
584 unsigned chip, unsigned bits,
585 enum ni_660x_register reg)
586{ 270{
587 unsigned int addr = GPCT_OFFSET[chip] + registerData[reg].offset; 271 unsigned int addr = (chip * NI660X_CHIP_OFFSET) +
272 ni_660x_reg_data[reg].offset;
588 273
589 switch (registerData[reg].size) { 274 if (ni_660x_reg_data[reg].size == 2)
590 case DATA_2B:
591 writew(bits, dev->mmio + addr); 275 writew(bits, dev->mmio + addr);
592 break; 276 else
593 case DATA_4B:
594 writel(bits, dev->mmio + addr); 277 writel(bits, dev->mmio + addr);
595 break;
596 default:
597 BUG();
598 break;
599 }
600} 278}
601 279
602static inline unsigned ni_660x_read_register(struct comedi_device *dev, 280static unsigned int ni_660x_read(struct comedi_device *dev,
603 unsigned chip, 281 unsigned int chip, unsigned int reg)
604 enum ni_660x_register reg)
605{ 282{
606 unsigned int addr = GPCT_OFFSET[chip] + registerData[reg].offset; 283 unsigned int addr = (chip * NI660X_CHIP_OFFSET) +
284 ni_660x_reg_data[reg].offset;
607 285
608 switch (registerData[reg].size) { 286 if (ni_660x_reg_data[reg].size == 2)
609 case DATA_2B:
610 return readw(dev->mmio + addr); 287 return readw(dev->mmio + addr);
611 case DATA_4B: 288 return readl(dev->mmio + addr);
612 return readl(dev->mmio + addr);
613 default:
614 BUG();
615 break;
616 }
617 return 0;
618} 289}
619 290
620static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits, 291static void ni_660x_gpct_write(struct ni_gpct *counter, unsigned int bits,
621 enum ni_gpct_register reg) 292 enum ni_gpct_register reg)
622{ 293{
623 struct comedi_device *dev = counter->counter_dev->dev; 294 struct comedi_device *dev = counter->counter_dev->dev;
624 enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
625 unsigned chip = counter->chip_index;
626 295
627 ni_660x_write_register(dev, chip, bits, ni_660x_register); 296 ni_660x_write(dev, counter->chip_index, bits, reg);
628} 297}
629 298
630static unsigned ni_gpct_read_register(struct ni_gpct *counter, 299static unsigned int ni_660x_gpct_read(struct ni_gpct *counter,
631 enum ni_gpct_register reg) 300 enum ni_gpct_register reg)
632{ 301{
633 struct comedi_device *dev = counter->counter_dev->dev; 302 struct comedi_device *dev = counter->counter_dev->dev;
634 enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
635 unsigned chip = counter->chip_index;
636
637 return ni_660x_read_register(dev, chip, ni_660x_register);
638}
639
640static inline struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private
641 *priv,
642 struct ni_gpct
643 *counter)
644{
645 unsigned chip = counter->chip_index;
646 303
647 return priv->mite_rings[chip][counter->counter_index]; 304 return ni_660x_read(dev, counter->chip_index, reg);
648} 305}
649 306
650static inline void ni_660x_set_dma_channel(struct comedi_device *dev, 307static inline void ni_660x_set_dma_channel(struct comedi_device *dev,
651 unsigned mite_channel, 308 unsigned int mite_channel,
652 struct ni_gpct *counter) 309 struct ni_gpct *counter)
653{ 310{
654 struct ni_660x_private *devpriv = dev->private; 311 struct ni_660x_private *devpriv = dev->private;
655 unsigned chip = counter->chip_index; 312 unsigned int chip = counter->chip_index;
656 unsigned long flags; 313
657 314 devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
658 spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags); 315 devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL(mite_channel,
659 devpriv->dma_configuration_soft_copies[chip] &= 316 counter->counter_index);
660 ~dma_select_mask(mite_channel); 317 ni_660x_write(dev, chip, devpriv->dma_cfg[chip] |
661 devpriv->dma_configuration_soft_copies[chip] |= 318 NI660X_DMA_CFG_RESET(mite_channel),
662 dma_select_bits(mite_channel, counter->counter_index); 319 NI660X_DMA_CFG);
663 ni_660x_write_register(dev, chip,
664 devpriv->dma_configuration_soft_copies[chip] |
665 dma_reset_bit(mite_channel), NI660X_DMA_CFG);
666 mmiowb(); 320 mmiowb();
667 spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
668} 321}
669 322
670static inline void ni_660x_unset_dma_channel(struct comedi_device *dev, 323static inline void ni_660x_unset_dma_channel(struct comedi_device *dev,
671 unsigned mite_channel, 324 unsigned int mite_channel,
672 struct ni_gpct *counter) 325 struct ni_gpct *counter)
673{ 326{
674 struct ni_660x_private *devpriv = dev->private; 327 struct ni_660x_private *devpriv = dev->private;
675 unsigned chip = counter->chip_index; 328 unsigned int chip = counter->chip_index;
676 unsigned long flags;
677 329
678 spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags); 330 devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
679 devpriv->dma_configuration_soft_copies[chip] &= 331 devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(mite_channel);
680 ~dma_select_mask(mite_channel); 332 ni_660x_write(dev, chip, devpriv->dma_cfg[chip], NI660X_DMA_CFG);
681 devpriv->dma_configuration_soft_copies[chip] |=
682 dma_select_bits(mite_channel, dma_selection_none);
683 ni_660x_write_register(dev, chip,
684 devpriv->dma_configuration_soft_copies[chip],
685 NI660X_DMA_CFG);
686 mmiowb(); 333 mmiowb();
687 spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
688} 334}
689 335
690static int ni_660x_request_mite_channel(struct comedi_device *dev, 336static int ni_660x_request_mite_channel(struct comedi_device *dev,
@@ -692,13 +338,13 @@ static int ni_660x_request_mite_channel(struct comedi_device *dev,
692 enum comedi_io_direction direction) 338 enum comedi_io_direction direction)
693{ 339{
694 struct ni_660x_private *devpriv = dev->private; 340 struct ni_660x_private *devpriv = dev->private;
695 unsigned long flags; 341 struct mite_ring *ring;
696 struct mite_channel *mite_chan; 342 struct mite_channel *mite_chan;
343 unsigned long flags;
697 344
698 spin_lock_irqsave(&devpriv->mite_channel_lock, flags); 345 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
699 BUG_ON(counter->mite_chan); 346 ring = devpriv->ring[counter->chip_index][counter->counter_index];
700 mite_chan = mite_request_channel(devpriv->mite, 347 mite_chan = mite_request_channel(devpriv->mite, ring);
701 mite_ring(devpriv, counter));
702 if (!mite_chan) { 348 if (!mite_chan) {
703 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 349 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
704 dev_err(dev->class_dev, 350 dev_err(dev->class_dev,
@@ -757,7 +403,7 @@ static int ni_660x_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
757 403
758static void set_tio_counterswap(struct comedi_device *dev, int chip) 404static void set_tio_counterswap(struct comedi_device *dev, int chip)
759{ 405{
760 unsigned bits = 0; 406 unsigned int bits = 0;
761 407
762 /* 408 /*
763 * See P. 3.5 of the Register-Level Programming manual. 409 * See P. 3.5 of the Register-Level Programming manual.
@@ -766,9 +412,9 @@ static void set_tio_counterswap(struct comedi_device *dev, int chip)
766 * first chip. 412 * first chip.
767 */ 413 */
768 if (chip) 414 if (chip)
769 bits = CounterSwap; 415 bits = NI660X_CLK_CFG_COUNTER_SWAP;
770 416
771 ni_660x_write_register(dev, chip, bits, NI660X_CLK_CFG); 417 ni_660x_write(dev, chip, bits, NI660X_CLK_CFG);
772} 418}
773 419
774static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev, 420static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev,
@@ -785,17 +431,20 @@ static irqreturn_t ni_660x_interrupt(int irq, void *d)
785 struct comedi_device *dev = d; 431 struct comedi_device *dev = d;
786 struct ni_660x_private *devpriv = dev->private; 432 struct ni_660x_private *devpriv = dev->private;
787 struct comedi_subdevice *s; 433 struct comedi_subdevice *s;
788 unsigned i; 434 unsigned int i;
789 unsigned long flags; 435 unsigned long flags;
790 436
791 if (!dev->attached) 437 if (!dev->attached)
792 return IRQ_NONE; 438 return IRQ_NONE;
439 /* make sure dev->attached is checked before doing anything else */
440 smp_mb();
441
793 /* lock to avoid race with comedi_poll */ 442 /* lock to avoid race with comedi_poll */
794 spin_lock_irqsave(&devpriv->interrupt_lock, flags); 443 spin_lock_irqsave(&devpriv->interrupt_lock, flags);
795 smp_mb(); 444 for (i = 0; i < dev->n_subdevices; ++i) {
796 for (i = 0; i < ni_660x_num_counters(dev); ++i) { 445 s = &dev->subdevices[i];
797 s = &dev->subdevices[NI_660X_GPCT_SUBDEV(i)]; 446 if (s->type == COMEDI_SUBD_COUNTER)
798 ni_660x_handle_gpct_interrupt(dev, s); 447 ni_660x_handle_gpct_interrupt(dev, s);
799 } 448 }
800 spin_unlock_irqrestore(&devpriv->interrupt_lock, flags); 449 spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
801 return IRQ_HANDLED; 450 return IRQ_HANDLED;
@@ -810,7 +459,7 @@ static int ni_660x_input_poll(struct comedi_device *dev,
810 459
811 /* lock to avoid race with comedi_poll */ 460 /* lock to avoid race with comedi_poll */
812 spin_lock_irqsave(&devpriv->interrupt_lock, flags); 461 spin_lock_irqsave(&devpriv->interrupt_lock, flags);
813 mite_sync_input_dma(counter->mite_chan, s); 462 mite_sync_dma(counter->mite_chan, s);
814 spin_unlock_irqrestore(&devpriv->interrupt_lock, flags); 463 spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
815 return comedi_buf_read_n_available(s); 464 return comedi_buf_read_n_available(s);
816} 465}
@@ -820,9 +469,11 @@ static int ni_660x_buf_change(struct comedi_device *dev,
820{ 469{
821 struct ni_660x_private *devpriv = dev->private; 470 struct ni_660x_private *devpriv = dev->private;
822 struct ni_gpct *counter = s->private; 471 struct ni_gpct *counter = s->private;
472 struct mite_ring *ring;
823 int ret; 473 int ret;
824 474
825 ret = mite_buf_change(mite_ring(devpriv, counter), s); 475 ring = devpriv->ring[counter->chip_index][counter->counter_index];
476 ret = mite_buf_change(ring, s);
826 if (ret < 0) 477 if (ret < 0)
827 return ret; 478 return ret;
828 479
@@ -832,7 +483,7 @@ static int ni_660x_buf_change(struct comedi_device *dev,
832static int ni_660x_allocate_private(struct comedi_device *dev) 483static int ni_660x_allocate_private(struct comedi_device *dev)
833{ 484{
834 struct ni_660x_private *devpriv; 485 struct ni_660x_private *devpriv;
835 unsigned i; 486 unsigned int i;
836 487
837 devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); 488 devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
838 if (!devpriv) 489 if (!devpriv)
@@ -840,9 +491,8 @@ static int ni_660x_allocate_private(struct comedi_device *dev)
840 491
841 spin_lock_init(&devpriv->mite_channel_lock); 492 spin_lock_init(&devpriv->mite_channel_lock);
842 spin_lock_init(&devpriv->interrupt_lock); 493 spin_lock_init(&devpriv->interrupt_lock);
843 spin_lock_init(&devpriv->soft_reg_copy_lock); 494 for (i = 0; i < NI660X_NUM_PFI_CHANNELS; ++i)
844 for (i = 0; i < NUM_PFI_CHANNELS; ++i) 495 devpriv->io_cfg[i] = NI_660X_PFI_OUTPUT_COUNTER;
845 devpriv->pfi_output_selects[i] = pfi_output_select_counter;
846 496
847 return 0; 497 return 0;
848} 498}
@@ -851,14 +501,13 @@ static int ni_660x_alloc_mite_rings(struct comedi_device *dev)
851{ 501{
852 const struct ni_660x_board *board = dev->board_ptr; 502 const struct ni_660x_board *board = dev->board_ptr;
853 struct ni_660x_private *devpriv = dev->private; 503 struct ni_660x_private *devpriv = dev->private;
854 unsigned i; 504 unsigned int i;
855 unsigned j; 505 unsigned int j;
856 506
857 for (i = 0; i < board->n_chips; ++i) { 507 for (i = 0; i < board->n_chips; ++i) {
858 for (j = 0; j < counters_per_chip; ++j) { 508 for (j = 0; j < NI660X_COUNTERS_PER_CHIP; ++j) {
859 devpriv->mite_rings[i][j] = 509 devpriv->ring[i][j] = mite_alloc_ring(devpriv->mite);
860 mite_alloc_ring(devpriv->mite); 510 if (!devpriv->ring[i][j])
861 if (!devpriv->mite_rings[i][j])
862 return -ENOMEM; 511 return -ENOMEM;
863 } 512 }
864 } 513 }
@@ -869,120 +518,101 @@ static void ni_660x_free_mite_rings(struct comedi_device *dev)
869{ 518{
870 const struct ni_660x_board *board = dev->board_ptr; 519 const struct ni_660x_board *board = dev->board_ptr;
871 struct ni_660x_private *devpriv = dev->private; 520 struct ni_660x_private *devpriv = dev->private;
872 unsigned i; 521 unsigned int i;
873 unsigned j; 522 unsigned int j;
874 523
875 for (i = 0; i < board->n_chips; ++i) { 524 for (i = 0; i < board->n_chips; ++i) {
876 for (j = 0; j < counters_per_chip; ++j) 525 for (j = 0; j < NI660X_COUNTERS_PER_CHIP; ++j)
877 mite_free_ring(devpriv->mite_rings[i][j]); 526 mite_free_ring(devpriv->ring[i][j]);
878 }
879}
880
881static void init_tio_chip(struct comedi_device *dev, int chipset)
882{
883 struct ni_660x_private *devpriv = dev->private;
884 unsigned i;
885
886 /* init dma configuration register */
887 devpriv->dma_configuration_soft_copies[chipset] = 0;
888 for (i = 0; i < MAX_DMA_CHANNEL; ++i) {
889 devpriv->dma_configuration_soft_copies[chipset] |=
890 dma_select_bits(i, dma_selection_none) & dma_select_mask(i);
891 } 527 }
892 ni_660x_write_register(dev, chipset,
893 devpriv->dma_configuration_soft_copies[chipset],
894 NI660X_DMA_CFG);
895 for (i = 0; i < NUM_PFI_CHANNELS; ++i)
896 ni_660x_write_register(dev, chipset, 0, IOConfigReg(i));
897} 528}
898 529
899static int ni_660x_dio_insn_bits(struct comedi_device *dev, 530static int ni_660x_dio_insn_bits(struct comedi_device *dev,
900 struct comedi_subdevice *s, 531 struct comedi_subdevice *s,
901 struct comedi_insn *insn, unsigned int *data) 532 struct comedi_insn *insn,
533 unsigned int *data)
902{ 534{
903 unsigned base_bitfield_channel = CR_CHAN(insn->chanspec); 535 unsigned int shift = CR_CHAN(insn->chanspec);
904 536 unsigned int mask = data[0] << shift;
905 /* Check if we have to write some bits */ 537 unsigned int bits = data[1] << shift;
906 if (data[0]) { 538
907 s->state &= ~(data[0] << base_bitfield_channel); 539 /*
908 s->state |= (data[0] & data[1]) << base_bitfield_channel; 540 * There are 40 channels in this subdevice but only 32 are usable
909 /* Write out the new digital output lines */ 541 * as DIO. The shift adjusts the mask/bits to account for the base
910 ni_660x_write_register(dev, 0, s->state, NI660X_DIO32_OUTPUT); 542 * channel in insn->chanspec. The state update can then be handled
543 * normally for the 32 usable channels.
544 */
545 if (mask) {
546 s->state &= ~mask;
547 s->state |= (bits & mask);
548 ni_660x_write(dev, 0, s->state, NI660X_DIO32_OUTPUT);
911 } 549 }
912 /* on return, data[1] contains the value of the digital 550
913 * input and output lines. */ 551 /*
914 data[1] = (ni_660x_read_register(dev, 0, NI660X_DIO32_INPUT) >> 552 * Return the input channels, shifted back to account for the base
915 base_bitfield_channel); 553 * channel.
554 */
555 data[1] = ni_660x_read(dev, 0, NI660X_DIO32_INPUT) >> shift;
916 556
917 return insn->n; 557 return insn->n;
918} 558}
919 559
920static void ni_660x_select_pfi_output(struct comedi_device *dev, 560static void ni_660x_select_pfi_output(struct comedi_device *dev,
921 unsigned pfi_channel, 561 unsigned int chan, unsigned int out_sel)
922 unsigned output_select)
923{ 562{
924 const struct ni_660x_board *board = dev->board_ptr; 563 const struct ni_660x_board *board = dev->board_ptr;
925 static const unsigned counter_4_7_first_pfi = 8; 564 unsigned int active_chip = 0;
926 static const unsigned counter_4_7_last_pfi = 23; 565 unsigned int idle_chip = 0;
927 unsigned active_chipset = 0; 566 unsigned int bits;
928 unsigned idle_chipset = 0;
929 unsigned active_bits;
930 unsigned idle_bits;
931 567
932 if (board->n_chips > 1) { 568 if (board->n_chips > 1) {
933 if (output_select == pfi_output_select_counter && 569 if (out_sel == NI_660X_PFI_OUTPUT_COUNTER &&
934 pfi_channel >= counter_4_7_first_pfi && 570 chan >= 8 && chan <= 23) {
935 pfi_channel <= counter_4_7_last_pfi) { 571 /* counters 4-7 pfi channels */
936 active_chipset = 1; 572 active_chip = 1;
937 idle_chipset = 0; 573 idle_chip = 0;
938 } else { 574 } else {
939 active_chipset = 0; 575 /* counters 0-3 pfi channels */
940 idle_chipset = 1; 576 active_chip = 0;
577 idle_chip = 1;
941 } 578 }
942 } 579 }
943 580
944 if (idle_chipset != active_chipset) { 581 if (idle_chip != active_chip) {
945 idle_bits = 582 /* set the pfi channel to high-z on the inactive chip */
946 ni_660x_read_register(dev, idle_chipset, 583 bits = ni_660x_read(dev, idle_chip, NI660X_IO_CFG(chan));
947 IOConfigReg(pfi_channel)); 584 bits &= ~NI660X_IO_CFG_OUT_SEL_MASK(chan);
948 idle_bits &= ~pfi_output_select_mask(pfi_channel); 585 bits |= NI660X_IO_CFG_OUT_SEL(chan, 0); /* high-z */
949 idle_bits |= 586 ni_660x_write(dev, idle_chip, bits, NI660X_IO_CFG(chan));
950 pfi_output_select_bits(pfi_channel,
951 pfi_output_select_high_Z);
952 ni_660x_write_register(dev, idle_chipset, idle_bits,
953 IOConfigReg(pfi_channel));
954 } 587 }
955 588
956 active_bits = 589 /* set the pfi channel output on the active chip */
957 ni_660x_read_register(dev, active_chipset, 590 bits = ni_660x_read(dev, active_chip, NI660X_IO_CFG(chan));
958 IOConfigReg(pfi_channel)); 591 bits &= ~NI660X_IO_CFG_OUT_SEL_MASK(chan);
959 active_bits &= ~pfi_output_select_mask(pfi_channel); 592 bits |= NI660X_IO_CFG_OUT_SEL(chan, out_sel);
960 active_bits |= pfi_output_select_bits(pfi_channel, output_select); 593 ni_660x_write(dev, active_chip, bits, NI660X_IO_CFG(chan));
961 ni_660x_write_register(dev, active_chipset, active_bits,
962 IOConfigReg(pfi_channel));
963} 594}
964 595
965static int ni_660x_set_pfi_routing(struct comedi_device *dev, unsigned chan, 596static int ni_660x_set_pfi_routing(struct comedi_device *dev,
966 unsigned source) 597 unsigned int chan, unsigned int source)
967{ 598{
968 struct ni_660x_private *devpriv = dev->private; 599 struct ni_660x_private *devpriv = dev->private;
969 600
970 if (source > num_pfi_output_selects) 601 switch (source) {
971 return -EINVAL; 602 case NI_660X_PFI_OUTPUT_COUNTER:
972 if (source == pfi_output_select_high_Z) 603 if (chan < 8)
973 return -EINVAL;
974 if (chan < min_counter_pfi_chan) {
975 if (source == pfi_output_select_counter)
976 return -EINVAL; 604 return -EINVAL;
977 } else if (chan > max_dio_pfi_chan) { 605 break;
978 if (source == pfi_output_select_do) 606 case NI_660X_PFI_OUTPUT_DIO:
607 if (chan > 31)
979 return -EINVAL; 608 return -EINVAL;
609 default:
610 return -EINVAL;
980 } 611 }
981 612
982 devpriv->pfi_output_selects[chan] = source; 613 devpriv->io_cfg[chan] = source;
983 if (devpriv->pfi_direction_bits & (((uint64_t) 1) << chan)) 614 if (devpriv->io_dir & (1ULL << chan))
984 ni_660x_select_pfi_output(dev, chan, 615 ni_660x_select_pfi_output(dev, chan, devpriv->io_cfg[chan]);
985 devpriv->pfi_output_selects[chan]);
986 return 0; 616 return 0;
987} 617}
988 618
@@ -993,25 +623,24 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
993{ 623{
994 struct ni_660x_private *devpriv = dev->private; 624 struct ni_660x_private *devpriv = dev->private;
995 unsigned int chan = CR_CHAN(insn->chanspec); 625 unsigned int chan = CR_CHAN(insn->chanspec);
996 uint64_t bit = 1ULL << chan; 626 u64 bit = 1ULL << chan;
997 unsigned int val; 627 unsigned int val;
998 int ret; 628 int ret;
999 629
1000 switch (data[0]) { 630 switch (data[0]) {
1001 case INSN_CONFIG_DIO_OUTPUT: 631 case INSN_CONFIG_DIO_OUTPUT:
1002 devpriv->pfi_direction_bits |= bit; 632 devpriv->io_dir |= bit;
1003 ni_660x_select_pfi_output(dev, chan, 633 ni_660x_select_pfi_output(dev, chan, devpriv->io_cfg[chan]);
1004 devpriv->pfi_output_selects[chan]);
1005 break; 634 break;
1006 635
1007 case INSN_CONFIG_DIO_INPUT: 636 case INSN_CONFIG_DIO_INPUT:
1008 devpriv->pfi_direction_bits &= ~bit; 637 devpriv->io_dir &= ~bit;
1009 ni_660x_select_pfi_output(dev, chan, pfi_output_select_high_Z); 638 ni_660x_select_pfi_output(dev, chan, 0); /* high-z */
1010 break; 639 break;
1011 640
1012 case INSN_CONFIG_DIO_QUERY: 641 case INSN_CONFIG_DIO_QUERY:
1013 data[1] = (devpriv->pfi_direction_bits & bit) ? COMEDI_OUTPUT 642 data[1] = (devpriv->io_dir & bit) ? COMEDI_OUTPUT
1014 : COMEDI_INPUT; 643 : COMEDI_INPUT;
1015 break; 644 break;
1016 645
1017 case INSN_CONFIG_SET_ROUTING: 646 case INSN_CONFIG_SET_ROUTING:
@@ -1021,14 +650,14 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
1021 break; 650 break;
1022 651
1023 case INSN_CONFIG_GET_ROUTING: 652 case INSN_CONFIG_GET_ROUTING:
1024 data[1] = devpriv->pfi_output_selects[chan]; 653 data[1] = devpriv->io_cfg[chan];
1025 break; 654 break;
1026 655
1027 case INSN_CONFIG_FILTER: 656 case INSN_CONFIG_FILTER:
1028 val = ni_660x_read_register(dev, 0, IOConfigReg(chan)); 657 val = ni_660x_read(dev, 0, NI660X_IO_CFG(chan));
1029 val &= ~pfi_input_select_mask(chan); 658 val &= ~NI660X_IO_CFG_IN_SEL_MASK(chan);
1030 val |= pfi_input_select_bits(chan, data[1]); 659 val |= NI660X_IO_CFG_IN_SEL(chan, data[1]);
1031 ni_660x_write_register(dev, 0, val, IOConfigReg(chan)); 660 ni_660x_write(dev, 0, val, NI660X_IO_CFG(chan));
1032 break; 661 break;
1033 662
1034 default: 663 default:
@@ -1038,6 +667,33 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
1038 return insn->n; 667 return insn->n;
1039} 668}
1040 669
670static void ni_660x_init_tio_chips(struct comedi_device *dev,
671 unsigned int n_chips)
672{
673 struct ni_660x_private *devpriv = dev->private;
674 unsigned int chip;
675 unsigned int chan;
676
677 /*
678 * We use the ioconfig registers to control dio direction, so zero
679 * output enables in stc dio control reg.
680 */
681 ni_660x_write(dev, 0, 0, NI660X_STC_DIO_CONTROL);
682
683 for (chip = 0; chip < n_chips; ++chip) {
684 /* init dma configuration register */
685 devpriv->dma_cfg[chip] = 0;
686 for (chan = 0; chan < NI660X_MAX_DMA_CHANNEL; ++chan)
687 devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(chan);
688 ni_660x_write(dev, chip, devpriv->dma_cfg[chip],
689 NI660X_DMA_CFG);
690
691 /* init ioconfig registers */
692 for (chan = 0; chan < NI660X_NUM_PFI_CHANNELS; ++chan)
693 ni_660x_write(dev, chip, 0, NI660X_IO_CFG(chan));
694 }
695}
696
1041static int ni_660x_auto_attach(struct comedi_device *dev, 697static int ni_660x_auto_attach(struct comedi_device *dev,
1042 unsigned long context) 698 unsigned long context)
1043{ 699{
@@ -1045,9 +701,12 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
1045 const struct ni_660x_board *board = NULL; 701 const struct ni_660x_board *board = NULL;
1046 struct ni_660x_private *devpriv; 702 struct ni_660x_private *devpriv;
1047 struct comedi_subdevice *s; 703 struct comedi_subdevice *s;
704 struct ni_gpct_device *gpct_dev;
705 unsigned int n_counters;
706 int subdev;
1048 int ret; 707 int ret;
1049 unsigned i; 708 unsigned int i;
1050 unsigned global_interrupt_config_bits; 709 unsigned int global_interrupt_config_bits;
1051 710
1052 if (context < ARRAY_SIZE(ni_660x_boards)) 711 if (context < ARRAY_SIZE(ni_660x_boards))
1053 board = &ni_660x_boards[context]; 712 board = &ni_660x_boards[context];
@@ -1065,91 +724,147 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
1065 return ret; 724 return ret;
1066 devpriv = dev->private; 725 devpriv = dev->private;
1067 726
1068 devpriv->mite = mite_alloc(pcidev); 727 devpriv->mite = mite_attach(dev, true); /* use win1 */
1069 if (!devpriv->mite) 728 if (!devpriv->mite)
1070 return -ENOMEM; 729 return -ENOMEM;
1071 730
1072 ret = mite_setup2(dev, devpriv->mite, true);
1073 if (ret < 0)
1074 return ret;
1075
1076 ret = ni_660x_alloc_mite_rings(dev); 731 ret = ni_660x_alloc_mite_rings(dev);
1077 if (ret < 0) 732 if (ret < 0)
1078 return ret; 733 return ret;
1079 734
1080 ret = comedi_alloc_subdevices(dev, 2 + NI_660X_MAX_NUM_COUNTERS); 735 ni_660x_init_tio_chips(dev, board->n_chips);
736
737 n_counters = board->n_chips * NI660X_COUNTERS_PER_CHIP;
738 gpct_dev = ni_gpct_device_construct(dev,
739 ni_660x_gpct_write,
740 ni_660x_gpct_read,
741 ni_gpct_variant_660x,
742 n_counters);
743 if (!gpct_dev)
744 return -ENOMEM;
745 devpriv->counter_dev = gpct_dev;
746
747 ret = comedi_alloc_subdevices(dev, 2 + NI660X_MAX_COUNTERS);
1081 if (ret) 748 if (ret)
1082 return ret; 749 return ret;
1083 750
1084 s = &dev->subdevices[0]; 751 subdev = 0;
752
753 s = &dev->subdevices[subdev++];
1085 /* Old GENERAL-PURPOSE COUNTER/TIME (GPCT) subdevice, no longer used */ 754 /* Old GENERAL-PURPOSE COUNTER/TIME (GPCT) subdevice, no longer used */
1086 s->type = COMEDI_SUBD_UNUSED; 755 s->type = COMEDI_SUBD_UNUSED;
1087 756
1088 s = &dev->subdevices[NI_660X_DIO_SUBDEV]; 757 /*
1089 /* DIGITAL I/O SUBDEVICE */ 758 * Digital I/O subdevice
1090 s->type = COMEDI_SUBD_DIO; 759 *
1091 s->subdev_flags = SDF_READABLE | SDF_WRITABLE; 760 * There are 40 channels but only the first 32 can be digital I/Os.
1092 s->n_chan = NUM_PFI_CHANNELS; 761 * The last 8 are dedicated to counters 0 and 1.
1093 s->maxdata = 1; 762 *
1094 s->range_table = &range_digital; 763 * Counter 0-3 signals are from the first TIO chip.
1095 s->insn_bits = ni_660x_dio_insn_bits; 764 * Counter 4-7 signals are from the second TIO chip.
1096 s->insn_config = ni_660x_dio_insn_config; 765 *
1097 /* we use the ioconfig registers to control dio direction, so zero 766 * Comedi External
1098 output enables in stc dio control reg */ 767 * PFI Chan DIO Chan Counter Signal
1099 ni_660x_write_register(dev, 0, 0, NI660X_STC_DIO_CONTROL); 768 * ------- -------- --------------
1100 769 * 0 0
1101 devpriv->counter_dev = ni_gpct_device_construct(dev, 770 * 1 1
1102 &ni_gpct_write_register, 771 * 2 2
1103 &ni_gpct_read_register, 772 * 3 3
1104 ni_gpct_variant_660x, 773 * 4 4
1105 ni_660x_num_counters 774 * 5 5
1106 (dev)); 775 * 6 6
1107 if (!devpriv->counter_dev) 776 * 7 7
1108 return -ENOMEM; 777 * 8 8 CTR 7 OUT
1109 for (i = 0; i < NI_660X_MAX_NUM_COUNTERS; ++i) { 778 * 9 9 CTR 7 AUX
1110 s = &dev->subdevices[NI_660X_GPCT_SUBDEV(i)]; 779 * 10 10 CTR 7 GATE
1111 if (i < ni_660x_num_counters(dev)) { 780 * 11 11 CTR 7 SOURCE
1112 s->type = COMEDI_SUBD_COUNTER; 781 * 12 12 CTR 6 OUT
1113 s->subdev_flags = SDF_READABLE | SDF_WRITABLE | 782 * 13 13 CTR 6 AUX
783 * 14 14 CTR 6 GATE
784 * 15 15 CTR 6 SOURCE
785 * 16 16 CTR 5 OUT
786 * 17 17 CTR 5 AUX
787 * 18 18 CTR 5 GATE
788 * 19 19 CTR 5 SOURCE
789 * 20 20 CTR 4 OUT
790 * 21 21 CTR 4 AUX
791 * 22 22 CTR 4 GATE
792 * 23 23 CTR 4 SOURCE
793 * 24 24 CTR 3 OUT
794 * 25 25 CTR 3 AUX
795 * 26 26 CTR 3 GATE
796 * 27 27 CTR 3 SOURCE
797 * 28 28 CTR 2 OUT
798 * 29 29 CTR 2 AUX
799 * 30 30 CTR 2 GATE
800 * 31 31 CTR 2 SOURCE
801 * 32 CTR 1 OUT
802 * 33 CTR 1 AUX
803 * 34 CTR 1 GATE
804 * 35 CTR 1 SOURCE
805 * 36 CTR 0 OUT
806 * 37 CTR 0 AUX
807 * 38 CTR 0 GATE
808 * 39 CTR 0 SOURCE
809 */
810 s = &dev->subdevices[subdev++];
811 s->type = COMEDI_SUBD_DIO;
812 s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
813 s->n_chan = NI660X_NUM_PFI_CHANNELS;
814 s->maxdata = 1;
815 s->range_table = &range_digital;
816 s->insn_bits = ni_660x_dio_insn_bits;
817 s->insn_config = ni_660x_dio_insn_config;
818
819 /*
820 * Default the DIO channels as:
821 * chan 0-7: DIO inputs
822 * chan 8-39: counter signal inputs
823 */
824 for (i = 0; i < s->n_chan; ++i) {
825 unsigned int source = (i < 8) ? NI_660X_PFI_OUTPUT_DIO
826 : NI_660X_PFI_OUTPUT_COUNTER;
827
828 ni_660x_set_pfi_routing(dev, i, source);
829 ni_660x_select_pfi_output(dev, i, 0); /* high-z */
830 }
831
832 /* Counter subdevices (4 NI TIO General Purpose Counters per chip) */
833 for (i = 0; i < NI660X_MAX_COUNTERS; ++i) {
834 s = &dev->subdevices[subdev++];
835 if (i < n_counters) {
836 struct ni_gpct *counter = &gpct_dev->counters[i];
837
838 counter->chip_index = i / NI660X_COUNTERS_PER_CHIP;
839 counter->counter_index = i % NI660X_COUNTERS_PER_CHIP;
840
841 s->type = COMEDI_SUBD_COUNTER;
842 s->subdev_flags = SDF_READABLE | SDF_WRITABLE |
1114 SDF_LSAMPL | SDF_CMD_READ; 843 SDF_LSAMPL | SDF_CMD_READ;
1115 s->n_chan = 3; 844 s->n_chan = 3;
1116 s->maxdata = 0xffffffff; 845 s->maxdata = 0xffffffff;
1117 s->insn_read = ni_tio_insn_read; 846 s->insn_read = ni_tio_insn_read;
1118 s->insn_write = ni_tio_insn_write; 847 s->insn_write = ni_tio_insn_write;
1119 s->insn_config = ni_tio_insn_config; 848 s->insn_config = ni_tio_insn_config;
1120 s->do_cmd = &ni_660x_cmd; 849 s->len_chanlist = 1;
1121 s->len_chanlist = 1; 850 s->do_cmd = ni_660x_cmd;
1122 s->do_cmdtest = ni_tio_cmdtest; 851 s->do_cmdtest = ni_tio_cmdtest;
1123 s->cancel = &ni_660x_cancel; 852 s->cancel = ni_660x_cancel;
1124 s->poll = &ni_660x_input_poll; 853 s->poll = ni_660x_input_poll;
854 s->buf_change = ni_660x_buf_change;
1125 s->async_dma_dir = DMA_BIDIRECTIONAL; 855 s->async_dma_dir = DMA_BIDIRECTIONAL;
1126 s->buf_change = &ni_660x_buf_change; 856 s->private = counter;
1127 s->private = &devpriv->counter_dev->counters[i];
1128 857
1129 devpriv->counter_dev->counters[i].chip_index = 858 ni_tio_init_counter(counter);
1130 i / counters_per_chip;
1131 devpriv->counter_dev->counters[i].counter_index =
1132 i % counters_per_chip;
1133 } else { 859 } else {
1134 s->type = COMEDI_SUBD_UNUSED; 860 s->type = COMEDI_SUBD_UNUSED;
1135 } 861 }
1136 } 862 }
1137 for (i = 0; i < board->n_chips; ++i) 863
1138 init_tio_chip(dev, i); 864 /*
1139 865 * To be safe, set counterswap bits on tio chips after all the counter
1140 for (i = 0; i < ni_660x_num_counters(dev); ++i) 866 * outputs have been set to high impedance mode.
1141 ni_tio_init_counter(&devpriv->counter_dev->counters[i]); 867 */
1142
1143 for (i = 0; i < NUM_PFI_CHANNELS; ++i) {
1144 if (i < min_counter_pfi_chan)
1145 ni_660x_set_pfi_routing(dev, i, pfi_output_select_do);
1146 else
1147 ni_660x_set_pfi_routing(dev, i,
1148 pfi_output_select_counter);
1149 ni_660x_select_pfi_output(dev, i, pfi_output_select_high_Z);
1150 }
1151 /* to be safe, set counterswap bits on tio chips after all the counter
1152 outputs have been set to high impedance mode */
1153 for (i = 0; i < board->n_chips; ++i) 868 for (i = 0; i < board->n_chips; ++i)
1154 set_tio_counterswap(dev, i); 869 set_tio_counterswap(dev, i);
1155 870
@@ -1160,11 +875,11 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
1160 return ret; 875 return ret;
1161 } 876 }
1162 dev->irq = pcidev->irq; 877 dev->irq = pcidev->irq;
1163 global_interrupt_config_bits = Global_Int_Enable_Bit; 878 global_interrupt_config_bits = NI660X_GLOBAL_INT_GLOBAL;
1164 if (board->n_chips > 1) 879 if (board->n_chips > 1)
1165 global_interrupt_config_bits |= Cascade_Int_Enable_Bit; 880 global_interrupt_config_bits |= NI660X_GLOBAL_INT_CASCADE;
1166 ni_660x_write_register(dev, 0, global_interrupt_config_bits, 881 ni_660x_write(dev, 0, global_interrupt_config_bits,
1167 NI660X_GLOBAL_INT_CFG); 882 NI660X_GLOBAL_INT_CFG);
1168 883
1169 return 0; 884 return 0;
1170} 885}
@@ -1173,11 +888,12 @@ static void ni_660x_detach(struct comedi_device *dev)
1173{ 888{
1174 struct ni_660x_private *devpriv = dev->private; 889 struct ni_660x_private *devpriv = dev->private;
1175 890
1176 if (dev->irq) 891 if (dev->irq) {
892 ni_660x_write(dev, 0, 0, NI660X_GLOBAL_INT_CFG);
1177 free_irq(dev->irq, dev); 893 free_irq(dev->irq, dev);
894 }
1178 if (devpriv) { 895 if (devpriv) {
1179 if (devpriv->counter_dev) 896 ni_gpct_device_destroy(devpriv->counter_dev);
1180 ni_gpct_device_destroy(devpriv->counter_dev);
1181 ni_660x_free_mite_rings(dev); 897 ni_660x_free_mite_rings(dev);
1182 mite_detach(devpriv->mite); 898 mite_detach(devpriv->mite);
1183 } 899 }
@@ -1218,5 +934,5 @@ static struct pci_driver ni_660x_pci_driver = {
1218module_comedi_pci_driver(ni_660x_driver, ni_660x_pci_driver); 934module_comedi_pci_driver(ni_660x_driver, ni_660x_pci_driver);
1219 935
1220MODULE_AUTHOR("Comedi http://www.comedi.org"); 936MODULE_AUTHOR("Comedi http://www.comedi.org");
1221MODULE_DESCRIPTION("Comedi low-level driver"); 937MODULE_DESCRIPTION("Comedi driver for NI 660x counter/timer boards");
1222MODULE_LICENSE("GPL"); 938MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc.h b/drivers/staging/comedi/drivers/ni_labpc.h
index 83f878adbd53..be8d5cd3f7f0 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.h
+++ b/drivers/staging/comedi/drivers/ni_labpc.h
@@ -1,27 +1,22 @@
1/* 1/*
2 ni_labpc.h 2 * Header for ni_labpc ISA/PCMCIA/PCI drivers
3 3 *
4 Header for ni_labpc.c and ni_labpc_cs.c 4 * Copyright (C) 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
5 5 *
6 Copyright (C) 2003 Frank Mori Hess <fmhess@users.sourceforge.net> 6 * This program is free software; you can redistribute it and/or modify
7 7 * it under the terms of the GNU General Public License as published by
8 This program is free software; you can redistribute it and/or modify 8 * the Free Software Foundation; either version 2 of the License, or
9 it under the terms of the GNU General Public License as published by 9 * (at your option) any later version.
10 the Free Software Foundation; either version 2 of the License, or 10 *
11 (at your option) any later version. 11 * This program is distributed in the hope that it will be useful,
12 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 This program is distributed in the hope that it will be useful, 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * GNU General Public License for more details.
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 */
16 GNU General Public License for more details.
17*/
18 16
19#ifndef _NI_LABPC_H 17#ifndef _NI_LABPC_H
20#define _NI_LABPC_H 18#define _NI_LABPC_H
21 19
22#define EEPROM_SIZE 256 /* 256 byte eeprom */
23#define NUM_AO_CHAN 2 /* boards have two analog output channels */
24
25enum transfer_type { fifo_not_empty_transfer, fifo_half_full_transfer, 20enum transfer_type { fifo_not_empty_transfer, fifo_half_full_transfer,
26 isa_dma_transfer 21 isa_dma_transfer
27}; 22};
diff --git a/drivers/staging/comedi/drivers/ni_labpc_common.c b/drivers/staging/comedi/drivers/ni_labpc_common.c
index 863afb28ee28..b0dfb8eed16d 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_common.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_common.c
@@ -84,8 +84,10 @@ static const struct comedi_lrange range_labpc_ao = {
84 } 84 }
85}; 85};
86 86
87/* functions that do inb/outb and readb/writeb so we can use 87/*
88 * function pointers to decide which to use */ 88 * functions that do inb/outb and readb/writeb so we can use
89 * function pointers to decide which to use
90 */
89static unsigned int labpc_inb(struct comedi_device *dev, unsigned long reg) 91static unsigned int labpc_inb(struct comedi_device *dev, unsigned long reg)
90{ 92{
91 return inb(dev->iobase + reg); 93 return inb(dev->iobase + reg);
@@ -656,19 +658,24 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
656 658
657 /* figure out what method we will use to transfer data */ 659 /* figure out what method we will use to transfer data */
658 if (devpriv->dma && 660 if (devpriv->dma &&
659 /* dma unsafe at RT priority, 661 (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY)) == 0) {
660 * and too much setup time for CMDF_WAKE_EOS */ 662 /*
661 (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY)) == 0) 663 * dma unsafe at RT priority,
664 * and too much setup time for CMDF_WAKE_EOS
665 */
662 xfer = isa_dma_transfer; 666 xfer = isa_dma_transfer;
663 else if (/* pc-plus has no fifo-half full interrupt */ 667 } else if (board->is_labpc1200 &&
664 board->is_labpc1200 && 668 (cmd->flags & CMDF_WAKE_EOS) == 0 &&
665 /* wake-end-of-scan should interrupt on fifo not empty */ 669 (cmd->stop_src != TRIG_COUNT || devpriv->count > 256)) {
666 (cmd->flags & CMDF_WAKE_EOS) == 0 && 670 /*
667 /* make sure we are taking more than just a few points */ 671 * pc-plus has no fifo-half full interrupt
668 (cmd->stop_src != TRIG_COUNT || devpriv->count > 256)) 672 * wake-end-of-scan should interrupt on fifo not empty
673 * make sure we are taking more than just a few points
674 */
669 xfer = fifo_half_full_transfer; 675 xfer = fifo_half_full_transfer;
670 else 676 } else {
671 xfer = fifo_not_empty_transfer; 677 xfer = fifo_not_empty_transfer;
678 }
672 devpriv->current_transfer = xfer; 679 devpriv->current_transfer = xfer;
673 680
674 labpc_ai_set_chan_and_gain(dev, mode, chan, range, aref); 681 labpc_ai_set_chan_and_gain(dev, mode, chan, range, aref);
@@ -679,9 +686,11 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
679 /* manual says to set scan enable bit on second pass */ 686 /* manual says to set scan enable bit on second pass */
680 if (mode == MODE_MULT_CHAN_UP || mode == MODE_MULT_CHAN_DOWN) { 687 if (mode == MODE_MULT_CHAN_UP || mode == MODE_MULT_CHAN_DOWN) {
681 devpriv->cmd1 |= CMD1_SCANEN; 688 devpriv->cmd1 |= CMD1_SCANEN;
682 /* need a brief delay before enabling scan, or scan 689 /*
683 * list will get screwed when you switch 690 * Need a brief delay before enabling scan, or scan
684 * between scan up to scan down mode - dunno why */ 691 * list will get screwed when you switch between
692 * scan up to scan down mode - dunno why.
693 */
685 udelay(1); 694 udelay(1);
686 devpriv->write_byte(dev, devpriv->cmd1, CMD1_REG); 695 devpriv->write_byte(dev, devpriv->cmd1, CMD1_REG);
687 } 696 }
@@ -728,8 +737,10 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
728 devpriv->cmd4 = 0; 737 devpriv->cmd4 = 0;
729 if (cmd->convert_src != TRIG_EXT) 738 if (cmd->convert_src != TRIG_EXT)
730 devpriv->cmd4 |= CMD4_ECLKRCV; 739 devpriv->cmd4 |= CMD4_ECLKRCV;
731 /* XXX should discard first scan when using interval scanning 740 /*
732 * since manual says it is not synced with scan clock */ 741 * XXX should discard first scan when using interval scanning
742 * since manual says it is not synced with scan clock.
743 */
733 if (!labpc_use_continuous_mode(cmd, mode)) { 744 if (!labpc_use_continuous_mode(cmd, mode)) {
734 devpriv->cmd4 |= CMD4_INTSCAN; 745 devpriv->cmd4 |= CMD4_INTSCAN;
735 if (cmd->scan_begin_src == TRIG_EXT) 746 if (cmd->scan_begin_src == TRIG_EXT)
@@ -795,8 +806,10 @@ static int labpc_drain_fifo(struct comedi_device *dev)
795 return 0; 806 return 0;
796} 807}
797 808
798/* makes sure all data acquired by board is transferred to comedi (used 809/*
799 * when acquisition is terminated by stop_src == TRIG_EXT). */ 810 * Makes sure all data acquired by board is transferred to comedi (used
811 * when acquisition is terminated by stop_src == TRIG_EXT).
812 */
800static void labpc_drain_dregs(struct comedi_device *dev) 813static void labpc_drain_dregs(struct comedi_device *dev)
801{ 814{
802 struct labpc_private *devpriv = dev->private; 815 struct labpc_private *devpriv = dev->private;
@@ -907,9 +920,11 @@ static int labpc_ao_insn_write(struct comedi_device *dev,
907 920
908 channel = CR_CHAN(insn->chanspec); 921 channel = CR_CHAN(insn->chanspec);
909 922
910 /* turn off pacing of analog output channel */ 923 /*
911 /* note: hardware bug in daqcard-1200 means pacing cannot 924 * Turn off pacing of analog output channel.
912 * be independently enabled/disabled for its the two channels */ 925 * NOTE: hardware bug in daqcard-1200 means pacing cannot
926 * be independently enabled/disabled for its the two channels.
927 */
913 spin_lock_irqsave(&dev->spinlock, flags); 928 spin_lock_irqsave(&dev->spinlock, flags);
914 devpriv->cmd2 &= ~CMD2_LDAC(channel); 929 devpriv->cmd2 &= ~CMD2_LDAC(channel);
915 devpriv->write_byte(dev, devpriv->cmd2, CMD2_REG); 930 devpriv->write_byte(dev, devpriv->cmd2, CMD2_REG);
@@ -1261,7 +1276,7 @@ int labpc_common_attach(struct comedi_device *dev,
1261 if (board->has_ao) { 1276 if (board->has_ao) {
1262 s->type = COMEDI_SUBD_AO; 1277 s->type = COMEDI_SUBD_AO;
1263 s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND; 1278 s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
1264 s->n_chan = NUM_AO_CHAN; 1279 s->n_chan = 2;
1265 s->maxdata = 0x0fff; 1280 s->maxdata = 0x0fff;
1266 s->range_table = &range_labpc_ao; 1281 s->range_table = &range_labpc_ao;
1267 s->insn_write = labpc_ao_insn_write; 1282 s->insn_write = labpc_ao_insn_write;
@@ -1307,12 +1322,12 @@ int labpc_common_attach(struct comedi_device *dev,
1307 s->type = COMEDI_SUBD_UNUSED; 1322 s->type = COMEDI_SUBD_UNUSED;
1308 } 1323 }
1309 1324
1310 /* EEPROM */ 1325 /* EEPROM (256 bytes) */
1311 s = &dev->subdevices[4]; 1326 s = &dev->subdevices[4];
1312 if (board->is_labpc1200) { 1327 if (board->is_labpc1200) {
1313 s->type = COMEDI_SUBD_MEMORY; 1328 s->type = COMEDI_SUBD_MEMORY;
1314 s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; 1329 s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
1315 s->n_chan = EEPROM_SIZE; 1330 s->n_chan = 256;
1316 s->maxdata = 0xff; 1331 s->maxdata = 0xff;
1317 s->insn_write = labpc_eeprom_insn_write; 1332 s->insn_write = labpc_eeprom_insn_write;
1318 1333
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index a1c69ac075d5..3d4d0b9ad4e1 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -1,57 +1,50 @@
1/* 1/*
2 comedi/drivers/ni_labpc_cs.c 2 * Driver for National Instruments daqcard-1200 boards
3 Driver for National Instruments daqcard-1200 boards 3 * Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
4 Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net> 4 *
5 5 * PCMCIA crap is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13
6 PCMCIA crap is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13 6 * from the pcmcia package.
7 from the pcmcia package. 7 * The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
8 The initial developer of the pcmcia dummy_cs.c code is David A. Hinds 8 * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
9 <dahinds@users.sourceforge.net>. Portions created by David A. Hinds 9 * are Copyright (C) 1999 David A. Hinds.
10 are Copyright (C) 1999 David A. Hinds. 10 *
11 11 * This program is free software; you can redistribute it and/or modify
12 This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by
13 it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2 of the License, or
14 the Free Software Foundation; either version 2 of the License, or 14 * (at your option) any later version.
15 (at your option) any later version. 15 *
16 16 * This program is distributed in the hope that it will be useful,
17 This program is distributed in the hope that it will be useful, 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * General Public License for more details.
20 GNU General Public License for more details. 20 */
21*/
22/*
23Driver: ni_labpc_cs
24Description: National Instruments Lab-PC (& compatibles)
25Author: Frank Mori Hess <fmhess@users.sourceforge.net>
26Devices: [National Instruments] DAQCard-1200 (daqcard-1200)
27Status: works
28
29Thanks go to Fredrik Lingvall for much testing and perseverance in
30helping to debug daqcard-1200 support.
31
32The 1200 series boards have onboard calibration dacs for correcting
33analog input/output offsets and gains. The proper settings for these
34caldacs are stored on the board's eeprom. To read the caldac values
35from the eeprom and store them into a file that can be then be used by
36comedilib, use the comedi_calibrate program.
37
38Configuration options:
39 none
40
41The daqcard-1200 has quirky chanlist requirements
42when scanning multiple channels. Multiple channel scan
43sequence must start at highest channel, then decrement down to
44channel 0. Chanlists consisting of all one channel
45are also legal, and allow you to pace conversions in bursts.
46
47*/
48 21
49/* 22/*
50 23 * Driver: ni_labpc_cs
51NI manuals: 24 * Description: National Instruments Lab-PC (& compatibles)
52340988a (daqcard-1200) 25 * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
53 26 * Devices: [National Instruments] DAQCard-1200 (daqcard-1200)
54*/ 27 * Status: works
28 *
29 * Thanks go to Fredrik Lingvall for much testing and perseverance in
30 * helping to debug daqcard-1200 support.
31 *
32 * The 1200 series boards have onboard calibration dacs for correcting
33 * analog input/output offsets and gains. The proper settings for these
34 * caldacs are stored on the board's eeprom. To read the caldac values
35 * from the eeprom and store them into a file that can be then be used by
36 * comedilib, use the comedi_calibrate program.
37 *
38 * Configuration options: none
39 *
40 * The daqcard-1200 has quirky chanlist requirements when scanning multiple
41 * channels. Multiple channel scan sequence must start at highest channel,
42 * then decrement down to channel 0. Chanlists consisting of all one channel
43 * are also legal, and allow you to pace conversions in bursts.
44 *
45 * NI manuals:
46 * 340988a (daqcard-1200)
47 */
55 48
56#include <linux/module.h> 49#include <linux/module.h>
57 50
diff --git a/drivers/staging/comedi/drivers/ni_labpc_pci.c b/drivers/staging/comedi/drivers/ni_labpc_pci.c
index 77d403801db5..cac089193121 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_pci.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_pci.c
@@ -51,8 +51,8 @@ static const struct labpc_boardinfo labpc_pci_boards[] = {
51}; 51};
52 52
53/* ripped from mite.h and mite_setup2() to avoid mite dependency */ 53/* ripped from mite.h and mite_setup2() to avoid mite dependency */
54#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */ 54#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
55#define WENAB (1 << 7) /* window enable */ 55#define WENAB BIT(7) /* window enable */
56 56
57static int labpc_pci_mite_init(struct pci_dev *pcidev) 57static int labpc_pci_mite_init(struct pci_dev *pcidev)
58{ 58{
diff --git a/drivers/staging/comedi/drivers/ni_labpc_regs.h b/drivers/staging/comedi/drivers/ni_labpc_regs.h
index 2a274a3e4e73..8c52179e36fc 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_regs.h
+++ b/drivers/staging/comedi/drivers/ni_labpc_regs.h
@@ -9,32 +9,32 @@
9 * Register map (all registers are 8-bit) 9 * Register map (all registers are 8-bit)
10 */ 10 */
11#define STAT1_REG 0x00 /* R: Status 1 reg */ 11#define STAT1_REG 0x00 /* R: Status 1 reg */
12#define STAT1_DAVAIL (1 << 0) 12#define STAT1_DAVAIL BIT(0)
13#define STAT1_OVERRUN (1 << 1) 13#define STAT1_OVERRUN BIT(1)
14#define STAT1_OVERFLOW (1 << 2) 14#define STAT1_OVERFLOW BIT(2)
15#define STAT1_CNTINT (1 << 3) 15#define STAT1_CNTINT BIT(3)
16#define STAT1_GATA0 (1 << 5) 16#define STAT1_GATA0 BIT(5)
17#define STAT1_EXTGATA0 (1 << 6) 17#define STAT1_EXTGATA0 BIT(6)
18#define CMD1_REG 0x00 /* W: Command 1 reg */ 18#define CMD1_REG 0x00 /* W: Command 1 reg */
19#define CMD1_MA(x) (((x) & 0x7) << 0) 19#define CMD1_MA(x) (((x) & 0x7) << 0)
20#define CMD1_TWOSCMP (1 << 3) 20#define CMD1_TWOSCMP BIT(3)
21#define CMD1_GAIN(x) (((x) & 0x7) << 4) 21#define CMD1_GAIN(x) (((x) & 0x7) << 4)
22#define CMD1_SCANEN (1 << 7) 22#define CMD1_SCANEN BIT(7)
23#define CMD2_REG 0x01 /* W: Command 2 reg */ 23#define CMD2_REG 0x01 /* W: Command 2 reg */
24#define CMD2_PRETRIG (1 << 0) 24#define CMD2_PRETRIG BIT(0)
25#define CMD2_HWTRIG (1 << 1) 25#define CMD2_HWTRIG BIT(1)
26#define CMD2_SWTRIG (1 << 2) 26#define CMD2_SWTRIG BIT(2)
27#define CMD2_TBSEL (1 << 3) 27#define CMD2_TBSEL BIT(3)
28#define CMD2_2SDAC0 (1 << 4) 28#define CMD2_2SDAC0 BIT(4)
29#define CMD2_2SDAC1 (1 << 5) 29#define CMD2_2SDAC1 BIT(5)
30#define CMD2_LDAC(x) (1 << (6 + (x))) 30#define CMD2_LDAC(x) BIT(6 + ((x) & 0x1))
31#define CMD3_REG 0x02 /* W: Command 3 reg */ 31#define CMD3_REG 0x02 /* W: Command 3 reg */
32#define CMD3_DMAEN (1 << 0) 32#define CMD3_DMAEN BIT(0)
33#define CMD3_DIOINTEN (1 << 1) 33#define CMD3_DIOINTEN BIT(1)
34#define CMD3_DMATCINTEN (1 << 2) 34#define CMD3_DMATCINTEN BIT(2)
35#define CMD3_CNTINTEN (1 << 3) 35#define CMD3_CNTINTEN BIT(3)
36#define CMD3_ERRINTEN (1 << 4) 36#define CMD3_ERRINTEN BIT(4)
37#define CMD3_FIFOINTEN (1 << 5) 37#define CMD3_FIFOINTEN BIT(5)
38#define ADC_START_CONVERT_REG 0x03 /* W: Start Convert reg */ 38#define ADC_START_CONVERT_REG 0x03 /* W: Start Convert reg */
39#define DAC_LSB_REG(x) (0x04 + 2 * (x)) /* W: DAC0/1 LSB reg */ 39#define DAC_LSB_REG(x) (0x04 + 2 * (x)) /* W: DAC0/1 LSB reg */
40#define DAC_MSB_REG(x) (0x05 + 2 * (x)) /* W: DAC0/1 MSB reg */ 40#define DAC_MSB_REG(x) (0x05 + 2 * (x)) /* W: DAC0/1 MSB reg */
@@ -43,32 +43,32 @@
43#define DMATC_CLEAR_REG 0x0a /* W: DMA Interrupt Clear reg */ 43#define DMATC_CLEAR_REG 0x0a /* W: DMA Interrupt Clear reg */
44#define TIMER_CLEAR_REG 0x0c /* W: Timer Interrupt Clear reg */ 44#define TIMER_CLEAR_REG 0x0c /* W: Timer Interrupt Clear reg */
45#define CMD6_REG 0x0e /* W: Command 6 reg */ 45#define CMD6_REG 0x0e /* W: Command 6 reg */
46#define CMD6_NRSE (1 << 0) 46#define CMD6_NRSE BIT(0)
47#define CMD6_ADCUNI (1 << 1) 47#define CMD6_ADCUNI BIT(1)
48#define CMD6_DACUNI(x) (1 << (2 + (x))) 48#define CMD6_DACUNI(x) BIT(2 + ((x) & 0x1))
49#define CMD6_HFINTEN (1 << 5) 49#define CMD6_HFINTEN BIT(5)
50#define CMD6_DQINTEN (1 << 6) 50#define CMD6_DQINTEN BIT(6)
51#define CMD6_SCANUP (1 << 7) 51#define CMD6_SCANUP BIT(7)
52#define CMD4_REG 0x0f /* W: Command 3 reg */ 52#define CMD4_REG 0x0f /* W: Command 3 reg */
53#define CMD4_INTSCAN (1 << 0) 53#define CMD4_INTSCAN BIT(0)
54#define CMD4_EOIRCV (1 << 1) 54#define CMD4_EOIRCV BIT(1)
55#define CMD4_ECLKDRV (1 << 2) 55#define CMD4_ECLKDRV BIT(2)
56#define CMD4_SEDIFF (1 << 3) 56#define CMD4_SEDIFF BIT(3)
57#define CMD4_ECLKRCV (1 << 4) 57#define CMD4_ECLKRCV BIT(4)
58#define DIO_BASE_REG 0x10 /* R/W: 8255 DIO base reg */ 58#define DIO_BASE_REG 0x10 /* R/W: 8255 DIO base reg */
59#define COUNTER_A_BASE_REG 0x14 /* R/W: 8253 Counter A base reg */ 59#define COUNTER_A_BASE_REG 0x14 /* R/W: 8253 Counter A base reg */
60#define COUNTER_B_BASE_REG 0x18 /* R/W: 8253 Counter B base reg */ 60#define COUNTER_B_BASE_REG 0x18 /* R/W: 8253 Counter B base reg */
61#define CMD5_REG 0x1c /* W: Command 5 reg */ 61#define CMD5_REG 0x1c /* W: Command 5 reg */
62#define CMD5_WRTPRT (1 << 2) 62#define CMD5_WRTPRT BIT(2)
63#define CMD5_DITHEREN (1 << 3) 63#define CMD5_DITHEREN BIT(3)
64#define CMD5_CALDACLD (1 << 4) 64#define CMD5_CALDACLD BIT(4)
65#define CMD5_SCLK (1 << 5) 65#define CMD5_SCLK BIT(5)
66#define CMD5_SDATA (1 << 6) 66#define CMD5_SDATA BIT(6)
67#define CMD5_EEPROMCS (1 << 7) 67#define CMD5_EEPROMCS BIT(7)
68#define STAT2_REG 0x1d /* R: Status 2 reg */ 68#define STAT2_REG 0x1d /* R: Status 2 reg */
69#define STAT2_PROMOUT (1 << 0) 69#define STAT2_PROMOUT BIT(0)
70#define STAT2_OUTA1 (1 << 1) 70#define STAT2_OUTA1 BIT(1)
71#define STAT2_FIFONHF (1 << 2) 71#define STAT2_FIFONHF BIT(2)
72#define INTERVAL_COUNT_REG 0x1e /* W: Interval Counter Data reg */ 72#define INTERVAL_COUNT_REG 0x1e /* W: Interval Counter Data reg */
73#define INTERVAL_STROBE_REG 0x1f /* W: Interval Counter Strobe reg */ 73#define INTERVAL_STROBE_REG 0x1f /* W: Interval Counter Strobe reg */
74 74
diff --git a/drivers/staging/comedi/drivers/ni_mio_c_common.c b/drivers/staging/comedi/drivers/ni_mio_c_common.c
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/staging/comedi/drivers/ni_mio_c_common.c
+++ /dev/null
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index dcaf7e89f299..8dabb19519a5 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1,56 +1,53 @@
1/* 1/*
2 comedi/drivers/ni_mio_common.c 2 * Hardware driver for DAQ-STC based boards
3 Hardware driver for DAQ-STC based boards 3 *
4 4 * COMEDI - Linux Control and Measurement Device Interface
5 COMEDI - Linux Control and Measurement Device Interface 5 * Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
6 Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org> 6 * Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net>
7 Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net> 7 *
8 8 * This program is free software; you can redistribute it and/or modify
9 This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by
10 it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or
11 the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version.
12 (at your option) any later version. 12 *
13 13 * This program is distributed in the hope that it will be useful,
14 This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details.
17 GNU General Public License for more details. 17 */
18*/
19 18
20/* 19/*
21 This file is meant to be included by another file, e.g., 20 * This file is meant to be included by another file, e.g.,
22 ni_atmio.c or ni_pcimio.c. 21 * ni_atmio.c or ni_pcimio.c.
23 22 *
24 Interrupt support originally added by Truxton Fulton 23 * Interrupt support originally added by Truxton Fulton <trux@truxton.com>
25 <trux@truxton.com> 24 *
26 25 * References (ftp://ftp.natinst.com/support/manuals):
27 References (from ftp://ftp.natinst.com/support/manuals): 26 * 340747b.pdf AT-MIO E series Register Level Programmer Manual
28 27 * 341079b.pdf PCI E Series RLPM
29 340747b.pdf AT-MIO E series Register Level Programmer Manual 28 * 340934b.pdf DAQ-STC reference manual
30 341079b.pdf PCI E Series RLPM 29 *
31 340934b.pdf DAQ-STC reference manual 30 * 67xx and 611x registers (ftp://ftp.ni.com/support/daq/mhddk/documentation/)
32 67xx and 611x registers (from ftp://ftp.ni.com/support/daq/mhddk/documentation/) 31 * release_ni611x.pdf
33 release_ni611x.pdf 32 * release_ni67xx.pdf
34 release_ni67xx.pdf 33 *
35 Other possibly relevant info: 34 * Other possibly relevant info:
36 35 * 320517c.pdf User manual (obsolete)
37 320517c.pdf User manual (obsolete) 36 * 320517f.pdf User manual (new)
38 320517f.pdf User manual (new) 37 * 320889a.pdf delete
39 320889a.pdf delete 38 * 320906c.pdf maximum signal ratings
40 320906c.pdf maximum signal ratings 39 * 321066a.pdf about 16x
41 321066a.pdf about 16x 40 * 321791a.pdf discontinuation of at-mio-16e-10 rev. c
42 321791a.pdf discontinuation of at-mio-16e-10 rev. c 41 * 321808a.pdf about at-mio-16e-10 rev P
43 321808a.pdf about at-mio-16e-10 rev P 42 * 321837a.pdf discontinuation of at-mio-16de-10 rev d
44 321837a.pdf discontinuation of at-mio-16de-10 rev d 43 * 321838a.pdf about at-mio-16de-10 rev N
45 321838a.pdf about at-mio-16de-10 rev N 44 *
46 45 * ISSUES:
47 ISSUES: 46 * - the interrupt routine needs to be cleaned up
48 47 *
49 - the interrupt routine needs to be cleaned up 48 * 2006-02-07: S-Series PCI-6143: Support has been added but is not
50 49 * fully tested as yet. Terry Barnaby, BEAM Ltd.
51 2006-02-07: S-Series PCI-6143: Support has been added but is not 50 */
52 fully tested as yet. Terry Barnaby, BEAM Ltd.
53*/
54 51
55#include <linux/interrupt.h> 52#include <linux/interrupt.h>
56#include <linux/sched.h> 53#include <linux/sched.h>
@@ -216,19 +213,8 @@ enum ni_common_subdevices {
216 NI_FREQ_OUT_SUBDEV, 213 NI_FREQ_OUT_SUBDEV,
217 NI_NUM_SUBDEVICES 214 NI_NUM_SUBDEVICES
218}; 215};
219static inline unsigned NI_GPCT_SUBDEV(unsigned counter_index) 216
220{ 217#define NI_GPCT_SUBDEV(x) (NI_GPCT0_SUBDEV + (x))
221 switch (counter_index) {
222 case 0:
223 return NI_GPCT0_SUBDEV;
224 case 1:
225 return NI_GPCT1_SUBDEV;
226 default:
227 break;
228 }
229 BUG();
230 return NI_GPCT0_SUBDEV;
231}
232 218
233enum timebase_nanoseconds { 219enum timebase_nanoseconds {
234 TIMEBASE_1_NS = 50, 220 TIMEBASE_1_NS = 50,
@@ -242,7 +228,7 @@ enum timebase_nanoseconds {
242 228
243static const int num_adc_stages_611x = 3; 229static const int num_adc_stages_611x = 3;
244 230
245static void ni_writel(struct comedi_device *dev, uint32_t data, int reg) 231static void ni_writel(struct comedi_device *dev, unsigned int data, int reg)
246{ 232{
247 if (dev->mmio) 233 if (dev->mmio)
248 writel(data, dev->mmio + reg); 234 writel(data, dev->mmio + reg);
@@ -250,7 +236,7 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
250 outl(data, dev->iobase + reg); 236 outl(data, dev->iobase + reg);
251} 237}
252 238
253static void ni_writew(struct comedi_device *dev, uint16_t data, int reg) 239static void ni_writew(struct comedi_device *dev, unsigned int data, int reg)
254{ 240{
255 if (dev->mmio) 241 if (dev->mmio)
256 writew(data, dev->mmio + reg); 242 writew(data, dev->mmio + reg);
@@ -258,7 +244,7 @@ static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
258 outw(data, dev->iobase + reg); 244 outw(data, dev->iobase + reg);
259} 245}
260 246
261static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg) 247static void ni_writeb(struct comedi_device *dev, unsigned int data, int reg)
262{ 248{
263 if (dev->mmio) 249 if (dev->mmio)
264 writeb(data, dev->mmio + reg); 250 writeb(data, dev->mmio + reg);
@@ -266,7 +252,7 @@ static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
266 outb(data, dev->iobase + reg); 252 outb(data, dev->iobase + reg);
267} 253}
268 254
269static uint32_t ni_readl(struct comedi_device *dev, int reg) 255static unsigned int ni_readl(struct comedi_device *dev, int reg)
270{ 256{
271 if (dev->mmio) 257 if (dev->mmio)
272 return readl(dev->mmio + reg); 258 return readl(dev->mmio + reg);
@@ -274,7 +260,7 @@ static uint32_t ni_readl(struct comedi_device *dev, int reg)
274 return inl(dev->iobase + reg); 260 return inl(dev->iobase + reg);
275} 261}
276 262
277static uint16_t ni_readw(struct comedi_device *dev, int reg) 263static unsigned int ni_readw(struct comedi_device *dev, int reg)
278{ 264{
279 if (dev->mmio) 265 if (dev->mmio)
280 return readw(dev->mmio + reg); 266 return readw(dev->mmio + reg);
@@ -282,7 +268,7 @@ static uint16_t ni_readw(struct comedi_device *dev, int reg)
282 return inw(dev->iobase + reg); 268 return inw(dev->iobase + reg);
283} 269}
284 270
285static uint8_t ni_readb(struct comedi_device *dev, int reg) 271static unsigned int ni_readb(struct comedi_device *dev, int reg)
286{ 272{
287 if (dev->mmio) 273 if (dev->mmio)
288 return readb(dev->mmio + reg); 274 return readb(dev->mmio + reg);
@@ -457,7 +443,8 @@ static unsigned int m_series_stc_read(struct comedi_device *dev,
457 } 443 }
458} 444}
459 445
460static void ni_stc_writew(struct comedi_device *dev, uint16_t data, int reg) 446static void ni_stc_writew(struct comedi_device *dev,
447 unsigned int data, int reg)
461{ 448{
462 struct ni_private *devpriv = dev->private; 449 struct ni_private *devpriv = dev->private;
463 unsigned long flags; 450 unsigned long flags;
@@ -476,7 +463,8 @@ static void ni_stc_writew(struct comedi_device *dev, uint16_t data, int reg)
476 } 463 }
477} 464}
478 465
479static void ni_stc_writel(struct comedi_device *dev, uint32_t data, int reg) 466static void ni_stc_writel(struct comedi_device *dev,
467 unsigned int data, int reg)
480{ 468{
481 struct ni_private *devpriv = dev->private; 469 struct ni_private *devpriv = dev->private;
482 470
@@ -488,11 +476,11 @@ static void ni_stc_writel(struct comedi_device *dev, uint32_t data, int reg)
488 } 476 }
489} 477}
490 478
491static uint16_t ni_stc_readw(struct comedi_device *dev, int reg) 479static unsigned int ni_stc_readw(struct comedi_device *dev, int reg)
492{ 480{
493 struct ni_private *devpriv = dev->private; 481 struct ni_private *devpriv = dev->private;
494 unsigned long flags; 482 unsigned long flags;
495 uint16_t val; 483 unsigned int val;
496 484
497 if (devpriv->is_m_series) { 485 if (devpriv->is_m_series) {
498 val = m_series_stc_read(dev, reg); 486 val = m_series_stc_read(dev, reg);
@@ -509,10 +497,10 @@ static uint16_t ni_stc_readw(struct comedi_device *dev, int reg)
509 return val; 497 return val;
510} 498}
511 499
512static uint32_t ni_stc_readl(struct comedi_device *dev, int reg) 500static unsigned int ni_stc_readl(struct comedi_device *dev, int reg)
513{ 501{
514 struct ni_private *devpriv = dev->private; 502 struct ni_private *devpriv = dev->private;
515 uint32_t val; 503 unsigned int val;
516 504
517 if (devpriv->is_m_series) { 505 if (devpriv->is_m_series) {
518 val = m_series_stc_read(dev, reg); 506 val = m_series_stc_read(dev, reg);
@@ -524,7 +512,8 @@ static uint32_t ni_stc_readl(struct comedi_device *dev, int reg)
524} 512}
525 513
526static inline void ni_set_bitfield(struct comedi_device *dev, int reg, 514static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
527 unsigned bit_mask, unsigned bit_values) 515 unsigned int bit_mask,
516 unsigned int bit_values)
528{ 517{
529 struct ni_private *devpriv = dev->private; 518 struct ni_private *devpriv = dev->private;
530 unsigned long flags; 519 unsigned long flags;
@@ -556,6 +545,11 @@ static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
556 devpriv->g0_g1_select_reg |= bit_values & bit_mask; 545 devpriv->g0_g1_select_reg |= bit_values & bit_mask;
557 ni_writeb(dev, devpriv->g0_g1_select_reg, reg); 546 ni_writeb(dev, devpriv->g0_g1_select_reg, reg);
558 break; 547 break;
548 case NI_M_CDIO_DMA_SEL_REG:
549 devpriv->cdio_dma_select_reg &= ~bit_mask;
550 devpriv->cdio_dma_select_reg |= bit_values & bit_mask;
551 ni_writeb(dev, devpriv->cdio_dma_select_reg, reg);
552 break;
559 default: 553 default:
560 dev_err(dev->class_dev, "called with invalid register %d\n", 554 dev_err(dev->class_dev, "called with invalid register %d\n",
561 reg); 555 reg);
@@ -566,116 +560,35 @@ static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
566} 560}
567 561
568#ifdef PCIDMA 562#ifdef PCIDMA
569/* DMA channel setup */
570static inline unsigned ni_stc_dma_channel_select_bitfield(unsigned channel)
571{
572 if (channel < 4)
573 return 1 << channel;
574 if (channel == 4)
575 return 0x3;
576 if (channel == 5)
577 return 0x5;
578 BUG();
579 return 0;
580}
581
582static inline void ni_set_ai_dma_channel(struct comedi_device *dev,
583 unsigned channel)
584{
585 unsigned bits = ni_stc_dma_channel_select_bitfield(channel);
586
587 ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
588 NI_E_DMA_AI_SEL_MASK, NI_E_DMA_AI_SEL(bits));
589}
590
591static inline void ni_set_ai_dma_no_channel(struct comedi_device *dev)
592{
593 ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG, NI_E_DMA_AI_SEL_MASK, 0);
594}
595
596static inline void ni_set_ao_dma_channel(struct comedi_device *dev,
597 unsigned channel)
598{
599 unsigned bits = ni_stc_dma_channel_select_bitfield(channel);
600
601 ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
602 NI_E_DMA_AO_SEL_MASK, NI_E_DMA_AO_SEL(bits));
603}
604
605static inline void ni_set_ao_dma_no_channel(struct comedi_device *dev)
606{
607 ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG, NI_E_DMA_AO_SEL_MASK, 0);
608}
609
610static inline void ni_set_gpct_dma_channel(struct comedi_device *dev,
611 unsigned gpct_index,
612 unsigned channel)
613{
614 unsigned bits = ni_stc_dma_channel_select_bitfield(channel);
615
616 ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
617 NI_E_DMA_G0_G1_SEL_MASK(gpct_index),
618 NI_E_DMA_G0_G1_SEL(gpct_index, bits));
619}
620
621static inline void ni_set_gpct_dma_no_channel(struct comedi_device *dev,
622 unsigned gpct_index)
623{
624 ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
625 NI_E_DMA_G0_G1_SEL_MASK(gpct_index), 0);
626}
627
628static inline void ni_set_cdo_dma_channel(struct comedi_device *dev,
629 unsigned mite_channel)
630{
631 struct ni_private *devpriv = dev->private;
632 unsigned long flags;
633 unsigned bits;
634
635 spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
636 devpriv->cdio_dma_select_reg &= ~NI_M_CDIO_DMA_SEL_CDO_MASK;
637 /*
638 * XXX just guessing ni_stc_dma_channel_select_bitfield()
639 * returns the right bits, under the assumption the cdio dma
640 * selection works just like ai/ao/gpct.
641 * Definitely works for dma channels 0 and 1.
642 */
643 bits = ni_stc_dma_channel_select_bitfield(mite_channel);
644 devpriv->cdio_dma_select_reg |= NI_M_CDIO_DMA_SEL_CDO(bits);
645 ni_writeb(dev, devpriv->cdio_dma_select_reg, NI_M_CDIO_DMA_SEL_REG);
646 mmiowb();
647 spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
648}
649
650static inline void ni_set_cdo_dma_no_channel(struct comedi_device *dev)
651{
652 struct ni_private *devpriv = dev->private;
653 unsigned long flags;
654 563
655 spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags); 564/* selects the MITE channel to use for DMA */
656 devpriv->cdio_dma_select_reg &= ~NI_M_CDIO_DMA_SEL_CDO_MASK; 565#define NI_STC_DMA_CHAN_SEL(x) (((x) < 4) ? BIT(x) : \
657 ni_writeb(dev, devpriv->cdio_dma_select_reg, NI_M_CDIO_DMA_SEL_REG); 566 ((x) == 4) ? 0x3 : \
658 mmiowb(); 567 ((x) == 5) ? 0x5 : 0x0)
659 spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
660}
661 568
569/* DMA channel setup */
662static int ni_request_ai_mite_channel(struct comedi_device *dev) 570static int ni_request_ai_mite_channel(struct comedi_device *dev)
663{ 571{
664 struct ni_private *devpriv = dev->private; 572 struct ni_private *devpriv = dev->private;
573 struct mite_channel *mite_chan;
665 unsigned long flags; 574 unsigned long flags;
575 unsigned int bits;
666 576
667 spin_lock_irqsave(&devpriv->mite_channel_lock, flags); 577 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
668 BUG_ON(devpriv->ai_mite_chan); 578 mite_chan = mite_request_channel(devpriv->mite, devpriv->ai_mite_ring);
669 devpriv->ai_mite_chan = 579 if (!mite_chan) {
670 mite_request_channel(devpriv->mite, devpriv->ai_mite_ring);
671 if (!devpriv->ai_mite_chan) {
672 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 580 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
673 dev_err(dev->class_dev, 581 dev_err(dev->class_dev,
674 "failed to reserve mite dma channel for analog input\n"); 582 "failed to reserve mite dma channel for analog input\n");
675 return -EBUSY; 583 return -EBUSY;
676 } 584 }
677 devpriv->ai_mite_chan->dir = COMEDI_INPUT; 585 mite_chan->dir = COMEDI_INPUT;
678 ni_set_ai_dma_channel(dev, devpriv->ai_mite_chan->channel); 586 devpriv->ai_mite_chan = mite_chan;
587
588 bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
589 ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
590 NI_E_DMA_AI_SEL_MASK, NI_E_DMA_AI_SEL(bits));
591
679 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 592 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
680 return 0; 593 return 0;
681} 594}
@@ -683,37 +596,42 @@ static int ni_request_ai_mite_channel(struct comedi_device *dev)
683static int ni_request_ao_mite_channel(struct comedi_device *dev) 596static int ni_request_ao_mite_channel(struct comedi_device *dev)
684{ 597{
685 struct ni_private *devpriv = dev->private; 598 struct ni_private *devpriv = dev->private;
599 struct mite_channel *mite_chan;
686 unsigned long flags; 600 unsigned long flags;
601 unsigned int bits;
687 602
688 spin_lock_irqsave(&devpriv->mite_channel_lock, flags); 603 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
689 BUG_ON(devpriv->ao_mite_chan); 604 mite_chan = mite_request_channel(devpriv->mite, devpriv->ao_mite_ring);
690 devpriv->ao_mite_chan = 605 if (!mite_chan) {
691 mite_request_channel(devpriv->mite, devpriv->ao_mite_ring);
692 if (!devpriv->ao_mite_chan) {
693 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 606 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
694 dev_err(dev->class_dev, 607 dev_err(dev->class_dev,
695 "failed to reserve mite dma channel for analog outut\n"); 608 "failed to reserve mite dma channel for analog outut\n");
696 return -EBUSY; 609 return -EBUSY;
697 } 610 }
698 devpriv->ao_mite_chan->dir = COMEDI_OUTPUT; 611 mite_chan->dir = COMEDI_OUTPUT;
699 ni_set_ao_dma_channel(dev, devpriv->ao_mite_chan->channel); 612 devpriv->ao_mite_chan = mite_chan;
613
614 bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
615 ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
616 NI_E_DMA_AO_SEL_MASK, NI_E_DMA_AO_SEL(bits));
617
700 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 618 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
701 return 0; 619 return 0;
702} 620}
703 621
704static int ni_request_gpct_mite_channel(struct comedi_device *dev, 622static int ni_request_gpct_mite_channel(struct comedi_device *dev,
705 unsigned gpct_index, 623 unsigned int gpct_index,
706 enum comedi_io_direction direction) 624 enum comedi_io_direction direction)
707{ 625{
708 struct ni_private *devpriv = dev->private; 626 struct ni_private *devpriv = dev->private;
709 unsigned long flags; 627 struct ni_gpct *counter = &devpriv->counter_dev->counters[gpct_index];
710 struct mite_channel *mite_chan; 628 struct mite_channel *mite_chan;
629 unsigned long flags;
630 unsigned int bits;
711 631
712 spin_lock_irqsave(&devpriv->mite_channel_lock, flags); 632 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
713 BUG_ON(devpriv->counter_dev->counters[gpct_index].mite_chan); 633 mite_chan = mite_request_channel(devpriv->mite,
714 mite_chan = 634 devpriv->gpct_mite_ring[gpct_index]);
715 mite_request_channel(devpriv->mite,
716 devpriv->gpct_mite_ring[gpct_index]);
717 if (!mite_chan) { 635 if (!mite_chan) {
718 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 636 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
719 dev_err(dev->class_dev, 637 dev_err(dev->class_dev,
@@ -721,37 +639,50 @@ static int ni_request_gpct_mite_channel(struct comedi_device *dev,
721 return -EBUSY; 639 return -EBUSY;
722 } 640 }
723 mite_chan->dir = direction; 641 mite_chan->dir = direction;
724 ni_tio_set_mite_channel(&devpriv->counter_dev->counters[gpct_index], 642 ni_tio_set_mite_channel(counter, mite_chan);
725 mite_chan); 643
726 ni_set_gpct_dma_channel(dev, gpct_index, mite_chan->channel); 644 bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
645 ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
646 NI_E_DMA_G0_G1_SEL_MASK(gpct_index),
647 NI_E_DMA_G0_G1_SEL(gpct_index, bits));
648
727 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 649 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
728 return 0; 650 return 0;
729} 651}
730 652
731#endif /* PCIDMA */
732
733static int ni_request_cdo_mite_channel(struct comedi_device *dev) 653static int ni_request_cdo_mite_channel(struct comedi_device *dev)
734{ 654{
735#ifdef PCIDMA
736 struct ni_private *devpriv = dev->private; 655 struct ni_private *devpriv = dev->private;
656 struct mite_channel *mite_chan;
737 unsigned long flags; 657 unsigned long flags;
658 unsigned int bits;
738 659
739 spin_lock_irqsave(&devpriv->mite_channel_lock, flags); 660 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
740 BUG_ON(devpriv->cdo_mite_chan); 661 mite_chan = mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring);
741 devpriv->cdo_mite_chan = 662 if (!mite_chan) {
742 mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring);
743 if (!devpriv->cdo_mite_chan) {
744 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 663 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
745 dev_err(dev->class_dev, 664 dev_err(dev->class_dev,
746 "failed to reserve mite dma channel for correlated digital output\n"); 665 "failed to reserve mite dma channel for correlated digital output\n");
747 return -EBUSY; 666 return -EBUSY;
748 } 667 }
749 devpriv->cdo_mite_chan->dir = COMEDI_OUTPUT; 668 mite_chan->dir = COMEDI_OUTPUT;
750 ni_set_cdo_dma_channel(dev, devpriv->cdo_mite_chan->channel); 669 devpriv->cdo_mite_chan = mite_chan;
670
671 /*
672 * XXX just guessing NI_STC_DMA_CHAN_SEL()
673 * returns the right bits, under the assumption the cdio dma
674 * selection works just like ai/ao/gpct.
675 * Definitely works for dma channels 0 and 1.
676 */
677 bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
678 ni_set_bitfield(dev, NI_M_CDIO_DMA_SEL_REG,
679 NI_M_CDIO_DMA_SEL_CDO_MASK,
680 NI_M_CDIO_DMA_SEL_CDO(bits));
681
751 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 682 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
752#endif /* PCIDMA */
753 return 0; 683 return 0;
754} 684}
685#endif /* PCIDMA */
755 686
756static void ni_release_ai_mite_channel(struct comedi_device *dev) 687static void ni_release_ai_mite_channel(struct comedi_device *dev)
757{ 688{
@@ -761,7 +692,8 @@ static void ni_release_ai_mite_channel(struct comedi_device *dev)
761 692
762 spin_lock_irqsave(&devpriv->mite_channel_lock, flags); 693 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
763 if (devpriv->ai_mite_chan) { 694 if (devpriv->ai_mite_chan) {
764 ni_set_ai_dma_no_channel(dev); 695 ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
696 NI_E_DMA_AI_SEL_MASK, 0);
765 mite_release_channel(devpriv->ai_mite_chan); 697 mite_release_channel(devpriv->ai_mite_chan);
766 devpriv->ai_mite_chan = NULL; 698 devpriv->ai_mite_chan = NULL;
767 } 699 }
@@ -777,7 +709,8 @@ static void ni_release_ao_mite_channel(struct comedi_device *dev)
777 709
778 spin_lock_irqsave(&devpriv->mite_channel_lock, flags); 710 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
779 if (devpriv->ao_mite_chan) { 711 if (devpriv->ao_mite_chan) {
780 ni_set_ao_dma_no_channel(dev); 712 ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
713 NI_E_DMA_AO_SEL_MASK, 0);
781 mite_release_channel(devpriv->ao_mite_chan); 714 mite_release_channel(devpriv->ao_mite_chan);
782 devpriv->ao_mite_chan = NULL; 715 devpriv->ao_mite_chan = NULL;
783 } 716 }
@@ -787,7 +720,7 @@ static void ni_release_ao_mite_channel(struct comedi_device *dev)
787 720
788#ifdef PCIDMA 721#ifdef PCIDMA
789static void ni_release_gpct_mite_channel(struct comedi_device *dev, 722static void ni_release_gpct_mite_channel(struct comedi_device *dev,
790 unsigned gpct_index) 723 unsigned int gpct_index)
791{ 724{
792 struct ni_private *devpriv = dev->private; 725 struct ni_private *devpriv = dev->private;
793 unsigned long flags; 726 unsigned long flags;
@@ -797,7 +730,8 @@ static void ni_release_gpct_mite_channel(struct comedi_device *dev,
797 struct mite_channel *mite_chan = 730 struct mite_channel *mite_chan =
798 devpriv->counter_dev->counters[gpct_index].mite_chan; 731 devpriv->counter_dev->counters[gpct_index].mite_chan;
799 732
800 ni_set_gpct_dma_no_channel(dev, gpct_index); 733 ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
734 NI_E_DMA_G0_G1_SEL_MASK(gpct_index), 0);
801 ni_tio_set_mite_channel(&devpriv-> 735 ni_tio_set_mite_channel(&devpriv->
802 counter_dev->counters[gpct_index], 736 counter_dev->counters[gpct_index],
803 NULL); 737 NULL);
@@ -805,30 +739,27 @@ static void ni_release_gpct_mite_channel(struct comedi_device *dev,
805 } 739 }
806 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 740 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
807} 741}
808#endif /* PCIDMA */
809 742
810static void ni_release_cdo_mite_channel(struct comedi_device *dev) 743static void ni_release_cdo_mite_channel(struct comedi_device *dev)
811{ 744{
812#ifdef PCIDMA
813 struct ni_private *devpriv = dev->private; 745 struct ni_private *devpriv = dev->private;
814 unsigned long flags; 746 unsigned long flags;
815 747
816 spin_lock_irqsave(&devpriv->mite_channel_lock, flags); 748 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
817 if (devpriv->cdo_mite_chan) { 749 if (devpriv->cdo_mite_chan) {
818 ni_set_cdo_dma_no_channel(dev); 750 ni_set_bitfield(dev, NI_M_CDIO_DMA_SEL_REG,
751 NI_M_CDIO_DMA_SEL_CDO_MASK, 0);
819 mite_release_channel(devpriv->cdo_mite_chan); 752 mite_release_channel(devpriv->cdo_mite_chan);
820 devpriv->cdo_mite_chan = NULL; 753 devpriv->cdo_mite_chan = NULL;
821 } 754 }
822 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 755 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
823#endif /* PCIDMA */
824} 756}
825 757
826#ifdef PCIDMA
827static void ni_e_series_enable_second_irq(struct comedi_device *dev, 758static void ni_e_series_enable_second_irq(struct comedi_device *dev,
828 unsigned gpct_index, short enable) 759 unsigned int gpct_index, short enable)
829{ 760{
830 struct ni_private *devpriv = dev->private; 761 struct ni_private *devpriv = dev->private;
831 uint16_t val = 0; 762 unsigned int val = 0;
832 int reg; 763 int reg;
833 764
834 if (devpriv->is_m_series || gpct_index > 1) 765 if (devpriv->is_m_series || gpct_index > 1)
@@ -875,8 +806,10 @@ static void ni_clear_ai_fifo(struct comedi_device *dev)
875 ni_writeb(dev, 0, NI_M_STATIC_AI_CTRL_REG(0)); 806 ni_writeb(dev, 0, NI_M_STATIC_AI_CTRL_REG(0));
876 ni_writeb(dev, 1, NI_M_STATIC_AI_CTRL_REG(0)); 807 ni_writeb(dev, 1, NI_M_STATIC_AI_CTRL_REG(0));
877#if 0 808#if 0
878 /* the NI example code does 3 convert pulses for 625x boards, 809 /*
879 but that appears to be wrong in practice. */ 810 * The NI example code does 3 convert pulses for 625x
811 * boards, But that appears to be wrong in practice.
812 */
880 ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE, 813 ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
881 NISTC_AI_CMD1_REG); 814 NISTC_AI_CMD1_REG);
882 ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE, 815 ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
@@ -888,8 +821,8 @@ static void ni_clear_ai_fifo(struct comedi_device *dev)
888 } 821 }
889} 822}
890 823
891static inline void ni_ao_win_outw(struct comedi_device *dev, uint16_t data, 824static inline void ni_ao_win_outw(struct comedi_device *dev,
892 int addr) 825 unsigned int data, int addr)
893{ 826{
894 struct ni_private *devpriv = dev->private; 827 struct ni_private *devpriv = dev->private;
895 unsigned long flags; 828 unsigned long flags;
@@ -900,8 +833,8 @@ static inline void ni_ao_win_outw(struct comedi_device *dev, uint16_t data,
900 spin_unlock_irqrestore(&devpriv->window_lock, flags); 833 spin_unlock_irqrestore(&devpriv->window_lock, flags);
901} 834}
902 835
903static inline void ni_ao_win_outl(struct comedi_device *dev, uint32_t data, 836static inline void ni_ao_win_outl(struct comedi_device *dev,
904 int addr) 837 unsigned int data, int addr)
905{ 838{
906 struct ni_private *devpriv = dev->private; 839 struct ni_private *devpriv = dev->private;
907 unsigned long flags; 840 unsigned long flags;
@@ -925,20 +858,21 @@ static inline unsigned short ni_ao_win_inw(struct comedi_device *dev, int addr)
925 return data; 858 return data;
926} 859}
927 860
928/* ni_set_bits( ) allows different parts of the ni_mio_common driver to 861/*
929* share registers (such as Interrupt_A_Register) without interfering with 862 * ni_set_bits( ) allows different parts of the ni_mio_common driver to
930* each other. 863 * share registers (such as Interrupt_A_Register) without interfering with
931* 864 * each other.
932* NOTE: the switch/case statements are optimized out for a constant argument 865 *
933* so this is actually quite fast--- If you must wrap another function around this 866 * NOTE: the switch/case statements are optimized out for a constant argument
934* make it inline to avoid a large speed penalty. 867 * so this is actually quite fast--- If you must wrap another function around
935* 868 * this make it inline to avoid a large speed penalty.
936* value should only be 1 or 0. 869 *
937*/ 870 * value should only be 1 or 0.
871 */
938static inline void ni_set_bits(struct comedi_device *dev, int reg, 872static inline void ni_set_bits(struct comedi_device *dev, int reg,
939 unsigned bits, unsigned value) 873 unsigned int bits, unsigned int value)
940{ 874{
941 unsigned bit_values; 875 unsigned int bit_values;
942 876
943 if (value) 877 if (value)
944 bit_values = bits; 878 bit_values = bits;
@@ -956,7 +890,7 @@ static void ni_sync_ai_dma(struct comedi_device *dev)
956 890
957 spin_lock_irqsave(&devpriv->mite_channel_lock, flags); 891 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
958 if (devpriv->ai_mite_chan) 892 if (devpriv->ai_mite_chan)
959 mite_sync_input_dma(devpriv->ai_mite_chan, s); 893 mite_sync_dma(devpriv->ai_mite_chan, s);
960 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 894 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
961} 895}
962 896
@@ -972,9 +906,8 @@ static int ni_ai_drain_dma(struct comedi_device *dev)
972 if (devpriv->ai_mite_chan) { 906 if (devpriv->ai_mite_chan) {
973 for (i = 0; i < timeout; i++) { 907 for (i = 0; i < timeout; i++) {
974 if ((ni_stc_readw(dev, NISTC_AI_STATUS1_REG) & 908 if ((ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
975 NISTC_AI_STATUS1_FIFO_E) 909 NISTC_AI_STATUS1_FIFO_E) &&
976 && mite_bytes_in_transit(devpriv->ai_mite_chan) == 910 mite_bytes_in_transit(devpriv->ai_mite_chan) == 0)
977 0)
978 break; 911 break;
979 udelay(5); 912 udelay(5);
980 } 913 }
@@ -994,19 +927,6 @@ static int ni_ai_drain_dma(struct comedi_device *dev)
994 return retval; 927 return retval;
995} 928}
996 929
997static void mite_handle_b_linkc(struct mite_struct *mite,
998 struct comedi_device *dev)
999{
1000 struct ni_private *devpriv = dev->private;
1001 struct comedi_subdevice *s = dev->write_subdev;
1002 unsigned long flags;
1003
1004 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
1005 if (devpriv->ao_mite_chan)
1006 mite_sync_output_dma(devpriv->ao_mite_chan, s);
1007 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
1008}
1009
1010static int ni_ao_wait_for_dma_load(struct comedi_device *dev) 930static int ni_ao_wait_for_dma_load(struct comedi_device *dev)
1011{ 931{
1012 static const int timeout = 10000; 932 static const int timeout = 10000;
@@ -1018,9 +938,11 @@ static int ni_ao_wait_for_dma_load(struct comedi_device *dev)
1018 b_status = ni_stc_readw(dev, NISTC_AO_STATUS1_REG); 938 b_status = ni_stc_readw(dev, NISTC_AO_STATUS1_REG);
1019 if (b_status & NISTC_AO_STATUS1_FIFO_HF) 939 if (b_status & NISTC_AO_STATUS1_FIFO_HF)
1020 break; 940 break;
1021 /* if we poll too often, the pci bus activity seems 941 /*
1022 to slow the dma transfer down */ 942 * If we poll too often, the pci bus activity seems
1023 udelay(10); 943 * to slow the dma transfer down.
944 */
945 usleep_range(10, 100);
1024 } 946 }
1025 if (i == timeout) { 947 if (i == timeout) {
1026 dev_err(dev->class_dev, "timed out waiting for dma load\n"); 948 dev_err(dev->class_dev, "timed out waiting for dma load\n");
@@ -1038,7 +960,7 @@ static void ni_ao_fifo_load(struct comedi_device *dev,
1038 struct ni_private *devpriv = dev->private; 960 struct ni_private *devpriv = dev->private;
1039 int i; 961 int i;
1040 unsigned short d; 962 unsigned short d;
1041 u32 packed_data; 963 unsigned int packed_data;
1042 964
1043 for (i = 0; i < n; i++) { 965 for (i = 0; i < n; i++) {
1044 comedi_buf_read_samples(s, &d, 1); 966 comedi_buf_read_samples(s, &d, 1);
@@ -1128,7 +1050,7 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
1128{ 1050{
1129 struct ni_private *devpriv = dev->private; 1051 struct ni_private *devpriv = dev->private;
1130 struct comedi_async *async = s->async; 1052 struct comedi_async *async = s->async;
1131 u32 dl; 1053 unsigned int dl;
1132 unsigned short data; 1054 unsigned short data;
1133 int i; 1055 int i;
1134 1056
@@ -1148,7 +1070,10 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
1148 comedi_buf_write_samples(s, &data, 1); 1070 comedi_buf_write_samples(s, &data, 1);
1149 } 1071 }
1150 } else if (devpriv->is_6143) { 1072 } else if (devpriv->is_6143) {
1151 /* This just reads the FIFO assuming the data is present, no checks on the FIFO status are performed */ 1073 /*
1074 * This just reads the FIFO assuming the data is present,
1075 * no checks on the FIFO status are performed.
1076 */
1152 for (i = 0; i < n / 2; i++) { 1077 for (i = 0; i < n / 2; i++) {
1153 dl = ni_readl(dev, NI6143_AI_FIFO_DATA_REG); 1078 dl = ni_readl(dev, NI6143_AI_FIFO_DATA_REG);
1154 1079
@@ -1192,16 +1117,13 @@ static void ni_handle_fifo_half_full(struct comedi_device *dev)
1192} 1117}
1193#endif 1118#endif
1194 1119
1195/* 1120/* Empties the AI fifo */
1196 Empties the AI fifo
1197*/
1198static void ni_handle_fifo_dregs(struct comedi_device *dev) 1121static void ni_handle_fifo_dregs(struct comedi_device *dev)
1199{ 1122{
1200 struct ni_private *devpriv = dev->private; 1123 struct ni_private *devpriv = dev->private;
1201 struct comedi_subdevice *s = dev->read_subdev; 1124 struct comedi_subdevice *s = dev->read_subdev;
1202 u32 dl; 1125 unsigned int dl;
1203 unsigned short data; 1126 unsigned short data;
1204 unsigned short fifo_empty;
1205 int i; 1127 int i;
1206 1128
1207 if (devpriv->is_611x) { 1129 if (devpriv->is_611x) {
@@ -1237,15 +1159,16 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
1237 } 1159 }
1238 1160
1239 } else { 1161 } else {
1240 fifo_empty = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) & 1162 unsigned short fe; /* fifo empty */
1241 NISTC_AI_STATUS1_FIFO_E; 1163
1242 while (fifo_empty == 0) { 1164 fe = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
1165 NISTC_AI_STATUS1_FIFO_E;
1166 while (fe == 0) {
1243 for (i = 0; 1167 for (i = 0;
1244 i < ARRAY_SIZE(devpriv->ai_fifo_buffer); i++) { 1168 i < ARRAY_SIZE(devpriv->ai_fifo_buffer); i++) {
1245 fifo_empty = ni_stc_readw(dev, 1169 fe = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
1246 NISTC_AI_STATUS1_REG) & 1170 NISTC_AI_STATUS1_FIFO_E;
1247 NISTC_AI_STATUS1_FIFO_E; 1171 if (fe)
1248 if (fifo_empty)
1249 break; 1172 break;
1250 devpriv->ai_fifo_buffer[i] = 1173 devpriv->ai_fifo_buffer[i] =
1251 ni_readw(dev, NI_E_AI_FIFO_DATA_REG); 1174 ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
@@ -1260,7 +1183,7 @@ static void get_last_sample_611x(struct comedi_device *dev)
1260 struct ni_private *devpriv = dev->private; 1183 struct ni_private *devpriv = dev->private;
1261 struct comedi_subdevice *s = dev->read_subdev; 1184 struct comedi_subdevice *s = dev->read_subdev;
1262 unsigned short data; 1185 unsigned short data;
1263 u32 dl; 1186 unsigned int dl;
1264 1187
1265 if (!devpriv->is_611x) 1188 if (!devpriv->is_611x)
1266 return; 1189 return;
@@ -1278,7 +1201,7 @@ static void get_last_sample_6143(struct comedi_device *dev)
1278 struct ni_private *devpriv = dev->private; 1201 struct ni_private *devpriv = dev->private;
1279 struct comedi_subdevice *s = dev->read_subdev; 1202 struct comedi_subdevice *s = dev->read_subdev;
1280 unsigned short data; 1203 unsigned short data;
1281 u32 dl; 1204 unsigned int dl;
1282 1205
1283 if (!devpriv->is_6143) 1206 if (!devpriv->is_6143)
1284 return; 1207 return;
@@ -1365,42 +1288,23 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status)
1365 ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG); 1288 ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG);
1366} 1289}
1367 1290
1368static void handle_a_interrupt(struct comedi_device *dev, unsigned short status, 1291static void handle_a_interrupt(struct comedi_device *dev,
1369 unsigned ai_mite_status) 1292 struct comedi_subdevice *s,
1293 unsigned short status)
1370{ 1294{
1371 struct comedi_subdevice *s = dev->read_subdev;
1372 struct comedi_cmd *cmd = &s->async->cmd; 1295 struct comedi_cmd *cmd = &s->async->cmd;
1373 1296
1374 /* 67xx boards don't have ai subdevice, but their gpct0 might generate an a interrupt */
1375 if (s->type == COMEDI_SUBD_UNUSED)
1376 return;
1377
1378#ifdef PCIDMA
1379 if (ai_mite_status & CHSR_LINKC)
1380 ni_sync_ai_dma(dev);
1381
1382 if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
1383 CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
1384 CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
1385 dev_err(dev->class_dev,
1386 "unknown mite interrupt (ai_mite_status=%08x)\n",
1387 ai_mite_status);
1388 s->async->events |= COMEDI_CB_ERROR;
1389 /* disable_irq(dev->irq); */
1390 }
1391#endif
1392
1393 /* test for all uncommon interrupt events at the same time */ 1297 /* test for all uncommon interrupt events at the same time */
1394 if (status & (NISTC_AI_STATUS1_ERR | 1298 if (status & (NISTC_AI_STATUS1_ERR |
1395 NISTC_AI_STATUS1_SC_TC | NISTC_AI_STATUS1_START1)) { 1299 NISTC_AI_STATUS1_SC_TC | NISTC_AI_STATUS1_START1)) {
1396 if (status == 0xffff) { 1300 if (status == 0xffff) {
1397 dev_err(dev->class_dev, "Card removed?\n"); 1301 dev_err(dev->class_dev, "Card removed?\n");
1398 /* we probably aren't even running a command now, 1302 /*
1399 * so it's a good idea to be careful. */ 1303 * We probably aren't even running a command now,
1400 if (comedi_is_subdevice_running(s)) { 1304 * so it's a good idea to be careful.
1305 */
1306 if (comedi_is_subdevice_running(s))
1401 s->async->events |= COMEDI_CB_ERROR; 1307 s->async->events |= COMEDI_CB_ERROR;
1402 comedi_handle_events(dev, s);
1403 }
1404 return; 1308 return;
1405 } 1309 }
1406 if (status & NISTC_AI_STATUS1_ERR) { 1310 if (status & NISTC_AI_STATUS1_ERR) {
@@ -1412,8 +1316,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
1412 s->async->events |= COMEDI_CB_ERROR; 1316 s->async->events |= COMEDI_CB_ERROR;
1413 if (status & NISTC_AI_STATUS1_OVER) 1317 if (status & NISTC_AI_STATUS1_OVER)
1414 s->async->events |= COMEDI_CB_OVERFLOW; 1318 s->async->events |= COMEDI_CB_OVERFLOW;
1415
1416 comedi_handle_events(dev, s);
1417 return; 1319 return;
1418 } 1320 }
1419 if (status & NISTC_AI_STATUS1_SC_TC) { 1321 if (status & NISTC_AI_STATUS1_SC_TC) {
@@ -1425,8 +1327,11 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
1425 if (status & NISTC_AI_STATUS1_FIFO_HF) { 1327 if (status & NISTC_AI_STATUS1_FIFO_HF) {
1426 int i; 1328 int i;
1427 static const int timeout = 10; 1329 static const int timeout = 10;
1428 /* pcmcia cards (at least 6036) seem to stop producing interrupts if we 1330 /*
1429 *fail to get the fifo less than half full, so loop to be sure.*/ 1331 * PCMCIA cards (at least 6036) seem to stop producing
1332 * interrupts if we fail to get the fifo less than half
1333 * full, so loop to be sure.
1334 */
1430 for (i = 0; i < timeout; ++i) { 1335 for (i = 0; i < timeout; ++i) {
1431 ni_handle_fifo_half_full(dev); 1336 ni_handle_fifo_half_full(dev);
1432 if ((ni_stc_readw(dev, NISTC_AI_STATUS1_REG) & 1337 if ((ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
@@ -1438,8 +1343,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
1438 1343
1439 if (status & NISTC_AI_STATUS1_STOP) 1344 if (status & NISTC_AI_STATUS1_STOP)
1440 ni_handle_eos(dev, s); 1345 ni_handle_eos(dev, s);
1441
1442 comedi_handle_events(dev, s);
1443} 1346}
1444 1347
1445static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status) 1348static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
@@ -1465,29 +1368,9 @@ static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
1465} 1368}
1466 1369
1467static void handle_b_interrupt(struct comedi_device *dev, 1370static void handle_b_interrupt(struct comedi_device *dev,
1468 unsigned short b_status, unsigned ao_mite_status) 1371 struct comedi_subdevice *s,
1372 unsigned short b_status)
1469{ 1373{
1470 struct comedi_subdevice *s = dev->write_subdev;
1471 /* unsigned short ack=0; */
1472
1473#ifdef PCIDMA
1474 /* Currently, mite.c requires us to handle LINKC */
1475 if (ao_mite_status & CHSR_LINKC) {
1476 struct ni_private *devpriv = dev->private;
1477
1478 mite_handle_b_linkc(devpriv->mite, dev);
1479 }
1480
1481 if (ao_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
1482 CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
1483 CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
1484 dev_err(dev->class_dev,
1485 "unknown mite interrupt (ao_mite_status=%08x)\n",
1486 ao_mite_status);
1487 s->async->events |= COMEDI_CB_ERROR;
1488 }
1489#endif
1490
1491 if (b_status == 0xffff) 1374 if (b_status == 0xffff)
1492 return; 1375 return;
1493 if (b_status & NISTC_AO_STATUS1_OVERRUN) { 1376 if (b_status & NISTC_AO_STATUS1_OVERRUN) {
@@ -1515,8 +1398,6 @@ static void handle_b_interrupt(struct comedi_device *dev,
1515 } 1398 }
1516 } 1399 }
1517#endif 1400#endif
1518
1519 comedi_handle_events(dev, s);
1520} 1401}
1521 1402
1522static void ni_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s, 1403static void ni_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s,
@@ -1606,8 +1487,11 @@ static int ni_ao_setup_MITE_dma(struct comedi_device *dev)
1606 if (devpriv->is_611x || devpriv->is_6713) { 1487 if (devpriv->is_611x || devpriv->is_6713) {
1607 mite_prep_dma(devpriv->ao_mite_chan, 32, 32); 1488 mite_prep_dma(devpriv->ao_mite_chan, 32, 32);
1608 } else { 1489 } else {
1609 /* doing 32 instead of 16 bit wide transfers from memory 1490 /*
1610 makes the mite do 32 bit pci transfers, doubling pci bandwidth. */ 1491 * Doing 32 instead of 16 bit wide transfers from
1492 * memory makes the mite do 32 bit pci transfers,
1493 * doubling pci bandwidth.
1494 */
1611 mite_prep_dma(devpriv->ao_mite_chan, 16, 32); 1495 mite_prep_dma(devpriv->ao_mite_chan, 16, 32);
1612 } 1496 }
1613 mite_dma_arm(devpriv->ao_mite_chan); 1497 mite_dma_arm(devpriv->ao_mite_chan);
@@ -1622,16 +1506,15 @@ static int ni_ao_setup_MITE_dma(struct comedi_device *dev)
1622#endif /* PCIDMA */ 1506#endif /* PCIDMA */
1623 1507
1624/* 1508/*
1625 used for both cancel ioctl and board initialization 1509 * used for both cancel ioctl and board initialization
1626 1510 *
1627 this is pretty harsh for a cancel, but it works... 1511 * this is pretty harsh for a cancel, but it works...
1628 */ 1512 */
1629
1630static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s) 1513static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s)
1631{ 1514{
1632 struct ni_private *devpriv = dev->private; 1515 struct ni_private *devpriv = dev->private;
1633 unsigned ai_personal; 1516 unsigned int ai_personal;
1634 unsigned ai_out_ctrl; 1517 unsigned int ai_out_ctrl;
1635 1518
1636 ni_release_ai_mite_channel(dev); 1519 ni_release_ai_mite_channel(dev);
1637 /* ai configuration */ 1520 /* ai configuration */
@@ -1736,12 +1619,12 @@ static void ni_m_series_load_channelgain_list(struct comedi_device *dev,
1736 unsigned int chan, range, aref; 1619 unsigned int chan, range, aref;
1737 unsigned int i; 1620 unsigned int i;
1738 unsigned int dither; 1621 unsigned int dither;
1739 unsigned range_code; 1622 unsigned int range_code;
1740 1623
1741 ni_stc_writew(dev, 1, NISTC_CFG_MEM_CLR_REG); 1624 ni_stc_writew(dev, 1, NISTC_CFG_MEM_CLR_REG);
1742 1625
1743 if ((list[0] & CR_ALT_SOURCE)) { 1626 if ((list[0] & CR_ALT_SOURCE)) {
1744 unsigned bypass_bits; 1627 unsigned int bypass_bits;
1745 1628
1746 chan = CR_CHAN(list[0]); 1629 chan = CR_CHAN(list[0]);
1747 range = CR_RANGE(list[0]); 1630 range = CR_RANGE(list[0]);
@@ -1760,7 +1643,7 @@ static void ni_m_series_load_channelgain_list(struct comedi_device *dev,
1760 ni_writel(dev, 0, NI_M_CFG_BYPASS_FIFO_REG); 1643 ni_writel(dev, 0, NI_M_CFG_BYPASS_FIFO_REG);
1761 } 1644 }
1762 for (i = 0; i < n_chan; i++) { 1645 for (i = 0; i < n_chan; i++) {
1763 unsigned config_bits = 0; 1646 unsigned int config_bits = 0;
1764 1647
1765 chan = CR_CHAN(list[i]); 1648 chan = CR_CHAN(list[i]);
1766 aref = CR_AREF(list[i]); 1649 aref = CR_AREF(list[i]);
@@ -1842,8 +1725,8 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
1842 return; 1725 return;
1843 } 1726 }
1844 if (n_chan == 1 && !devpriv->is_611x && !devpriv->is_6143) { 1727 if (n_chan == 1 && !devpriv->is_611x && !devpriv->is_6143) {
1845 if (devpriv->changain_state 1728 if (devpriv->changain_state &&
1846 && devpriv->changain_spec == list[0]) { 1729 devpriv->changain_spec == list[0]) {
1847 /* ready to go. */ 1730 /* ready to go. */
1848 return; 1731 return;
1849 } 1732 }
@@ -1857,8 +1740,8 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
1857 1740
1858 /* Set up Calibration mode if required */ 1741 /* Set up Calibration mode if required */
1859 if (devpriv->is_6143) { 1742 if (devpriv->is_6143) {
1860 if ((list[0] & CR_ALT_SOURCE) 1743 if ((list[0] & CR_ALT_SOURCE) &&
1861 && !devpriv->ai_calib_source_enabled) { 1744 !devpriv->ai_calib_source_enabled) {
1862 /* Strobe Relay enable bit */ 1745 /* Strobe Relay enable bit */
1863 ni_writew(dev, devpriv->ai_calib_source | 1746 ni_writew(dev, devpriv->ai_calib_source |
1864 NI6143_CALIB_CHAN_RELAY_ON, 1747 NI6143_CALIB_CHAN_RELAY_ON,
@@ -1866,9 +1749,10 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
1866 ni_writew(dev, devpriv->ai_calib_source, 1749 ni_writew(dev, devpriv->ai_calib_source,
1867 NI6143_CALIB_CHAN_REG); 1750 NI6143_CALIB_CHAN_REG);
1868 devpriv->ai_calib_source_enabled = 1; 1751 devpriv->ai_calib_source_enabled = 1;
1869 msleep_interruptible(100); /* Allow relays to change */ 1752 /* Allow relays to change */
1870 } else if (!(list[0] & CR_ALT_SOURCE) 1753 msleep_interruptible(100);
1871 && devpriv->ai_calib_source_enabled) { 1754 } else if (!(list[0] & CR_ALT_SOURCE) &&
1755 devpriv->ai_calib_source_enabled) {
1872 /* Strobe Relay disable bit */ 1756 /* Strobe Relay disable bit */
1873 ni_writew(dev, devpriv->ai_calib_source | 1757 ni_writew(dev, devpriv->ai_calib_source |
1874 NI6143_CALIB_CHAN_RELAY_OFF, 1758 NI6143_CALIB_CHAN_RELAY_OFF,
@@ -1876,7 +1760,8 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
1876 ni_writew(dev, devpriv->ai_calib_source, 1760 ni_writew(dev, devpriv->ai_calib_source,
1877 NI6143_CALIB_CHAN_REG); 1761 NI6143_CALIB_CHAN_REG);
1878 devpriv->ai_calib_source_enabled = 0; 1762 devpriv->ai_calib_source_enabled = 0;
1879 msleep_interruptible(100); /* Allow relays to change */ 1763 /* Allow relays to change */
1764 msleep_interruptible(100);
1880 } 1765 }
1881 } 1766 }
1882 1767
@@ -1949,7 +1834,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
1949 struct ni_private *devpriv = dev->private; 1834 struct ni_private *devpriv = dev->private;
1950 unsigned int mask = (s->maxdata + 1) >> 1; 1835 unsigned int mask = (s->maxdata + 1) >> 1;
1951 int i, n; 1836 int i, n;
1952 unsigned signbits; 1837 unsigned int signbits;
1953 unsigned int d; 1838 unsigned int d;
1954 unsigned long dl; 1839 unsigned long dl;
1955 1840
@@ -1997,7 +1882,11 @@ static int ni_ai_insn_read(struct comedi_device *dev,
1997 ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE, 1882 ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
1998 NISTC_AI_CMD1_REG); 1883 NISTC_AI_CMD1_REG);
1999 1884
2000 /* The 6143 has 32-bit FIFOs. You need to strobe a bit to move a single 16bit stranded sample into the FIFO */ 1885 /*
1886 * The 6143 has 32-bit FIFOs. You need to strobe a
1887 * bit to move a single 16bit stranded sample into
1888 * the FIFO.
1889 */
2001 dl = 0; 1890 dl = 0;
2002 for (i = 0; i < NI_TIMEOUT; i++) { 1891 for (i = 0; i < NI_TIMEOUT; i++) {
2003 if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) & 1892 if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) &
@@ -2035,7 +1924,8 @@ static int ni_ai_insn_read(struct comedi_device *dev,
2035 data[n] = dl; 1924 data[n] = dl;
2036 } else { 1925 } else {
2037 d = ni_readw(dev, NI_E_AI_FIFO_DATA_REG); 1926 d = ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
2038 d += signbits; /* subtle: needs to be short addition */ 1927 /* subtle: needs to be short addition */
1928 d += signbits;
2039 data[n] = d; 1929 data[n] = d;
2040 } 1930 }
2041 } 1931 }
@@ -2043,8 +1933,8 @@ static int ni_ai_insn_read(struct comedi_device *dev,
2043 return insn->n; 1933 return insn->n;
2044} 1934}
2045 1935
2046static int ni_ns_to_timer(const struct comedi_device *dev, unsigned nanosec, 1936static int ni_ns_to_timer(const struct comedi_device *dev,
2047 unsigned int flags) 1937 unsigned int nanosec, unsigned int flags)
2048{ 1938{
2049 struct ni_private *devpriv = dev->private; 1939 struct ni_private *devpriv = dev->private;
2050 int divider; 1940 int divider;
@@ -2064,14 +1954,14 @@ static int ni_ns_to_timer(const struct comedi_device *dev, unsigned nanosec,
2064 return divider - 1; 1954 return divider - 1;
2065} 1955}
2066 1956
2067static unsigned ni_timer_to_ns(const struct comedi_device *dev, int timer) 1957static unsigned int ni_timer_to_ns(const struct comedi_device *dev, int timer)
2068{ 1958{
2069 struct ni_private *devpriv = dev->private; 1959 struct ni_private *devpriv = dev->private;
2070 1960
2071 return devpriv->clock_ns * (timer + 1); 1961 return devpriv->clock_ns * (timer + 1);
2072} 1962}
2073 1963
2074static void ni_cmd_set_mite_transfer(struct mite_dma_descriptor_ring *ring, 1964static void ni_cmd_set_mite_transfer(struct mite_ring *ring,
2075 struct comedi_subdevice *sdev, 1965 struct comedi_subdevice *sdev,
2076 const struct comedi_cmd *cmd, 1966 const struct comedi_cmd *cmd,
2077 unsigned int max_count) { 1967 unsigned int max_count) {
@@ -2102,8 +1992,8 @@ static void ni_cmd_set_mite_transfer(struct mite_dma_descriptor_ring *ring,
2102#endif 1992#endif
2103} 1993}
2104 1994
2105static unsigned ni_min_ai_scan_period_ns(struct comedi_device *dev, 1995static unsigned int ni_min_ai_scan_period_ns(struct comedi_device *dev,
2106 unsigned num_channels) 1996 unsigned int num_channels)
2107{ 1997{
2108 const struct ni_board_struct *board = dev->board_ptr; 1998 const struct ni_board_struct *board = dev->board_ptr;
2109 struct ni_private *devpriv = dev->private; 1999 struct ni_private *devpriv = dev->private;
@@ -2294,7 +2184,7 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
2294 int start_stop_select = 0; 2184 int start_stop_select = 0;
2295 unsigned int stop_count; 2185 unsigned int stop_count;
2296 int interrupt_a_enable = 0; 2186 int interrupt_a_enable = 0;
2297 unsigned ai_trig; 2187 unsigned int ai_trig;
2298 2188
2299 if (dev->irq == 0) { 2189 if (dev->irq == 0) {
2300 dev_err(dev->class_dev, "cannot run command without an irq\n"); 2190 dev_err(dev->class_dev, "cannot run command without an irq\n");
@@ -2307,8 +2197,10 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
2307 /* start configuration */ 2197 /* start configuration */
2308 ni_stc_writew(dev, NISTC_RESET_AI_CFG_START, NISTC_RESET_REG); 2198 ni_stc_writew(dev, NISTC_RESET_AI_CFG_START, NISTC_RESET_REG);
2309 2199
2310 /* disable analog triggering for now, since it 2200 /*
2311 * interferes with the use of pfi0 */ 2201 * Disable analog triggering for now, since it interferes
2202 * with the use of pfi0.
2203 */
2312 devpriv->an_trig_etc_reg &= ~NISTC_ATRIG_ETC_ENA; 2204 devpriv->an_trig_etc_reg &= ~NISTC_ATRIG_ETC_ENA;
2313 ni_stc_writew(dev, devpriv->an_trig_etc_reg, NISTC_ATRIG_ETC_REG); 2205 ni_stc_writew(dev, devpriv->an_trig_etc_reg, NISTC_ATRIG_ETC_REG);
2314 2206
@@ -2369,7 +2261,10 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
2369 if (stop_count == 0) { 2261 if (stop_count == 0) {
2370 devpriv->ai_cmd2 |= NISTC_AI_CMD2_END_ON_EOS; 2262 devpriv->ai_cmd2 |= NISTC_AI_CMD2_END_ON_EOS;
2371 interrupt_a_enable |= NISTC_INTA_ENA_AI_STOP; 2263 interrupt_a_enable |= NISTC_INTA_ENA_AI_STOP;
2372 /* this is required to get the last sample for chanlist_len > 1, not sure why */ 2264 /*
2265 * This is required to get the last sample for
2266 * chanlist_len > 1, not sure why.
2267 */
2373 if (cmd->chanlist_len > 1) 2268 if (cmd->chanlist_len > 1)
2374 start_stop_select |= NISTC_AI_STOP_POLARITY | 2269 start_stop_select |= NISTC_AI_STOP_POLARITY |
2375 NISTC_AI_STOP_EDGE; 2270 NISTC_AI_STOP_EDGE;
@@ -2489,7 +2384,7 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
2489 2384
2490 switch (devpriv->aimode) { 2385 switch (devpriv->aimode) {
2491 case AIMODE_HALF_FULL: 2386 case AIMODE_HALF_FULL:
2492 /*generate FIFO interrupts and DMA requests on half-full */ 2387 /* FIFO interrupts and DMA requests on half-full */
2493#ifdef PCIDMA 2388#ifdef PCIDMA
2494 ni_stc_writew(dev, NISTC_AI_MODE3_FIFO_MODE_HF_E, 2389 ni_stc_writew(dev, NISTC_AI_MODE3_FIFO_MODE_HF_E,
2495 NISTC_AI_MODE3_REG); 2390 NISTC_AI_MODE3_REG);
@@ -2880,9 +2775,11 @@ static int ni_ao_inttrig(struct comedi_device *dev,
2880 if (trig_num != cmd->start_arg) 2775 if (trig_num != cmd->start_arg)
2881 return -EINVAL; 2776 return -EINVAL;
2882 2777
2883 /* Null trig at beginning prevent ao start trigger from executing more than 2778 /*
2884 once per command (and doing things like trying to allocate the ao dma channel 2779 * Null trig at beginning prevent ao start trigger from executing more
2885 multiple times) */ 2780 * than once per command (and doing things like trying to allocate the
2781 * ao dma channel multiple times).
2782 */
2886 s->async->inttrig = NULL; 2783 s->async->inttrig = NULL;
2887 2784
2888 ni_set_bits(dev, NISTC_INTB_ENA_REG, 2785 ni_set_bits(dev, NISTC_INTB_ENA_REG,
@@ -2951,7 +2848,7 @@ static void ni_ao_cmd_personalize(struct comedi_device *dev,
2951 const struct comedi_cmd *cmd) 2848 const struct comedi_cmd *cmd)
2952{ 2849{
2953 const struct ni_board_struct *board = dev->board_ptr; 2850 const struct ni_board_struct *board = dev->board_ptr;
2954 unsigned bits; 2851 unsigned int bits;
2955 2852
2956 ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG); 2853 ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
2957 2854
@@ -2999,6 +2896,7 @@ static void ni_ao_cmd_set_trigger(struct comedi_device *dev,
2999 const struct comedi_cmd *cmd) 2896 const struct comedi_cmd *cmd)
3000{ 2897{
3001 struct ni_private *devpriv = dev->private; 2898 struct ni_private *devpriv = dev->private;
2899 unsigned int trigsel;
3002 2900
3003 ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG); 2901 ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
3004 2902
@@ -3012,39 +2910,20 @@ static void ni_ao_cmd_set_trigger(struct comedi_device *dev,
3012 } 2910 }
3013 ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG); 2911 ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
3014 2912
3015 { 2913 if (cmd->start_src == TRIG_INT) {
3016 unsigned int trigsel = devpriv->ao_trigger_select; 2914 trigsel = NISTC_AO_TRIG_START1_EDGE |
3017 2915 NISTC_AO_TRIG_START1_SYNC;
3018 switch (cmd->start_src) { 2916 } else { /* TRIG_EXT */
3019 case TRIG_INT: 2917 trigsel = NISTC_AO_TRIG_START1_SEL(CR_CHAN(cmd->start_arg) + 1);
3020 case TRIG_NOW: 2918 /* 0=active high, 1=active low. see daq-stc 3-24 (p186) */
3021 trigsel &= ~(NISTC_AO_TRIG_START1_POLARITY | 2919 if (cmd->start_arg & CR_INVERT)
3022 NISTC_AO_TRIG_START1_SEL_MASK); 2920 trigsel |= NISTC_AO_TRIG_START1_POLARITY;
3023 trigsel |= NISTC_AO_TRIG_START1_EDGE | 2921 /* 0=edge detection disabled, 1=enabled */
3024 NISTC_AO_TRIG_START1_SYNC; 2922 if (cmd->start_arg & CR_EDGE)
3025 break; 2923 trigsel |= NISTC_AO_TRIG_START1_EDGE;
3026 case TRIG_EXT:
3027 trigsel = NISTC_AO_TRIG_START1_SEL(
3028 CR_CHAN(cmd->start_arg) + 1);
3029 if (cmd->start_arg & CR_INVERT)
3030 /*
3031 * 0=active high, 1=active low.
3032 * see daq-stc 3-24 (p186)
3033 */
3034 trigsel |= NISTC_AO_TRIG_START1_POLARITY;
3035 if (cmd->start_arg & CR_EDGE)
3036 /* 0=edge detection disabled, 1=enabled */
3037 trigsel |= NISTC_AO_TRIG_START1_EDGE;
3038 break;
3039 default:
3040 BUG();
3041 break;
3042 }
3043
3044 devpriv->ao_trigger_select = trigsel;
3045 ni_stc_writew(dev, devpriv->ao_trigger_select,
3046 NISTC_AO_TRIG_SEL_REG);
3047 } 2924 }
2925 ni_stc_writew(dev, trigsel, NISTC_AO_TRIG_SEL_REG);
2926
3048 /* AO_Delayed_START1 = 0, we do not support delayed start...yet */ 2927 /* AO_Delayed_START1 = 0, we do not support delayed start...yet */
3049 2928
3050 /* sync */ 2929 /* sync */
@@ -3149,8 +3028,9 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
3149 NISTC_AO_MODE1_UPDATE_SRC_POLARITY 3028 NISTC_AO_MODE1_UPDATE_SRC_POLARITY
3150 ); 3029 );
3151 3030
3152 switch (cmd->scan_begin_src) { 3031 if (cmd->scan_begin_src == TRIG_TIMER) {
3153 case TRIG_TIMER: 3032 unsigned int trigvar;
3033
3154 devpriv->ao_cmd2 &= ~NISTC_AO_CMD2_BC_GATE_ENA; 3034 devpriv->ao_cmd2 &= ~NISTC_AO_CMD2_BC_GATE_ENA;
3155 3035
3156 /* 3036 /*
@@ -3181,34 +3061,25 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
3181 * eseries/ni67xx and tMSeries.h for mseries. 3061 * eseries/ni67xx and tMSeries.h for mseries.
3182 */ 3062 */
3183 3063
3184 { 3064 trigvar = ni_ns_to_timer(dev, cmd->scan_begin_arg,
3185 unsigned trigvar = ni_ns_to_timer(dev, 3065 CMDF_ROUND_NEAREST);
3186 cmd->scan_begin_arg,
3187 CMDF_ROUND_NEAREST);
3188 3066
3189 /* 3067 /*
3190 * Wait N TB3 ticks after the start trigger before 3068 * Wait N TB3 ticks after the start trigger before
3191 * clocking(N must be >=2). 3069 * clocking (N must be >=2).
3192 */ 3070 */
3193 /* following line: 2-1 per STC */ 3071 /* following line: 2-1 per STC */
3194 ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG); 3072 ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
3195 ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, 3073 ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
3196 NISTC_AO_CMD1_REG); 3074 /* following line: N-1 per STC */
3197 /* following line: N-1 per STC */ 3075 ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
3198 ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG); 3076 } else { /* TRIG_EXT */
3199 }
3200 break;
3201 case TRIG_EXT:
3202 /* FIXME: assert scan_begin_arg != 0, ret failure otherwise */ 3077 /* FIXME: assert scan_begin_arg != 0, ret failure otherwise */
3203 devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA; 3078 devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
3204 devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC( 3079 devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC(
3205 CR_CHAN(cmd->scan_begin_arg)); 3080 CR_CHAN(cmd->scan_begin_arg));
3206 if (cmd->scan_begin_arg & CR_INVERT) 3081 if (cmd->scan_begin_arg & CR_INVERT)
3207 devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC_POLARITY; 3082 devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC_POLARITY;
3208 break;
3209 default:
3210 BUG();
3211 break;
3212 } 3083 }
3213 3084
3214 ni_stc_writew(dev, devpriv->ao_cmd2, NISTC_AO_CMD2_REG); 3085 ni_stc_writew(dev, devpriv->ao_cmd2, NISTC_AO_CMD2_REG);
@@ -3231,7 +3102,7 @@ static void ni_ao_cmd_set_channels(struct comedi_device *dev,
3231{ 3102{
3232 struct ni_private *devpriv = dev->private; 3103 struct ni_private *devpriv = dev->private;
3233 const struct comedi_cmd *cmd = &s->async->cmd; 3104 const struct comedi_cmd *cmd = &s->async->cmd;
3234 unsigned bits = 0; 3105 unsigned int bits = 0;
3235 3106
3236 ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG); 3107 ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
3237 3108
@@ -3474,7 +3345,6 @@ static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s)
3474 devpriv->ao_mode3 = NISTC_AO_MODE3_LAST_GATE_DISABLE; 3345 devpriv->ao_mode3 = NISTC_AO_MODE3_LAST_GATE_DISABLE;
3475 else 3346 else
3476 devpriv->ao_mode3 = 0; 3347 devpriv->ao_mode3 = 0;
3477 devpriv->ao_trigger_select = 0;
3478 3348
3479 ni_stc_writew(dev, 0, NISTC_AO_PERSONAL_REG); 3349 ni_stc_writew(dev, 0, NISTC_AO_PERSONAL_REG);
3480 ni_stc_writew(dev, 0, NISTC_AO_CMD1_REG); 3350 ni_stc_writew(dev, 0, NISTC_AO_CMD1_REG);
@@ -3550,6 +3420,7 @@ static int ni_dio_insn_bits(struct comedi_device *dev,
3550 return insn->n; 3420 return insn->n;
3551} 3421}
3552 3422
3423#ifdef PCIDMA
3553static int ni_m_series_dio_insn_config(struct comedi_device *dev, 3424static int ni_m_series_dio_insn_config(struct comedi_device *dev,
3554 struct comedi_subdevice *s, 3425 struct comedi_subdevice *s,
3555 struct comedi_insn *insn, 3426 struct comedi_insn *insn,
@@ -3652,13 +3523,11 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
3652 unsigned int trig_num) 3523 unsigned int trig_num)
3653{ 3524{
3654 struct comedi_cmd *cmd = &s->async->cmd; 3525 struct comedi_cmd *cmd = &s->async->cmd;
3655 const unsigned timeout = 1000; 3526 const unsigned int timeout = 1000;
3656 int retval = 0; 3527 int retval = 0;
3657 unsigned i; 3528 unsigned int i;
3658#ifdef PCIDMA
3659 struct ni_private *devpriv = dev->private; 3529 struct ni_private *devpriv = dev->private;
3660 unsigned long flags; 3530 unsigned long flags;
3661#endif
3662 3531
3663 if (trig_num != cmd->start_arg) 3532 if (trig_num != cmd->start_arg)
3664 return -EINVAL; 3533 return -EINVAL;
@@ -3668,7 +3537,6 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
3668 /* read alloc the entire buffer */ 3537 /* read alloc the entire buffer */
3669 comedi_buf_read_alloc(s, s->async->prealloc_bufsz); 3538 comedi_buf_read_alloc(s, s->async->prealloc_bufsz);
3670 3539
3671#ifdef PCIDMA
3672 spin_lock_irqsave(&devpriv->mite_channel_lock, flags); 3540 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
3673 if (devpriv->cdo_mite_chan) { 3541 if (devpriv->cdo_mite_chan) {
3674 mite_prep_dma(devpriv->cdo_mite_chan, 32, 32); 3542 mite_prep_dma(devpriv->cdo_mite_chan, 32, 32);
@@ -3680,7 +3548,7 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
3680 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 3548 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
3681 if (retval < 0) 3549 if (retval < 0)
3682 return retval; 3550 return retval;
3683#endif 3551
3684 /* 3552 /*
3685 * XXX not sure what interrupt C group does 3553 * XXX not sure what interrupt C group does
3686 * wait for dma to fill output fifo 3554 * wait for dma to fill output fifo
@@ -3690,7 +3558,7 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
3690 if (ni_readl(dev, NI_M_CDIO_STATUS_REG) & 3558 if (ni_readl(dev, NI_M_CDIO_STATUS_REG) &
3691 NI_M_CDIO_STATUS_CDO_FIFO_FULL) 3559 NI_M_CDIO_STATUS_CDO_FIFO_FULL)
3692 break; 3560 break;
3693 udelay(10); 3561 usleep_range(10, 100);
3694 } 3562 }
3695 if (i == timeout) { 3563 if (i == timeout) {
3696 dev_err(dev->class_dev, "dma failed to fill cdo fifo!\n"); 3564 dev_err(dev->class_dev, "dma failed to fill cdo fifo!\n");
@@ -3708,7 +3576,7 @@ static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
3708{ 3576{
3709 struct ni_private *devpriv = dev->private; 3577 struct ni_private *devpriv = dev->private;
3710 const struct comedi_cmd *cmd = &s->async->cmd; 3578 const struct comedi_cmd *cmd = &s->async->cmd;
3711 unsigned cdo_mode_bits; 3579 unsigned int cdo_mode_bits;
3712 int retval; 3580 int retval;
3713 3581
3714 ni_writel(dev, NI_M_CDO_CMD_RESET, NI_M_CDIO_CMD_REG); 3582 ni_writel(dev, NI_M_CDO_CMD_RESET, NI_M_CDIO_CMD_REG);
@@ -3759,28 +3627,14 @@ static int ni_cdio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
3759static void handle_cdio_interrupt(struct comedi_device *dev) 3627static void handle_cdio_interrupt(struct comedi_device *dev)
3760{ 3628{
3761 struct ni_private *devpriv = dev->private; 3629 struct ni_private *devpriv = dev->private;
3762 unsigned cdio_status; 3630 unsigned int cdio_status;
3763 struct comedi_subdevice *s = &dev->subdevices[NI_DIO_SUBDEV]; 3631 struct comedi_subdevice *s = &dev->subdevices[NI_DIO_SUBDEV];
3764#ifdef PCIDMA
3765 unsigned long flags; 3632 unsigned long flags;
3766#endif
3767 3633
3768 if (!devpriv->is_m_series)
3769 return;
3770#ifdef PCIDMA
3771 spin_lock_irqsave(&devpriv->mite_channel_lock, flags); 3634 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
3772 if (devpriv->cdo_mite_chan) { 3635 if (devpriv->cdo_mite_chan)
3773 unsigned cdo_mite_status = 3636 mite_ack_linkc(devpriv->cdo_mite_chan, s, true);
3774 mite_get_status(devpriv->cdo_mite_chan);
3775 if (cdo_mite_status & CHSR_LINKC) {
3776 writel(CHOR_CLRLC,
3777 devpriv->mite->mite_io_addr +
3778 MITE_CHOR(devpriv->cdo_mite_chan->channel));
3779 }
3780 mite_sync_output_dma(devpriv->cdo_mite_chan, s);
3781 }
3782 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); 3637 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
3783#endif
3784 3638
3785 cdio_status = ni_readl(dev, NI_M_CDIO_STATUS_REG); 3639 cdio_status = ni_readl(dev, NI_M_CDIO_STATUS_REG);
3786 if (cdio_status & NI_M_CDIO_STATUS_CDO_ERROR) { 3640 if (cdio_status & NI_M_CDIO_STATUS_CDO_ERROR) {
@@ -3796,6 +3650,7 @@ static void handle_cdio_interrupt(struct comedi_device *dev)
3796 } 3650 }
3797 comedi_handle_events(dev, s); 3651 comedi_handle_events(dev, s);
3798} 3652}
3653#endif /* PCIDMA */
3799 3654
3800static int ni_serial_hw_readwrite8(struct comedi_device *dev, 3655static int ni_serial_hw_readwrite8(struct comedi_device *dev,
3801 struct comedi_subdevice *s, 3656 struct comedi_subdevice *s,
@@ -3813,7 +3668,7 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
3813 status1 = ni_stc_readw(dev, NISTC_STATUS1_REG); 3668 status1 = ni_stc_readw(dev, NISTC_STATUS1_REG);
3814 if (status1 & NISTC_STATUS1_SERIO_IN_PROG) { 3669 if (status1 & NISTC_STATUS1_SERIO_IN_PROG) {
3815 err = -EBUSY; 3670 err = -EBUSY;
3816 goto Error; 3671 goto error;
3817 } 3672 }
3818 3673
3819 devpriv->dio_control |= NISTC_DIO_CTRL_HW_SER_START; 3674 devpriv->dio_control |= NISTC_DIO_CTRL_HW_SER_START;
@@ -3829,7 +3684,7 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
3829 dev_err(dev->class_dev, 3684 dev_err(dev->class_dev,
3830 "SPI serial I/O didn't finish in time!\n"); 3685 "SPI serial I/O didn't finish in time!\n");
3831 err = -ETIME; 3686 err = -ETIME;
3832 goto Error; 3687 goto error;
3833 } 3688 }
3834 } 3689 }
3835 3690
@@ -3842,7 +3697,7 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
3842 if (data_in) 3697 if (data_in)
3843 *data_in = ni_stc_readw(dev, NISTC_DIO_SERIAL_IN_REG); 3698 *data_in = ni_stc_readw(dev, NISTC_DIO_SERIAL_IN_REG);
3844 3699
3845Error: 3700error:
3846 ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG); 3701 ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
3847 3702
3848 return err; 3703 return err;
@@ -3860,16 +3715,20 @@ static int ni_serial_sw_readwrite8(struct comedi_device *dev,
3860 udelay((devpriv->serial_interval_ns + 999) / 1000); 3715 udelay((devpriv->serial_interval_ns + 999) / 1000);
3861 3716
3862 for (mask = 0x80; mask; mask >>= 1) { 3717 for (mask = 0x80; mask; mask >>= 1) {
3863 /* Output current bit; note that we cannot touch s->state 3718 /*
3864 because it is a per-subdevice field, and serial is 3719 * Output current bit; note that we cannot touch s->state
3865 a separate subdevice from DIO. */ 3720 * because it is a per-subdevice field, and serial is
3721 * a separate subdevice from DIO.
3722 */
3866 devpriv->dio_output &= ~NISTC_DIO_SDOUT; 3723 devpriv->dio_output &= ~NISTC_DIO_SDOUT;
3867 if (data_out & mask) 3724 if (data_out & mask)
3868 devpriv->dio_output |= NISTC_DIO_SDOUT; 3725 devpriv->dio_output |= NISTC_DIO_SDOUT;
3869 ni_stc_writew(dev, devpriv->dio_output, NISTC_DIO_OUT_REG); 3726 ni_stc_writew(dev, devpriv->dio_output, NISTC_DIO_OUT_REG);
3870 3727
3871 /* Assert SDCLK (active low, inverted), wait for half of 3728 /*
3872 the delay, deassert SDCLK, and wait for the other half. */ 3729 * Assert SDCLK (active low, inverted), wait for half of
3730 * the delay, deassert SDCLK, and wait for the other half.
3731 */
3873 devpriv->dio_control |= NISTC_DIO_SDCLK; 3732 devpriv->dio_control |= NISTC_DIO_SDCLK;
3874 ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG); 3733 ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
3875 3734
@@ -3897,7 +3756,7 @@ static int ni_serial_insn_config(struct comedi_device *dev,
3897 unsigned int *data) 3756 unsigned int *data)
3898{ 3757{
3899 struct ni_private *devpriv = dev->private; 3758 struct ni_private *devpriv = dev->private;
3900 unsigned clk_fout = devpriv->clock_and_fout; 3759 unsigned int clk_fout = devpriv->clock_and_fout;
3901 int err = insn->n; 3760 int err = insn->n;
3902 unsigned char byte_out, byte_in = 0; 3761 unsigned char byte_out, byte_in = 0;
3903 3762
@@ -3916,8 +3775,10 @@ static int ni_serial_insn_config(struct comedi_device *dev,
3916 data[1] = SERIAL_DISABLED; 3775 data[1] = SERIAL_DISABLED;
3917 devpriv->serial_interval_ns = data[1]; 3776 devpriv->serial_interval_ns = data[1];
3918 } else if (data[1] <= SERIAL_600NS) { 3777 } else if (data[1] <= SERIAL_600NS) {
3919 /* Warning: this clock speed is too fast to reliably 3778 /*
3920 control SCXI. */ 3779 * Warning: this clock speed is too fast to reliably
3780 * control SCXI.
3781 */
3921 devpriv->dio_control &= ~NISTC_DIO_CTRL_HW_SER_TIMEBASE; 3782 devpriv->dio_control &= ~NISTC_DIO_CTRL_HW_SER_TIMEBASE;
3922 clk_fout |= NISTC_CLK_FOUT_SLOW_TIMEBASE; 3783 clk_fout |= NISTC_CLK_FOUT_SLOW_TIMEBASE;
3923 clk_fout &= ~NISTC_CLK_FOUT_DIO_SER_OUT_DIV2; 3784 clk_fout &= ~NISTC_CLK_FOUT_DIO_SER_OUT_DIV2;
@@ -3933,10 +3794,12 @@ static int ni_serial_insn_config(struct comedi_device *dev,
3933 devpriv->dio_control |= NISTC_DIO_CTRL_HW_SER_TIMEBASE; 3794 devpriv->dio_control |= NISTC_DIO_CTRL_HW_SER_TIMEBASE;
3934 clk_fout |= NISTC_CLK_FOUT_SLOW_TIMEBASE | 3795 clk_fout |= NISTC_CLK_FOUT_SLOW_TIMEBASE |
3935 NISTC_CLK_FOUT_DIO_SER_OUT_DIV2; 3796 NISTC_CLK_FOUT_DIO_SER_OUT_DIV2;
3936 /* Note: NISTC_CLK_FOUT_DIO_SER_OUT_DIV2 only affects 3797 /*
3937 600ns/1.2us. If you turn divide_by_2 off with the 3798 * Note: NISTC_CLK_FOUT_DIO_SER_OUT_DIV2 only affects
3938 slow clock, you will still get 10us, except then 3799 * 600ns/1.2us. If you turn divide_by_2 off with the
3939 all your delays are wrong. */ 3800 * slow clock, you will still get 10us, except then
3801 * all your delays are wrong.
3802 */
3940 data[1] = SERIAL_10US; 3803 data[1] = SERIAL_10US;
3941 devpriv->serial_interval_ns = data[1]; 3804 devpriv->serial_interval_ns = data[1];
3942 } else { 3805 } else {
@@ -4046,15 +3909,11 @@ static unsigned int ni_gpct_to_stc_register(struct comedi_device *dev,
4046 return regmap->mio_reg; 3909 return regmap->mio_reg;
4047} 3910}
4048 3911
4049static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits, 3912static void ni_gpct_write_register(struct ni_gpct *counter, unsigned int bits,
4050 enum ni_gpct_register reg) 3913 enum ni_gpct_register reg)
4051{ 3914{
4052 struct comedi_device *dev = counter->counter_dev->dev; 3915 struct comedi_device *dev = counter->counter_dev->dev;
4053 unsigned int stc_register = ni_gpct_to_stc_register(dev, reg); 3916 unsigned int stc_register = ni_gpct_to_stc_register(dev, reg);
4054 static const unsigned gpct_interrupt_a_enable_mask =
4055 NISTC_INTA_ENA_G0_GATE | NISTC_INTA_ENA_G0_TC;
4056 static const unsigned gpct_interrupt_b_enable_mask =
4057 NISTC_INTB_ENA_G1_GATE | NISTC_INTB_ENA_G1_TC;
4058 3917
4059 if (stc_register == 0) 3918 if (stc_register == 0)
4060 return; 3919 return;
@@ -4082,25 +3941,22 @@ static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
4082 3941
4083 /* 16 bit registers */ 3942 /* 16 bit registers */
4084 case NITIO_G0_INT_ENA: 3943 case NITIO_G0_INT_ENA:
4085 BUG_ON(bits & ~gpct_interrupt_a_enable_mask);
4086 ni_set_bitfield(dev, stc_register, 3944 ni_set_bitfield(dev, stc_register,
4087 gpct_interrupt_a_enable_mask, bits); 3945 NISTC_INTA_ENA_G0_GATE | NISTC_INTA_ENA_G0_TC,
3946 bits);
4088 break; 3947 break;
4089 case NITIO_G1_INT_ENA: 3948 case NITIO_G1_INT_ENA:
4090 BUG_ON(bits & ~gpct_interrupt_b_enable_mask);
4091 ni_set_bitfield(dev, stc_register, 3949 ni_set_bitfield(dev, stc_register,
4092 gpct_interrupt_b_enable_mask, bits); 3950 NISTC_INTB_ENA_G1_GATE | NISTC_INTB_ENA_G1_TC,
3951 bits);
4093 break; 3952 break;
4094 case NITIO_G01_RESET:
4095 BUG_ON(bits & ~(NISTC_RESET_G0 | NISTC_RESET_G1));
4096 /* fall-through */
4097 default: 3953 default:
4098 ni_stc_writew(dev, bits, stc_register); 3954 ni_stc_writew(dev, bits, stc_register);
4099 } 3955 }
4100} 3956}
4101 3957
4102static unsigned ni_gpct_read_register(struct ni_gpct *counter, 3958static unsigned int ni_gpct_read_register(struct ni_gpct *counter,
4103 enum ni_gpct_register reg) 3959 enum ni_gpct_register reg)
4104{ 3960{
4105 struct comedi_device *dev = counter->counter_dev->dev; 3961 struct comedi_device *dev = counter->counter_dev->dev;
4106 unsigned int stc_register = ni_gpct_to_stc_register(dev, reg); 3962 unsigned int stc_register = ni_gpct_to_stc_register(dev, reg);
@@ -4227,7 +4083,7 @@ static int ni_m_series_pwm_config(struct comedi_device *dev,
4227 unsigned int *data) 4083 unsigned int *data)
4228{ 4084{
4229 struct ni_private *devpriv = dev->private; 4085 struct ni_private *devpriv = dev->private;
4230 unsigned up_count, down_count; 4086 unsigned int up_count, down_count;
4231 4087
4232 switch (data[0]) { 4088 switch (data[0]) {
4233 case INSN_CONFIG_PWM_OUTPUT: 4089 case INSN_CONFIG_PWM_OUTPUT:
@@ -4287,7 +4143,7 @@ static int ni_6143_pwm_config(struct comedi_device *dev,
4287 unsigned int *data) 4143 unsigned int *data)
4288{ 4144{
4289 struct ni_private *devpriv = dev->private; 4145 struct ni_private *devpriv = dev->private;
4290 unsigned up_count, down_count; 4146 unsigned int up_count, down_count;
4291 4147
4292 switch (data[0]) { 4148 switch (data[0]) {
4293 case INSN_CONFIG_PWM_OUTPUT: 4149 case INSN_CONFIG_PWM_OUTPUT:
@@ -4343,13 +4199,13 @@ static int ni_6143_pwm_config(struct comedi_device *dev,
4343static int pack_mb88341(int addr, int val, int *bitstring) 4199static int pack_mb88341(int addr, int val, int *bitstring)
4344{ 4200{
4345 /* 4201 /*
4346 Fujitsu MB 88341 4202 * Fujitsu MB 88341
4347 Note that address bits are reversed. Thanks to 4203 * Note that address bits are reversed. Thanks to
4348 Ingo Keen for noticing this. 4204 * Ingo Keen for noticing this.
4349 4205 *
4350 Note also that the 88341 expects address values from 4206 * Note also that the 88341 expects address values from
4351 1-12, whereas we use channel numbers 0-11. The NI 4207 * 1-12, whereas we use channel numbers 0-11. The NI
4352 docs use 1-12, also, so be careful here. 4208 * docs use 1-12, also, so be careful here.
4353 */ 4209 */
4354 addr++; 4210 addr++;
4355 *bitstring = ((addr & 0x1) << 11) | 4211 *bitstring = ((addr & 0x1) << 11) |
@@ -4495,12 +4351,12 @@ static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s)
4495 s->n_chan = n_chans; 4351 s->n_chan = n_chans;
4496 4352
4497 if (diffbits) { 4353 if (diffbits) {
4498 unsigned int *maxdata_list; 4354 unsigned int *maxdata_list = devpriv->caldac_maxdata_list;
4499 4355
4500 if (n_chans > MAX_N_CALDACS) 4356 if (n_chans > MAX_N_CALDACS)
4501 dev_err(dev->class_dev, 4357 dev_err(dev->class_dev,
4502 "BUG! MAX_N_CALDACS too small\n"); 4358 "BUG! MAX_N_CALDACS too small\n");
4503 s->maxdata_list = maxdata_list = devpriv->caldac_maxdata_list; 4359 s->maxdata_list = maxdata_list;
4504 chan = 0; 4360 chan = 0;
4505 for (i = 0; i < n_dacs; i++) { 4361 for (i = 0; i < n_dacs; i++) {
4506 type = board->caldac[i]; 4362 type = board->caldac[i];
@@ -4574,8 +4430,8 @@ static int ni_m_series_eeprom_insn_read(struct comedi_device *dev,
4574 return 1; 4430 return 1;
4575} 4431}
4576 4432
4577static unsigned ni_old_get_pfi_routing(struct comedi_device *dev, 4433static unsigned int ni_old_get_pfi_routing(struct comedi_device *dev,
4578 unsigned chan) 4434 unsigned int chan)
4579{ 4435{
4580 /* pre-m-series boards have fixed signals on pfi pins */ 4436 /* pre-m-series boards have fixed signals on pfi pins */
4581 switch (chan) { 4437 switch (chan) {
@@ -4607,7 +4463,7 @@ static unsigned ni_old_get_pfi_routing(struct comedi_device *dev,
4607} 4463}
4608 4464
4609static int ni_old_set_pfi_routing(struct comedi_device *dev, 4465static int ni_old_set_pfi_routing(struct comedi_device *dev,
4610 unsigned chan, unsigned source) 4466 unsigned int chan, unsigned int source)
4611{ 4467{
4612 /* pre-m-series boards have fixed signals on pfi pins */ 4468 /* pre-m-series boards have fixed signals on pfi pins */
4613 if (source != ni_old_get_pfi_routing(dev, chan)) 4469 if (source != ni_old_get_pfi_routing(dev, chan))
@@ -4615,21 +4471,21 @@ static int ni_old_set_pfi_routing(struct comedi_device *dev,
4615 return 2; 4471 return 2;
4616} 4472}
4617 4473
4618static unsigned ni_m_series_get_pfi_routing(struct comedi_device *dev, 4474static unsigned int ni_m_series_get_pfi_routing(struct comedi_device *dev,
4619 unsigned chan) 4475 unsigned int chan)
4620{ 4476{
4621 struct ni_private *devpriv = dev->private; 4477 struct ni_private *devpriv = dev->private;
4622 const unsigned array_offset = chan / 3; 4478 const unsigned int array_offset = chan / 3;
4623 4479
4624 return NI_M_PFI_OUT_SEL_TO_SRC(chan, 4480 return NI_M_PFI_OUT_SEL_TO_SRC(chan,
4625 devpriv->pfi_output_select_reg[array_offset]); 4481 devpriv->pfi_output_select_reg[array_offset]);
4626} 4482}
4627 4483
4628static int ni_m_series_set_pfi_routing(struct comedi_device *dev, 4484static int ni_m_series_set_pfi_routing(struct comedi_device *dev,
4629 unsigned chan, unsigned source) 4485 unsigned int chan, unsigned int source)
4630{ 4486{
4631 struct ni_private *devpriv = dev->private; 4487 struct ni_private *devpriv = dev->private;
4632 unsigned index = chan / 3; 4488 unsigned int index = chan / 3;
4633 unsigned short val = devpriv->pfi_output_select_reg[index]; 4489 unsigned short val = devpriv->pfi_output_select_reg[index];
4634 4490
4635 if ((source & 0x1f) != source) 4491 if ((source & 0x1f) != source)
@@ -4643,7 +4499,8 @@ static int ni_m_series_set_pfi_routing(struct comedi_device *dev,
4643 return 2; 4499 return 2;
4644} 4500}
4645 4501
4646static unsigned ni_get_pfi_routing(struct comedi_device *dev, unsigned chan) 4502static unsigned int ni_get_pfi_routing(struct comedi_device *dev,
4503 unsigned int chan)
4647{ 4504{
4648 struct ni_private *devpriv = dev->private; 4505 struct ni_private *devpriv = dev->private;
4649 4506
@@ -4652,8 +4509,8 @@ static unsigned ni_get_pfi_routing(struct comedi_device *dev, unsigned chan)
4652 : ni_old_get_pfi_routing(dev, chan); 4509 : ni_old_get_pfi_routing(dev, chan);
4653} 4510}
4654 4511
4655static int ni_set_pfi_routing(struct comedi_device *dev, unsigned chan, 4512static int ni_set_pfi_routing(struct comedi_device *dev,
4656 unsigned source) 4513 unsigned int chan, unsigned int source)
4657{ 4514{
4658 struct ni_private *devpriv = dev->private; 4515 struct ni_private *devpriv = dev->private;
4659 4516
@@ -4663,11 +4520,11 @@ static int ni_set_pfi_routing(struct comedi_device *dev, unsigned chan,
4663} 4520}
4664 4521
4665static int ni_config_filter(struct comedi_device *dev, 4522static int ni_config_filter(struct comedi_device *dev,
4666 unsigned pfi_channel, 4523 unsigned int pfi_channel,
4667 enum ni_pfi_filter_select filter) 4524 enum ni_pfi_filter_select filter)
4668{ 4525{
4669 struct ni_private *devpriv = dev->private; 4526 struct ni_private *devpriv = dev->private;
4670 unsigned bits; 4527 unsigned int bits;
4671 4528
4672 if (!devpriv->is_m_series) 4529 if (!devpriv->is_m_series)
4673 return -ENOTSUPP; 4530 return -ENOTSUPP;
@@ -4818,9 +4675,12 @@ static int cs5529_ai_insn_read(struct comedi_device *dev,
4818 unsigned int channel_select; 4675 unsigned int channel_select;
4819 const unsigned int INTERNAL_REF = 0x1000; 4676 const unsigned int INTERNAL_REF = 0x1000;
4820 4677
4821 /* Set calibration adc source. Docs lie, reference select bits 8 to 11 4678 /*
4679 * Set calibration adc source. Docs lie, reference select bits 8 to 11
4822 * do nothing. bit 12 seems to chooses internal reference voltage, bit 4680 * do nothing. bit 12 seems to chooses internal reference voltage, bit
4823 * 13 causes the adc input to go overrange (maybe reads external reference?) */ 4681 * 13 causes the adc input to go overrange (maybe reads external
4682 * reference?)
4683 */
4824 if (insn->chanspec & CR_ALT_SOURCE) 4684 if (insn->chanspec & CR_ALT_SOURCE)
4825 channel_select = INTERNAL_REF; 4685 channel_select = INTERNAL_REF;
4826 else 4686 else
@@ -4875,27 +4735,28 @@ static int init_cs5529(struct comedi_device *dev)
4875 * Find best multiplier/divider to try and get the PLL running at 80 MHz 4735 * Find best multiplier/divider to try and get the PLL running at 80 MHz
4876 * given an arbitrary frequency input clock. 4736 * given an arbitrary frequency input clock.
4877 */ 4737 */
4878static int ni_mseries_get_pll_parameters(unsigned reference_period_ns, 4738static int ni_mseries_get_pll_parameters(unsigned int reference_period_ns,
4879 unsigned *freq_divider, 4739 unsigned int *freq_divider,
4880 unsigned *freq_multiplier, 4740 unsigned int *freq_multiplier,
4881 unsigned *actual_period_ns) 4741 unsigned int *actual_period_ns)
4882{ 4742{
4883 unsigned div; 4743 unsigned int div;
4884 unsigned best_div = 1; 4744 unsigned int best_div = 1;
4885 unsigned mult; 4745 unsigned int mult;
4886 unsigned best_mult = 1; 4746 unsigned int best_mult = 1;
4887 static const unsigned pico_per_nano = 1000; 4747 static const unsigned int pico_per_nano = 1000;
4888 4748 const unsigned int reference_picosec = reference_period_ns *
4889 const unsigned reference_picosec = reference_period_ns * pico_per_nano; 4749 pico_per_nano;
4890 /* m-series wants the phased-locked loop to output 80MHz, which is divided by 4 to 4750 /*
4891 * 20 MHz for most timing clocks */ 4751 * m-series wants the phased-locked loop to output 80MHz, which is
4892 static const unsigned target_picosec = 12500; 4752 * divided by 4 to 20 MHz for most timing clocks
4893 static const unsigned fudge_factor_80_to_20Mhz = 4; 4753 */
4754 static const unsigned int target_picosec = 12500;
4894 int best_period_picosec = 0; 4755 int best_period_picosec = 0;
4895 4756
4896 for (div = 1; div <= NI_M_PLL_MAX_DIVISOR; ++div) { 4757 for (div = 1; div <= NI_M_PLL_MAX_DIVISOR; ++div) {
4897 for (mult = 1; mult <= NI_M_PLL_MAX_MULTIPLIER; ++mult) { 4758 for (mult = 1; mult <= NI_M_PLL_MAX_MULTIPLIER; ++mult) {
4898 unsigned new_period_ps = 4759 unsigned int new_period_ps =
4899 (reference_picosec * div) / mult; 4760 (reference_picosec * div) / mult;
4900 if (abs(new_period_ps - target_picosec) < 4761 if (abs(new_period_ps - target_picosec) <
4901 abs(best_period_picosec - target_picosec)) { 4762 abs(best_period_picosec - target_picosec)) {
@@ -4910,29 +4771,33 @@ static int ni_mseries_get_pll_parameters(unsigned reference_period_ns,
4910 4771
4911 *freq_divider = best_div; 4772 *freq_divider = best_div;
4912 *freq_multiplier = best_mult; 4773 *freq_multiplier = best_mult;
4913 *actual_period_ns = DIV_ROUND_CLOSEST(best_period_picosec * 4774 /* return the actual period (* fudge factor for 80 to 20 MHz) */
4914 fudge_factor_80_to_20Mhz, 4775 *actual_period_ns = DIV_ROUND_CLOSEST(best_period_picosec * 4,
4915 pico_per_nano); 4776 pico_per_nano);
4916 return 0; 4777 return 0;
4917} 4778}
4918 4779
4919static int ni_mseries_set_pll_master_clock(struct comedi_device *dev, 4780static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
4920 unsigned source, unsigned period_ns) 4781 unsigned int source,
4782 unsigned int period_ns)
4921{ 4783{
4922 struct ni_private *devpriv = dev->private; 4784 struct ni_private *devpriv = dev->private;
4923 static const unsigned min_period_ns = 50; 4785 static const unsigned int min_period_ns = 50;
4924 static const unsigned max_period_ns = 1000; 4786 static const unsigned int max_period_ns = 1000;
4925 static const unsigned timeout = 1000; 4787 static const unsigned int timeout = 1000;
4926 unsigned pll_control_bits; 4788 unsigned int pll_control_bits;
4927 unsigned freq_divider; 4789 unsigned int freq_divider;
4928 unsigned freq_multiplier; 4790 unsigned int freq_multiplier;
4929 unsigned rtsi; 4791 unsigned int rtsi;
4930 unsigned i; 4792 unsigned int i;
4931 int retval; 4793 int retval;
4932 4794
4933 if (source == NI_MIO_PLL_PXI10_CLOCK) 4795 if (source == NI_MIO_PLL_PXI10_CLOCK)
4934 period_ns = 100; 4796 period_ns = 100;
4935 /* these limits are somewhat arbitrary, but NI advertises 1 to 20MHz range so we'll use that */ 4797 /*
4798 * These limits are somewhat arbitrary, but NI advertises 1 to 20MHz
4799 * range so we'll use that.
4800 */
4936 if (period_ns < min_period_ns || period_ns > max_period_ns) { 4801 if (period_ns < min_period_ns || period_ns > max_period_ns) {
4937 dev_err(dev->class_dev, 4802 dev_err(dev->class_dev,
4938 "%s: you must specify an input clock frequency between %i and %i nanosec for the phased-lock loop\n", 4803 "%s: you must specify an input clock frequency between %i and %i nanosec for the phased-lock loop\n",
@@ -4982,7 +4847,7 @@ static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
4982 4847
4983 ni_writew(dev, pll_control_bits, NI_M_PLL_CTRL_REG); 4848 ni_writew(dev, pll_control_bits, NI_M_PLL_CTRL_REG);
4984 devpriv->clock_source = source; 4849 devpriv->clock_source = source;
4985 /* it seems to typically take a few hundred microseconds for PLL to lock */ 4850 /* it takes a few hundred microseconds for PLL to lock */
4986 for (i = 0; i < timeout; ++i) { 4851 for (i = 0; i < timeout; ++i) {
4987 if (ni_readw(dev, NI_M_PLL_STATUS_REG) & NI_M_PLL_STATUS_LOCKED) 4852 if (ni_readw(dev, NI_M_PLL_STATUS_REG) & NI_M_PLL_STATUS_LOCKED)
4988 break; 4853 break;
@@ -4998,7 +4863,7 @@ static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
4998} 4863}
4999 4864
5000static int ni_set_master_clock(struct comedi_device *dev, 4865static int ni_set_master_clock(struct comedi_device *dev,
5001 unsigned source, unsigned period_ns) 4866 unsigned int source, unsigned int period_ns)
5002{ 4867{
5003 struct ni_private *devpriv = dev->private; 4868 struct ni_private *devpriv = dev->private;
5004 4869
@@ -5043,7 +4908,7 @@ static int ni_set_master_clock(struct comedi_device *dev,
5043} 4908}
5044 4909
5045static int ni_valid_rtsi_output_source(struct comedi_device *dev, 4910static int ni_valid_rtsi_output_source(struct comedi_device *dev,
5046 unsigned chan, unsigned source) 4911 unsigned int chan, unsigned int source)
5047{ 4912{
5048 struct ni_private *devpriv = dev->private; 4913 struct ni_private *devpriv = dev->private;
5049 4914
@@ -5078,7 +4943,7 @@ static int ni_valid_rtsi_output_source(struct comedi_device *dev,
5078} 4943}
5079 4944
5080static int ni_set_rtsi_routing(struct comedi_device *dev, 4945static int ni_set_rtsi_routing(struct comedi_device *dev,
5081 unsigned chan, unsigned src) 4946 unsigned int chan, unsigned int src)
5082{ 4947{
5083 struct ni_private *devpriv = dev->private; 4948 struct ni_private *devpriv = dev->private;
5084 4949
@@ -5098,7 +4963,8 @@ static int ni_set_rtsi_routing(struct comedi_device *dev,
5098 return 2; 4963 return 2;
5099} 4964}
5100 4965
5101static unsigned ni_get_rtsi_routing(struct comedi_device *dev, unsigned chan) 4966static unsigned int ni_get_rtsi_routing(struct comedi_device *dev,
4967 unsigned int chan)
5102{ 4968{
5103 struct ni_private *devpriv = dev->private; 4969 struct ni_private *devpriv = dev->private;
5104 4970
@@ -5262,10 +5128,10 @@ static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
5262static irqreturn_t ni_E_interrupt(int irq, void *d) 5128static irqreturn_t ni_E_interrupt(int irq, void *d)
5263{ 5129{
5264 struct comedi_device *dev = d; 5130 struct comedi_device *dev = d;
5131 struct comedi_subdevice *s_ai = dev->read_subdev;
5132 struct comedi_subdevice *s_ao = dev->write_subdev;
5265 unsigned short a_status; 5133 unsigned short a_status;
5266 unsigned short b_status; 5134 unsigned short b_status;
5267 unsigned int ai_mite_status = 0;
5268 unsigned int ao_mite_status = 0;
5269 unsigned long flags; 5135 unsigned long flags;
5270#ifdef PCIDMA 5136#ifdef PCIDMA
5271 struct ni_private *devpriv = dev->private; 5137 struct ni_private *devpriv = dev->private;
@@ -5273,7 +5139,7 @@ static irqreturn_t ni_E_interrupt(int irq, void *d)
5273 5139
5274 if (!dev->attached) 5140 if (!dev->attached)
5275 return IRQ_NONE; 5141 return IRQ_NONE;
5276 smp_mb(); /* make sure dev->attached is checked before handler does anything else. */ 5142 smp_mb(); /* make sure dev->attached is checked */
5277 5143
5278 /* lock to avoid race with comedi_poll */ 5144 /* lock to avoid race with comedi_poll */
5279 spin_lock_irqsave(&dev->spinlock, flags); 5145 spin_lock_irqsave(&dev->spinlock, flags);
@@ -5284,34 +5150,33 @@ static irqreturn_t ni_E_interrupt(int irq, void *d)
5284 unsigned long flags_too; 5150 unsigned long flags_too;
5285 5151
5286 spin_lock_irqsave(&devpriv->mite_channel_lock, flags_too); 5152 spin_lock_irqsave(&devpriv->mite_channel_lock, flags_too);
5287 if (devpriv->ai_mite_chan) { 5153 if (s_ai && devpriv->ai_mite_chan)
5288 ai_mite_status = mite_get_status(devpriv->ai_mite_chan); 5154 mite_ack_linkc(devpriv->ai_mite_chan, s_ai, false);
5289 if (ai_mite_status & CHSR_LINKC) 5155 if (s_ao && devpriv->ao_mite_chan)
5290 writel(CHOR_CLRLC, 5156 mite_ack_linkc(devpriv->ao_mite_chan, s_ao, false);
5291 devpriv->mite->mite_io_addr +
5292 MITE_CHOR(devpriv->
5293 ai_mite_chan->channel));
5294 }
5295 if (devpriv->ao_mite_chan) {
5296 ao_mite_status = mite_get_status(devpriv->ao_mite_chan);
5297 if (ao_mite_status & CHSR_LINKC)
5298 writel(CHOR_CLRLC,
5299 devpriv->mite->mite_io_addr +
5300 MITE_CHOR(devpriv->
5301 ao_mite_chan->channel));
5302 }
5303 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags_too); 5157 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags_too);
5304 } 5158 }
5305#endif 5159#endif
5306 ack_a_interrupt(dev, a_status); 5160 ack_a_interrupt(dev, a_status);
5307 ack_b_interrupt(dev, b_status); 5161 ack_b_interrupt(dev, b_status);
5308 if ((a_status & NISTC_AI_STATUS1_INTA) || (ai_mite_status & CHSR_INT)) 5162 if (s_ai) {
5309 handle_a_interrupt(dev, a_status, ai_mite_status); 5163 if (a_status & NISTC_AI_STATUS1_INTA)
5310 if ((b_status & NISTC_AO_STATUS1_INTB) || (ao_mite_status & CHSR_INT)) 5164 handle_a_interrupt(dev, s_ai, a_status);
5311 handle_b_interrupt(dev, b_status, ao_mite_status); 5165 /* handle any interrupt or dma events */
5166 comedi_handle_events(dev, s_ai);
5167 }
5168 if (s_ao) {
5169 if (b_status & NISTC_AO_STATUS1_INTB)
5170 handle_b_interrupt(dev, s_ao, b_status);
5171 /* handle any interrupt or dma events */
5172 comedi_handle_events(dev, s_ao);
5173 }
5312 handle_gpct_interrupt(dev, 0); 5174 handle_gpct_interrupt(dev, 0);
5313 handle_gpct_interrupt(dev, 1); 5175 handle_gpct_interrupt(dev, 1);
5314 handle_cdio_interrupt(dev); 5176#ifdef PCIDMA
5177 if (devpriv->is_m_series)
5178 handle_cdio_interrupt(dev);
5179#endif
5315 5180
5316 spin_unlock_irqrestore(&dev->spinlock, flags); 5181 spin_unlock_irqrestore(&dev->spinlock, flags);
5317 return IRQ_HANDLED; 5182 return IRQ_HANDLED;
@@ -5333,7 +5198,7 @@ static int ni_alloc_private(struct comedi_device *dev)
5333} 5198}
5334 5199
5335static int ni_E_init(struct comedi_device *dev, 5200static int ni_E_init(struct comedi_device *dev,
5336 unsigned interrupt_pin, unsigned irq_polarity) 5201 unsigned int interrupt_pin, unsigned int irq_polarity)
5337{ 5202{
5338 const struct ni_board_struct *board = dev->board_ptr; 5203 const struct ni_board_struct *board = dev->board_ptr;
5339 struct ni_private *devpriv = dev->private; 5204 struct ni_private *devpriv = dev->private;
@@ -5450,6 +5315,7 @@ static int ni_E_init(struct comedi_device *dev,
5450 s->maxdata = 1; 5315 s->maxdata = 1;
5451 s->range_table = &range_digital; 5316 s->range_table = &range_digital;
5452 if (devpriv->is_m_series) { 5317 if (devpriv->is_m_series) {
5318#ifdef PCIDMA
5453 s->subdev_flags |= SDF_LSAMPL; 5319 s->subdev_flags |= SDF_LSAMPL;
5454 s->insn_bits = ni_m_series_dio_insn_bits; 5320 s->insn_bits = ni_m_series_dio_insn_bits;
5455 s->insn_config = ni_m_series_dio_insn_config; 5321 s->insn_config = ni_m_series_dio_insn_config;
@@ -5469,6 +5335,7 @@ static int ni_E_init(struct comedi_device *dev,
5469 NI_M_CDI_CMD_RESET, 5335 NI_M_CDI_CMD_RESET,
5470 NI_M_CDIO_CMD_REG); 5336 NI_M_CDIO_CMD_REG);
5471 ni_writel(dev, s->io_bits, NI_M_DIO_DIR_REG); 5337 ni_writel(dev, s->io_bits, NI_M_DIO_DIR_REG);
5338#endif /* PCIDMA */
5472 } else { 5339 } else {
5473 s->insn_bits = ni_dio_insn_bits; 5340 s->insn_bits = ni_dio_insn_bits;
5474 s->insn_config = ni_dio_insn_config; 5341 s->insn_config = ni_dio_insn_config;
@@ -5675,8 +5542,6 @@ static void mio_common_detach(struct comedi_device *dev)
5675{ 5542{
5676 struct ni_private *devpriv = dev->private; 5543 struct ni_private *devpriv = dev->private;
5677 5544
5678 if (devpriv) { 5545 if (devpriv)
5679 if (devpriv->counter_dev) 5546 ni_gpct_device_destroy(devpriv->counter_dev);
5680 ni_gpct_device_destroy(devpriv->counter_dev);
5681 }
5682} 5547}
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 7112c3fec8bb..02a532990979 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -284,12 +284,12 @@ static const struct nidio_board nidio_boards[] = {
284}; 284};
285 285
286struct nidio96_private { 286struct nidio96_private {
287 struct mite_struct *mite; 287 struct mite *mite;
288 int boardtype; 288 int boardtype;
289 int dio; 289 int dio;
290 unsigned short OpModeBits; 290 unsigned short OpModeBits;
291 struct mite_channel *di_mite_chan; 291 struct mite_channel *di_mite_chan;
292 struct mite_dma_descriptor_ring *di_mite_ring; 292 struct mite_ring *di_mite_ring;
293 spinlock_t mite_channel_lock; 293 spinlock_t mite_channel_lock;
294}; 294};
295 295
@@ -324,8 +324,6 @@ static void ni_pcidio_release_di_mite_channel(struct comedi_device *dev)
324 324
325 spin_lock_irqsave(&devpriv->mite_channel_lock, flags); 325 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
326 if (devpriv->di_mite_chan) { 326 if (devpriv->di_mite_chan) {
327 mite_dma_disarm(devpriv->di_mite_chan);
328 mite_dma_reset(devpriv->di_mite_chan);
329 mite_release_channel(devpriv->di_mite_chan); 327 mite_release_channel(devpriv->di_mite_chan);
330 devpriv->di_mite_chan = NULL; 328 devpriv->di_mite_chan = NULL;
331 writeb(primary_DMAChannel_bits(0) | 329 writeb(primary_DMAChannel_bits(0) |
@@ -370,7 +368,7 @@ static int ni_pcidio_poll(struct comedi_device *dev, struct comedi_subdevice *s)
370 spin_lock_irqsave(&dev->spinlock, irq_flags); 368 spin_lock_irqsave(&dev->spinlock, irq_flags);
371 spin_lock(&devpriv->mite_channel_lock); 369 spin_lock(&devpriv->mite_channel_lock);
372 if (devpriv->di_mite_chan) 370 if (devpriv->di_mite_chan)
373 mite_sync_input_dma(devpriv->di_mite_chan, s); 371 mite_sync_dma(devpriv->di_mite_chan, s);
374 spin_unlock(&devpriv->mite_channel_lock); 372 spin_unlock(&devpriv->mite_channel_lock);
375 count = comedi_buf_n_bytes_ready(s); 373 count = comedi_buf_n_bytes_ready(s);
376 spin_unlock_irqrestore(&dev->spinlock, irq_flags); 374 spin_unlock_irqrestore(&dev->spinlock, irq_flags);
@@ -383,12 +381,10 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
383 struct nidio96_private *devpriv = dev->private; 381 struct nidio96_private *devpriv = dev->private;
384 struct comedi_subdevice *s = dev->read_subdev; 382 struct comedi_subdevice *s = dev->read_subdev;
385 struct comedi_async *async = s->async; 383 struct comedi_async *async = s->async;
386 struct mite_struct *mite = devpriv->mite;
387 unsigned int auxdata; 384 unsigned int auxdata;
388 int flags; 385 int flags;
389 int status; 386 int status;
390 int work = 0; 387 int work = 0;
391 unsigned int m_status = 0;
392 388
393 /* interrupcions parasites */ 389 /* interrupcions parasites */
394 if (!dev->attached) { 390 if (!dev->attached) {
@@ -403,24 +399,9 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
403 flags = readb(dev->mmio + Group_1_Flags); 399 flags = readb(dev->mmio + Group_1_Flags);
404 400
405 spin_lock(&devpriv->mite_channel_lock); 401 spin_lock(&devpriv->mite_channel_lock);
406 if (devpriv->di_mite_chan) 402 if (devpriv->di_mite_chan) {
407 m_status = mite_get_status(devpriv->di_mite_chan); 403 mite_ack_linkc(devpriv->di_mite_chan, s, false);
408 404 /* XXX need to byteswap sync'ed dma */
409 if (m_status & CHSR_INT) {
410 if (m_status & CHSR_LINKC) {
411 writel(CHOR_CLRLC,
412 mite->mite_io_addr +
413 MITE_CHOR(devpriv->di_mite_chan->channel));
414 mite_sync_input_dma(devpriv->di_mite_chan, s);
415 /* XXX need to byteswap */
416 }
417 if (m_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_DRDY |
418 CHSR_DRQ1 | CHSR_MRDY)) {
419 dev_dbg(dev->class_dev,
420 "unknown mite interrupt, disabling IRQ\n");
421 async->events |= COMEDI_CB_ERROR;
422 disable_irq(dev->irq);
423 }
424 } 405 }
425 spin_unlock(&devpriv->mite_channel_lock); 406 spin_unlock(&devpriv->mite_channel_lock);
426 407
@@ -916,14 +897,10 @@ static int nidio_auto_attach(struct comedi_device *dev,
916 897
917 spin_lock_init(&devpriv->mite_channel_lock); 898 spin_lock_init(&devpriv->mite_channel_lock);
918 899
919 devpriv->mite = mite_alloc(pcidev); 900 devpriv->mite = mite_attach(dev, false); /* use win0 */
920 if (!devpriv->mite) 901 if (!devpriv->mite)
921 return -ENOMEM; 902 return -ENOMEM;
922 903
923 ret = mite_setup(dev, devpriv->mite);
924 if (ret < 0)
925 return ret;
926
927 devpriv->di_mite_ring = mite_alloc_ring(devpriv->mite); 904 devpriv->di_mite_ring = mite_alloc_ring(devpriv->mite);
928 if (!devpriv->di_mite_ring) 905 if (!devpriv->di_mite_ring)
929 return -ENOMEM; 906 return -ENOMEM;
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 231e37d6b7c6..344aa343e5e1 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1061,6 +1061,8 @@ static int pcimio_dio_change(struct comedi_device *dev,
1061static void m_series_init_eeprom_buffer(struct comedi_device *dev) 1061static void m_series_init_eeprom_buffer(struct comedi_device *dev)
1062{ 1062{
1063 struct ni_private *devpriv = dev->private; 1063 struct ni_private *devpriv = dev->private;
1064 struct mite *mite = devpriv->mite;
1065 resource_size_t daq_phys_addr;
1064 static const int Start_Cal_EEPROM = 0x400; 1066 static const int Start_Cal_EEPROM = 0x400;
1065 static const unsigned window_size = 10; 1067 static const unsigned window_size = 10;
1066 static const int serial_number_eeprom_offset = 0x4; 1068 static const int serial_number_eeprom_offset = 0x4;
@@ -1070,15 +1072,17 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
1070 unsigned old_iodwcr1_bits; 1072 unsigned old_iodwcr1_bits;
1071 int i; 1073 int i;
1072 1074
1073 old_iodwbsr_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR); 1075 /* IO Window 1 needs to be temporarily mapped to read the eeprom */
1074 old_iodwbsr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR_1); 1076 daq_phys_addr = pci_resource_start(mite->pcidev, 1);
1075 old_iodwcr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWCR_1); 1077
1076 writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWBSR); 1078 old_iodwbsr_bits = readl(mite->mmio + MITE_IODWBSR);
1077 writel(((0x80 | window_size) | devpriv->mite->daq_phys_addr), 1079 old_iodwbsr1_bits = readl(mite->mmio + MITE_IODWBSR_1);
1078 devpriv->mite->mite_io_addr + MITE_IODWBSR_1); 1080 old_iodwcr1_bits = readl(mite->mmio + MITE_IODWCR_1);
1079 writel(0x1 | old_iodwcr1_bits, 1081 writel(0x0, mite->mmio + MITE_IODWBSR);
1080 devpriv->mite->mite_io_addr + MITE_IODWCR_1); 1082 writel(((0x80 | window_size) | daq_phys_addr),
1081 writel(0xf, devpriv->mite->mite_io_addr + 0x30); 1083 mite->mmio + MITE_IODWBSR_1);
1084 writel(0x1 | old_iodwcr1_bits, mite->mmio + MITE_IODWCR_1);
1085 writel(0xf, mite->mmio + 0x30);
1082 1086
1083 BUG_ON(serial_number_eeprom_length > sizeof(devpriv->serial_number)); 1087 BUG_ON(serial_number_eeprom_length > sizeof(devpriv->serial_number));
1084 for (i = 0; i < serial_number_eeprom_length; ++i) { 1088 for (i = 0; i < serial_number_eeprom_length; ++i) {
@@ -1090,10 +1094,10 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
1090 for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i) 1094 for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
1091 devpriv->eeprom_buffer[i] = ni_readb(dev, Start_Cal_EEPROM + i); 1095 devpriv->eeprom_buffer[i] = ni_readb(dev, Start_Cal_EEPROM + i);
1092 1096
1093 writel(old_iodwbsr1_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR_1); 1097 writel(old_iodwbsr1_bits, mite->mmio + MITE_IODWBSR_1);
1094 writel(old_iodwbsr_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR); 1098 writel(old_iodwbsr_bits, mite->mmio + MITE_IODWBSR);
1095 writel(old_iodwcr1_bits, devpriv->mite->mite_io_addr + MITE_IODWCR_1); 1099 writel(old_iodwcr1_bits, mite->mmio + MITE_IODWCR_1);
1096 writel(0x0, devpriv->mite->mite_io_addr + 0x30); 1100 writel(0x0, mite->mmio + 0x30);
1097} 1101}
1098 1102
1099static void init_6143(struct comedi_device *dev) 1103static void init_6143(struct comedi_device *dev)
@@ -1168,7 +1172,7 @@ static int pcimio_auto_attach(struct comedi_device *dev,
1168 return ret; 1172 return ret;
1169 devpriv = dev->private; 1173 devpriv = dev->private;
1170 1174
1171 devpriv->mite = mite_alloc(pcidev); 1175 devpriv->mite = mite_attach(dev, false); /* use win0 */
1172 if (!devpriv->mite) 1176 if (!devpriv->mite)
1173 return -ENOMEM; 1177 return -ENOMEM;
1174 1178
@@ -1193,10 +1197,6 @@ static int pcimio_auto_attach(struct comedi_device *dev,
1193 if (board->reg_type == ni_reg_6713) 1197 if (board->reg_type == ni_reg_6713)
1194 devpriv->is_6713 = 1; 1198 devpriv->is_6713 = 1;
1195 1199
1196 ret = mite_setup(dev, devpriv->mite);
1197 if (ret < 0)
1198 return ret;
1199
1200 devpriv->ai_mite_ring = mite_alloc_ring(devpriv->mite); 1200 devpriv->ai_mite_ring = mite_alloc_ring(devpriv->mite);
1201 if (!devpriv->ai_mite_ring) 1201 if (!devpriv->ai_mite_ring)
1202 return -ENOMEM; 1202 return -ENOMEM;
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 1d5af25b92a8..1966519cb6e5 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -1,24 +1,23 @@
1/* 1/*
2 module/ni_stc.h 2 * Register descriptions for NI DAQ-STC chip
3 Register descriptions for NI DAQ-STC chip 3 *
4 4 * COMEDI - Linux Control and Measurement Device Interface
5 COMEDI - Linux Control and Measurement Device Interface 5 * Copyright (C) 1998-9 David A. Schleef <ds@schleef.org>
6 Copyright (C) 1998-9 David A. Schleef <ds@schleef.org> 6 *
7 7 * This program is free software; you can redistribute it and/or modify
8 This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by
9 it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or
10 the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version.
11 (at your option) any later version. 11 *
12 12 * This program is distributed in the hope that it will be useful,
13 This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details.
16 GNU General Public License for more details. 16 */
17*/
18 17
19/* 18/*
20 References: 19 * References:
21 DAQ-STC Technical Reference Manual 20 * DAQ-STC Technical Reference Manual
22*/ 21*/
23 22
24#ifndef _COMEDI_NI_STC_H 23#ifndef _COMEDI_NI_STC_H
@@ -958,7 +957,7 @@ struct ni_board_struct {
958 unsigned int ao_maxdata; 957 unsigned int ao_maxdata;
959 int ao_fifo_depth; 958 int ao_fifo_depth;
960 const struct comedi_lrange *ao_range_table; 959 const struct comedi_lrange *ao_range_table;
961 unsigned ao_speed; 960 unsigned int ao_speed;
962 961
963 int reg_type; 962 int reg_type;
964 unsigned int has_8255:1; 963 unsigned int has_8255:1;
@@ -1002,12 +1001,11 @@ struct ni_private {
1002 unsigned short ao_mode3; 1001 unsigned short ao_mode3;
1003 unsigned short ao_cmd1; 1002 unsigned short ao_cmd1;
1004 unsigned short ao_cmd2; 1003 unsigned short ao_cmd2;
1005 unsigned short ao_trigger_select;
1006 1004
1007 struct ni_gpct_device *counter_dev; 1005 struct ni_gpct_device *counter_dev;
1008 unsigned short an_trig_etc_reg; 1006 unsigned short an_trig_etc_reg;
1009 1007
1010 unsigned ai_offset[512]; 1008 unsigned int ai_offset[512];
1011 1009
1012 unsigned long serial_interval_ns; 1010 unsigned long serial_interval_ns;
1013 unsigned char serial_hw_mode; 1011 unsigned char serial_hw_mode;
@@ -1025,24 +1023,24 @@ struct ni_private {
1025 unsigned short g0_g1_select_reg; 1023 unsigned short g0_g1_select_reg;
1026 unsigned short cdio_dma_select_reg; 1024 unsigned short cdio_dma_select_reg;
1027 1025
1028 unsigned clock_ns; 1026 unsigned int clock_ns;
1029 unsigned clock_source; 1027 unsigned int clock_source;
1030 1028
1031 unsigned short pwm_up_count; 1029 unsigned short pwm_up_count;
1032 unsigned short pwm_down_count; 1030 unsigned short pwm_down_count;
1033 1031
1034 unsigned short ai_fifo_buffer[0x2000]; 1032 unsigned short ai_fifo_buffer[0x2000];
1035 uint8_t eeprom_buffer[M_SERIES_EEPROM_SIZE]; 1033 u8 eeprom_buffer[M_SERIES_EEPROM_SIZE];
1036 __be32 serial_number; 1034 __be32 serial_number;
1037 1035
1038 struct mite_struct *mite; 1036 struct mite *mite;
1039 struct mite_channel *ai_mite_chan; 1037 struct mite_channel *ai_mite_chan;
1040 struct mite_channel *ao_mite_chan; 1038 struct mite_channel *ao_mite_chan;
1041 struct mite_channel *cdo_mite_chan; 1039 struct mite_channel *cdo_mite_chan;
1042 struct mite_dma_descriptor_ring *ai_mite_ring; 1040 struct mite_ring *ai_mite_ring;
1043 struct mite_dma_descriptor_ring *ao_mite_ring; 1041 struct mite_ring *ao_mite_ring;
1044 struct mite_dma_descriptor_ring *cdo_mite_ring; 1042 struct mite_ring *cdo_mite_ring;
1045 struct mite_dma_descriptor_ring *gpct_mite_ring[NUM_GPCT]; 1043 struct mite_ring *gpct_mite_ring[NUM_GPCT];
1046 1044
1047 /* ni_pcimio board type flags (based on the boardinfo reg_type) */ 1045 /* ni_pcimio board type flags (based on the boardinfo reg_type) */
1048 unsigned int is_m_series:1; 1046 unsigned int is_m_series:1;
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index b74e44ec521a..7043eb0543f6 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -1,19 +1,18 @@
1/* 1/*
2 comedi/drivers/ni_tio.c 2 * Support for NI general purpose counters
3 Support for NI general purpose counters 3 *
4 4 * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
5 Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net> 5 *
6 6 * This program is free software; you can redistribute it and/or modify
7 This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by
8 it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or
9 the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version.
10 (at your option) any later version. 10 *
11 11 * This program is distributed in the hope that it will be useful,
12 This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details.
15 GNU General Public License for more details. 15 */
16*/
17 16
18/* 17/*
19 * Module: ni_tio 18 * Module: ni_tio
@@ -36,13 +35,10 @@
36 * DAQ 660x Register-Level Programmer Manual (NI 370505A-01) 35 * DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
37 * DAQ 6601/6602 User Manual (NI 322137B-01) 36 * DAQ 6601/6602 User Manual (NI 322137B-01)
38 * 340934b.pdf DAQ-STC reference manual 37 * 340934b.pdf DAQ-STC reference manual
38 *
39 * TODO: Support use of both banks X and Y
39 */ 40 */
40 41
41/*
42TODO:
43 Support use of both banks X and Y
44*/
45
46#include <linux/module.h> 42#include <linux/module.h>
47#include <linux/slab.h> 43#include <linux/slab.h>
48 44
@@ -115,20 +111,7 @@ TODO:
115#define NI_660X_LOGIC_LOW_GATE2_SEL 0x1f 111#define NI_660X_LOGIC_LOW_GATE2_SEL 0x1f
116#define NI_660X_MAX_UP_DOWN_PIN 7 112#define NI_660X_MAX_UP_DOWN_PIN 7
117 113
118static inline unsigned GI_ALT_SYNC(enum ni_gpct_variant variant) 114static inline unsigned int GI_PRESCALE_X2(enum ni_gpct_variant variant)
119{
120 switch (variant) {
121 case ni_gpct_variant_e_series:
122 default:
123 return 0;
124 case ni_gpct_variant_m_series:
125 return GI_M_ALT_SYNC;
126 case ni_gpct_variant_660x:
127 return GI_660X_ALT_SYNC;
128 }
129}
130
131static inline unsigned GI_PRESCALE_X2(enum ni_gpct_variant variant)
132{ 115{
133 switch (variant) { 116 switch (variant) {
134 case ni_gpct_variant_e_series: 117 case ni_gpct_variant_e_series:
@@ -141,7 +124,7 @@ static inline unsigned GI_PRESCALE_X2(enum ni_gpct_variant variant)
141 } 124 }
142} 125}
143 126
144static inline unsigned GI_PRESCALE_X8(enum ni_gpct_variant variant) 127static inline unsigned int GI_PRESCALE_X8(enum ni_gpct_variant variant)
145{ 128{
146 switch (variant) { 129 switch (variant) {
147 case ni_gpct_variant_e_series: 130 case ni_gpct_variant_e_series:
@@ -154,19 +137,6 @@ static inline unsigned GI_PRESCALE_X8(enum ni_gpct_variant variant)
154 } 137 }
155} 138}
156 139
157static inline unsigned GI_HW_ARM_SEL_MASK(enum ni_gpct_variant variant)
158{
159 switch (variant) {
160 case ni_gpct_variant_e_series:
161 default:
162 return 0;
163 case ni_gpct_variant_m_series:
164 return GI_M_HW_ARM_SEL_MASK;
165 case ni_gpct_variant_660x:
166 return GI_660X_HW_ARM_SEL_MASK;
167 }
168}
169
170static bool ni_tio_has_gate2_registers(const struct ni_gpct_device *counter_dev) 140static bool ni_tio_has_gate2_registers(const struct ni_gpct_device *counter_dev)
171{ 141{
172 switch (counter_dev->variant) { 142 switch (counter_dev->variant) {
@@ -179,17 +149,45 @@ static bool ni_tio_has_gate2_registers(const struct ni_gpct_device *counter_dev)
179 } 149 }
180} 150}
181 151
152/**
153 * ni_tio_write() - Write a TIO register using the driver provided callback.
154 * @counter: struct ni_gpct counter.
155 * @value: the value to write
156 * @reg: the register to write.
157 */
158void ni_tio_write(struct ni_gpct *counter, unsigned int value,
159 enum ni_gpct_register reg)
160{
161 if (reg < NITIO_NUM_REGS)
162 counter->counter_dev->write(counter, value, reg);
163}
164EXPORT_SYMBOL_GPL(ni_tio_write);
165
166/**
167 * ni_tio_read() - Read a TIO register using the driver provided callback.
168 * @counter: struct ni_gpct counter.
169 * @reg: the register to read.
170 */
171unsigned int ni_tio_read(struct ni_gpct *counter, enum ni_gpct_register reg)
172{
173 if (reg < NITIO_NUM_REGS)
174 return counter->counter_dev->read(counter, reg);
175 return 0;
176}
177EXPORT_SYMBOL_GPL(ni_tio_read);
178
182static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter) 179static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter)
183{ 180{
184 unsigned cidx = counter->counter_index; 181 unsigned int cidx = counter->counter_index;
185 182
186 write_register(counter, GI_RESET(cidx), NITIO_RESET_REG(cidx)); 183 ni_tio_write(counter, GI_RESET(cidx), NITIO_RESET_REG(cidx));
187} 184}
188 185
189static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter, 186static int ni_tio_clock_period_ps(const struct ni_gpct *counter,
190 unsigned generic_clock_source) 187 unsigned int generic_clock_source,
188 u64 *period_ps)
191{ 189{
192 uint64_t clock_period_ps; 190 u64 clock_period_ps;
193 191
194 switch (generic_clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) { 192 switch (generic_clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
195 case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS: 193 case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
@@ -222,19 +220,80 @@ static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
222 clock_period_ps *= 8; 220 clock_period_ps *= 8;
223 break; 221 break;
224 default: 222 default:
225 BUG(); 223 return -EINVAL;
226 break;
227 } 224 }
228 return clock_period_ps; 225 *period_ps = clock_period_ps;
226 return 0;
229} 227}
230 228
231static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter) 229static void ni_tio_set_bits_transient(struct ni_gpct *counter,
230 enum ni_gpct_register reg,
231 unsigned int mask, unsigned int value,
232 unsigned int transient)
232{ 233{
233 struct ni_gpct_device *counter_dev = counter->counter_dev; 234 struct ni_gpct_device *counter_dev = counter->counter_dev;
234 unsigned cidx = counter->counter_index; 235 unsigned long flags;
235 const unsigned counting_mode_bits = 236
237 if (reg < NITIO_NUM_REGS) {
238 spin_lock_irqsave(&counter_dev->regs_lock, flags);
239 counter_dev->regs[reg] &= ~mask;
240 counter_dev->regs[reg] |= (value & mask);
241 ni_tio_write(counter, counter_dev->regs[reg] | transient, reg);
242 mmiowb();
243 spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
244 }
245}
246
247/**
248 * ni_tio_set_bits() - Safely write a counter register.
249 * @counter: struct ni_gpct counter.
250 * @reg: the register to write.
251 * @mask: the bits to change.
252 * @value: the new bits value.
253 *
254 * Used to write to, and update the software copy, a register whose bits may
255 * be twiddled in interrupt context, or whose software copy may be read in
256 * interrupt context.
257 */
258void ni_tio_set_bits(struct ni_gpct *counter, enum ni_gpct_register reg,
259 unsigned int mask, unsigned int value)
260{
261 ni_tio_set_bits_transient(counter, reg, mask, value, 0x0);
262}
263EXPORT_SYMBOL_GPL(ni_tio_set_bits);
264
265/**
266 * ni_tio_get_soft_copy() - Safely read the software copy of a counter register.
267 * @counter: struct ni_gpct counter.
268 * @reg: the register to read.
269 *
270 * Used to get the software copy of a register whose bits might be modified
271 * in interrupt context, or whose software copy might need to be read in
272 * interrupt context.
273 */
274unsigned int ni_tio_get_soft_copy(const struct ni_gpct *counter,
275 enum ni_gpct_register reg)
276{
277 struct ni_gpct_device *counter_dev = counter->counter_dev;
278 unsigned int value = 0;
279 unsigned long flags;
280
281 if (reg < NITIO_NUM_REGS) {
282 spin_lock_irqsave(&counter_dev->regs_lock, flags);
283 value = counter_dev->regs[reg];
284 spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
285 }
286 return value;
287}
288EXPORT_SYMBOL_GPL(ni_tio_get_soft_copy);
289
290static unsigned int ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
291{
292 struct ni_gpct_device *counter_dev = counter->counter_dev;
293 unsigned int cidx = counter->counter_index;
294 unsigned int counting_mode_bits =
236 ni_tio_get_soft_copy(counter, NITIO_CNT_MODE_REG(cidx)); 295 ni_tio_get_soft_copy(counter, NITIO_CNT_MODE_REG(cidx));
237 unsigned bits = 0; 296 unsigned int bits = 0;
238 297
239 if (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) & 298 if (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) &
240 GI_SRC_POL_INVERT) 299 GI_SRC_POL_INVERT)
@@ -246,14 +305,15 @@ static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
246 return bits; 305 return bits;
247} 306}
248 307
249static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter) 308static int ni_m_series_clock_src_select(const struct ni_gpct *counter,
309 unsigned int *clk_src)
250{ 310{
251 struct ni_gpct_device *counter_dev = counter->counter_dev; 311 struct ni_gpct_device *counter_dev = counter->counter_dev;
252 unsigned cidx = counter->counter_index; 312 unsigned int cidx = counter->counter_index;
253 const unsigned second_gate_reg = NITIO_GATE2_REG(cidx); 313 unsigned int second_gate_reg = NITIO_GATE2_REG(cidx);
254 unsigned clock_source = 0; 314 unsigned int clock_source = 0;
255 unsigned src; 315 unsigned int src;
256 unsigned i; 316 unsigned int i;
257 317
258 src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter, 318 src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter,
259 NITIO_INPUT_SEL_REG(cidx))); 319 NITIO_INPUT_SEL_REG(cidx)));
@@ -304,19 +364,20 @@ static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter)
304 } 364 }
305 if (i <= NI_M_MAX_PFI_CHAN) 365 if (i <= NI_M_MAX_PFI_CHAN)
306 break; 366 break;
307 BUG(); 367 return -EINVAL;
308 break;
309 } 368 }
310 clock_source |= ni_tio_clock_src_modifiers(counter); 369 clock_source |= ni_tio_clock_src_modifiers(counter);
311 return clock_source; 370 *clk_src = clock_source;
371 return 0;
312} 372}
313 373
314static unsigned ni_660x_clock_src_select(const struct ni_gpct *counter) 374static int ni_660x_clock_src_select(const struct ni_gpct *counter,
375 unsigned int *clk_src)
315{ 376{
316 unsigned clock_source = 0; 377 unsigned int clock_source = 0;
317 unsigned cidx = counter->counter_index; 378 unsigned int cidx = counter->counter_index;
318 unsigned src; 379 unsigned int src;
319 unsigned i; 380 unsigned int i;
320 381
321 src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter, 382 src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter,
322 NITIO_INPUT_SEL_REG(cidx))); 383 NITIO_INPUT_SEL_REG(cidx)));
@@ -361,78 +422,88 @@ static unsigned ni_660x_clock_src_select(const struct ni_gpct *counter)
361 } 422 }
362 if (i <= NI_660X_MAX_SRC_PIN) 423 if (i <= NI_660X_MAX_SRC_PIN)
363 break; 424 break;
364 BUG(); 425 return -EINVAL;
365 break;
366 } 426 }
367 clock_source |= ni_tio_clock_src_modifiers(counter); 427 clock_source |= ni_tio_clock_src_modifiers(counter);
368 return clock_source; 428 *clk_src = clock_source;
429 return 0;
369} 430}
370 431
371static unsigned ni_tio_generic_clock_src_select(const struct ni_gpct *counter) 432static int ni_tio_generic_clock_src_select(const struct ni_gpct *counter,
433 unsigned int *clk_src)
372{ 434{
373 switch (counter->counter_dev->variant) { 435 switch (counter->counter_dev->variant) {
374 case ni_gpct_variant_e_series: 436 case ni_gpct_variant_e_series:
375 case ni_gpct_variant_m_series: 437 case ni_gpct_variant_m_series:
376 default: 438 default:
377 return ni_m_series_clock_src_select(counter); 439 return ni_m_series_clock_src_select(counter, clk_src);
378 case ni_gpct_variant_660x: 440 case ni_gpct_variant_660x:
379 return ni_660x_clock_src_select(counter); 441 return ni_660x_clock_src_select(counter, clk_src);
380 } 442 }
381} 443}
382 444
383static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync) 445static void ni_tio_set_sync_mode(struct ni_gpct *counter)
384{ 446{
385 struct ni_gpct_device *counter_dev = counter->counter_dev; 447 struct ni_gpct_device *counter_dev = counter->counter_dev;
386 unsigned cidx = counter->counter_index; 448 unsigned int cidx = counter->counter_index;
387 const unsigned counting_mode_reg = NITIO_CNT_MODE_REG(cidx); 449 static const u64 min_normal_sync_period_ps = 25000;
388 static const uint64_t min_normal_sync_period_ps = 25000; 450 unsigned int mask = 0;
389 unsigned mode; 451 unsigned int bits = 0;
390 uint64_t clock_period_ps; 452 unsigned int reg;
391 453 unsigned int mode;
392 if (ni_tio_counting_mode_registers_present(counter_dev) == 0) 454 unsigned int clk_src;
455 u64 ps;
456 bool force_alt_sync;
457
458 /* only m series and 660x variants have counting mode registers */
459 switch (counter_dev->variant) {
460 case ni_gpct_variant_e_series:
461 default:
393 return; 462 return;
463 case ni_gpct_variant_m_series:
464 mask = GI_M_ALT_SYNC;
465 break;
466 case ni_gpct_variant_660x:
467 mask = GI_660X_ALT_SYNC;
468 break;
469 }
394 470
395 mode = ni_tio_get_soft_copy(counter, counting_mode_reg); 471 reg = NITIO_CNT_MODE_REG(cidx);
472 mode = ni_tio_get_soft_copy(counter, reg);
396 switch (mode & GI_CNT_MODE_MASK) { 473 switch (mode & GI_CNT_MODE_MASK) {
397 case GI_CNT_MODE_QUADX1: 474 case GI_CNT_MODE_QUADX1:
398 case GI_CNT_MODE_QUADX2: 475 case GI_CNT_MODE_QUADX2:
399 case GI_CNT_MODE_QUADX4: 476 case GI_CNT_MODE_QUADX4:
400 case GI_CNT_MODE_SYNC_SRC: 477 case GI_CNT_MODE_SYNC_SRC:
401 force_alt_sync = 1; 478 force_alt_sync = true;
402 break; 479 break;
403 default: 480 default:
481 force_alt_sync = false;
404 break; 482 break;
405 } 483 }
406 484
407 clock_period_ps = ni_tio_clock_period_ps(counter, 485 ni_tio_generic_clock_src_select(counter, &clk_src);
408 ni_tio_generic_clock_src_select(counter)); 486 ni_tio_clock_period_ps(counter, clk_src, &ps);
409 487
410 /* 488 /*
411 * It's not clear what we should do if clock_period is unknown, so we 489 * It's not clear what we should do if clock_period is unknown, so we
412 * are not using the alt sync bit in that case, but allow the caller 490 * are not using the alt sync bit in that case.
413 * to decide by using the force_alt_sync parameter.
414 */ 491 */
415 if (force_alt_sync || 492 if (force_alt_sync || (ps && ps < min_normal_sync_period_ps))
416 (clock_period_ps && clock_period_ps < min_normal_sync_period_ps)) { 493 bits = mask;
417 ni_tio_set_bits(counter, counting_mode_reg, 494
418 GI_ALT_SYNC(counter_dev->variant), 495 ni_tio_set_bits(counter, reg, mask, bits);
419 GI_ALT_SYNC(counter_dev->variant));
420 } else {
421 ni_tio_set_bits(counter, counting_mode_reg,
422 GI_ALT_SYNC(counter_dev->variant),
423 0x0);
424 }
425} 496}
426 497
427static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode) 498static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned int mode)
428{ 499{
429 struct ni_gpct_device *counter_dev = counter->counter_dev; 500 struct ni_gpct_device *counter_dev = counter->counter_dev;
430 unsigned cidx = counter->counter_index; 501 unsigned int cidx = counter->counter_index;
431 unsigned mode_reg_mask; 502 unsigned int mode_reg_mask;
432 unsigned mode_reg_values; 503 unsigned int mode_reg_values;
433 unsigned input_select_bits = 0; 504 unsigned int input_select_bits = 0;
434 /* these bits map directly on to the mode register */ 505 /* these bits map directly on to the mode register */
435 static const unsigned mode_reg_direct_mask = 506 static const unsigned int mode_reg_direct_mask =
436 NI_GPCT_GATE_ON_BOTH_EDGES_BIT | NI_GPCT_EDGE_GATE_MODE_MASK | 507 NI_GPCT_GATE_ON_BOTH_EDGES_BIT | NI_GPCT_EDGE_GATE_MODE_MASK |
437 NI_GPCT_STOP_MODE_MASK | NI_GPCT_OUTPUT_MODE_MASK | 508 NI_GPCT_STOP_MODE_MASK | NI_GPCT_OUTPUT_MODE_MASK |
438 NI_GPCT_HARDWARE_DISARM_MASK | NI_GPCT_LOADING_ON_TC_BIT | 509 NI_GPCT_HARDWARE_DISARM_MASK | NI_GPCT_LOADING_ON_TC_BIT |
@@ -458,7 +529,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
458 mode_reg_mask, mode_reg_values); 529 mode_reg_mask, mode_reg_values);
459 530
460 if (ni_tio_counting_mode_registers_present(counter_dev)) { 531 if (ni_tio_counting_mode_registers_present(counter_dev)) {
461 unsigned bits = 0; 532 unsigned int bits = 0;
462 533
463 bits |= GI_CNT_MODE(mode >> NI_GPCT_COUNTING_MODE_SHIFT); 534 bits |= GI_CNT_MODE(mode >> NI_GPCT_COUNTING_MODE_SHIFT);
464 bits |= GI_INDEX_PHASE((mode >> NI_GPCT_INDEX_PHASE_BITSHIFT)); 535 bits |= GI_INDEX_PHASE((mode >> NI_GPCT_INDEX_PHASE_BITSHIFT));
@@ -467,7 +538,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
467 ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx), 538 ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
468 GI_CNT_MODE_MASK | GI_INDEX_PHASE_MASK | 539 GI_CNT_MODE_MASK | GI_INDEX_PHASE_MASK |
469 GI_INDEX_MODE, bits); 540 GI_INDEX_MODE, bits);
470 ni_tio_set_sync_mode(counter, 0); 541 ni_tio_set_sync_mode(counter);
471 } 542 }
472 543
473 ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_CNT_DIR_MASK, 544 ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_CNT_DIR_MASK,
@@ -484,65 +555,68 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
484 return 0; 555 return 0;
485} 556}
486 557
487int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger) 558int ni_tio_arm(struct ni_gpct *counter, bool arm, unsigned int start_trigger)
488{ 559{
489 struct ni_gpct_device *counter_dev = counter->counter_dev; 560 struct ni_gpct_device *counter_dev = counter->counter_dev;
490 unsigned cidx = counter->counter_index; 561 unsigned int cidx = counter->counter_index;
491 unsigned command_transient_bits = 0; 562 unsigned int transient_bits = 0;
492 563
493 if (arm) { 564 if (arm) {
565 unsigned int mask = 0;
566 unsigned int bits = 0;
567
568 /* only m series and 660x have counting mode registers */
569 switch (counter_dev->variant) {
570 case ni_gpct_variant_e_series:
571 default:
572 break;
573 case ni_gpct_variant_m_series:
574 mask = GI_M_HW_ARM_SEL_MASK;
575 break;
576 case ni_gpct_variant_660x:
577 mask = GI_660X_HW_ARM_SEL_MASK;
578 break;
579 }
580
494 switch (start_trigger) { 581 switch (start_trigger) {
495 case NI_GPCT_ARM_IMMEDIATE: 582 case NI_GPCT_ARM_IMMEDIATE:
496 command_transient_bits |= GI_ARM; 583 transient_bits |= GI_ARM;
497 break; 584 break;
498 case NI_GPCT_ARM_PAIRED_IMMEDIATE: 585 case NI_GPCT_ARM_PAIRED_IMMEDIATE:
499 command_transient_bits |= GI_ARM | GI_ARM_COPY; 586 transient_bits |= GI_ARM | GI_ARM_COPY;
500 break; 587 break;
501 default: 588 default:
589 /*
590 * for m series and 660x, pass-through the least
591 * significant bits so we can figure out what select
592 * later
593 */
594 if (mask && (start_trigger & NI_GPCT_ARM_UNKNOWN)) {
595 bits |= GI_HW_ARM_ENA |
596 (GI_HW_ARM_SEL(start_trigger) & mask);
597 } else {
598 return -EINVAL;
599 }
502 break; 600 break;
503 } 601 }
504 if (ni_tio_counting_mode_registers_present(counter_dev)) {
505 unsigned bits = 0;
506 unsigned sel_mask;
507 602
508 sel_mask = GI_HW_ARM_SEL_MASK(counter_dev->variant); 603 if (mask)
509
510 switch (start_trigger) {
511 case NI_GPCT_ARM_IMMEDIATE:
512 case NI_GPCT_ARM_PAIRED_IMMEDIATE:
513 break;
514 default:
515 if (start_trigger & NI_GPCT_ARM_UNKNOWN) {
516 /*
517 * pass-through the least significant
518 * bits so we can figure out what
519 * select later
520 */
521 bits |= GI_HW_ARM_ENA |
522 (GI_HW_ARM_SEL(start_trigger) &
523 sel_mask);
524 } else {
525 return -EINVAL;
526 }
527 break;
528 }
529 ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx), 604 ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
530 GI_HW_ARM_ENA | sel_mask, bits); 605 GI_HW_ARM_ENA | mask, bits);
531 }
532 } else { 606 } else {
533 command_transient_bits |= GI_DISARM; 607 transient_bits |= GI_DISARM;
534 } 608 }
535 ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx), 609 ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
536 0, 0, command_transient_bits); 610 0, 0, transient_bits);
537 return 0; 611 return 0;
538} 612}
539EXPORT_SYMBOL_GPL(ni_tio_arm); 613EXPORT_SYMBOL_GPL(ni_tio_arm);
540 614
541static unsigned ni_660x_clk_src(unsigned int clock_source) 615static int ni_660x_clk_src(unsigned int clock_source, unsigned int *bits)
542{ 616{
543 unsigned clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK; 617 unsigned int clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
544 unsigned ni_660x_clock; 618 unsigned int ni_660x_clock;
545 unsigned i; 619 unsigned int i;
546 620
547 switch (clk_src) { 621 switch (clk_src) {
548 case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS: 622 case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
@@ -583,18 +657,17 @@ static unsigned ni_660x_clk_src(unsigned int clock_source)
583 } 657 }
584 if (i <= NI_660X_MAX_SRC_PIN) 658 if (i <= NI_660X_MAX_SRC_PIN)
585 break; 659 break;
586 ni_660x_clock = 0; 660 return -EINVAL;
587 BUG();
588 break;
589 } 661 }
590 return GI_SRC_SEL(ni_660x_clock); 662 *bits = GI_SRC_SEL(ni_660x_clock);
663 return 0;
591} 664}
592 665
593static unsigned ni_m_clk_src(unsigned int clock_source) 666static int ni_m_clk_src(unsigned int clock_source, unsigned int *bits)
594{ 667{
595 unsigned clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK; 668 unsigned int clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
596 unsigned ni_m_series_clock; 669 unsigned int ni_m_series_clock;
597 unsigned i; 670 unsigned int i;
598 671
599 switch (clk_src) { 672 switch (clk_src) {
600 case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS: 673 case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
@@ -641,21 +714,18 @@ static unsigned ni_m_clk_src(unsigned int clock_source)
641 } 714 }
642 if (i <= NI_M_MAX_PFI_CHAN) 715 if (i <= NI_M_MAX_PFI_CHAN)
643 break; 716 break;
644 pr_err("invalid clock source 0x%lx\n", 717 return -EINVAL;
645 (unsigned long)clock_source);
646 BUG();
647 ni_m_series_clock = 0;
648 break;
649 } 718 }
650 return GI_SRC_SEL(ni_m_series_clock); 719 *bits = GI_SRC_SEL(ni_m_series_clock);
720 return 0;
651}; 721};
652 722
653static void ni_tio_set_source_subselect(struct ni_gpct *counter, 723static void ni_tio_set_source_subselect(struct ni_gpct *counter,
654 unsigned int clock_source) 724 unsigned int clock_source)
655{ 725{
656 struct ni_gpct_device *counter_dev = counter->counter_dev; 726 struct ni_gpct_device *counter_dev = counter->counter_dev;
657 unsigned cidx = counter->counter_index; 727 unsigned int cidx = counter->counter_index;
658 const unsigned second_gate_reg = NITIO_GATE2_REG(cidx); 728 unsigned int second_gate_reg = NITIO_GATE2_REG(cidx);
659 729
660 if (counter_dev->variant != ni_gpct_variant_m_series) 730 if (counter_dev->variant != ni_gpct_variant_m_series)
661 return; 731 return;
@@ -674,8 +744,8 @@ static void ni_tio_set_source_subselect(struct ni_gpct *counter,
674 default: 744 default:
675 return; 745 return;
676 } 746 }
677 write_register(counter, counter_dev->regs[second_gate_reg], 747 ni_tio_write(counter, counter_dev->regs[second_gate_reg],
678 second_gate_reg); 748 second_gate_reg);
679} 749}
680 750
681static int ni_tio_set_clock_src(struct ni_gpct *counter, 751static int ni_tio_set_clock_src(struct ni_gpct *counter,
@@ -683,20 +753,28 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
683 unsigned int period_ns) 753 unsigned int period_ns)
684{ 754{
685 struct ni_gpct_device *counter_dev = counter->counter_dev; 755 struct ni_gpct_device *counter_dev = counter->counter_dev;
686 unsigned cidx = counter->counter_index; 756 unsigned int cidx = counter->counter_index;
687 unsigned bits = 0; 757 unsigned int bits = 0;
758 int ret;
688 759
689 /* FIXME: validate clock source */
690 switch (counter_dev->variant) { 760 switch (counter_dev->variant) {
691 case ni_gpct_variant_660x: 761 case ni_gpct_variant_660x:
692 bits |= ni_660x_clk_src(clock_source); 762 ret = ni_660x_clk_src(clock_source, &bits);
693 break; 763 break;
694 case ni_gpct_variant_e_series: 764 case ni_gpct_variant_e_series:
695 case ni_gpct_variant_m_series: 765 case ni_gpct_variant_m_series:
696 default: 766 default:
697 bits |= ni_m_clk_src(clock_source); 767 ret = ni_m_clk_src(clock_source, &bits);
698 break; 768 break;
699 } 769 }
770 if (ret) {
771 struct comedi_device *dev = counter_dev->dev;
772
773 dev_err(dev->class_dev, "invalid clock source 0x%x\n",
774 clock_source);
775 return ret;
776 }
777
700 if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT) 778 if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT)
701 bits |= GI_SRC_POL_INVERT; 779 bits |= GI_SRC_POL_INVERT;
702 ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), 780 ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
@@ -722,28 +800,34 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
722 GI_PRESCALE_X8(counter_dev->variant), bits); 800 GI_PRESCALE_X8(counter_dev->variant), bits);
723 } 801 }
724 counter->clock_period_ps = period_ns * 1000; 802 counter->clock_period_ps = period_ns * 1000;
725 ni_tio_set_sync_mode(counter, 0); 803 ni_tio_set_sync_mode(counter);
726 return 0; 804 return 0;
727} 805}
728 806
729static void ni_tio_get_clock_src(struct ni_gpct *counter, 807static int ni_tio_get_clock_src(struct ni_gpct *counter,
730 unsigned int *clock_source, 808 unsigned int *clock_source,
731 unsigned int *period_ns) 809 unsigned int *period_ns)
732{ 810{
733 uint64_t temp64; 811 u64 temp64;
734 812 int ret;
735 *clock_source = ni_tio_generic_clock_src_select(counter); 813
736 temp64 = ni_tio_clock_period_ps(counter, *clock_source); 814 ret = ni_tio_generic_clock_src_select(counter, clock_source);
815 if (ret)
816 return ret;
817 ret = ni_tio_clock_period_ps(counter, *clock_source, &temp64);
818 if (ret)
819 return ret;
737 do_div(temp64, 1000); /* ps to ns */ 820 do_div(temp64, 1000); /* ps to ns */
738 *period_ns = temp64; 821 *period_ns = temp64;
822 return 0;
739} 823}
740 824
741static int ni_660x_set_gate(struct ni_gpct *counter, unsigned int gate_source) 825static int ni_660x_set_gate(struct ni_gpct *counter, unsigned int gate_source)
742{ 826{
743 unsigned int chan = CR_CHAN(gate_source); 827 unsigned int chan = CR_CHAN(gate_source);
744 unsigned cidx = counter->counter_index; 828 unsigned int cidx = counter->counter_index;
745 unsigned gate_sel; 829 unsigned int gate_sel;
746 unsigned i; 830 unsigned int i;
747 831
748 switch (chan) { 832 switch (chan) {
749 case NI_GPCT_NEXT_SOURCE_GATE_SELECT: 833 case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
@@ -782,9 +866,9 @@ static int ni_660x_set_gate(struct ni_gpct *counter, unsigned int gate_source)
782static int ni_m_set_gate(struct ni_gpct *counter, unsigned int gate_source) 866static int ni_m_set_gate(struct ni_gpct *counter, unsigned int gate_source)
783{ 867{
784 unsigned int chan = CR_CHAN(gate_source); 868 unsigned int chan = CR_CHAN(gate_source);
785 unsigned cidx = counter->counter_index; 869 unsigned int cidx = counter->counter_index;
786 unsigned gate_sel; 870 unsigned int gate_sel;
787 unsigned i; 871 unsigned int i;
788 872
789 switch (chan) { 873 switch (chan) {
790 case NI_GPCT_TIMESTAMP_MUX_GATE_SELECT: 874 case NI_GPCT_TIMESTAMP_MUX_GATE_SELECT:
@@ -824,11 +908,11 @@ static int ni_m_set_gate(struct ni_gpct *counter, unsigned int gate_source)
824static int ni_660x_set_gate2(struct ni_gpct *counter, unsigned int gate_source) 908static int ni_660x_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
825{ 909{
826 struct ni_gpct_device *counter_dev = counter->counter_dev; 910 struct ni_gpct_device *counter_dev = counter->counter_dev;
827 unsigned cidx = counter->counter_index; 911 unsigned int cidx = counter->counter_index;
828 unsigned int chan = CR_CHAN(gate_source); 912 unsigned int chan = CR_CHAN(gate_source);
829 unsigned gate2_reg = NITIO_GATE2_REG(cidx); 913 unsigned int gate2_reg = NITIO_GATE2_REG(cidx);
830 unsigned gate2_sel; 914 unsigned int gate2_sel;
831 unsigned i; 915 unsigned int i;
832 916
833 switch (chan) { 917 switch (chan) {
834 case NI_GPCT_SOURCE_PIN_i_GATE_SELECT: 918 case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
@@ -863,17 +947,17 @@ static int ni_660x_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
863 counter_dev->regs[gate2_reg] |= GI_GATE2_MODE; 947 counter_dev->regs[gate2_reg] |= GI_GATE2_MODE;
864 counter_dev->regs[gate2_reg] &= ~GI_GATE2_SEL_MASK; 948 counter_dev->regs[gate2_reg] &= ~GI_GATE2_SEL_MASK;
865 counter_dev->regs[gate2_reg] |= GI_GATE2_SEL(gate2_sel); 949 counter_dev->regs[gate2_reg] |= GI_GATE2_SEL(gate2_sel);
866 write_register(counter, counter_dev->regs[gate2_reg], gate2_reg); 950 ni_tio_write(counter, counter_dev->regs[gate2_reg], gate2_reg);
867 return 0; 951 return 0;
868} 952}
869 953
870static int ni_m_set_gate2(struct ni_gpct *counter, unsigned int gate_source) 954static int ni_m_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
871{ 955{
872 struct ni_gpct_device *counter_dev = counter->counter_dev; 956 struct ni_gpct_device *counter_dev = counter->counter_dev;
873 unsigned cidx = counter->counter_index; 957 unsigned int cidx = counter->counter_index;
874 unsigned int chan = CR_CHAN(gate_source); 958 unsigned int chan = CR_CHAN(gate_source);
875 unsigned gate2_reg = NITIO_GATE2_REG(cidx); 959 unsigned int gate2_reg = NITIO_GATE2_REG(cidx);
876 unsigned gate2_sel; 960 unsigned int gate2_sel;
877 961
878 /* 962 /*
879 * FIXME: We don't know what the m-series second gate codes are, 963 * FIXME: We don't know what the m-series second gate codes are,
@@ -887,20 +971,20 @@ static int ni_m_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
887 counter_dev->regs[gate2_reg] |= GI_GATE2_MODE; 971 counter_dev->regs[gate2_reg] |= GI_GATE2_MODE;
888 counter_dev->regs[gate2_reg] &= ~GI_GATE2_SEL_MASK; 972 counter_dev->regs[gate2_reg] &= ~GI_GATE2_SEL_MASK;
889 counter_dev->regs[gate2_reg] |= GI_GATE2_SEL(gate2_sel); 973 counter_dev->regs[gate2_reg] |= GI_GATE2_SEL(gate2_sel);
890 write_register(counter, counter_dev->regs[gate2_reg], gate2_reg); 974 ni_tio_write(counter, counter_dev->regs[gate2_reg], gate2_reg);
891 return 0; 975 return 0;
892} 976}
893 977
894int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index, 978int ni_tio_set_gate_src(struct ni_gpct *counter,
895 unsigned int gate_source) 979 unsigned int gate, unsigned int src)
896{ 980{
897 struct ni_gpct_device *counter_dev = counter->counter_dev; 981 struct ni_gpct_device *counter_dev = counter->counter_dev;
898 unsigned cidx = counter->counter_index; 982 unsigned int cidx = counter->counter_index;
899 unsigned int chan = CR_CHAN(gate_source); 983 unsigned int chan = CR_CHAN(src);
900 unsigned gate2_reg = NITIO_GATE2_REG(cidx); 984 unsigned int gate2_reg = NITIO_GATE2_REG(cidx);
901 unsigned mode = 0; 985 unsigned int mode = 0;
902 986
903 switch (gate_index) { 987 switch (gate) {
904 case 0: 988 case 0:
905 if (chan == NI_GPCT_DISABLED_GATE_SELECT) { 989 if (chan == NI_GPCT_DISABLED_GATE_SELECT) {
906 ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), 990 ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
@@ -908,9 +992,9 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
908 GI_GATING_DISABLED); 992 GI_GATING_DISABLED);
909 return 0; 993 return 0;
910 } 994 }
911 if (gate_source & CR_INVERT) 995 if (src & CR_INVERT)
912 mode |= GI_GATE_POL_INVERT; 996 mode |= GI_GATE_POL_INVERT;
913 if (gate_source & CR_EDGE) 997 if (src & CR_EDGE)
914 mode |= GI_RISING_EDGE_GATING; 998 mode |= GI_RISING_EDGE_GATING;
915 else 999 else
916 mode |= GI_LEVEL_GATING; 1000 mode |= GI_LEVEL_GATING;
@@ -921,9 +1005,9 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
921 case ni_gpct_variant_e_series: 1005 case ni_gpct_variant_e_series:
922 case ni_gpct_variant_m_series: 1006 case ni_gpct_variant_m_series:
923 default: 1007 default:
924 return ni_m_set_gate(counter, gate_source); 1008 return ni_m_set_gate(counter, src);
925 case ni_gpct_variant_660x: 1009 case ni_gpct_variant_660x:
926 return ni_660x_set_gate(counter, gate_source); 1010 return ni_660x_set_gate(counter, src);
927 } 1011 }
928 break; 1012 break;
929 case 1: 1013 case 1:
@@ -932,22 +1016,21 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
932 1016
933 if (chan == NI_GPCT_DISABLED_GATE_SELECT) { 1017 if (chan == NI_GPCT_DISABLED_GATE_SELECT) {
934 counter_dev->regs[gate2_reg] &= ~GI_GATE2_MODE; 1018 counter_dev->regs[gate2_reg] &= ~GI_GATE2_MODE;
935 write_register(counter, counter_dev->regs[gate2_reg], 1019 ni_tio_write(counter, counter_dev->regs[gate2_reg],
936 gate2_reg); 1020 gate2_reg);
937 return 0; 1021 return 0;
938 } 1022 }
939 if (gate_source & CR_INVERT) 1023 if (src & CR_INVERT)
940 counter_dev->regs[gate2_reg] |= GI_GATE2_POL_INVERT; 1024 counter_dev->regs[gate2_reg] |= GI_GATE2_POL_INVERT;
941 else 1025 else
942 counter_dev->regs[gate2_reg] &= ~GI_GATE2_POL_INVERT; 1026 counter_dev->regs[gate2_reg] &= ~GI_GATE2_POL_INVERT;
943 switch (counter_dev->variant) { 1027 switch (counter_dev->variant) {
944 case ni_gpct_variant_m_series: 1028 case ni_gpct_variant_m_series:
945 return ni_m_set_gate2(counter, gate_source); 1029 return ni_m_set_gate2(counter, src);
946 case ni_gpct_variant_660x: 1030 case ni_gpct_variant_660x:
947 return ni_660x_set_gate2(counter, gate_source); 1031 return ni_660x_set_gate2(counter, src);
948 default: 1032 default:
949 BUG(); 1033 return -EINVAL;
950 break;
951 } 1034 }
952 break; 1035 break;
953 default: 1036 default:
@@ -957,11 +1040,11 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
957} 1040}
958EXPORT_SYMBOL_GPL(ni_tio_set_gate_src); 1041EXPORT_SYMBOL_GPL(ni_tio_set_gate_src);
959 1042
960static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index, 1043static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned int index,
961 unsigned int source) 1044 unsigned int source)
962{ 1045{
963 struct ni_gpct_device *counter_dev = counter->counter_dev; 1046 struct ni_gpct_device *counter_dev = counter->counter_dev;
964 unsigned cidx = counter->counter_index; 1047 unsigned int cidx = counter->counter_index;
965 unsigned int abz_reg, shift, mask; 1048 unsigned int abz_reg, shift, mask;
966 1049
967 if (counter_dev->variant != ni_gpct_variant_m_series) 1050 if (counter_dev->variant != ni_gpct_variant_m_series)
@@ -987,175 +1070,221 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index,
987 1070
988 counter_dev->regs[abz_reg] &= ~mask; 1071 counter_dev->regs[abz_reg] &= ~mask;
989 counter_dev->regs[abz_reg] |= (source << shift) & mask; 1072 counter_dev->regs[abz_reg] |= (source << shift) & mask;
990 write_register(counter, counter_dev->regs[abz_reg], abz_reg); 1073 ni_tio_write(counter, counter_dev->regs[abz_reg], abz_reg);
991 return 0; 1074 return 0;
992} 1075}
993 1076
994static unsigned ni_660x_gate_to_generic_gate(unsigned gate) 1077static int ni_660x_gate_to_generic_gate(unsigned int gate, unsigned int *src)
995{ 1078{
996 unsigned i; 1079 unsigned int source;
1080 unsigned int i;
997 1081
998 switch (gate) { 1082 switch (gate) {
999 case NI_660X_SRC_PIN_I_GATE_SEL: 1083 case NI_660X_SRC_PIN_I_GATE_SEL:
1000 return NI_GPCT_SOURCE_PIN_i_GATE_SELECT; 1084 source = NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
1085 break;
1001 case NI_660X_GATE_PIN_I_GATE_SEL: 1086 case NI_660X_GATE_PIN_I_GATE_SEL:
1002 return NI_GPCT_GATE_PIN_i_GATE_SELECT; 1087 source = NI_GPCT_GATE_PIN_i_GATE_SELECT;
1088 break;
1003 case NI_660X_NEXT_SRC_GATE_SEL: 1089 case NI_660X_NEXT_SRC_GATE_SEL:
1004 return NI_GPCT_NEXT_SOURCE_GATE_SELECT; 1090 source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
1091 break;
1005 case NI_660X_NEXT_OUT_GATE_SEL: 1092 case NI_660X_NEXT_OUT_GATE_SEL:
1006 return NI_GPCT_NEXT_OUT_GATE_SELECT; 1093 source = NI_GPCT_NEXT_OUT_GATE_SELECT;
1094 break;
1007 case NI_660X_LOGIC_LOW_GATE_SEL: 1095 case NI_660X_LOGIC_LOW_GATE_SEL:
1008 return NI_GPCT_LOGIC_LOW_GATE_SELECT; 1096 source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
1097 break;
1009 default: 1098 default:
1010 for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) { 1099 for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
1011 if (gate == NI_660X_RTSI_GATE_SEL(i)) 1100 if (gate == NI_660X_RTSI_GATE_SEL(i)) {
1012 return NI_GPCT_RTSI_GATE_SELECT(i); 1101 source = NI_GPCT_RTSI_GATE_SELECT(i);
1102 break;
1103 }
1013 } 1104 }
1105 if (i <= NI_660X_MAX_RTSI_CHAN)
1106 break;
1014 for (i = 0; i <= NI_660X_MAX_GATE_PIN; ++i) { 1107 for (i = 0; i <= NI_660X_MAX_GATE_PIN; ++i) {
1015 if (gate == NI_660X_PIN_GATE_SEL(i)) 1108 if (gate == NI_660X_PIN_GATE_SEL(i)) {
1016 return NI_GPCT_GATE_PIN_GATE_SELECT(i); 1109 source = NI_GPCT_GATE_PIN_GATE_SELECT(i);
1110 break;
1111 }
1017 } 1112 }
1018 BUG(); 1113 if (i <= NI_660X_MAX_GATE_PIN)
1019 break; 1114 break;
1115 return -EINVAL;
1020 } 1116 }
1117 *src = source;
1021 return 0; 1118 return 0;
1022}; 1119};
1023 1120
1024static unsigned ni_m_gate_to_generic_gate(unsigned gate) 1121static int ni_m_gate_to_generic_gate(unsigned int gate, unsigned int *src)
1025{ 1122{
1026 unsigned i; 1123 unsigned int source;
1124 unsigned int i;
1027 1125
1028 switch (gate) { 1126 switch (gate) {
1029 case NI_M_TIMESTAMP_MUX_GATE_SEL: 1127 case NI_M_TIMESTAMP_MUX_GATE_SEL:
1030 return NI_GPCT_TIMESTAMP_MUX_GATE_SELECT; 1128 source = NI_GPCT_TIMESTAMP_MUX_GATE_SELECT;
1129 break;
1031 case NI_M_AI_START2_GATE_SEL: 1130 case NI_M_AI_START2_GATE_SEL:
1032 return NI_GPCT_AI_START2_GATE_SELECT; 1131 source = NI_GPCT_AI_START2_GATE_SELECT;
1132 break;
1033 case NI_M_PXI_STAR_TRIGGER_GATE_SEL: 1133 case NI_M_PXI_STAR_TRIGGER_GATE_SEL:
1034 return NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT; 1134 source = NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT;
1135 break;
1035 case NI_M_NEXT_OUT_GATE_SEL: 1136 case NI_M_NEXT_OUT_GATE_SEL:
1036 return NI_GPCT_NEXT_OUT_GATE_SELECT; 1137 source = NI_GPCT_NEXT_OUT_GATE_SELECT;
1138 break;
1037 case NI_M_AI_START1_GATE_SEL: 1139 case NI_M_AI_START1_GATE_SEL:
1038 return NI_GPCT_AI_START1_GATE_SELECT; 1140 source = NI_GPCT_AI_START1_GATE_SELECT;
1141 break;
1039 case NI_M_NEXT_SRC_GATE_SEL: 1142 case NI_M_NEXT_SRC_GATE_SEL:
1040 return NI_GPCT_NEXT_SOURCE_GATE_SELECT; 1143 source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
1144 break;
1041 case NI_M_ANALOG_TRIG_OUT_GATE_SEL: 1145 case NI_M_ANALOG_TRIG_OUT_GATE_SEL:
1042 return NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT; 1146 source = NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT;
1147 break;
1043 case NI_M_LOGIC_LOW_GATE_SEL: 1148 case NI_M_LOGIC_LOW_GATE_SEL:
1044 return NI_GPCT_LOGIC_LOW_GATE_SELECT; 1149 source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
1150 break;
1045 default: 1151 default:
1046 for (i = 0; i <= NI_M_MAX_RTSI_CHAN; ++i) { 1152 for (i = 0; i <= NI_M_MAX_RTSI_CHAN; ++i) {
1047 if (gate == NI_M_RTSI_GATE_SEL(i)) 1153 if (gate == NI_M_RTSI_GATE_SEL(i)) {
1048 return NI_GPCT_RTSI_GATE_SELECT(i); 1154 source = NI_GPCT_RTSI_GATE_SELECT(i);
1155 break;
1156 }
1049 } 1157 }
1158 if (i <= NI_M_MAX_RTSI_CHAN)
1159 break;
1050 for (i = 0; i <= NI_M_MAX_PFI_CHAN; ++i) { 1160 for (i = 0; i <= NI_M_MAX_PFI_CHAN; ++i) {
1051 if (gate == NI_M_PFI_GATE_SEL(i)) 1161 if (gate == NI_M_PFI_GATE_SEL(i)) {
1052 return NI_GPCT_PFI_GATE_SELECT(i); 1162 source = NI_GPCT_PFI_GATE_SELECT(i);
1163 break;
1164 }
1053 } 1165 }
1054 BUG(); 1166 if (i <= NI_M_MAX_PFI_CHAN)
1055 break; 1167 break;
1168 return -EINVAL;
1056 } 1169 }
1170 *src = source;
1057 return 0; 1171 return 0;
1058}; 1172};
1059 1173
1060static unsigned ni_660x_gate2_to_generic_gate(unsigned gate) 1174static int ni_660x_gate2_to_generic_gate(unsigned int gate, unsigned int *src)
1061{ 1175{
1062 unsigned i; 1176 unsigned int source;
1177 unsigned int i;
1063 1178
1064 switch (gate) { 1179 switch (gate) {
1065 case NI_660X_SRC_PIN_I_GATE2_SEL: 1180 case NI_660X_SRC_PIN_I_GATE2_SEL:
1066 return NI_GPCT_SOURCE_PIN_i_GATE_SELECT; 1181 source = NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
1182 break;
1067 case NI_660X_UD_PIN_I_GATE2_SEL: 1183 case NI_660X_UD_PIN_I_GATE2_SEL:
1068 return NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT; 1184 source = NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT;
1185 break;
1069 case NI_660X_NEXT_SRC_GATE2_SEL: 1186 case NI_660X_NEXT_SRC_GATE2_SEL:
1070 return NI_GPCT_NEXT_SOURCE_GATE_SELECT; 1187 source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
1188 break;
1071 case NI_660X_NEXT_OUT_GATE2_SEL: 1189 case NI_660X_NEXT_OUT_GATE2_SEL:
1072 return NI_GPCT_NEXT_OUT_GATE_SELECT; 1190 source = NI_GPCT_NEXT_OUT_GATE_SELECT;
1191 break;
1073 case NI_660X_SELECTED_GATE2_SEL: 1192 case NI_660X_SELECTED_GATE2_SEL:
1074 return NI_GPCT_SELECTED_GATE_GATE_SELECT; 1193 source = NI_GPCT_SELECTED_GATE_GATE_SELECT;
1194 break;
1075 case NI_660X_LOGIC_LOW_GATE2_SEL: 1195 case NI_660X_LOGIC_LOW_GATE2_SEL:
1076 return NI_GPCT_LOGIC_LOW_GATE_SELECT; 1196 source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
1197 break;
1077 default: 1198 default:
1078 for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) { 1199 for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
1079 if (gate == NI_660X_RTSI_GATE2_SEL(i)) 1200 if (gate == NI_660X_RTSI_GATE2_SEL(i)) {
1080 return NI_GPCT_RTSI_GATE_SELECT(i); 1201 source = NI_GPCT_RTSI_GATE_SELECT(i);
1202 break;
1203 }
1081 } 1204 }
1205 if (i <= NI_660X_MAX_RTSI_CHAN)
1206 break;
1082 for (i = 0; i <= NI_660X_MAX_UP_DOWN_PIN; ++i) { 1207 for (i = 0; i <= NI_660X_MAX_UP_DOWN_PIN; ++i) {
1083 if (gate == NI_660X_UD_PIN_GATE2_SEL(i)) 1208 if (gate == NI_660X_UD_PIN_GATE2_SEL(i)) {
1084 return NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i); 1209 source = NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i);
1210 break;
1211 }
1085 } 1212 }
1086 BUG(); 1213 if (i <= NI_660X_MAX_UP_DOWN_PIN)
1087 break; 1214 break;
1215 return -EINVAL;
1088 } 1216 }
1217 *src = source;
1089 return 0; 1218 return 0;
1090}; 1219};
1091 1220
1092static unsigned ni_m_gate2_to_generic_gate(unsigned gate) 1221static int ni_m_gate2_to_generic_gate(unsigned int gate, unsigned int *src)
1093{ 1222{
1094 /* 1223 /*
1095 * FIXME: the second gate sources for the m series are undocumented, 1224 * FIXME: the second gate sources for the m series are undocumented,
1096 * so we just return the raw bits for now. 1225 * so we just return the raw bits for now.
1097 */ 1226 */
1098 switch (gate) { 1227 *src = gate;
1099 default:
1100 return gate;
1101 }
1102 return 0; 1228 return 0;
1103}; 1229};
1104 1230
1105static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index, 1231static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned int gate_index,
1106 unsigned int *gate_source) 1232 unsigned int *gate_source)
1107{ 1233{
1108 struct ni_gpct_device *counter_dev = counter->counter_dev; 1234 struct ni_gpct_device *counter_dev = counter->counter_dev;
1109 unsigned cidx = counter->counter_index; 1235 unsigned int cidx = counter->counter_index;
1110 unsigned mode = ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx)); 1236 unsigned int mode;
1111 unsigned gate2_reg = NITIO_GATE2_REG(cidx); 1237 unsigned int reg;
1112 unsigned gate; 1238 unsigned int gate;
1239 int ret;
1240
1241 mode = ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx));
1242 if (((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED) ||
1243 (gate_index == 1 &&
1244 !(counter_dev->regs[NITIO_GATE2_REG(cidx)] & GI_GATE2_MODE))) {
1245 *gate_source = NI_GPCT_DISABLED_GATE_SELECT;
1246 return 0;
1247 }
1113 1248
1114 switch (gate_index) { 1249 switch (gate_index) {
1115 case 0: 1250 case 0:
1116 if ((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED) { 1251 reg = NITIO_INPUT_SEL_REG(cidx);
1117 *gate_source = NI_GPCT_DISABLED_GATE_SELECT; 1252 gate = GI_BITS_TO_GATE(ni_tio_get_soft_copy(counter, reg));
1118 return 0;
1119 }
1120
1121 gate = GI_BITS_TO_GATE(ni_tio_get_soft_copy(counter,
1122 NITIO_INPUT_SEL_REG(cidx)));
1123 1253
1124 switch (counter_dev->variant) { 1254 switch (counter_dev->variant) {
1125 case ni_gpct_variant_e_series: 1255 case ni_gpct_variant_e_series:
1126 case ni_gpct_variant_m_series: 1256 case ni_gpct_variant_m_series:
1127 default: 1257 default:
1128 *gate_source = ni_m_gate_to_generic_gate(gate); 1258 ret = ni_m_gate_to_generic_gate(gate, gate_source);
1129 break; 1259 break;
1130 case ni_gpct_variant_660x: 1260 case ni_gpct_variant_660x:
1131 *gate_source = ni_660x_gate_to_generic_gate(gate); 1261 ret = ni_660x_gate_to_generic_gate(gate, gate_source);
1132 break; 1262 break;
1133 } 1263 }
1264 if (ret)
1265 return ret;
1134 if (mode & GI_GATE_POL_INVERT) 1266 if (mode & GI_GATE_POL_INVERT)
1135 *gate_source |= CR_INVERT; 1267 *gate_source |= CR_INVERT;
1136 if ((mode & GI_GATING_MODE_MASK) != GI_LEVEL_GATING) 1268 if ((mode & GI_GATING_MODE_MASK) != GI_LEVEL_GATING)
1137 *gate_source |= CR_EDGE; 1269 *gate_source |= CR_EDGE;
1138 break; 1270 break;
1139 case 1: 1271 case 1:
1140 if ((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED || 1272 reg = NITIO_GATE2_REG(cidx);
1141 !(counter_dev->regs[gate2_reg] & GI_GATE2_MODE)) { 1273 gate = GI_BITS_TO_GATE2(counter_dev->regs[reg]);
1142 *gate_source = NI_GPCT_DISABLED_GATE_SELECT;
1143 return 0;
1144 }
1145
1146 gate = GI_BITS_TO_GATE2(counter_dev->regs[gate2_reg]);
1147 1274
1148 switch (counter_dev->variant) { 1275 switch (counter_dev->variant) {
1149 case ni_gpct_variant_e_series: 1276 case ni_gpct_variant_e_series:
1150 case ni_gpct_variant_m_series: 1277 case ni_gpct_variant_m_series:
1151 default: 1278 default:
1152 *gate_source = ni_m_gate2_to_generic_gate(gate); 1279 ret = ni_m_gate2_to_generic_gate(gate, gate_source);
1153 break; 1280 break;
1154 case ni_gpct_variant_660x: 1281 case ni_gpct_variant_660x:
1155 *gate_source = ni_660x_gate2_to_generic_gate(gate); 1282 ret = ni_660x_gate2_to_generic_gate(gate, gate_source);
1156 break; 1283 break;
1157 } 1284 }
1158 if (counter_dev->regs[gate2_reg] & GI_GATE2_POL_INVERT) 1285 if (ret)
1286 return ret;
1287 if (counter_dev->regs[reg] & GI_GATE2_POL_INVERT)
1159 *gate_source |= CR_INVERT; 1288 *gate_source |= CR_INVERT;
1160 /* second gate can't have edge/level mode set independently */ 1289 /* second gate can't have edge/level mode set independently */
1161 if ((mode & GI_GATING_MODE_MASK) != GI_LEVEL_GATING) 1290 if ((mode & GI_GATING_MODE_MASK) != GI_LEVEL_GATING)
@@ -1173,45 +1302,52 @@ int ni_tio_insn_config(struct comedi_device *dev,
1173 unsigned int *data) 1302 unsigned int *data)
1174{ 1303{
1175 struct ni_gpct *counter = s->private; 1304 struct ni_gpct *counter = s->private;
1176 unsigned cidx = counter->counter_index; 1305 unsigned int cidx = counter->counter_index;
1177 unsigned status; 1306 unsigned int status;
1307 int ret = 0;
1178 1308
1179 switch (data[0]) { 1309 switch (data[0]) {
1180 case INSN_CONFIG_SET_COUNTER_MODE: 1310 case INSN_CONFIG_SET_COUNTER_MODE:
1181 return ni_tio_set_counter_mode(counter, data[1]); 1311 ret = ni_tio_set_counter_mode(counter, data[1]);
1312 break;
1182 case INSN_CONFIG_ARM: 1313 case INSN_CONFIG_ARM:
1183 return ni_tio_arm(counter, 1, data[1]); 1314 ret = ni_tio_arm(counter, true, data[1]);
1315 break;
1184 case INSN_CONFIG_DISARM: 1316 case INSN_CONFIG_DISARM:
1185 ni_tio_arm(counter, 0, 0); 1317 ret = ni_tio_arm(counter, false, 0);
1186 return 0; 1318 break;
1187 case INSN_CONFIG_GET_COUNTER_STATUS: 1319 case INSN_CONFIG_GET_COUNTER_STATUS:
1188 data[1] = 0; 1320 data[1] = 0;
1189 status = read_register(counter, NITIO_SHARED_STATUS_REG(cidx)); 1321 status = ni_tio_read(counter, NITIO_SHARED_STATUS_REG(cidx));
1190 if (status & GI_ARMED(cidx)) { 1322 if (status & GI_ARMED(cidx)) {
1191 data[1] |= COMEDI_COUNTER_ARMED; 1323 data[1] |= COMEDI_COUNTER_ARMED;
1192 if (status & GI_COUNTING(cidx)) 1324 if (status & GI_COUNTING(cidx))
1193 data[1] |= COMEDI_COUNTER_COUNTING; 1325 data[1] |= COMEDI_COUNTER_COUNTING;
1194 } 1326 }
1195 data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING; 1327 data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING;
1196 return 0; 1328 break;
1197 case INSN_CONFIG_SET_CLOCK_SRC: 1329 case INSN_CONFIG_SET_CLOCK_SRC:
1198 return ni_tio_set_clock_src(counter, data[1], data[2]); 1330 ret = ni_tio_set_clock_src(counter, data[1], data[2]);
1331 break;
1199 case INSN_CONFIG_GET_CLOCK_SRC: 1332 case INSN_CONFIG_GET_CLOCK_SRC:
1200 ni_tio_get_clock_src(counter, &data[1], &data[2]); 1333 ret = ni_tio_get_clock_src(counter, &data[1], &data[2]);
1201 return 0; 1334 break;
1202 case INSN_CONFIG_SET_GATE_SRC: 1335 case INSN_CONFIG_SET_GATE_SRC:
1203 return ni_tio_set_gate_src(counter, data[1], data[2]); 1336 ret = ni_tio_set_gate_src(counter, data[1], data[2]);
1337 break;
1204 case INSN_CONFIG_GET_GATE_SRC: 1338 case INSN_CONFIG_GET_GATE_SRC:
1205 return ni_tio_get_gate_src(counter, data[1], &data[2]); 1339 ret = ni_tio_get_gate_src(counter, data[1], &data[2]);
1340 break;
1206 case INSN_CONFIG_SET_OTHER_SRC: 1341 case INSN_CONFIG_SET_OTHER_SRC:
1207 return ni_tio_set_other_src(counter, data[1], data[2]); 1342 ret = ni_tio_set_other_src(counter, data[1], data[2]);
1343 break;
1208 case INSN_CONFIG_RESET: 1344 case INSN_CONFIG_RESET:
1209 ni_tio_reset_count_and_disarm(counter); 1345 ni_tio_reset_count_and_disarm(counter);
1210 return 0;
1211 default:
1212 break; 1346 break;
1347 default:
1348 return -EINVAL;
1213 } 1349 }
1214 return -EINVAL; 1350 return ret ? ret : insn->n;
1215} 1351}
1216EXPORT_SYMBOL_GPL(ni_tio_insn_config); 1352EXPORT_SYMBOL_GPL(ni_tio_insn_config);
1217 1353
@@ -1219,7 +1355,7 @@ static unsigned int ni_tio_read_sw_save_reg(struct comedi_device *dev,
1219 struct comedi_subdevice *s) 1355 struct comedi_subdevice *s)
1220{ 1356{
1221 struct ni_gpct *counter = s->private; 1357 struct ni_gpct *counter = s->private;
1222 unsigned cidx = counter->counter_index; 1358 unsigned int cidx = counter->counter_index;
1223 unsigned int val; 1359 unsigned int val;
1224 1360
1225 ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_SAVE_TRACE, 0); 1361 ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_SAVE_TRACE, 0);
@@ -1235,9 +1371,9 @@ static unsigned int ni_tio_read_sw_save_reg(struct comedi_device *dev,
1235 * will be correct since the count value will definitely have latched 1371 * will be correct since the count value will definitely have latched
1236 * by then. 1372 * by then.
1237 */ 1373 */
1238 val = read_register(counter, NITIO_SW_SAVE_REG(cidx)); 1374 val = ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx));
1239 if (val != read_register(counter, NITIO_SW_SAVE_REG(cidx))) 1375 if (val != ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx)))
1240 val = read_register(counter, NITIO_SW_SAVE_REG(cidx)); 1376 val = ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx));
1241 1377
1242 return val; 1378 return val;
1243} 1379}
@@ -1250,7 +1386,7 @@ int ni_tio_insn_read(struct comedi_device *dev,
1250 struct ni_gpct *counter = s->private; 1386 struct ni_gpct *counter = s->private;
1251 struct ni_gpct_device *counter_dev = counter->counter_dev; 1387 struct ni_gpct_device *counter_dev = counter->counter_dev;
1252 unsigned int channel = CR_CHAN(insn->chanspec); 1388 unsigned int channel = CR_CHAN(insn->chanspec);
1253 unsigned cidx = counter->counter_index; 1389 unsigned int cidx = counter->counter_index;
1254 int i; 1390 int i;
1255 1391
1256 for (i = 0; i < insn->n; i++) { 1392 for (i = 0; i < insn->n; i++) {
@@ -1270,11 +1406,10 @@ int ni_tio_insn_read(struct comedi_device *dev,
1270} 1406}
1271EXPORT_SYMBOL_GPL(ni_tio_insn_read); 1407EXPORT_SYMBOL_GPL(ni_tio_insn_read);
1272 1408
1273static unsigned ni_tio_next_load_register(struct ni_gpct *counter) 1409static unsigned int ni_tio_next_load_register(struct ni_gpct *counter)
1274{ 1410{
1275 unsigned cidx = counter->counter_index; 1411 unsigned int cidx = counter->counter_index;
1276 const unsigned bits = 1412 unsigned int bits = ni_tio_read(counter, NITIO_SHARED_STATUS_REG(cidx));
1277 read_register(counter, NITIO_SHARED_STATUS_REG(cidx));
1278 1413
1279 return (bits & GI_NEXT_LOAD_SRC(cidx)) 1414 return (bits & GI_NEXT_LOAD_SRC(cidx))
1280 ? NITIO_LOADB_REG(cidx) 1415 ? NITIO_LOADB_REG(cidx)
@@ -1288,9 +1423,9 @@ int ni_tio_insn_write(struct comedi_device *dev,
1288{ 1423{
1289 struct ni_gpct *counter = s->private; 1424 struct ni_gpct *counter = s->private;
1290 struct ni_gpct_device *counter_dev = counter->counter_dev; 1425 struct ni_gpct_device *counter_dev = counter->counter_dev;
1291 const unsigned channel = CR_CHAN(insn->chanspec); 1426 unsigned int channel = CR_CHAN(insn->chanspec);
1292 unsigned cidx = counter->counter_index; 1427 unsigned int cidx = counter->counter_index;
1293 unsigned load_reg; 1428 unsigned int load_reg;
1294 1429
1295 if (insn->n < 1) 1430 if (insn->n < 1)
1296 return 0; 1431 return 0;
@@ -1306,19 +1441,19 @@ int ni_tio_insn_write(struct comedi_device *dev,
1306 * load register is already selected. 1441 * load register is already selected.
1307 */ 1442 */
1308 load_reg = ni_tio_next_load_register(counter); 1443 load_reg = ni_tio_next_load_register(counter);
1309 write_register(counter, data[0], load_reg); 1444 ni_tio_write(counter, data[0], load_reg);
1310 ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx), 1445 ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
1311 0, 0, GI_LOAD); 1446 0, 0, GI_LOAD);
1312 /* restore load reg */ 1447 /* restore load reg */
1313 write_register(counter, counter_dev->regs[load_reg], load_reg); 1448 ni_tio_write(counter, counter_dev->regs[load_reg], load_reg);
1314 break; 1449 break;
1315 case 1: 1450 case 1:
1316 counter_dev->regs[NITIO_LOADA_REG(cidx)] = data[0]; 1451 counter_dev->regs[NITIO_LOADA_REG(cidx)] = data[0];
1317 write_register(counter, data[0], NITIO_LOADA_REG(cidx)); 1452 ni_tio_write(counter, data[0], NITIO_LOADA_REG(cidx));
1318 break; 1453 break;
1319 case 2: 1454 case 2:
1320 counter_dev->regs[NITIO_LOADB_REG(cidx)] = data[0]; 1455 counter_dev->regs[NITIO_LOADB_REG(cidx)] = data[0];
1321 write_register(counter, data[0], NITIO_LOADB_REG(cidx)); 1456 ni_tio_write(counter, data[0], NITIO_LOADB_REG(cidx));
1322 break; 1457 break;
1323 default: 1458 default:
1324 return -EINVAL; 1459 return -EINVAL;
@@ -1330,13 +1465,13 @@ EXPORT_SYMBOL_GPL(ni_tio_insn_write);
1330void ni_tio_init_counter(struct ni_gpct *counter) 1465void ni_tio_init_counter(struct ni_gpct *counter)
1331{ 1466{
1332 struct ni_gpct_device *counter_dev = counter->counter_dev; 1467 struct ni_gpct_device *counter_dev = counter->counter_dev;
1333 unsigned cidx = counter->counter_index; 1468 unsigned int cidx = counter->counter_index;
1334 1469
1335 ni_tio_reset_count_and_disarm(counter); 1470 ni_tio_reset_count_and_disarm(counter);
1336 1471
1337 /* initialize counter registers */ 1472 /* initialize counter registers */
1338 counter_dev->regs[NITIO_AUTO_INC_REG(cidx)] = 0x0; 1473 counter_dev->regs[NITIO_AUTO_INC_REG(cidx)] = 0x0;
1339 write_register(counter, 0x0, NITIO_AUTO_INC_REG(cidx)); 1474 ni_tio_write(counter, 0x0, NITIO_AUTO_INC_REG(cidx));
1340 1475
1341 ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), 1476 ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
1342 ~0, GI_SYNC_GATE); 1477 ~0, GI_SYNC_GATE);
@@ -1344,10 +1479,10 @@ void ni_tio_init_counter(struct ni_gpct *counter)
1344 ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), ~0, 0); 1479 ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), ~0, 0);
1345 1480
1346 counter_dev->regs[NITIO_LOADA_REG(cidx)] = 0x0; 1481 counter_dev->regs[NITIO_LOADA_REG(cidx)] = 0x0;
1347 write_register(counter, 0x0, NITIO_LOADA_REG(cidx)); 1482 ni_tio_write(counter, 0x0, NITIO_LOADA_REG(cidx));
1348 1483
1349 counter_dev->regs[NITIO_LOADB_REG(cidx)] = 0x0; 1484 counter_dev->regs[NITIO_LOADB_REG(cidx)] = 0x0;
1350 write_register(counter, 0x0, NITIO_LOADB_REG(cidx)); 1485 ni_tio_write(counter, 0x0, NITIO_LOADB_REG(cidx));
1351 1486
1352 ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), ~0, 0); 1487 ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), ~0, 0);
1353 1488
@@ -1356,7 +1491,7 @@ void ni_tio_init_counter(struct ni_gpct *counter)
1356 1491
1357 if (ni_tio_has_gate2_registers(counter_dev)) { 1492 if (ni_tio_has_gate2_registers(counter_dev)) {
1358 counter_dev->regs[NITIO_GATE2_REG(cidx)] = 0x0; 1493 counter_dev->regs[NITIO_GATE2_REG(cidx)] = 0x0;
1359 write_register(counter, 0x0, NITIO_GATE2_REG(cidx)); 1494 ni_tio_write(counter, 0x0, NITIO_GATE2_REG(cidx));
1360 } 1495 }
1361 1496
1362 ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx), ~0, 0x0); 1497 ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx), ~0, 0x0);
@@ -1367,17 +1502,17 @@ EXPORT_SYMBOL_GPL(ni_tio_init_counter);
1367 1502
1368struct ni_gpct_device * 1503struct ni_gpct_device *
1369ni_gpct_device_construct(struct comedi_device *dev, 1504ni_gpct_device_construct(struct comedi_device *dev,
1370 void (*write_register)(struct ni_gpct *counter, 1505 void (*write)(struct ni_gpct *counter,
1371 unsigned bits, 1506 unsigned int value,
1372 enum ni_gpct_register reg), 1507 enum ni_gpct_register reg),
1373 unsigned (*read_register)(struct ni_gpct *counter, 1508 unsigned int (*read)(struct ni_gpct *counter,
1374 enum ni_gpct_register reg), 1509 enum ni_gpct_register reg),
1375 enum ni_gpct_variant variant, 1510 enum ni_gpct_variant variant,
1376 unsigned num_counters) 1511 unsigned int num_counters)
1377{ 1512{
1378 struct ni_gpct_device *counter_dev; 1513 struct ni_gpct_device *counter_dev;
1379 struct ni_gpct *counter; 1514 struct ni_gpct *counter;
1380 unsigned i; 1515 unsigned int i;
1381 1516
1382 if (num_counters == 0) 1517 if (num_counters == 0)
1383 return NULL; 1518 return NULL;
@@ -1387,8 +1522,8 @@ ni_gpct_device_construct(struct comedi_device *dev,
1387 return NULL; 1522 return NULL;
1388 1523
1389 counter_dev->dev = dev; 1524 counter_dev->dev = dev;
1390 counter_dev->write_register = write_register; 1525 counter_dev->write = write;
1391 counter_dev->read_register = read_register; 1526 counter_dev->read = read;
1392 counter_dev->variant = variant; 1527 counter_dev->variant = variant;
1393 1528
1394 spin_lock_init(&counter_dev->regs_lock); 1529 spin_lock_init(&counter_dev->regs_lock);
@@ -1413,7 +1548,7 @@ EXPORT_SYMBOL_GPL(ni_gpct_device_construct);
1413 1548
1414void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev) 1549void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev)
1415{ 1550{
1416 if (!counter_dev->counters) 1551 if (!counter_dev)
1417 return; 1552 return;
1418 kfree(counter_dev->counters); 1553 kfree(counter_dev->counters);
1419 kfree(counter_dev); 1554 kfree(counter_dev);
diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h
index 25aedd0e5867..4978358f9b13 100644
--- a/drivers/staging/comedi/drivers/ni_tio.h
+++ b/drivers/staging/comedi/drivers/ni_tio.h
@@ -1,29 +1,24 @@
1/* 1/*
2 drivers/ni_tio.h 2 * Header file for NI general purpose counter support code (ni_tio.c)
3 Header file for NI general purpose counter support code (ni_tio.c) 3 *
4 4 * COMEDI - Linux Control and Measurement Device Interface
5 COMEDI - Linux Control and Measurement Device Interface 5 *
6 6 * This program is free software; you can redistribute it and/or modify
7 This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by
8 it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or
9 the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version.
10 (at your option) any later version. 10 *
11 11 * This program is distributed in the hope that it will be useful,
12 This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details.
15 GNU General Public License for more details. 15 */
16*/
17 16
18#ifndef _COMEDI_NI_TIO_H 17#ifndef _COMEDI_NI_TIO_H
19#define _COMEDI_NI_TIO_H 18#define _COMEDI_NI_TIO_H
20 19
21#include "../comedidev.h" 20#include "../comedidev.h"
22 21
23/* forward declarations */
24struct mite_struct;
25struct ni_gpct_device;
26
27enum ni_gpct_register { 22enum ni_gpct_register {
28 NITIO_G0_AUTO_INC, 23 NITIO_G0_AUTO_INC,
29 NITIO_G1_AUTO_INC, 24 NITIO_G1_AUTO_INC,
@@ -106,35 +101,34 @@ enum ni_gpct_variant {
106 101
107struct ni_gpct { 102struct ni_gpct {
108 struct ni_gpct_device *counter_dev; 103 struct ni_gpct_device *counter_dev;
109 unsigned counter_index; 104 unsigned int counter_index;
110 unsigned chip_index; 105 unsigned int chip_index;
111 uint64_t clock_period_ps; /* clock period in picoseconds */ 106 u64 clock_period_ps; /* clock period in picoseconds */
112 struct mite_channel *mite_chan; 107 struct mite_channel *mite_chan;
113 spinlock_t lock; 108 spinlock_t lock; /* protects 'mite_chan' */
114}; 109};
115 110
116struct ni_gpct_device { 111struct ni_gpct_device {
117 struct comedi_device *dev; 112 struct comedi_device *dev;
118 void (*write_register)(struct ni_gpct *counter, unsigned bits, 113 void (*write)(struct ni_gpct *, unsigned int value,
119 enum ni_gpct_register reg); 114 enum ni_gpct_register);
120 unsigned (*read_register)(struct ni_gpct *counter, 115 unsigned int (*read)(struct ni_gpct *, enum ni_gpct_register);
121 enum ni_gpct_register reg);
122 enum ni_gpct_variant variant; 116 enum ni_gpct_variant variant;
123 struct ni_gpct *counters; 117 struct ni_gpct *counters;
124 unsigned num_counters; 118 unsigned int num_counters;
125 unsigned regs[NITIO_NUM_REGS]; 119 unsigned int regs[NITIO_NUM_REGS];
126 spinlock_t regs_lock; 120 spinlock_t regs_lock; /* protects 'regs' */
127}; 121};
128 122
129struct ni_gpct_device * 123struct ni_gpct_device *
130ni_gpct_device_construct(struct comedi_device *, 124ni_gpct_device_construct(struct comedi_device *,
131 void (*write_register)(struct ni_gpct *, 125 void (*write)(struct ni_gpct *,
132 unsigned bits, 126 unsigned int value,
133 enum ni_gpct_register), 127 enum ni_gpct_register),
134 unsigned (*read_register)(struct ni_gpct *, 128 unsigned int (*read)(struct ni_gpct *,
135 enum ni_gpct_register), 129 enum ni_gpct_register),
136 enum ni_gpct_variant, 130 enum ni_gpct_variant,
137 unsigned num_counters); 131 unsigned int num_counters);
138void ni_gpct_device_destroy(struct ni_gpct_device *); 132void ni_gpct_device_destroy(struct ni_gpct_device *);
139void ni_tio_init_counter(struct ni_gpct *); 133void ni_tio_init_counter(struct ni_gpct *);
140int ni_tio_insn_read(struct comedi_device *, struct comedi_subdevice *, 134int ni_tio_insn_read(struct comedi_device *, struct comedi_subdevice *,
diff --git a/drivers/staging/comedi/drivers/ni_tio_internal.h b/drivers/staging/comedi/drivers/ni_tio_internal.h
index 2bceae493e23..b15b10833c42 100644
--- a/drivers/staging/comedi/drivers/ni_tio_internal.h
+++ b/drivers/staging/comedi/drivers/ni_tio_internal.h
@@ -1,20 +1,19 @@
1/* 1/*
2 drivers/ni_tio_internal.h 2 * Header file for NI general purpose counter support code (ni_tio.c and
3 Header file for NI general purpose counter support code (ni_tio.c and 3 * ni_tiocmd.c)
4 ni_tiocmd.c) 4 *
5 5 * COMEDI - Linux Control and Measurement Device Interface
6 COMEDI - Linux Control and Measurement Device Interface 6 *
7 7 * This program is free software; you can redistribute it and/or modify
8 This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by
9 it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or
10 the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version.
11 (at your option) any later version. 11 *
12 12 * This program is distributed in the hope that it will be useful,
13 This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details.
16 GNU General Public License for more details. 16 */
17*/
18 17
19#ifndef _COMEDI_NI_TIO_INTERNAL_H 18#ifndef _COMEDI_NI_TIO_INTERNAL_H
20#define _COMEDI_NI_TIO_INTERNAL_H 19#define _COMEDI_NI_TIO_INTERNAL_H
@@ -24,68 +23,73 @@
24#define NITIO_AUTO_INC_REG(x) (NITIO_G0_AUTO_INC + (x)) 23#define NITIO_AUTO_INC_REG(x) (NITIO_G0_AUTO_INC + (x))
25#define GI_AUTO_INC_MASK 0xff 24#define GI_AUTO_INC_MASK 0xff
26#define NITIO_CMD_REG(x) (NITIO_G0_CMD + (x)) 25#define NITIO_CMD_REG(x) (NITIO_G0_CMD + (x))
27#define GI_ARM (1 << 0) 26#define GI_ARM BIT(0)
28#define GI_SAVE_TRACE (1 << 1) 27#define GI_SAVE_TRACE BIT(1)
29#define GI_LOAD (1 << 2) 28#define GI_LOAD BIT(2)
30#define GI_DISARM (1 << 4) 29#define GI_DISARM BIT(4)
31#define GI_CNT_DIR(x) (((x) & 0x3) << 5) 30#define GI_CNT_DIR(x) (((x) & 0x3) << 5)
32#define GI_CNT_DIR_MASK (3 << 5) 31#define GI_CNT_DIR_MASK GI_CNT_DIR(3)
33#define GI_WRITE_SWITCH (1 << 7) 32#define GI_WRITE_SWITCH BIT(7)
34#define GI_SYNC_GATE (1 << 8) 33#define GI_SYNC_GATE BIT(8)
35#define GI_LITTLE_BIG_ENDIAN (1 << 9) 34#define GI_LITTLE_BIG_ENDIAN BIT(9)
36#define GI_BANK_SWITCH_START (1 << 10) 35#define GI_BANK_SWITCH_START BIT(10)
37#define GI_BANK_SWITCH_MODE (1 << 11) 36#define GI_BANK_SWITCH_MODE BIT(11)
38#define GI_BANK_SWITCH_ENABLE (1 << 12) 37#define GI_BANK_SWITCH_ENABLE BIT(12)
39#define GI_ARM_COPY (1 << 13) 38#define GI_ARM_COPY BIT(13)
40#define GI_SAVE_TRACE_COPY (1 << 14) 39#define GI_SAVE_TRACE_COPY BIT(14)
41#define GI_DISARM_COPY (1 << 15) 40#define GI_DISARM_COPY BIT(15)
42#define NITIO_HW_SAVE_REG(x) (NITIO_G0_HW_SAVE + (x)) 41#define NITIO_HW_SAVE_REG(x) (NITIO_G0_HW_SAVE + (x))
43#define NITIO_SW_SAVE_REG(x) (NITIO_G0_SW_SAVE + (x)) 42#define NITIO_SW_SAVE_REG(x) (NITIO_G0_SW_SAVE + (x))
44#define NITIO_MODE_REG(x) (NITIO_G0_MODE + (x)) 43#define NITIO_MODE_REG(x) (NITIO_G0_MODE + (x))
45#define GI_GATING_DISABLED (0 << 0) 44#define GI_GATING_MODE(x) (((x) & 0x3) << 0)
46#define GI_LEVEL_GATING (1 << 0) 45#define GI_GATING_DISABLED GI_GATING_MODE(0)
47#define GI_RISING_EDGE_GATING (2 << 0) 46#define GI_LEVEL_GATING GI_GATING_MODE(1)
48#define GI_FALLING_EDGE_GATING (3 << 0) 47#define GI_RISING_EDGE_GATING GI_GATING_MODE(2)
49#define GI_GATING_MODE_MASK (3 << 0) 48#define GI_FALLING_EDGE_GATING GI_GATING_MODE(3)
50#define GI_GATE_ON_BOTH_EDGES (1 << 2) 49#define GI_GATING_MODE_MASK GI_GATING_MODE(3)
51#define GI_EDGE_GATE_STARTS_STOPS (0 << 3) 50#define GI_GATE_ON_BOTH_EDGES BIT(2)
52#define GI_EDGE_GATE_STOPS_STARTS (1 << 3) 51#define GI_EDGE_GATE_MODE(x) (((x) & 0x3) << 3)
53#define GI_EDGE_GATE_STARTS (2 << 3) 52#define GI_EDGE_GATE_STARTS_STOPS GI_EDGE_GATE_MODE(0)
54#define GI_EDGE_GATE_NO_STARTS_OR_STOPS (3 << 3) 53#define GI_EDGE_GATE_STOPS_STARTS GI_EDGE_GATE_MODE(1)
55#define GI_EDGE_GATE_MODE_MASK (3 << 3) 54#define GI_EDGE_GATE_STARTS GI_EDGE_GATE_MODE(2)
56#define GI_STOP_ON_GATE (0 << 5) 55#define GI_EDGE_GATE_NO_STARTS_OR_STOPS GI_EDGE_GATE_MODE(3)
57#define GI_STOP_ON_GATE_OR_TC (1 << 5) 56#define GI_EDGE_GATE_MODE_MASK GI_EDGE_GATE_MODE(3)
58#define GI_STOP_ON_GATE_OR_SECOND_TC (2 << 5) 57#define GI_STOP_MODE(x) (((x) & 0x3) << 5)
59#define GI_STOP_MODE_MASK (3 << 5) 58#define GI_STOP_ON_GATE GI_STOP_MODE(0)
60#define GI_LOAD_SRC_SEL (1 << 7) 59#define GI_STOP_ON_GATE_OR_TC GI_STOP_MODE(1)
61#define GI_OUTPUT_TC_PULSE (1 << 8) 60#define GI_STOP_ON_GATE_OR_SECOND_TC GI_STOP_MODE(2)
62#define GI_OUTPUT_TC_TOGGLE (2 << 8) 61#define GI_STOP_MODE_MASK GI_STOP_MODE(3)
63#define GI_OUTPUT_TC_OR_GATE_TOGGLE (3 << 8) 62#define GI_LOAD_SRC_SEL BIT(7)
64#define GI_OUTPUT_MODE_MASK (3 << 8) 63#define GI_OUTPUT_MODE(x) (((x) & 0x3) << 8)
65#define GI_NO_HARDWARE_DISARM (0 << 10) 64#define GI_OUTPUT_TC_PULSE GI_OUTPUT_MODE(1)
66#define GI_DISARM_AT_TC (1 << 10) 65#define GI_OUTPUT_TC_TOGGLE GI_OUTPUT_MODE(2)
67#define GI_DISARM_AT_GATE (2 << 10) 66#define GI_OUTPUT_TC_OR_GATE_TOGGLE GI_OUTPUT_MODE(3)
68#define GI_DISARM_AT_TC_OR_GATE (3 << 10) 67#define GI_OUTPUT_MODE_MASK GI_OUTPUT_MODE(3)
69#define GI_COUNTING_ONCE_MASK (3 << 10) 68#define GI_COUNTING_ONCE(x) (((x) & 0x3) << 10)
70#define GI_LOADING_ON_TC (1 << 12) 69#define GI_NO_HARDWARE_DISARM GI_COUNTING_ONCE(0)
71#define GI_GATE_POL_INVERT (1 << 13) 70#define GI_DISARM_AT_TC GI_COUNTING_ONCE(1)
72#define GI_LOADING_ON_GATE (1 << 14) 71#define GI_DISARM_AT_GATE GI_COUNTING_ONCE(2)
73#define GI_RELOAD_SRC_SWITCHING (1 << 15) 72#define GI_DISARM_AT_TC_OR_GATE GI_COUNTING_ONCE(3)
73#define GI_COUNTING_ONCE_MASK GI_COUNTING_ONCE(3)
74#define GI_LOADING_ON_TC BIT(12)
75#define GI_GATE_POL_INVERT BIT(13)
76#define GI_LOADING_ON_GATE BIT(14)
77#define GI_RELOAD_SRC_SWITCHING BIT(15)
74#define NITIO_LOADA_REG(x) (NITIO_G0_LOADA + (x)) 78#define NITIO_LOADA_REG(x) (NITIO_G0_LOADA + (x))
75#define NITIO_LOADB_REG(x) (NITIO_G0_LOADB + (x)) 79#define NITIO_LOADB_REG(x) (NITIO_G0_LOADB + (x))
76#define NITIO_INPUT_SEL_REG(x) (NITIO_G0_INPUT_SEL + (x)) 80#define NITIO_INPUT_SEL_REG(x) (NITIO_G0_INPUT_SEL + (x))
77#define GI_READ_ACKS_IRQ (1 << 0) 81#define GI_READ_ACKS_IRQ BIT(0)
78#define GI_WRITE_ACKS_IRQ (1 << 1) 82#define GI_WRITE_ACKS_IRQ BIT(1)
79#define GI_BITS_TO_SRC(x) (((x) >> 2) & 0x1f) 83#define GI_BITS_TO_SRC(x) (((x) >> 2) & 0x1f)
80#define GI_SRC_SEL(x) (((x) & 0x1f) << 2) 84#define GI_SRC_SEL(x) (((x) & 0x1f) << 2)
81#define GI_SRC_SEL_MASK (0x1f << 2) 85#define GI_SRC_SEL_MASK GI_SRC_SEL(0x1f)
82#define GI_BITS_TO_GATE(x) (((x) >> 7) & 0x1f) 86#define GI_BITS_TO_GATE(x) (((x) >> 7) & 0x1f)
83#define GI_GATE_SEL(x) (((x) & 0x1f) << 7) 87#define GI_GATE_SEL(x) (((x) & 0x1f) << 7)
84#define GI_GATE_SEL_MASK (0x1f << 7) 88#define GI_GATE_SEL_MASK GI_GATE_SEL(0x1f)
85#define GI_GATE_SEL_LOAD_SRC (1 << 12) 89#define GI_GATE_SEL_LOAD_SRC BIT(12)
86#define GI_OR_GATE (1 << 13) 90#define GI_OR_GATE BIT(13)
87#define GI_OUTPUT_POL_INVERT (1 << 14) 91#define GI_OUTPUT_POL_INVERT BIT(14)
88#define GI_SRC_POL_INVERT (1 << 15) 92#define GI_SRC_POL_INVERT BIT(15)
89#define NITIO_CNT_MODE_REG(x) (NITIO_G0_CNT_MODE + (x)) 93#define NITIO_CNT_MODE_REG(x) (NITIO_G0_CNT_MODE + (x))
90#define GI_CNT_MODE(x) (((x) & 0x7) << 0) 94#define GI_CNT_MODE(x) (((x) & 0x7) << 0)
91#define GI_CNT_MODE_NORMAL GI_CNT_MODE(0) 95#define GI_CNT_MODE_NORMAL GI_CNT_MODE(0)
@@ -94,152 +98,84 @@
94#define GI_CNT_MODE_QUADX4 GI_CNT_MODE(3) 98#define GI_CNT_MODE_QUADX4 GI_CNT_MODE(3)
95#define GI_CNT_MODE_TWO_PULSE GI_CNT_MODE(4) 99#define GI_CNT_MODE_TWO_PULSE GI_CNT_MODE(4)
96#define GI_CNT_MODE_SYNC_SRC GI_CNT_MODE(6) 100#define GI_CNT_MODE_SYNC_SRC GI_CNT_MODE(6)
97#define GI_CNT_MODE_MASK (7 << 0) 101#define GI_CNT_MODE_MASK GI_CNT_MODE(7)
98#define GI_INDEX_MODE (1 << 4) 102#define GI_INDEX_MODE BIT(4)
99#define GI_INDEX_PHASE(x) (((x) & 0x3) << 5) 103#define GI_INDEX_PHASE(x) (((x) & 0x3) << 5)
100#define GI_INDEX_PHASE_MASK (3 << 5) 104#define GI_INDEX_PHASE_MASK GI_INDEX_PHASE(3)
101#define GI_HW_ARM_ENA (1 << 7) 105#define GI_HW_ARM_ENA BIT(7)
102#define GI_HW_ARM_SEL(x) ((x) << 8) 106#define GI_HW_ARM_SEL(x) ((x) << 8)
103#define GI_660X_HW_ARM_SEL_MASK (0x7 << 8) 107#define GI_660X_HW_ARM_SEL_MASK GI_HW_ARM_SEL(0x7)
104#define GI_M_HW_ARM_SEL_MASK (0x1f << 8) 108#define GI_M_HW_ARM_SEL_MASK GI_HW_ARM_SEL(0x1f)
105#define GI_660X_PRESCALE_X8 (1 << 12) 109#define GI_660X_PRESCALE_X8 BIT(12)
106#define GI_M_PRESCALE_X8 (1 << 13) 110#define GI_M_PRESCALE_X8 BIT(13)
107#define GI_660X_ALT_SYNC (1 << 13) 111#define GI_660X_ALT_SYNC BIT(13)
108#define GI_M_ALT_SYNC (1 << 14) 112#define GI_M_ALT_SYNC BIT(14)
109#define GI_660X_PRESCALE_X2 (1 << 14) 113#define GI_660X_PRESCALE_X2 BIT(14)
110#define GI_M_PRESCALE_X2 (1 << 15) 114#define GI_M_PRESCALE_X2 BIT(15)
111#define NITIO_GATE2_REG(x) (NITIO_G0_GATE2 + (x)) 115#define NITIO_GATE2_REG(x) (NITIO_G0_GATE2 + (x))
112#define GI_GATE2_MODE (1 << 0) 116#define GI_GATE2_MODE BIT(0)
113#define GI_BITS_TO_GATE2(x) (((x) >> 7) & 0x1f) 117#define GI_BITS_TO_GATE2(x) (((x) >> 7) & 0x1f)
114#define GI_GATE2_SEL(x) (((x) & 0x1f) << 7) 118#define GI_GATE2_SEL(x) (((x) & 0x1f) << 7)
115#define GI_GATE2_SEL_MASK (0x1f << 7) 119#define GI_GATE2_SEL_MASK GI_GATE2_SEL(0x1f)
116#define GI_GATE2_POL_INVERT (1 << 13) 120#define GI_GATE2_POL_INVERT BIT(13)
117#define GI_GATE2_SUBSEL (1 << 14) 121#define GI_GATE2_SUBSEL BIT(14)
118#define GI_SRC_SUBSEL (1 << 15) 122#define GI_SRC_SUBSEL BIT(15)
119#define NITIO_SHARED_STATUS_REG(x) (NITIO_G01_STATUS + ((x) / 2)) 123#define NITIO_SHARED_STATUS_REG(x) (NITIO_G01_STATUS + ((x) / 2))
120#define GI_SAVE(x) (((x) % 2) ? (1 << 1) : (1 << 0)) 124#define GI_SAVE(x) (((x) % 2) ? BIT(1) : BIT(0))
121#define GI_COUNTING(x) (((x) % 2) ? (1 << 3) : (1 << 2)) 125#define GI_COUNTING(x) (((x) % 2) ? BIT(3) : BIT(2))
122#define GI_NEXT_LOAD_SRC(x) (((x) % 2) ? (1 << 5) : (1 << 4)) 126#define GI_NEXT_LOAD_SRC(x) (((x) % 2) ? BIT(5) : BIT(4))
123#define GI_STALE_DATA(x) (((x) % 2) ? (1 << 7) : (1 << 6)) 127#define GI_STALE_DATA(x) (((x) % 2) ? BIT(7) : BIT(6))
124#define GI_ARMED(x) (((x) % 2) ? (1 << 9) : (1 << 8)) 128#define GI_ARMED(x) (((x) % 2) ? BIT(9) : BIT(8))
125#define GI_NO_LOAD_BETWEEN_GATES(x) (((x) % 2) ? (1 << 11) : (1 << 10)) 129#define GI_NO_LOAD_BETWEEN_GATES(x) (((x) % 2) ? BIT(11) : BIT(10))
126#define GI_TC_ERROR(x) (((x) % 2) ? (1 << 13) : (1 << 12)) 130#define GI_TC_ERROR(x) (((x) % 2) ? BIT(13) : BIT(12))
127#define GI_GATE_ERROR(x) (((x) % 2) ? (1 << 15) : (1 << 14)) 131#define GI_GATE_ERROR(x) (((x) % 2) ? BIT(15) : BIT(14))
128#define NITIO_RESET_REG(x) (NITIO_G01_RESET + ((x) / 2)) 132#define NITIO_RESET_REG(x) (NITIO_G01_RESET + ((x) / 2))
129#define GI_RESET(x) (1 << (2 + ((x) % 2))) 133#define GI_RESET(x) BIT(2 + ((x) % 2))
130#define NITIO_STATUS1_REG(x) (NITIO_G01_STATUS1 + ((x) / 2)) 134#define NITIO_STATUS1_REG(x) (NITIO_G01_STATUS1 + ((x) / 2))
131#define NITIO_STATUS2_REG(x) (NITIO_G01_STATUS2 + ((x) / 2)) 135#define NITIO_STATUS2_REG(x) (NITIO_G01_STATUS2 + ((x) / 2))
132#define GI_OUTPUT(x) (((x) % 2) ? (1 << 1) : (1 << 0)) 136#define GI_OUTPUT(x) (((x) % 2) ? BIT(1) : BIT(0))
133#define GI_HW_SAVE(x) (((x) % 2) ? (1 << 13) : (1 << 12)) 137#define GI_HW_SAVE(x) (((x) % 2) ? BIT(13) : BIT(12))
134#define GI_PERMANENT_STALE(x) (((x) % 2) ? (1 << 15) : (1 << 14)) 138#define GI_PERMANENT_STALE(x) (((x) % 2) ? BIT(15) : BIT(14))
135#define NITIO_DMA_CFG_REG(x) (NITIO_G0_DMA_CFG + (x)) 139#define NITIO_DMA_CFG_REG(x) (NITIO_G0_DMA_CFG + (x))
136#define GI_DMA_ENABLE (1 << 0) 140#define GI_DMA_ENABLE BIT(0)
137#define GI_DMA_WRITE (1 << 1) 141#define GI_DMA_WRITE BIT(1)
138#define GI_DMA_INT_ENA (1 << 2) 142#define GI_DMA_INT_ENA BIT(2)
139#define GI_DMA_RESET (1 << 3) 143#define GI_DMA_RESET BIT(3)
140#define GI_DMA_BANKSW_ERROR (1 << 4) 144#define GI_DMA_BANKSW_ERROR BIT(4)
141#define NITIO_DMA_STATUS_REG(x) (NITIO_G0_DMA_STATUS + (x)) 145#define NITIO_DMA_STATUS_REG(x) (NITIO_G0_DMA_STATUS + (x))
142#define GI_DMA_READBANK (1 << 13) 146#define GI_DMA_READBANK BIT(13)
143#define GI_DRQ_ERROR (1 << 14) 147#define GI_DRQ_ERROR BIT(14)
144#define GI_DRQ_STATUS (1 << 15) 148#define GI_DRQ_STATUS BIT(15)
145#define NITIO_ABZ_REG(x) (NITIO_G0_ABZ + (x)) 149#define NITIO_ABZ_REG(x) (NITIO_G0_ABZ + (x))
146#define NITIO_INT_ACK_REG(x) (NITIO_G0_INT_ACK + (x)) 150#define NITIO_INT_ACK_REG(x) (NITIO_G0_INT_ACK + (x))
147#define GI_GATE_ERROR_CONFIRM(x) (((x) % 2) ? (1 << 1) : (1 << 5)) 151#define GI_GATE_ERROR_CONFIRM(x) (((x) % 2) ? BIT(1) : BIT(5))
148#define GI_TC_ERROR_CONFIRM(x) (((x) % 2) ? (1 << 2) : (1 << 6)) 152#define GI_TC_ERROR_CONFIRM(x) (((x) % 2) ? BIT(2) : BIT(6))
149#define GI_TC_INTERRUPT_ACK (1 << 14) 153#define GI_TC_INTERRUPT_ACK BIT(14)
150#define GI_GATE_INTERRUPT_ACK (1 << 15) 154#define GI_GATE_INTERRUPT_ACK BIT(15)
151#define NITIO_STATUS_REG(x) (NITIO_G0_STATUS + (x)) 155#define NITIO_STATUS_REG(x) (NITIO_G0_STATUS + (x))
152#define GI_GATE_INTERRUPT (1 << 2) 156#define GI_GATE_INTERRUPT BIT(2)
153#define GI_TC (1 << 3) 157#define GI_TC BIT(3)
154#define GI_INTERRUPT (1 << 15) 158#define GI_INTERRUPT BIT(15)
155#define NITIO_INT_ENA_REG(x) (NITIO_G0_INT_ENA + (x)) 159#define NITIO_INT_ENA_REG(x) (NITIO_G0_INT_ENA + (x))
156#define GI_TC_INTERRUPT_ENABLE(x) (((x) % 2) ? (1 << 9) : (1 << 6)) 160#define GI_TC_INTERRUPT_ENABLE(x) (((x) % 2) ? BIT(9) : BIT(6))
157#define GI_GATE_INTERRUPT_ENABLE(x) (((x) % 2) ? (1 << 10) : (1 << 8)) 161#define GI_GATE_INTERRUPT_ENABLE(x) (((x) % 2) ? BIT(10) : BIT(8))
158
159static inline void write_register(struct ni_gpct *counter, unsigned bits,
160 enum ni_gpct_register reg)
161{
162 BUG_ON(reg >= NITIO_NUM_REGS);
163 counter->counter_dev->write_register(counter, bits, reg);
164}
165
166static inline unsigned read_register(struct ni_gpct *counter,
167 enum ni_gpct_register reg)
168{
169 BUG_ON(reg >= NITIO_NUM_REGS);
170 return counter->counter_dev->read_register(counter, reg);
171}
172 162
173static inline int ni_tio_counting_mode_registers_present(const struct 163void ni_tio_write(struct ni_gpct *, unsigned int value, enum ni_gpct_register);
174 ni_gpct_device 164unsigned int ni_tio_read(struct ni_gpct *, enum ni_gpct_register);
175 *counter_dev)
176{
177 switch (counter_dev->variant) {
178 case ni_gpct_variant_e_series:
179 return 0;
180 case ni_gpct_variant_m_series:
181 case ni_gpct_variant_660x:
182 return 1;
183 default:
184 BUG();
185 break;
186 }
187 return 0;
188}
189
190static inline void ni_tio_set_bits_transient(struct ni_gpct *counter,
191 enum ni_gpct_register
192 register_index, unsigned bit_mask,
193 unsigned bit_values,
194 unsigned transient_bit_values)
195{
196 struct ni_gpct_device *counter_dev = counter->counter_dev;
197 unsigned long flags;
198
199 BUG_ON(register_index >= NITIO_NUM_REGS);
200 spin_lock_irqsave(&counter_dev->regs_lock, flags);
201 counter_dev->regs[register_index] &= ~bit_mask;
202 counter_dev->regs[register_index] |= (bit_values & bit_mask);
203 write_register(counter,
204 counter_dev->regs[register_index] | transient_bit_values,
205 register_index);
206 mmiowb();
207 spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
208}
209 165
210/* ni_tio_set_bits( ) is for safely writing to registers whose bits may be 166static inline bool
211 * twiddled in interrupt context, or whose software copy may be read in 167ni_tio_counting_mode_registers_present(const struct ni_gpct_device *counter_dev)
212 * interrupt context.
213 */
214static inline void ni_tio_set_bits(struct ni_gpct *counter,
215 enum ni_gpct_register register_index,
216 unsigned bit_mask, unsigned bit_values)
217{ 168{
218 ni_tio_set_bits_transient(counter, register_index, bit_mask, bit_values, 169 /* m series and 660x variants have counting mode registers */
219 0x0); 170 return counter_dev->variant != ni_gpct_variant_e_series;
220} 171}
221 172
222/* ni_tio_get_soft_copy( ) is for safely reading the software copy of a register 173void ni_tio_set_bits(struct ni_gpct *, enum ni_gpct_register reg,
223whose bits might be modified in interrupt context, or whose software copy 174 unsigned int mask, unsigned int value);
224might need to be read in interrupt context. 175unsigned int ni_tio_get_soft_copy(const struct ni_gpct *,
225*/ 176 enum ni_gpct_register reg);
226static inline unsigned ni_tio_get_soft_copy(const struct ni_gpct *counter,
227 enum ni_gpct_register
228 register_index)
229{
230 struct ni_gpct_device *counter_dev = counter->counter_dev;
231 unsigned long flags;
232 unsigned value;
233
234 BUG_ON(register_index >= NITIO_NUM_REGS);
235 spin_lock_irqsave(&counter_dev->regs_lock, flags);
236 value = counter_dev->regs[register_index];
237 spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
238 return value;
239}
240 177
241int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger); 178int ni_tio_arm(struct ni_gpct *, bool arm, unsigned int start_trigger);
242int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index, 179int ni_tio_set_gate_src(struct ni_gpct *, unsigned int gate, unsigned int src);
243 unsigned int gate_source);
244 180
245#endif /* _COMEDI_NI_TIO_INTERNAL_H */ 181#endif /* _COMEDI_NI_TIO_INTERNAL_H */
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 823e47910004..9007c57544bf 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -1,19 +1,18 @@
1/* 1/*
2 comedi/drivers/ni_tiocmd.c 2 * Command support for NI general purpose counters
3 Command support for NI general purpose counters 3 *
4 4 * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
5 Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net> 5 *
6 6 * This program is free software; you can redistribute it and/or modify
7 This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by
8 it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or
9 the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version.
10 (at your option) any later version. 10 *
11 11 * This program is distributed in the hope that it will be useful,
12 This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details.
15 GNU General Public License for more details. 15 */
16*/
17 16
18/* 17/*
19 * Module: ni_tiocmd 18 * Module: ni_tiocmd
@@ -36,13 +35,10 @@
36 * DAQ 660x Register-Level Programmer Manual (NI 370505A-01) 35 * DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
37 * DAQ 6601/6602 User Manual (NI 322137B-01) 36 * DAQ 6601/6602 User Manual (NI 322137B-01)
38 * 340934b.pdf DAQ-STC reference manual 37 * 340934b.pdf DAQ-STC reference manual
38 *
39 * TODO: Support use of both banks X and Y
39 */ 40 */
40 41
41/*
42TODO:
43 Support use of both banks X and Y
44*/
45
46#include <linux/module.h> 42#include <linux/module.h>
47#include "ni_tio_internal.h" 43#include "ni_tio_internal.h"
48#include "mite.h" 44#include "mite.h"
@@ -51,9 +47,9 @@ static void ni_tio_configure_dma(struct ni_gpct *counter,
51 bool enable, bool read) 47 bool enable, bool read)
52{ 48{
53 struct ni_gpct_device *counter_dev = counter->counter_dev; 49 struct ni_gpct_device *counter_dev = counter->counter_dev;
54 unsigned cidx = counter->counter_index; 50 unsigned int cidx = counter->counter_index;
55 unsigned mask; 51 unsigned int mask;
56 unsigned bits; 52 unsigned int bits;
57 53
58 mask = GI_READ_ACKS_IRQ | GI_WRITE_ACKS_IRQ; 54 mask = GI_READ_ACKS_IRQ | GI_WRITE_ACKS_IRQ;
59 bits = 0; 55 bits = 0;
@@ -103,7 +99,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
103 spin_unlock_irqrestore(&counter->lock, flags); 99 spin_unlock_irqrestore(&counter->lock, flags);
104 if (ret < 0) 100 if (ret < 0)
105 return ret; 101 return ret;
106 ret = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE); 102 ret = ni_tio_arm(counter, true, NI_GPCT_ARM_IMMEDIATE);
107 s->async->inttrig = NULL; 103 s->async->inttrig = NULL;
108 104
109 return ret; 105 return ret;
@@ -113,7 +109,7 @@ static int ni_tio_input_cmd(struct comedi_subdevice *s)
113{ 109{
114 struct ni_gpct *counter = s->private; 110 struct ni_gpct *counter = s->private;
115 struct ni_gpct_device *counter_dev = counter->counter_dev; 111 struct ni_gpct_device *counter_dev = counter->counter_dev;
116 unsigned cidx = counter->counter_index; 112 unsigned int cidx = counter->counter_index;
117 struct comedi_async *async = s->async; 113 struct comedi_async *async = s->async;
118 struct comedi_cmd *cmd = &async->cmd; 114 struct comedi_cmd *cmd = &async->cmd;
119 int ret = 0; 115 int ret = 0;
@@ -129,9 +125,6 @@ static int ni_tio_input_cmd(struct comedi_subdevice *s)
129 case ni_gpct_variant_e_series: 125 case ni_gpct_variant_e_series:
130 mite_prep_dma(counter->mite_chan, 16, 32); 126 mite_prep_dma(counter->mite_chan, 16, 32);
131 break; 127 break;
132 default:
133 BUG();
134 break;
135 } 128 }
136 ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_SAVE_TRACE, 0); 129 ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_SAVE_TRACE, 0);
137 ni_tio_configure_dma(counter, true, true); 130 ni_tio_configure_dma(counter, true, true);
@@ -143,9 +136,9 @@ static int ni_tio_input_cmd(struct comedi_subdevice *s)
143 mite_dma_arm(counter->mite_chan); 136 mite_dma_arm(counter->mite_chan);
144 137
145 if (cmd->start_src == TRIG_NOW) 138 if (cmd->start_src == TRIG_NOW)
146 ret = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE); 139 ret = ni_tio_arm(counter, true, NI_GPCT_ARM_IMMEDIATE);
147 else if (cmd->start_src == TRIG_EXT) 140 else if (cmd->start_src == TRIG_EXT)
148 ret = ni_tio_arm(counter, 1, cmd->start_arg); 141 ret = ni_tio_arm(counter, true, cmd->start_arg);
149 } 142 }
150 return ret; 143 return ret;
151} 144}
@@ -163,9 +156,9 @@ static int ni_tio_cmd_setup(struct comedi_subdevice *s)
163{ 156{
164 struct comedi_cmd *cmd = &s->async->cmd; 157 struct comedi_cmd *cmd = &s->async->cmd;
165 struct ni_gpct *counter = s->private; 158 struct ni_gpct *counter = s->private;
166 unsigned cidx = counter->counter_index; 159 unsigned int cidx = counter->counter_index;
167 int set_gate_source = 0; 160 int set_gate_source = 0;
168 unsigned gate_source; 161 unsigned int gate_source;
169 int retval = 0; 162 int retval = 0;
170 163
171 if (cmd->scan_begin_src == TRIG_EXT) { 164 if (cmd->scan_begin_src == TRIG_EXT) {
@@ -289,10 +282,10 @@ EXPORT_SYMBOL_GPL(ni_tio_cmdtest);
289 282
290int ni_tio_cancel(struct ni_gpct *counter) 283int ni_tio_cancel(struct ni_gpct *counter)
291{ 284{
292 unsigned cidx = counter->counter_index; 285 unsigned int cidx = counter->counter_index;
293 unsigned long flags; 286 unsigned long flags;
294 287
295 ni_tio_arm(counter, 0, 0); 288 ni_tio_arm(counter, false, 0);
296 spin_lock_irqsave(&counter->lock, flags); 289 spin_lock_irqsave(&counter->lock, flags);
297 if (counter->mite_chan) 290 if (counter->mite_chan)
298 mite_dma_disarm(counter->mite_chan); 291 mite_dma_disarm(counter->mite_chan);
@@ -305,9 +298,6 @@ int ni_tio_cancel(struct ni_gpct *counter)
305} 298}
306EXPORT_SYMBOL_GPL(ni_tio_cancel); 299EXPORT_SYMBOL_GPL(ni_tio_cancel);
307 300
308 /* During buffered input counter operation for e-series, the gate
309 interrupt is acked automatically by the dma controller, due to the
310 Gi_Read/Write_Acknowledges_IRQ bits in the input select register. */
311static int should_ack_gate(struct ni_gpct *counter) 301static int should_ack_gate(struct ni_gpct *counter)
312{ 302{
313 unsigned long flags; 303 unsigned long flags;
@@ -315,12 +305,19 @@ static int should_ack_gate(struct ni_gpct *counter)
315 305
316 switch (counter->counter_dev->variant) { 306 switch (counter->counter_dev->variant) {
317 case ni_gpct_variant_m_series: 307 case ni_gpct_variant_m_series:
318 /* not sure if 660x really supports gate
319 interrupts (the bits are not listed
320 in register-level manual) */
321 case ni_gpct_variant_660x: 308 case ni_gpct_variant_660x:
309 /*
310 * not sure if 660x really supports gate interrupts
311 * (the bits are not listed in register-level manual)
312 */
322 return 1; 313 return 1;
323 case ni_gpct_variant_e_series: 314 case ni_gpct_variant_e_series:
315 /*
316 * During buffered input counter operation for e-series,
317 * the gate interrupt is acked automatically by the dma
318 * controller, due to the Gi_Read/Write_Acknowledges_IRQ
319 * bits in the input select register.
320 */
324 spin_lock_irqsave(&counter->lock, flags); 321 spin_lock_irqsave(&counter->lock, flags);
325 { 322 {
326 if (!counter->mite_chan || 323 if (!counter->mite_chan ||
@@ -338,15 +335,14 @@ static int should_ack_gate(struct ni_gpct *counter)
338static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, 335static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
339 int *gate_error, 336 int *gate_error,
340 int *tc_error, 337 int *tc_error,
341 int *perm_stale_data, 338 int *perm_stale_data)
342 int *stale_data)
343{ 339{
344 unsigned cidx = counter->counter_index; 340 unsigned int cidx = counter->counter_index;
345 const unsigned short gxx_status = read_register(counter, 341 const unsigned short gxx_status = ni_tio_read(counter,
346 NITIO_SHARED_STATUS_REG(cidx)); 342 NITIO_SHARED_STATUS_REG(cidx));
347 const unsigned short gi_status = read_register(counter, 343 const unsigned short gi_status = ni_tio_read(counter,
348 NITIO_STATUS_REG(cidx)); 344 NITIO_STATUS_REG(cidx));
349 unsigned ack = 0; 345 unsigned int ack = 0;
350 346
351 if (gate_error) 347 if (gate_error)
352 *gate_error = 0; 348 *gate_error = 0;
@@ -354,15 +350,15 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
354 *tc_error = 0; 350 *tc_error = 0;
355 if (perm_stale_data) 351 if (perm_stale_data)
356 *perm_stale_data = 0; 352 *perm_stale_data = 0;
357 if (stale_data)
358 *stale_data = 0;
359 353
360 if (gxx_status & GI_GATE_ERROR(cidx)) { 354 if (gxx_status & GI_GATE_ERROR(cidx)) {
361 ack |= GI_GATE_ERROR_CONFIRM(cidx); 355 ack |= GI_GATE_ERROR_CONFIRM(cidx);
362 if (gate_error) { 356 if (gate_error) {
363 /*660x don't support automatic acknowledgment 357 /*
364 of gate interrupt via dma read/write 358 * 660x don't support automatic acknowledgment
365 and report bogus gate errors */ 359 * of gate interrupt via dma read/write
360 * and report bogus gate errors
361 */
366 if (counter->counter_dev->variant != 362 if (counter->counter_dev->variant !=
367 ni_gpct_variant_660x) 363 ni_gpct_variant_660x)
368 *gate_error = 1; 364 *gate_error = 1;
@@ -380,14 +376,10 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
380 ack |= GI_GATE_INTERRUPT_ACK; 376 ack |= GI_GATE_INTERRUPT_ACK;
381 } 377 }
382 if (ack) 378 if (ack)
383 write_register(counter, ack, NITIO_INT_ACK_REG(cidx)); 379 ni_tio_write(counter, ack, NITIO_INT_ACK_REG(cidx));
384 if (ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx)) & 380 if (ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx)) &
385 GI_LOADING_ON_GATE) { 381 GI_LOADING_ON_GATE) {
386 if (gxx_status & GI_STALE_DATA(cidx)) { 382 if (ni_tio_read(counter, NITIO_STATUS2_REG(cidx)) &
387 if (stale_data)
388 *stale_data = 1;
389 }
390 if (read_register(counter, NITIO_STATUS2_REG(cidx)) &
391 GI_PERMANENT_STALE(cidx)) { 383 GI_PERMANENT_STALE(cidx)) {
392 dev_info(counter->counter_dev->dev->class_dev, 384 dev_info(counter->counter_dev->dev->class_dev,
393 "%s: Gi_Permanent_Stale_Data detected.\n", 385 "%s: Gi_Permanent_Stale_Data detected.\n",
@@ -400,22 +392,21 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
400 392
401void ni_tio_acknowledge(struct ni_gpct *counter) 393void ni_tio_acknowledge(struct ni_gpct *counter)
402{ 394{
403 ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL); 395 ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL);
404} 396}
405EXPORT_SYMBOL_GPL(ni_tio_acknowledge); 397EXPORT_SYMBOL_GPL(ni_tio_acknowledge);
406 398
407void ni_tio_handle_interrupt(struct ni_gpct *counter, 399void ni_tio_handle_interrupt(struct ni_gpct *counter,
408 struct comedi_subdevice *s) 400 struct comedi_subdevice *s)
409{ 401{
410 unsigned cidx = counter->counter_index; 402 unsigned int cidx = counter->counter_index;
411 unsigned gpct_mite_status;
412 unsigned long flags; 403 unsigned long flags;
413 int gate_error; 404 int gate_error;
414 int tc_error; 405 int tc_error;
415 int perm_stale_data; 406 int perm_stale_data;
416 407
417 ni_tio_acknowledge_and_confirm(counter, &gate_error, &tc_error, 408 ni_tio_acknowledge_and_confirm(counter, &gate_error, &tc_error,
418 &perm_stale_data, NULL); 409 &perm_stale_data);
419 if (gate_error) { 410 if (gate_error) {
420 dev_notice(counter->counter_dev->dev->class_dev, 411 dev_notice(counter->counter_dev->dev->class_dev,
421 "%s: Gi_Gate_Error detected.\n", __func__); 412 "%s: Gi_Gate_Error detected.\n", __func__);
@@ -426,7 +417,7 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
426 switch (counter->counter_dev->variant) { 417 switch (counter->counter_dev->variant) {
427 case ni_gpct_variant_m_series: 418 case ni_gpct_variant_m_series:
428 case ni_gpct_variant_660x: 419 case ni_gpct_variant_660x:
429 if (read_register(counter, NITIO_DMA_STATUS_REG(cidx)) & 420 if (ni_tio_read(counter, NITIO_DMA_STATUS_REG(cidx)) &
430 GI_DRQ_ERROR) { 421 GI_DRQ_ERROR) {
431 dev_notice(counter->counter_dev->dev->class_dev, 422 dev_notice(counter->counter_dev->dev->class_dev,
432 "%s: Gi_DRQ_Error detected.\n", __func__); 423 "%s: Gi_DRQ_Error detected.\n", __func__);
@@ -437,16 +428,8 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
437 break; 428 break;
438 } 429 }
439 spin_lock_irqsave(&counter->lock, flags); 430 spin_lock_irqsave(&counter->lock, flags);
440 if (!counter->mite_chan) { 431 if (counter->mite_chan)
441 spin_unlock_irqrestore(&counter->lock, flags); 432 mite_ack_linkc(counter->mite_chan, s, true);
442 return;
443 }
444 gpct_mite_status = mite_get_status(counter->mite_chan);
445 if (gpct_mite_status & CHSR_LINKC)
446 writel(CHOR_CLRLC,
447 counter->mite_chan->mite->mite_io_addr +
448 MITE_CHOR(counter->mite_chan->channel));
449 mite_sync_input_dma(counter->mite_chan, s);
450 spin_unlock_irqrestore(&counter->lock, flags); 433 spin_unlock_irqrestore(&counter->lock, flags);
451} 434}
452EXPORT_SYMBOL_GPL(ni_tio_handle_interrupt); 435EXPORT_SYMBOL_GPL(ni_tio_handle_interrupt);
diff --git a/drivers/staging/comedi/drivers/plx9052.h b/drivers/staging/comedi/drivers/plx9052.h
index fbcf25069807..2892e6528967 100644
--- a/drivers/staging/comedi/drivers/plx9052.h
+++ b/drivers/staging/comedi/drivers/plx9052.h
@@ -1,22 +1,21 @@
1/* 1/*
2 comedi/drivers/plx9052.h 2 * Definitions for the PLX-9052 PCI interface chip
3 Definitions for the PLX-9052 PCI interface chip 3 *
4 4 * Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
5 Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/> 5 *
6 6 * COMEDI - Linux Control and Measurement Device Interface
7 COMEDI - Linux Control and Measurement Device Interface 7 * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
8 Copyright (C) 2000 David A. Schleef <ds@schleef.org> 8 *
9 9 * This program is free software; you can redistribute it and/or modify
10 This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by
11 it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation; either version 2 of the License, or
12 the Free Software Foundation; either version 2 of the License, or 12 * (at your option) any later version.
13 (at your option) any later version. 13 *
14 14 * This program is distributed in the hope that it will be useful,
15 This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details.
18 GNU General Public License for more details. 18 */
19*/
20 19
21#ifndef _PLX9052_H_ 20#ifndef _PLX9052_H_
22#define _PLX9052_H_ 21#define _PLX9052_H_
@@ -25,55 +24,56 @@
25 * INTCSR - Interrupt Control/Status register 24 * INTCSR - Interrupt Control/Status register
26 */ 25 */
27#define PLX9052_INTCSR 0x4c 26#define PLX9052_INTCSR 0x4c
28#define PLX9052_INTCSR_LI1ENAB (1 << 0) /* LI1 enabled */ 27#define PLX9052_INTCSR_LI1ENAB BIT(0) /* LI1 enabled */
29#define PLX9052_INTCSR_LI1POL (1 << 1) /* LI1 active high */ 28#define PLX9052_INTCSR_LI1POL BIT(1) /* LI1 active high */
30#define PLX9052_INTCSR_LI1STAT (1 << 2) /* LI1 active */ 29#define PLX9052_INTCSR_LI1STAT BIT(2) /* LI1 active */
31#define PLX9052_INTCSR_LI2ENAB (1 << 3) /* LI2 enabled */ 30#define PLX9052_INTCSR_LI2ENAB BIT(3) /* LI2 enabled */
32#define PLX9052_INTCSR_LI2POL (1 << 4) /* LI2 active high */ 31#define PLX9052_INTCSR_LI2POL BIT(4) /* LI2 active high */
33#define PLX9052_INTCSR_LI2STAT (1 << 5) /* LI2 active */ 32#define PLX9052_INTCSR_LI2STAT BIT(5) /* LI2 active */
34#define PLX9052_INTCSR_PCIENAB (1 << 6) /* PCIINT enabled */ 33#define PLX9052_INTCSR_PCIENAB BIT(6) /* PCIINT enabled */
35#define PLX9052_INTCSR_SOFTINT (1 << 7) /* generate soft int */ 34#define PLX9052_INTCSR_SOFTINT BIT(7) /* generate soft int */
36#define PLX9052_INTCSR_LI1SEL (1 << 8) /* LI1 edge */ 35#define PLX9052_INTCSR_LI1SEL BIT(8) /* LI1 edge */
37#define PLX9052_INTCSR_LI2SEL (1 << 9) /* LI2 edge */ 36#define PLX9052_INTCSR_LI2SEL BIT(9) /* LI2 edge */
38#define PLX9052_INTCSR_LI1CLRINT (1 << 10) /* LI1 clear int */ 37#define PLX9052_INTCSR_LI1CLRINT BIT(10) /* LI1 clear int */
39#define PLX9052_INTCSR_LI2CLRINT (1 << 11) /* LI2 clear int */ 38#define PLX9052_INTCSR_LI2CLRINT BIT(11) /* LI2 clear int */
40#define PLX9052_INTCSR_ISAMODE (1 << 12) /* ISA interface mode */ 39#define PLX9052_INTCSR_ISAMODE BIT(12) /* ISA interface mode */
41 40
42/* 41/*
43 * CNTRL - User I/O, Direct Slave Response, Serial EEPROM, and 42 * CNTRL - User I/O, Direct Slave Response, Serial EEPROM, and
44 * Initialization Control register 43 * Initialization Control register
45 */ 44 */
46#define PLX9052_CNTRL 0x50 45#define PLX9052_CNTRL 0x50
47#define PLX9052_CNTRL_WAITO (1 << 0) /* UIO0 or WAITO# select */ 46#define PLX9052_CNTRL_WAITO BIT(0) /* UIO0 or WAITO# select */
48#define PLX9052_CNTRL_UIO0_DIR (1 << 1) /* UIO0 direction */ 47#define PLX9052_CNTRL_UIO0_DIR BIT(1) /* UIO0 direction */
49#define PLX9052_CNTRL_UIO0_DATA (1 << 2) /* UIO0 data */ 48#define PLX9052_CNTRL_UIO0_DATA BIT(2) /* UIO0 data */
50#define PLX9052_CNTRL_LLOCKO (1 << 3) /* UIO1 or LLOCKo# select */ 49#define PLX9052_CNTRL_LLOCKO BIT(3) /* UIO1 or LLOCKo# select */
51#define PLX9052_CNTRL_UIO1_DIR (1 << 4) /* UIO1 direction */ 50#define PLX9052_CNTRL_UIO1_DIR BIT(4) /* UIO1 direction */
52#define PLX9052_CNTRL_UIO1_DATA (1 << 5) /* UIO1 data */ 51#define PLX9052_CNTRL_UIO1_DATA BIT(5) /* UIO1 data */
53#define PLX9052_CNTRL_CS2 (1 << 6) /* UIO2 or CS2# select */ 52#define PLX9052_CNTRL_CS2 BIT(6) /* UIO2 or CS2# select */
54#define PLX9052_CNTRL_UIO2_DIR (1 << 7) /* UIO2 direction */ 53#define PLX9052_CNTRL_UIO2_DIR BIT(7) /* UIO2 direction */
55#define PLX9052_CNTRL_UIO2_DATA (1 << 8) /* UIO2 data */ 54#define PLX9052_CNTRL_UIO2_DATA BIT(8) /* UIO2 data */
56#define PLX9052_CNTRL_CS3 (1 << 9) /* UIO3 or CS3# select */ 55#define PLX9052_CNTRL_CS3 BIT(9) /* UIO3 or CS3# select */
57#define PLX9052_CNTRL_UIO3_DIR (1 << 10) /* UIO3 direction */ 56#define PLX9052_CNTRL_UIO3_DIR BIT(10) /* UIO3 direction */
58#define PLX9052_CNTRL_UIO3_DATA (1 << 11) /* UIO3 data */ 57#define PLX9052_CNTRL_UIO3_DATA BIT(11) /* UIO3 data */
59#define PLX9052_CNTRL_PCIBAR01 (0 << 12) /* bar 0 (mem) and 1 (I/O) */ 58#define PLX9052_CNTRL_PCIBAR(x) (((x) & 0x3) << 12)
60#define PLX9052_CNTRL_PCIBAR0 (1 << 12) /* bar 0 (mem) only */ 59#define PLX9052_CNTRL_PCIBAR01 PLX9052_CNTRL_PCIBAR(0) /* mem and IO */
61#define PLX9052_CNTRL_PCIBAR1 (2 << 12) /* bar 1 (I/O) only */ 60#define PLX9052_CNTRL_PCIBAR0 PLX9052_CNTRL_PCIBAR(1) /* mem only */
62#define PLX9052_CNTRL_PCI2_1_FEATURES (1 << 14) /* PCI r2.1 features enabled */ 61#define PLX9052_CNTRL_PCIBAR1 PLX9052_CNTRL_PCIBAR(2) /* IO only */
63#define PLX9052_CNTRL_PCI_R_W_FLUSH (1 << 15) /* read w/write flush mode */ 62#define PLX9052_CNTRL_PCI2_1_FEATURES BIT(14) /* PCI v2.1 features enabled */
64#define PLX9052_CNTRL_PCI_R_NO_FLUSH (1 << 16) /* read no flush mode */ 63#define PLX9052_CNTRL_PCI_R_W_FLUSH BIT(15) /* read w/write flush mode */
65#define PLX9052_CNTRL_PCI_R_NO_WRITE (1 << 17) /* read no write mode */ 64#define PLX9052_CNTRL_PCI_R_NO_FLUSH BIT(16) /* read no flush mode */
66#define PLX9052_CNTRL_PCI_W_RELEASE (1 << 18) /* write release bus mode */ 65#define PLX9052_CNTRL_PCI_R_NO_WRITE BIT(17) /* read no write mode */
67#define PLX9052_CNTRL_RETRY_CLKS(x) (((x) & 0xf) << 19) /* slave retry clks */ 66#define PLX9052_CNTRL_PCI_W_RELEASE BIT(18) /* write release bus mode */
68#define PLX9052_CNTRL_LOCK_ENAB (1 << 23) /* slave LOCK# enable */ 67#define PLX9052_CNTRL_RETRY_CLKS(x) (((x) & 0xf) << 19) /* retry clks */
68#define PLX9052_CNTRL_LOCK_ENAB BIT(23) /* slave LOCK# enable */
69#define PLX9052_CNTRL_EEPROM_MASK (0x1f << 24) /* EEPROM bits */ 69#define PLX9052_CNTRL_EEPROM_MASK (0x1f << 24) /* EEPROM bits */
70#define PLX9052_CNTRL_EEPROM_CLK (1 << 24) /* EEPROM clock */ 70#define PLX9052_CNTRL_EEPROM_CLK BIT(24) /* EEPROM clock */
71#define PLX9052_CNTRL_EEPROM_CS (1 << 25) /* EEPROM chip select */ 71#define PLX9052_CNTRL_EEPROM_CS BIT(25) /* EEPROM chip select */
72#define PLX9052_CNTRL_EEPROM_DOUT (1 << 26) /* EEPROM write bit */ 72#define PLX9052_CNTRL_EEPROM_DOUT BIT(26) /* EEPROM write bit */
73#define PLX9052_CNTRL_EEPROM_DIN (1 << 27) /* EEPROM read bit */ 73#define PLX9052_CNTRL_EEPROM_DIN BIT(27) /* EEPROM read bit */
74#define PLX9052_CNTRL_EEPROM_PRESENT (1 << 28) /* EEPROM present */ 74#define PLX9052_CNTRL_EEPROM_PRESENT BIT(28) /* EEPROM present */
75#define PLX9052_CNTRL_RELOAD_CFG (1 << 29) /* reload configuration */ 75#define PLX9052_CNTRL_RELOAD_CFG BIT(29) /* reload configuration */
76#define PLX9052_CNTRL_PCI_RESET (1 << 30) /* PCI adapter reset */ 76#define PLX9052_CNTRL_PCI_RESET BIT(30) /* PCI adapter reset */
77#define PLX9052_CNTRL_MASK_REV (1 << 31) /* mask revision */ 77#define PLX9052_CNTRL_MASK_REV BIT(31) /* mask revision */
78 78
79#endif /* _PLX9052_H_ */ 79#endif /* _PLX9052_H_ */
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index f5cd6d5004bd..8d1aee00b19f 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -88,7 +88,7 @@ enum marb_bits {
88 /* direct slave LLOCKo# enable */ 88 /* direct slave LLOCKo# enable */
89 MARB_DS_LLOCK_ENABLE = 0x00400000, 89 MARB_DS_LLOCK_ENABLE = 0x00400000,
90 MARB_PCI_REQUEST_MODE = 0x00800000, 90 MARB_PCI_REQUEST_MODE = 0x00800000,
91 MARB_PCIv21_MODE = 0x01000000, /* pci specification v2.1 mode */ 91 MARB_PCIV21_MODE = 0x01000000, /* pci specification v2.1 mode */
92 MARB_PCI_READ_NO_WRITE_MODE = 0x02000000, 92 MARB_PCI_READ_NO_WRITE_MODE = 0x02000000,
93 MARB_PCI_READ_WITH_WRITE_FLUSH_MODE = 0x04000000, 93 MARB_PCI_READ_WITH_WRITE_FLUSH_MODE = 0x04000000,
94 /* gate local bus latency timer with BREQ */ 94 /* gate local bus latency timer with BREQ */
diff --git a/drivers/staging/comedi/drivers/z8536.h b/drivers/staging/comedi/drivers/z8536.h
index 7be53109cc8d..47eadbf4dcc0 100644
--- a/drivers/staging/comedi/drivers/z8536.h
+++ b/drivers/staging/comedi/drivers/z8536.h
@@ -24,11 +24,12 @@
24#define Z8536_CFG_CTRL_PCE_CT3E BIT(4) /* Port C & C/T 3 Enable */ 24#define Z8536_CFG_CTRL_PCE_CT3E BIT(4) /* Port C & C/T 3 Enable */
25#define Z8536_CFG_CTRL_PLC BIT(3) /* Port A/B Link Control */ 25#define Z8536_CFG_CTRL_PLC BIT(3) /* Port A/B Link Control */
26#define Z8536_CFG_CTRL_PAE BIT(2) /* Port A Enable */ 26#define Z8536_CFG_CTRL_PAE BIT(2) /* Port A Enable */
27#define Z8536_CFG_CTRL_LC_INDEP (0 << 0)/* C/Ts Independent */ 27#define Z8536_CFG_CTRL_LC(x) (((x) & 0x3) << 0) /* Link Control */
28#define Z8536_CFG_CTRL_LC_GATE (1 << 0)/* C/T 1 Out Gates C/T 2 */ 28#define Z8536_CFG_CTRL_LC_INDEP Z8536_CFG_CTRL_LC(0)/* Independent */
29#define Z8536_CFG_CTRL_LC_TRIG (2 << 0)/* C/T 1 Out Triggers C/T 2 */ 29#define Z8536_CFG_CTRL_LC_GATE Z8536_CFG_CTRL_LC(1)/* 1 Gates 2 */
30#define Z8536_CFG_CTRL_LC_CLK (3 << 0)/* C/T 1 Out Clocks C/T 2 */ 30#define Z8536_CFG_CTRL_LC_TRIG Z8536_CFG_CTRL_LC(2)/* 1 Triggers 2 */
31#define Z8536_CFG_CTRL_LC_MASK (3 << 0)/* C/T Link Control mask */ 31#define Z8536_CFG_CTRL_LC_CLK Z8536_CFG_CTRL_LC(3)/* 1 Clocks 2 */
32#define Z8536_CFG_CTRL_LC_MASK Z8536_CFG_CTRL_LC(3)
32 33
33/* Interrupt Vector registers */ 34/* Interrupt Vector registers */
34#define Z8536_PA_INT_VECT_REG 0x02 35#define Z8536_PA_INT_VECT_REG 0x02
@@ -43,15 +44,16 @@
43#define Z8536_CT2_CMDSTAT_REG 0x0b 44#define Z8536_CT2_CMDSTAT_REG 0x0b
44#define Z8536_CT3_CMDSTAT_REG 0x0c 45#define Z8536_CT3_CMDSTAT_REG 0x0c
45#define Z8536_CT_CMDSTAT_REG(x) (0x0a + (x)) 46#define Z8536_CT_CMDSTAT_REG(x) (0x0a + (x))
46#define Z8536_CMD_NULL (0 << 5)/* Null Code */ 47#define Z8536_CMD(x) (((x) & 0x7) << 5)
47#define Z8536_CMD_CLR_IP_IUS (1 << 5)/* Clear IP & IUS */ 48#define Z8536_CMD_NULL Z8536_CMD(0) /* Null Code */
48#define Z8536_CMD_SET_IUS (2 << 5)/* Set IUS */ 49#define Z8536_CMD_CLR_IP_IUS Z8536_CMD(1) /* Clear IP & IUS */
49#define Z8536_CMD_CLR_IUS (3 << 5)/* Clear IUS */ 50#define Z8536_CMD_SET_IUS Z8536_CMD(2) /* Set IUS */
50#define Z8536_CMD_SET_IP (4 << 5)/* Set IP */ 51#define Z8536_CMD_CLR_IUS Z8536_CMD(3) /* Clear IUS */
51#define Z8536_CMD_CLR_IP (5 << 5)/* Clear IP */ 52#define Z8536_CMD_SET_IP Z8536_CMD(4) /* Set IP */
52#define Z8536_CMD_SET_IE (6 << 5)/* Set IE */ 53#define Z8536_CMD_CLR_IP Z8536_CMD(5) /* Clear IP */
53#define Z8536_CMD_CLR_IE (7 << 5)/* Clear IE */ 54#define Z8536_CMD_SET_IE Z8536_CMD(6) /* Set IE */
54#define Z8536_CMD_MASK (7 << 5) 55#define Z8536_CMD_CLR_IE Z8536_CMD(7) /* Clear IE */
56#define Z8536_CMD_MASK Z8536_CMD(7)
55 57
56#define Z8536_STAT_IUS BIT(7) /* Interrupt Under Service */ 58#define Z8536_STAT_IUS BIT(7) /* Interrupt Under Service */
57#define Z8536_STAT_IE BIT(6) /* Interrupt Enable */ 59#define Z8536_STAT_IE BIT(6) /* Interrupt Enable */
@@ -105,46 +107,51 @@
105#define Z8536_CT_MODE_ETE BIT(4) /* External Trigger Enable */ 107#define Z8536_CT_MODE_ETE BIT(4) /* External Trigger Enable */
106#define Z8536_CT_MODE_EGE BIT(3) /* External Gate Enable */ 108#define Z8536_CT_MODE_EGE BIT(3) /* External Gate Enable */
107#define Z8536_CT_MODE_REB BIT(2) /* Retrigger Enable Bit */ 109#define Z8536_CT_MODE_REB BIT(2) /* Retrigger Enable Bit */
108#define Z8536_CT_MODE_DCS_PULSE (0 << 0)/* Duty Cycle - Pulse */ 110#define Z8536_CT_MODE_DCS(x) (((x) & 0x3) << 0) /* Duty Cycle */
109#define Z8536_CT_MODE_DCS_ONESHOT (1 << 0)/* Duty Cycle - One-Shot */ 111#define Z8536_CT_MODE_DCS_PULSE Z8536_CT_MODE_DCS(0) /* Pulse */
110#define Z8536_CT_MODE_DCS_SQRWAVE (2 << 0)/* Duty Cycle - Square Wave */ 112#define Z8536_CT_MODE_DCS_ONESHOT Z8536_CT_MODE_DCS(1) /* One-Shot */
111#define Z8536_CT_MODE_DCS_DO_NOT_USE (3 << 0)/* Duty Cycle - Do Not Use */ 113#define Z8536_CT_MODE_DCS_SQRWAVE Z8536_CT_MODE_DCS(2) /* Square Wave */
112#define Z8536_CT_MODE_DCS_MASK (3 << 0)/* Duty Cycle mask */ 114#define Z8536_CT_MODE_DCS_DO_NOT_USE Z8536_CT_MODE_DCS(3) /* Do Not Use */
115#define Z8536_CT_MODE_DCS_MASK Z8536_CT_MODE_DCS(3)
113 116
114/* Port A/B Mode Specification registers */ 117/* Port A/B Mode Specification registers */
115#define Z8536_PA_MODE_REG 0x20 118#define Z8536_PA_MODE_REG 0x20
116#define Z8536_PB_MODE_REG 0x28 119#define Z8536_PB_MODE_REG 0x28
117#define Z8536_PAB_MODE_PTS_BIT (0 << 6)/* Bit Port */ 120#define Z8536_PAB_MODE_PTS(x) (((x) & 0x3) << 6) /* Port type */
118#define Z8536_PAB_MODE_PTS_INPUT (1 << 6)/* Input Port */ 121#define Z8536_PAB_MODE_PTS_BIT Z8536_PAB_MODE_PTS(0 << 6)/* Bit */
119#define Z8536_PAB_MODE_PTS_OUTPUT (2 << 6)/* Output Port */ 122#define Z8536_PAB_MODE_PTS_INPUT Z8536_PAB_MODE_PTS(1 << 6)/* Input */
120#define Z8536_PAB_MODE_PTS_BIDIR (3 << 6)/* Bidirectional Port */ 123#define Z8536_PAB_MODE_PTS_OUTPUT Z8536_PAB_MODE_PTS(2 << 6)/* Output */
121#define Z8536_PAB_MODE_PTS_MASK (3 << 6)/* Port Type Select mask */ 124#define Z8536_PAB_MODE_PTS_BIDIR Z8536_PAB_MODE_PTS(3 << 6)/* Bidir */
125#define Z8536_PAB_MODE_PTS_MASK Z8536_PAB_MODE_PTS(3 << 6)
122#define Z8536_PAB_MODE_ITB BIT(5) /* Interrupt on Two Bytes */ 126#define Z8536_PAB_MODE_ITB BIT(5) /* Interrupt on Two Bytes */
123#define Z8536_PAB_MODE_SB BIT(4) /* Single Buffered mode */ 127#define Z8536_PAB_MODE_SB BIT(4) /* Single Buffered mode */
124#define Z8536_PAB_MODE_IMO BIT(3) /* Interrupt on Match Only */ 128#define Z8536_PAB_MODE_IMO BIT(3) /* Interrupt on Match Only */
125#define Z8536_PAB_MODE_PMS_DISABLE (0 << 1)/* Disable Pattern Match */ 129#define Z8536_PAB_MODE_PMS(x) (((x) & 0x3) << 1) /* Pattern Mode */
126#define Z8536_PAB_MODE_PMS_AND (1 << 1)/* "AND" mode */ 130#define Z8536_PAB_MODE_PMS_DISABLE Z8536_PAB_MODE_PMS(0)/* Disabled */
127#define Z8536_PAB_MODE_PMS_OR (2 << 1)/* "OR" mode */ 131#define Z8536_PAB_MODE_PMS_AND Z8536_PAB_MODE_PMS(1)/* "AND" */
128#define Z8536_PAB_MODE_PMS_OR_PEV (3 << 1)/* "OR-Priority" mode */ 132#define Z8536_PAB_MODE_PMS_OR Z8536_PAB_MODE_PMS(2)/* "OR" */
129#define Z8536_PAB_MODE_PMS_MASK (3 << 1)/* Pattern Mode mask */ 133#define Z8536_PAB_MODE_PMS_OR_PEV Z8536_PAB_MODE_PMS(3)/* "OR-Priority" */
134#define Z8536_PAB_MODE_PMS_MASK Z8536_PAB_MODE_PMS(3)
130#define Z8536_PAB_MODE_LPM BIT(0) /* Latch on Pattern Match */ 135#define Z8536_PAB_MODE_LPM BIT(0) /* Latch on Pattern Match */
131#define Z8536_PAB_MODE_DTE BIT(0) /* Deskew Timer Enabled */ 136#define Z8536_PAB_MODE_DTE BIT(0) /* Deskew Timer Enabled */
132 137
133/* Port A/B Handshake Specification registers */ 138/* Port A/B Handshake Specification registers */
134#define Z8536_PA_HANDSHAKE_REG 0x21 139#define Z8536_PA_HANDSHAKE_REG 0x21
135#define Z8536_PB_HANDSHAKE_REG 0x29 140#define Z8536_PB_HANDSHAKE_REG 0x29
136#define Z8536_PAB_HANDSHAKE_HST_INTER (0 << 6)/* Interlocked Handshake */ 141#define Z8536_PAB_HANDSHAKE_HST(x) (((x) & 0x3) << 6) /* Handshake Type */
137#define Z8536_PAB_HANDSHAKE_HST_STROBED (1 << 6)/* Strobed Handshake */ 142#define Z8536_PAB_HANDSHAKE_HST_INTER Z8536_PAB_HANDSHAKE_HST(0)/*Interlock*/
138#define Z8536_PAB_HANDSHAKE_HST_PULSED (2 << 6)/* Pulsed Handshake */ 143#define Z8536_PAB_HANDSHAKE_HST_STROBED Z8536_PAB_HANDSHAKE_HST(1)/* Strobed */
139#define Z8536_PAB_HANDSHAKE_HST_3WIRE (3 << 6)/* Three-Wire Handshake */ 144#define Z8536_PAB_HANDSHAKE_HST_PULSED Z8536_PAB_HANDSHAKE_HST(2)/* Pulsed */
140#define Z8536_PAB_HANDSHAKE_HST_MASK (3 << 6)/* Handshake Type mask */ 145#define Z8536_PAB_HANDSHAKE_HST_3WIRE Z8536_PAB_HANDSHAKE_HST(3)/* 3-Wire */
141#define Z8536_PAB_HANDSHAKE_RWS_DISABLE (0 << 3)/* Req/Wait Disabled */ 146#define Z8536_PAB_HANDSHAKE_HST_MASK Z8536_PAB_HANDSHAKE_HST(3)
142#define Z8536_PAB_HANDSHAKE_RWS_OUTWAIT (1 << 3)/* Output Wait */ 147#define Z8536_PAB_HANDSHAKE_RWS(x) (((x) & 0x7) << 3) /* Req/Wait */
143#define Z8536_PAB_HANDSHAKE_RWS_INWAIT (3 << 3)/* Input Wait */ 148#define Z8536_PAB_HANDSHAKE_RWS_DISABLE Z8536_PAB_HANDSHAKE_RWS(0)/* Disabled */
144#define Z8536_PAB_HANDSHAKE_RWS_SPREQ (4 << 3)/* Special Request */ 149#define Z8536_PAB_HANDSHAKE_RWS_OUTWAIT Z8536_PAB_HANDSHAKE_RWS(1)/* Out Wait */
145#define Z8536_PAB_HANDSHAKE_RWS_OUTREQ (5 << 4)/* Output Request */ 150#define Z8536_PAB_HANDSHAKE_RWS_INWAIT Z8536_PAB_HANDSHAKE_RWS(3)/* In Wait */
146#define Z8536_PAB_HANDSHAKE_RWS_INREQ (7 << 3)/* Input Request */ 151#define Z8536_PAB_HANDSHAKE_RWS_SPREQ Z8536_PAB_HANDSHAKE_RWS(4)/* Special */
147#define Z8536_PAB_HANDSHAKE_RWS_MASK (7 << 3)/* Req/Wait mask */ 152#define Z8536_PAB_HANDSHAKE_RWS_OUTREQ Z8536_PAB_HANDSHAKE_RWS(5)/* Out Req */
153#define Z8536_PAB_HANDSHAKE_RWS_INREQ Z8536_PAB_HANDSHAKE_RWS(7)/* In Req */
154#define Z8536_PAB_HANDSHAKE_RWS_MASK Z8536_PAB_HANDSHAKE_RWS(7)
148#define Z8536_PAB_HANDSHAKE_DESKEW(x) ((x) << 0)/* Deskew Time */ 155#define Z8536_PAB_HANDSHAKE_DESKEW(x) ((x) << 0)/* Deskew Time */
149#define Z8536_PAB_HANDSHAKE_DESKEW_MASK (3 << 0)/* Deskew Time mask */ 156#define Z8536_PAB_HANDSHAKE_DESKEW_MASK (3 << 0)/* Deskew Time mask */
150 157
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index 0ff3139e52b6..46c050cc7dbe 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -1168,7 +1168,7 @@ static void cls_uart_init(struct channel_t *ch)
1168 /* Clear out UART and FIFO */ 1168 /* Clear out UART and FIFO */
1169 readb(&ch->ch_cls_uart->txrx); 1169 readb(&ch->ch_cls_uart->txrx);
1170 1170
1171 writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), 1171 writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
1172 &ch->ch_cls_uart->isr_fcr); 1172 &ch->ch_cls_uart->isr_fcr);
1173 udelay(10); 1173 udelay(10);
1174 1174
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index 4eb410e09609..af2e835efa1b 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -48,7 +48,7 @@ static void dgnc_do_remap(struct dgnc_board *brd);
48/* 48/*
49 * File operations permitted on Control/Management major. 49 * File operations permitted on Control/Management major.
50 */ 50 */
51static const struct file_operations dgnc_BoardFops = { 51static const struct file_operations dgnc_board_fops = {
52 .owner = THIS_MODULE, 52 .owner = THIS_MODULE,
53 .unlocked_ioctl = dgnc_mgmt_ioctl, 53 .unlocked_ioctl = dgnc_mgmt_ioctl,
54 .open = dgnc_mgmt_open, 54 .open = dgnc_mgmt_open,
@@ -58,11 +58,11 @@ static const struct file_operations dgnc_BoardFops = {
58/* 58/*
59 * Globals 59 * Globals
60 */ 60 */
61uint dgnc_NumBoards; 61uint dgnc_num_boards;
62struct dgnc_board *dgnc_Board[MAXBOARDS]; 62struct dgnc_board *dgnc_board[MAXBOARDS];
63DEFINE_SPINLOCK(dgnc_global_lock); 63DEFINE_SPINLOCK(dgnc_global_lock);
64DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */ 64DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
65uint dgnc_Major; 65uint dgnc_major;
66int dgnc_poll_tick = 20; /* Poll interval - 20 ms */ 66int dgnc_poll_tick = 20; /* Poll interval - 20 ms */
67 67
68/* 68/*
@@ -92,7 +92,7 @@ struct board_id {
92 unsigned int is_pci_express; 92 unsigned int is_pci_express;
93}; 93};
94 94
95static struct board_id dgnc_Ids[] = { 95static struct board_id dgnc_ids[] = {
96 { PCI_DEVICE_CLASSIC_4_PCI_NAME, 4, 0 }, 96 { PCI_DEVICE_CLASSIC_4_PCI_NAME, 4, 0 },
97 { PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 }, 97 { PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 },
98 { PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 }, 98 { PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 },
@@ -140,14 +140,14 @@ static void cleanup(bool sysfiles)
140 if (sysfiles) 140 if (sysfiles)
141 dgnc_remove_driver_sysfiles(&dgnc_driver); 141 dgnc_remove_driver_sysfiles(&dgnc_driver);
142 142
143 device_destroy(dgnc_class, MKDEV(dgnc_Major, 0)); 143 device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
144 class_destroy(dgnc_class); 144 class_destroy(dgnc_class);
145 unregister_chrdev(dgnc_Major, "dgnc"); 145 unregister_chrdev(dgnc_major, "dgnc");
146 146
147 for (i = 0; i < dgnc_NumBoards; ++i) { 147 for (i = 0; i < dgnc_num_boards; ++i) {
148 dgnc_remove_ports_sysfiles(dgnc_Board[i]); 148 dgnc_remove_ports_sysfiles(dgnc_board[i]);
149 dgnc_tty_uninit(dgnc_Board[i]); 149 dgnc_tty_uninit(dgnc_board[i]);
150 dgnc_cleanup_board(dgnc_Board[i]); 150 dgnc_cleanup_board(dgnc_board[i]);
151 } 151 }
152 152
153 dgnc_tty_post_uninit(); 153 dgnc_tty_post_uninit();
@@ -217,12 +217,12 @@ static int dgnc_start(void)
217 * 217 *
218 * Register management/dpa devices 218 * Register management/dpa devices
219 */ 219 */
220 rc = register_chrdev(0, "dgnc", &dgnc_BoardFops); 220 rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
221 if (rc < 0) { 221 if (rc < 0) {
222 pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc); 222 pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
223 return rc; 223 return rc;
224 } 224 }
225 dgnc_Major = rc; 225 dgnc_major = rc;
226 226
227 dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt"); 227 dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
228 if (IS_ERR(dgnc_class)) { 228 if (IS_ERR(dgnc_class)) {
@@ -232,7 +232,7 @@ static int dgnc_start(void)
232 } 232 }
233 233
234 dev = device_create(dgnc_class, NULL, 234 dev = device_create(dgnc_class, NULL,
235 MKDEV(dgnc_Major, 0), 235 MKDEV(dgnc_major, 0),
236 NULL, "dgnc_mgmt"); 236 NULL, "dgnc_mgmt");
237 if (IS_ERR(dev)) { 237 if (IS_ERR(dev)) {
238 rc = PTR_ERR(dev); 238 rc = PTR_ERR(dev);
@@ -262,11 +262,11 @@ static int dgnc_start(void)
262 return 0; 262 return 0;
263 263
264failed_tty: 264failed_tty:
265 device_destroy(dgnc_class, MKDEV(dgnc_Major, 0)); 265 device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
266failed_device: 266failed_device:
267 class_destroy(dgnc_class); 267 class_destroy(dgnc_class);
268failed_class: 268failed_class:
269 unregister_chrdev(dgnc_Major, "dgnc"); 269 unregister_chrdev(dgnc_major, "dgnc");
270 return rc; 270 return rc;
271} 271}
272 272
@@ -283,7 +283,7 @@ static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
283 283
284 rc = dgnc_found_board(pdev, ent->driver_data); 284 rc = dgnc_found_board(pdev, ent->driver_data);
285 if (rc == 0) 285 if (rc == 0)
286 dgnc_NumBoards++; 286 dgnc_num_boards++;
287 287
288 return rc; 288 return rc;
289} 289}
@@ -346,7 +346,7 @@ static void dgnc_cleanup_board(struct dgnc_board *brd)
346 } 346 }
347 } 347 }
348 348
349 dgnc_Board[brd->boardnum] = NULL; 349 dgnc_board[brd->boardnum] = NULL;
350 350
351 kfree(brd); 351 kfree(brd);
352} 352}
@@ -365,8 +365,8 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
365 unsigned long flags; 365 unsigned long flags;
366 366
367 /* get the board structure and prep it */ 367 /* get the board structure and prep it */
368 dgnc_Board[dgnc_NumBoards] = kzalloc(sizeof(*brd), GFP_KERNEL); 368 dgnc_board[dgnc_num_boards] = kzalloc(sizeof(*brd), GFP_KERNEL);
369 brd = dgnc_Board[dgnc_NumBoards]; 369 brd = dgnc_board[dgnc_num_boards];
370 370
371 if (!brd) 371 if (!brd)
372 return -ENOMEM; 372 return -ENOMEM;
@@ -382,15 +382,15 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
382 382
383 /* store the info for the board we've found */ 383 /* store the info for the board we've found */
384 brd->magic = DGNC_BOARD_MAGIC; 384 brd->magic = DGNC_BOARD_MAGIC;
385 brd->boardnum = dgnc_NumBoards; 385 brd->boardnum = dgnc_num_boards;
386 brd->vendor = dgnc_pci_tbl[id].vendor; 386 brd->vendor = dgnc_pci_tbl[id].vendor;
387 brd->device = dgnc_pci_tbl[id].device; 387 brd->device = dgnc_pci_tbl[id].device;
388 brd->pdev = pdev; 388 brd->pdev = pdev;
389 brd->pci_bus = pdev->bus->number; 389 brd->pci_bus = pdev->bus->number;
390 brd->pci_slot = PCI_SLOT(pdev->devfn); 390 brd->pci_slot = PCI_SLOT(pdev->devfn);
391 brd->name = dgnc_Ids[id].name; 391 brd->name = dgnc_ids[id].name;
392 brd->maxports = dgnc_Ids[id].maxports; 392 brd->maxports = dgnc_ids[id].maxports;
393 if (dgnc_Ids[i].is_pci_express) 393 if (dgnc_ids[i].is_pci_express)
394 brd->bd_flags |= BD_IS_PCI_EXPRESS; 394 brd->bd_flags |= BD_IS_PCI_EXPRESS;
395 brd->dpastatus = BD_NOFEP; 395 brd->dpastatus = BD_NOFEP;
396 init_waitqueue_head(&brd->state_wait); 396 init_waitqueue_head(&brd->state_wait);
@@ -642,8 +642,8 @@ static void dgnc_poll_handler(ulong dummy)
642 unsigned long new_time; 642 unsigned long new_time;
643 643
644 /* Go thru each board, kicking off a tasklet for each if needed */ 644 /* Go thru each board, kicking off a tasklet for each if needed */
645 for (i = 0; i < dgnc_NumBoards; i++) { 645 for (i = 0; i < dgnc_num_boards; i++) {
646 brd = dgnc_Board[i]; 646 brd = dgnc_board[i];
647 647
648 spin_lock_irqsave(&brd->bd_lock, flags); 648 spin_lock_irqsave(&brd->bd_lock, flags);
649 649
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index e4be81b66041..95ec729fae38 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -202,18 +202,13 @@ struct dgnc_board {
202 * to our channels. 202 * to our channels.
203 */ 203 */
204 204
205 struct tty_driver SerialDriver; 205 struct tty_driver *serial_driver;
206 char SerialName[200]; 206 char serial_name[200];
207 struct tty_driver PrintDriver; 207 struct tty_driver *print_driver;
208 char PrintName[200]; 208 char print_name[200];
209 209
210 bool dgnc_Major_Serial_Registered; 210 bool dgnc_major_serial_registered;
211 bool dgnc_Major_TransparentPrint_Registered; 211 bool dgnc_major_transparent_print_registered;
212
213 uint dgnc_Serial_Major;
214 uint dgnc_TransparentPrint_Major;
215
216 uint TtyRefCnt;
217 212
218 u16 dpatype; /* The board "type", 213 u16 dpatype; /* The board "type",
219 * as defined by DPA 214 * as defined by DPA
@@ -399,12 +394,12 @@ struct channel_t {
399/* 394/*
400 * Our Global Variables. 395 * Our Global Variables.
401 */ 396 */
402extern uint dgnc_Major; /* Our driver/mgmt major */ 397extern uint dgnc_major; /* Our driver/mgmt major */
403extern int dgnc_poll_tick; /* Poll interval - 20 ms */ 398extern int dgnc_poll_tick; /* Poll interval - 20 ms */
404extern spinlock_t dgnc_global_lock; /* Driver global spinlock */ 399extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
405extern spinlock_t dgnc_poll_lock; /* Poll scheduling lock */ 400extern spinlock_t dgnc_poll_lock; /* Poll scheduling lock */
406extern uint dgnc_NumBoards; /* Total number of boards */ 401extern uint dgnc_num_boards; /* Total number of boards */
407extern struct dgnc_board *dgnc_Board[MAXBOARDS]; /* Array of board 402extern struct dgnc_board *dgnc_board[MAXBOARDS]; /* Array of board
408 * structs 403 * structs
409 */ 404 */
410 405
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
index ba29a8d913f2..683c098391d9 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -111,7 +111,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
111 spin_lock_irqsave(&dgnc_global_lock, flags); 111 spin_lock_irqsave(&dgnc_global_lock, flags);
112 112
113 memset(&ddi, 0, sizeof(ddi)); 113 memset(&ddi, 0, sizeof(ddi));
114 ddi.dinfo_nboards = dgnc_NumBoards; 114 ddi.dinfo_nboards = dgnc_num_boards;
115 sprintf(ddi.dinfo_version, "%s", DG_PART); 115 sprintf(ddi.dinfo_version, "%s", DG_PART);
116 116
117 spin_unlock_irqrestore(&dgnc_global_lock, flags); 117 spin_unlock_irqrestore(&dgnc_global_lock, flags);
@@ -131,27 +131,27 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
131 if (copy_from_user(&brd, uarg, sizeof(int))) 131 if (copy_from_user(&brd, uarg, sizeof(int)))
132 return -EFAULT; 132 return -EFAULT;
133 133
134 if (brd < 0 || brd >= dgnc_NumBoards) 134 if (brd < 0 || brd >= dgnc_num_boards)
135 return -ENODEV; 135 return -ENODEV;
136 136
137 memset(&di, 0, sizeof(di)); 137 memset(&di, 0, sizeof(di));
138 138
139 di.info_bdnum = brd; 139 di.info_bdnum = brd;
140 140
141 spin_lock_irqsave(&dgnc_Board[brd]->bd_lock, flags); 141 spin_lock_irqsave(&dgnc_board[brd]->bd_lock, flags);
142 142
143 di.info_bdtype = dgnc_Board[brd]->dpatype; 143 di.info_bdtype = dgnc_board[brd]->dpatype;
144 di.info_bdstate = dgnc_Board[brd]->dpastatus; 144 di.info_bdstate = dgnc_board[brd]->dpastatus;
145 di.info_ioport = 0; 145 di.info_ioport = 0;
146 di.info_physaddr = (ulong)dgnc_Board[brd]->membase; 146 di.info_physaddr = (ulong)dgnc_board[brd]->membase;
147 di.info_physsize = (ulong)dgnc_Board[brd]->membase 147 di.info_physsize = (ulong)dgnc_board[brd]->membase
148 - dgnc_Board[brd]->membase_end; 148 - dgnc_board[brd]->membase_end;
149 if (dgnc_Board[brd]->state != BOARD_FAILED) 149 if (dgnc_board[brd]->state != BOARD_FAILED)
150 di.info_nports = dgnc_Board[brd]->nasync; 150 di.info_nports = dgnc_board[brd]->nasync;
151 else 151 else
152 di.info_nports = 0; 152 di.info_nports = 0;
153 153
154 spin_unlock_irqrestore(&dgnc_Board[brd]->bd_lock, flags); 154 spin_unlock_irqrestore(&dgnc_board[brd]->bd_lock, flags);
155 155
156 if (copy_to_user(uarg, &di, sizeof(di))) 156 if (copy_to_user(uarg, &di, sizeof(di)))
157 return -EFAULT; 157 return -EFAULT;
@@ -174,14 +174,14 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
174 channel = ni.channel; 174 channel = ni.channel;
175 175
176 /* Verify boundaries on board */ 176 /* Verify boundaries on board */
177 if (board >= dgnc_NumBoards) 177 if (board >= dgnc_num_boards)
178 return -ENODEV; 178 return -ENODEV;
179 179
180 /* Verify boundaries on channel */ 180 /* Verify boundaries on channel */
181 if (channel >= dgnc_Board[board]->nasync) 181 if (channel >= dgnc_board[board]->nasync)
182 return -ENODEV; 182 return -ENODEV;
183 183
184 ch = dgnc_Board[board]->channels[channel]; 184 ch = dgnc_board[board]->channels[channel];
185 185
186 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) 186 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
187 return -ENODEV; 187 return -ENODEV;
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 31ac437cb4a4..ba57e9546f72 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -77,8 +77,6 @@ struct board_ops dgnc_neo_ops = {
77 .send_immediate_char = neo_send_immediate_char 77 .send_immediate_char = neo_send_immediate_char
78}; 78};
79 79
80static uint dgnc_offset_table[8] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
81
82/* 80/*
83 * This function allows calls to ensure that all outstanding 81 * This function allows calls to ensure that all outstanding
84 * PCI writes have been completed, by doing a PCI read against 82 * PCI writes have been completed, by doing a PCI read against
@@ -116,7 +114,8 @@ static inline void neo_set_cts_flow_control(struct channel_t *ch)
116 writeb(efr, &ch->ch_neo_uart->efr); 114 writeb(efr, &ch->ch_neo_uart->efr);
117 115
118 /* Turn on table D, with 8 char hi/low watermarks */ 116 /* Turn on table D, with 8 char hi/low watermarks */
119 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr); 117 writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY,
118 &ch->ch_neo_uart->fctr);
120 119
121 /* Feed the UART our trigger levels */ 120 /* Feed the UART our trigger levels */
122 writeb(8, &ch->ch_neo_uart->tfifo); 121 writeb(8, &ch->ch_neo_uart->tfifo);
@@ -150,7 +149,8 @@ static inline void neo_set_rts_flow_control(struct channel_t *ch)
150 /* Turn on UART enhanced bits */ 149 /* Turn on UART enhanced bits */
151 writeb(efr, &ch->ch_neo_uart->efr); 150 writeb(efr, &ch->ch_neo_uart->efr);
152 151
153 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr); 152 writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY,
153 &ch->ch_neo_uart->fctr);
154 ch->ch_r_watermark = 4; 154 ch->ch_r_watermark = 4;
155 155
156 writeb(32, &ch->ch_neo_uart->rfifo); 156 writeb(32, &ch->ch_neo_uart->rfifo);
@@ -187,7 +187,8 @@ static inline void neo_set_ixon_flow_control(struct channel_t *ch)
187 /* Turn on UART enhanced bits */ 187 /* Turn on UART enhanced bits */
188 writeb(efr, &ch->ch_neo_uart->efr); 188 writeb(efr, &ch->ch_neo_uart->efr);
189 189
190 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); 190 writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
191 &ch->ch_neo_uart->fctr);
191 ch->ch_r_watermark = 4; 192 ch->ch_r_watermark = 4;
192 193
193 writeb(32, &ch->ch_neo_uart->rfifo); 194 writeb(32, &ch->ch_neo_uart->rfifo);
@@ -225,7 +226,8 @@ static inline void neo_set_ixoff_flow_control(struct channel_t *ch)
225 writeb(efr, &ch->ch_neo_uart->efr); 226 writeb(efr, &ch->ch_neo_uart->efr);
226 227
227 /* Turn on table D, with 8 char hi/low watermarks */ 228 /* Turn on table D, with 8 char hi/low watermarks */
228 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); 229 writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
230 &ch->ch_neo_uart->fctr);
229 231
230 writeb(8, &ch->ch_neo_uart->tfifo); 232 writeb(8, &ch->ch_neo_uart->tfifo);
231 ch->ch_t_tlevel = 8; 233 ch->ch_t_tlevel = 8;
@@ -265,7 +267,8 @@ static inline void neo_set_no_input_flow_control(struct channel_t *ch)
265 writeb(efr, &ch->ch_neo_uart->efr); 267 writeb(efr, &ch->ch_neo_uart->efr);
266 268
267 /* Turn on table D, with 8 char hi/low watermarks */ 269 /* Turn on table D, with 8 char hi/low watermarks */
268 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); 270 writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
271 &ch->ch_neo_uart->fctr);
269 272
270 ch->ch_r_watermark = 0; 273 ch->ch_r_watermark = 0;
271 274
@@ -302,7 +305,8 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch)
302 writeb(efr, &ch->ch_neo_uart->efr); 305 writeb(efr, &ch->ch_neo_uart->efr);
303 306
304 /* Turn on table D, with 8 char hi/low watermarks */ 307 /* Turn on table D, with 8 char hi/low watermarks */
305 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); 308 writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
309 &ch->ch_neo_uart->fctr);
306 310
307 ch->ch_r_watermark = 0; 311 ch->ch_r_watermark = 0;
308 312
@@ -321,7 +325,8 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch)
321static inline void neo_set_new_start_stop_chars(struct channel_t *ch) 325static inline void neo_set_new_start_stop_chars(struct channel_t *ch)
322{ 326{
323 /* if hardware flow control is set, then skip this whole thing */ 327 /* if hardware flow control is set, then skip this whole thing */
324 if (ch->ch_digi.digi_flags & (CTSPACE | RTSPACE) || ch->ch_c_cflag & CRTSCTS) 328 if (ch->ch_digi.digi_flags & (CTSPACE | RTSPACE) ||
329 ch->ch_c_cflag & CRTSCTS)
325 return; 330 return;
326 331
327 /* Tell UART what start/stop chars it should be looking for */ 332 /* Tell UART what start/stop chars it should be looking for */
@@ -351,8 +356,8 @@ static inline void neo_clear_break(struct channel_t *ch, int force)
351 356
352 /* Turn break off, and unset some variables */ 357 /* Turn break off, and unset some variables */
353 if (ch->ch_flags & CH_BREAK_SENDING) { 358 if (ch->ch_flags & CH_BREAK_SENDING) {
354 if (time_after_eq(jiffies, ch->ch_stop_sending_break) 359 if (force ||
355 || force) { 360 time_after_eq(jiffies, ch->ch_stop_sending_break)) {
356 unsigned char temp = readb(&ch->ch_neo_uart->lcr); 361 unsigned char temp = readb(&ch->ch_neo_uart->lcr);
357 362
358 writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr); 363 writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr);
@@ -374,14 +379,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
374 unsigned char cause; 379 unsigned char cause;
375 unsigned long flags; 380 unsigned long flags;
376 381
377 if (!brd || brd->magic != DGNC_BOARD_MAGIC)
378 return;
379
380 if (port >= brd->maxports)
381 return;
382
383 ch = brd->channels[port]; 382 ch = brd->channels[port];
384 if (ch->magic != DGNC_CHANNEL_MAGIC) 383 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
385 return; 384 return;
386 385
387 /* Here we try to figure out what caused the interrupt to happen */ 386 /* Here we try to figure out what caused the interrupt to happen */
@@ -393,7 +392,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
393 break; 392 break;
394 393
395 /* 394 /*
396 * Yank off the upper 2 bits, which just show that the FIFO's are enabled. 395 * Yank off the upper 2 bits,
396 * which just show that the FIFO's are enabled.
397 */ 397 */
398 isr &= ~(UART_17158_IIR_FIFO_ENABLED); 398 isr &= ~(UART_17158_IIR_FIFO_ENABLED);
399 399
@@ -666,7 +666,8 @@ static void neo_param(struct tty_struct *tty)
666 }; 666 };
667 667
668 /* Only use the TXPrint baud rate if the terminal unit is NOT open */ 668 /* Only use the TXPrint baud rate if the terminal unit is NOT open */
669 if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGNC_PRINT)) 669 if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
670 (un->un_type == DGNC_PRINT))
670 baud = C_BAUD(ch->ch_pun.un_tty) & 0xff; 671 baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
671 else 672 else
672 baud = C_BAUD(ch->ch_tun.un_tty) & 0xff; 673 baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
@@ -679,7 +680,8 @@ static void neo_param(struct tty_struct *tty)
679 680
680 jindex = baud; 681 jindex = baud;
681 682
682 if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16)) 683 if ((iindex >= 0) && (iindex < 4) &&
684 (jindex >= 0) && (jindex < 16))
683 baud = bauds[iindex][jindex]; 685 baud = bauds[iindex][jindex];
684 else 686 else
685 baud = 0; 687 baud = 0;
@@ -787,7 +789,8 @@ static void neo_param(struct tty_struct *tty)
787 neo_set_cts_flow_control(ch); 789 neo_set_cts_flow_control(ch);
788 } else if (ch->ch_c_iflag & IXON) { 790 } else if (ch->ch_c_iflag & IXON) {
789 /* If start/stop is set to disable, then we should disable flow control */ 791 /* If start/stop is set to disable, then we should disable flow control */
790 if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE)) 792 if ((ch->ch_startc == _POSIX_VDISABLE) ||
793 (ch->ch_stopc == _POSIX_VDISABLE))
791 neo_set_no_output_flow_control(ch); 794 neo_set_no_output_flow_control(ch);
792 else 795 else
793 neo_set_ixon_flow_control(ch); 796 neo_set_ixon_flow_control(ch);
@@ -799,7 +802,8 @@ static void neo_param(struct tty_struct *tty)
799 neo_set_rts_flow_control(ch); 802 neo_set_rts_flow_control(ch);
800 } else if (ch->ch_c_iflag & IXOFF) { 803 } else if (ch->ch_c_iflag & IXOFF) {
801 /* If start/stop is set to disable, then we should disable flow control */ 804 /* If start/stop is set to disable, then we should disable flow control */
802 if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE)) 805 if ((ch->ch_startc == _POSIX_VDISABLE) ||
806 (ch->ch_stopc == _POSIX_VDISABLE))
803 neo_set_no_input_flow_control(ch); 807 neo_set_no_input_flow_control(ch);
804 else 808 else
805 neo_set_ixoff_flow_control(ch); 809 neo_set_ixoff_flow_control(ch);
@@ -910,9 +914,7 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
910 struct dgnc_board *brd = voidbrd; 914 struct dgnc_board *brd = voidbrd;
911 struct channel_t *ch; 915 struct channel_t *ch;
912 int port = 0; 916 int port = 0;
913 int type = 0; 917 int type;
914 int current_port;
915 u32 tmp;
916 u32 uart_poll; 918 u32 uart_poll;
917 unsigned long flags; 919 unsigned long flags;
918 unsigned long flags2; 920 unsigned long flags2;
@@ -947,29 +949,12 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
947 949
948 /* At this point, we have at least SOMETHING to service, dig further... */ 950 /* At this point, we have at least SOMETHING to service, dig further... */
949 951
950 current_port = 0;
951
952 /* Loop on each port */ 952 /* Loop on each port */
953 while ((uart_poll & 0xff) != 0) { 953 while ((uart_poll & 0xff) != 0) {
954 tmp = uart_poll; 954 type = uart_poll >> (8 + (port * 3));
955 955 type &= 0x7;
956 /* Check current port to see if it has interrupt pending */
957 if ((tmp & dgnc_offset_table[current_port]) != 0) {
958 port = current_port;
959 type = tmp >> (8 + (port * 3));
960 type &= 0x7;
961 } else {
962 current_port++;
963 continue;
964 }
965 956
966 /* Remove this port + type from uart_poll */ 957 uart_poll &= ~(0x01 << port);
967 uart_poll &= ~(dgnc_offset_table[port]);
968
969 if (!type) {
970 /* If no type, just ignore it, and move onto next port */
971 continue;
972 }
973 958
974 /* Switch on type of interrupt we have */ 959 /* Switch on type of interrupt we have */
975 switch (type) { 960 switch (type) {
@@ -981,7 +966,7 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
981 966
982 /* Verify the port is in range. */ 967 /* Verify the port is in range. */
983 if (port >= brd->nasync) 968 if (port >= brd->nasync)
984 continue; 969 break;
985 970
986 ch = brd->channels[port]; 971 ch = brd->channels[port];
987 neo_copy_data_from_uart_to_queue(ch); 972 neo_copy_data_from_uart_to_queue(ch);
@@ -991,14 +976,14 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
991 dgnc_check_queue_flow_control(ch); 976 dgnc_check_queue_flow_control(ch);
992 spin_unlock_irqrestore(&ch->ch_lock, flags2); 977 spin_unlock_irqrestore(&ch->ch_lock, flags2);
993 978
994 continue; 979 break;
995 980
996 case UART_17158_RX_LINE_STATUS: 981 case UART_17158_RX_LINE_STATUS:
997 /* 982 /*
998 * RXRDY and RX LINE Status (logic OR of LSR[4:1]) 983 * RXRDY and RX LINE Status (logic OR of LSR[4:1])
999 */ 984 */
1000 neo_parse_lsr(brd, port); 985 neo_parse_lsr(brd, port);
1001 continue; 986 break;
1002 987
1003 case UART_17158_TXRDY: 988 case UART_17158_TXRDY:
1004 /* 989 /*
@@ -1014,14 +999,14 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
1014 * it should be, I was getting things like RXDY too. Weird. 999 * it should be, I was getting things like RXDY too. Weird.
1015 */ 1000 */
1016 neo_parse_isr(brd, port); 1001 neo_parse_isr(brd, port);
1017 continue; 1002 break;
1018 1003
1019 case UART_17158_MSR: 1004 case UART_17158_MSR:
1020 /* 1005 /*
1021 * MSR or flow control was seen. 1006 * MSR or flow control was seen.
1022 */ 1007 */
1023 neo_parse_isr(brd, port); 1008 neo_parse_isr(brd, port);
1024 continue; 1009 break;
1025 1010
1026 default: 1011 default:
1027 /* 1012 /*
@@ -1030,8 +1015,10 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
1030 * these once and awhile. 1015 * these once and awhile.
1031 * Its harmless, just ignore it and move on. 1016 * Its harmless, just ignore it and move on.
1032 */ 1017 */
1033 continue; 1018 break;
1034 } 1019 }
1020
1021 port++;
1035 } 1022 }
1036 1023
1037 /* 1024 /*
@@ -1172,7 +1159,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
1172 linestatus = 0; 1159 linestatus = 0;
1173 1160
1174 /* Copy data from uart to the queue */ 1161 /* Copy data from uart to the queue */
1175 memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n); 1162 memcpy_fromio(ch->ch_rqueue + head,
1163 &ch->ch_neo_uart->txrxburst, n);
1176 1164
1177 /* 1165 /*
1178 * Since RX_FIFO_DATA_ERROR was 0, we are guaranteed 1166 * Since RX_FIFO_DATA_ERROR was 0, we are guaranteed
@@ -1225,7 +1213,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
1225 * we don't miss our TX FIFO emptys. 1213 * we don't miss our TX FIFO emptys.
1226 */ 1214 */
1227 if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) { 1215 if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) {
1228 linestatus &= ~(UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR); 1216 linestatus &= ~(UART_LSR_THRE |
1217 UART_17158_TX_AND_FIFO_CLR);
1229 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); 1218 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
1230 } 1219 }
1231 1220
@@ -1255,7 +1244,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
1255 qleft++; 1244 qleft++;
1256 } 1245 }
1257 1246
1258 memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1); 1247 memcpy_fromio(ch->ch_rqueue + head,
1248 &ch->ch_neo_uart->txrxburst, 1);
1259 ch->ch_equeue[head] = (unsigned char)linestatus; 1249 ch->ch_equeue[head] = (unsigned char)linestatus;
1260 1250
1261 /* Ditch any remaining linestatus value. */ 1251 /* Ditch any remaining linestatus value. */
@@ -1328,7 +1318,8 @@ static void neo_flush_uart_write(struct channel_t *ch)
1328 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) 1318 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
1329 return; 1319 return;
1330 1320
1331 writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr); 1321 writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT),
1322 &ch->ch_neo_uart->isr_fcr);
1332 neo_pci_posting_flush(ch->ch_bd); 1323 neo_pci_posting_flush(ch->ch_bd);
1333 1324
1334 for (i = 0; i < 10; i++) { 1325 for (i = 0; i < 10; i++) {
@@ -1356,7 +1347,8 @@ static void neo_flush_uart_read(struct channel_t *ch)
1356 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) 1347 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
1357 return; 1348 return;
1358 1349
1359 writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_neo_uart->isr_fcr); 1350 writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR,
1351 &ch->ch_neo_uart->isr_fcr);
1360 neo_pci_posting_flush(ch->ch_bd); 1352 neo_pci_posting_flush(ch->ch_bd);
1361 1353
1362 for (i = 0; i < 10; i++) { 1354 for (i = 0; i < 10; i++) {
@@ -1427,7 +1419,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
1427 ch->ch_tun.un_flags |= (UN_EMPTY); 1419 ch->ch_tun.un_flags |= (UN_EMPTY);
1428 } 1420 }
1429 1421
1430 writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx); 1422 writeb(ch->ch_wqueue[ch->ch_w_tail],
1423 &ch->ch_neo_uart->txrx);
1431 ch->ch_w_tail++; 1424 ch->ch_w_tail++;
1432 ch->ch_w_tail &= WQUEUEMASK; 1425 ch->ch_w_tail &= WQUEUEMASK;
1433 ch->ch_txcount++; 1426 ch->ch_txcount++;
@@ -1494,7 +1487,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
1494 ch->ch_tun.un_flags |= (UN_EMPTY); 1487 ch->ch_tun.un_flags |= (UN_EMPTY);
1495 } 1488 }
1496 1489
1497 memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s); 1490 memcpy_toio(&ch->ch_neo_uart->txrxburst,
1491 ch->ch_wqueue + tail, s);
1498 1492
1499 /* Add and flip queue if needed */ 1493 /* Add and flip queue if needed */
1500 tail = (tail + s) & WQUEUEMASK; 1494 tail = (tail + s) & WQUEUEMASK;
@@ -1628,7 +1622,8 @@ static void neo_uart_init(struct channel_t *ch)
1628 1622
1629 /* Clear out UART and FIFO */ 1623 /* Clear out UART and FIFO */
1630 readb(&ch->ch_neo_uart->txrx); 1624 readb(&ch->ch_neo_uart->txrx);
1631 writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr); 1625 writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
1626 &ch->ch_neo_uart->isr_fcr);
1632 readb(&ch->ch_neo_uart->lsr); 1627 readb(&ch->ch_neo_uart->lsr);
1633 readb(&ch->ch_neo_uart->msr); 1628 readb(&ch->ch_neo_uart->msr);
1634 1629
@@ -1725,7 +1720,8 @@ static void neo_send_immediate_char(struct channel_t *ch, unsigned char c)
1725 neo_pci_posting_flush(ch->ch_bd); 1720 neo_pci_posting_flush(ch->ch_bd);
1726} 1721}
1727 1722
1728static unsigned int neo_read_eeprom(unsigned char __iomem *base, unsigned int address) 1723static unsigned int neo_read_eeprom(unsigned char __iomem *base,
1724 unsigned int address)
1729{ 1725{
1730 unsigned int enable; 1726 unsigned int enable;
1731 unsigned int bits; 1727 unsigned int bits;
@@ -1783,10 +1779,15 @@ static void neo_vpd(struct dgnc_board *brd)
1783 brd->vpd[(i * 2) + 1] = (a >> 8) & 0xff; 1779 brd->vpd[(i * 2) + 1] = (a >> 8) & 0xff;
1784 } 1780 }
1785 1781
1786 if (((brd->vpd[0x08] != 0x82) /* long resource name tag */ 1782 /*
1787 && (brd->vpd[0x10] != 0x82)) /* long resource name tag (PCI-66 files)*/ 1783 * brd->vpd has different name tags by below index.
1788 || (brd->vpd[0x7F] != 0x78)) { /* small resource end tag */ 1784 * 0x08 : long resource name tag
1789 1785 * 0x10 : long resource name tage (PCI-66 files)
1786 * 0x7F : small resource end tag
1787 */
1788 if (((brd->vpd[0x08] != 0x82) &&
1789 (brd->vpd[0x10] != 0x82)) ||
1790 (brd->vpd[0x7F] != 0x78)) {
1790 memset(brd->vpd, '\0', NEO_VPD_IMAGESIZE); 1791 memset(brd->vpd, '\0', NEO_VPD_IMAGESIZE);
1791 } else { 1792 } else {
1792 /* Search for the serial number */ 1793 /* Search for the serial number */
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
index 74a072599126..b8d41c5617e2 100644
--- a/drivers/staging/dgnc/dgnc_sysfs.c
+++ b/drivers/staging/dgnc/dgnc_sysfs.c
@@ -33,7 +33,7 @@ static DRIVER_ATTR(version, S_IRUSR, dgnc_driver_version_show, NULL);
33 33
34static ssize_t dgnc_driver_boards_show(struct device_driver *ddp, char *buf) 34static ssize_t dgnc_driver_boards_show(struct device_driver *ddp, char *buf)
35{ 35{
36 return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_NumBoards); 36 return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_num_boards);
37} 37}
38static DRIVER_ATTR(boards, S_IRUSR, dgnc_driver_boards_show, NULL); 38static DRIVER_ATTR(boards, S_IRUSR, dgnc_driver_boards_show, NULL);
39 39
@@ -189,19 +189,21 @@ static ssize_t dgnc_ports_msignals_show(struct device *p,
189 DGNC_VERIFY_BOARD(p, bd); 189 DGNC_VERIFY_BOARD(p, bd);
190 190
191 for (i = 0; i < bd->nasync; i++) { 191 for (i = 0; i < bd->nasync; i++) {
192 if (bd->channels[i]->ch_open_count) { 192 struct channel_t *ch = bd->channels[i];
193
194 if (ch->ch_open_count) {
193 count += snprintf(buf + count, PAGE_SIZE - count, 195 count += snprintf(buf + count, PAGE_SIZE - count,
194 "%d %s %s %s %s %s %s\n", 196 "%d %s %s %s %s %s %s\n",
195 bd->channels[i]->ch_portnum, 197 ch->ch_portnum,
196 (bd->channels[i]->ch_mostat & UART_MCR_RTS) ? "RTS" : "", 198 (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
197 (bd->channels[i]->ch_mistat & UART_MSR_CTS) ? "CTS" : "", 199 (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
198 (bd->channels[i]->ch_mostat & UART_MCR_DTR) ? "DTR" : "", 200 (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
199 (bd->channels[i]->ch_mistat & UART_MSR_DSR) ? "DSR" : "", 201 (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
200 (bd->channels[i]->ch_mistat & UART_MSR_DCD) ? "DCD" : "", 202 (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
201 (bd->channels[i]->ch_mistat & UART_MSR_RI) ? "RI" : ""); 203 (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
202 } else { 204 } else {
203 count += snprintf(buf + count, PAGE_SIZE - count, 205 count += snprintf(buf + count, PAGE_SIZE - count,
204 "%d\n", bd->channels[i]->ch_portnum); 206 "%d\n", ch->ch_portnum);
205 } 207 }
206 } 208 }
207 return count; 209 return count;
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index 5c221593a0c6..4eeecc992a02 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -176,57 +176,42 @@ int dgnc_tty_preinit(void)
176 */ 176 */
177int dgnc_tty_register(struct dgnc_board *brd) 177int dgnc_tty_register(struct dgnc_board *brd)
178{ 178{
179 int rc = 0; 179 int rc;
180
181 brd->SerialDriver.magic = TTY_DRIVER_MAGIC;
182 180
183 snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum); 181 brd->serial_driver = tty_alloc_driver(brd->maxports,
182 TTY_DRIVER_REAL_RAW |
183 TTY_DRIVER_DYNAMIC_DEV |
184 TTY_DRIVER_HARDWARE_BREAK);
184 185
185 brd->SerialDriver.name = brd->SerialName; 186 if (IS_ERR(brd->serial_driver))
186 brd->SerialDriver.name_base = 0; 187 return PTR_ERR(brd->serial_driver);
187 brd->SerialDriver.major = 0;
188 brd->SerialDriver.minor_start = 0;
189 brd->SerialDriver.num = brd->maxports;
190 brd->SerialDriver.type = TTY_DRIVER_TYPE_SERIAL;
191 brd->SerialDriver.subtype = SERIAL_TYPE_NORMAL;
192 brd->SerialDriver.init_termios = DgncDefaultTermios;
193 brd->SerialDriver.driver_name = DRVSTR;
194 brd->SerialDriver.flags = (TTY_DRIVER_REAL_RAW |
195 TTY_DRIVER_DYNAMIC_DEV |
196 TTY_DRIVER_HARDWARE_BREAK);
197 188
198 /* 189 snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum);
199 * The kernel wants space to store pointers to
200 * tty_struct's and termios's.
201 */
202 brd->SerialDriver.ttys = kcalloc(brd->maxports,
203 sizeof(*brd->SerialDriver.ttys),
204 GFP_KERNEL);
205 if (!brd->SerialDriver.ttys)
206 return -ENOMEM;
207 190
208 kref_init(&brd->SerialDriver.kref); 191 brd->serial_driver->name = brd->serial_name;
209 brd->SerialDriver.termios = kcalloc(brd->maxports, 192 brd->serial_driver->name_base = 0;
210 sizeof(*brd->SerialDriver.termios), 193 brd->serial_driver->major = 0;
211 GFP_KERNEL); 194 brd->serial_driver->minor_start = 0;
212 if (!brd->SerialDriver.termios) 195 brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
213 return -ENOMEM; 196 brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
197 brd->serial_driver->init_termios = DgncDefaultTermios;
198 brd->serial_driver->driver_name = DRVSTR;
214 199
215 /* 200 /*
216 * Entry points for driver. Called by the kernel from 201 * Entry points for driver. Called by the kernel from
217 * tty_io.c and n_tty.c. 202 * tty_io.c and n_tty.c.
218 */ 203 */
219 tty_set_operations(&brd->SerialDriver, &dgnc_tty_ops); 204 tty_set_operations(brd->serial_driver, &dgnc_tty_ops);
220 205
221 if (!brd->dgnc_Major_Serial_Registered) { 206 if (!brd->dgnc_major_serial_registered) {
222 /* Register tty devices */ 207 /* Register tty devices */
223 rc = tty_register_driver(&brd->SerialDriver); 208 rc = tty_register_driver(brd->serial_driver);
224 if (rc < 0) { 209 if (rc < 0) {
225 dev_dbg(&brd->pdev->dev, 210 dev_dbg(&brd->pdev->dev,
226 "Can't register tty device (%d)\n", rc); 211 "Can't register tty device (%d)\n", rc);
227 return rc; 212 goto free_serial_driver;
228 } 213 }
229 brd->dgnc_Major_Serial_Registered = true; 214 brd->dgnc_major_serial_registered = true;
230 } 215 }
231 216
232 /* 217 /*
@@ -234,60 +219,55 @@ int dgnc_tty_register(struct dgnc_board *brd)
234 * again, separately so we don't get the LD confused about what major 219 * again, separately so we don't get the LD confused about what major
235 * we are when we get into the dgnc_tty_open() routine. 220 * we are when we get into the dgnc_tty_open() routine.
236 */ 221 */
237 brd->PrintDriver.magic = TTY_DRIVER_MAGIC; 222 brd->print_driver = tty_alloc_driver(brd->maxports,
238 snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum); 223 TTY_DRIVER_REAL_RAW |
239 224 TTY_DRIVER_DYNAMIC_DEV |
240 brd->PrintDriver.name = brd->PrintName; 225 TTY_DRIVER_HARDWARE_BREAK);
241 brd->PrintDriver.name_base = 0; 226
242 brd->PrintDriver.major = brd->SerialDriver.major; 227 if (IS_ERR(brd->print_driver)) {
243 brd->PrintDriver.minor_start = 0x80; 228 rc = PTR_ERR(brd->print_driver);
244 brd->PrintDriver.num = brd->maxports; 229 goto unregister_serial_driver;
245 brd->PrintDriver.type = TTY_DRIVER_TYPE_SERIAL; 230 }
246 brd->PrintDriver.subtype = SERIAL_TYPE_NORMAL;
247 brd->PrintDriver.init_termios = DgncDefaultTermios;
248 brd->PrintDriver.driver_name = DRVSTR;
249 brd->PrintDriver.flags = (TTY_DRIVER_REAL_RAW |
250 TTY_DRIVER_DYNAMIC_DEV |
251 TTY_DRIVER_HARDWARE_BREAK);
252 231
253 /* 232 snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum);
254 * The kernel wants space to store pointers to 233
255 * tty_struct's and termios's. Must be separated from 234 brd->print_driver->name = brd->print_name;
256 * the Serial Driver so we don't get confused 235 brd->print_driver->name_base = 0;
257 */ 236 brd->print_driver->major = brd->serial_driver->major;
258 brd->PrintDriver.ttys = kcalloc(brd->maxports, 237 brd->print_driver->minor_start = 0x80;
259 sizeof(*brd->PrintDriver.ttys), 238 brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
260 GFP_KERNEL); 239 brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
261 if (!brd->PrintDriver.ttys) 240 brd->print_driver->init_termios = DgncDefaultTermios;
262 return -ENOMEM; 241 brd->print_driver->driver_name = DRVSTR;
263 kref_init(&brd->PrintDriver.kref);
264 brd->PrintDriver.termios = kcalloc(brd->maxports,
265 sizeof(*brd->PrintDriver.termios),
266 GFP_KERNEL);
267 if (!brd->PrintDriver.termios)
268 return -ENOMEM;
269 242
270 /* 243 /*
271 * Entry points for driver. Called by the kernel from 244 * Entry points for driver. Called by the kernel from
272 * tty_io.c and n_tty.c. 245 * tty_io.c and n_tty.c.
273 */ 246 */
274 tty_set_operations(&brd->PrintDriver, &dgnc_tty_ops); 247 tty_set_operations(brd->print_driver, &dgnc_tty_ops);
275 248
276 if (!brd->dgnc_Major_TransparentPrint_Registered) { 249 if (!brd->dgnc_major_transparent_print_registered) {
277 /* Register Transparent Print devices */ 250 /* Register Transparent Print devices */
278 rc = tty_register_driver(&brd->PrintDriver); 251 rc = tty_register_driver(brd->print_driver);
279 if (rc < 0) { 252 if (rc < 0) {
280 dev_dbg(&brd->pdev->dev, 253 dev_dbg(&brd->pdev->dev,
281 "Can't register Transparent Print device(%d)\n", 254 "Can't register Transparent Print device(%d)\n",
282 rc); 255 rc);
283 return rc; 256 goto free_print_driver;
284 } 257 }
285 brd->dgnc_Major_TransparentPrint_Registered = true; 258 brd->dgnc_major_transparent_print_registered = true;
286 } 259 }
287 260
288 dgnc_BoardsByMajor[brd->SerialDriver.major] = brd; 261 dgnc_BoardsByMajor[brd->serial_driver->major] = brd;
289 brd->dgnc_Serial_Major = brd->SerialDriver.major; 262
290 brd->dgnc_TransparentPrint_Major = brd->PrintDriver.major; 263 return 0;
264
265free_print_driver:
266 put_tty_driver(brd->print_driver);
267unregister_serial_driver:
268 tty_unregister_driver(brd->serial_driver);
269free_serial_driver:
270 put_tty_driver(brd->serial_driver);
291 271
292 return rc; 272 return rc;
293} 273}
@@ -364,12 +344,12 @@ int dgnc_tty_init(struct dgnc_board *brd)
364 { 344 {
365 struct device *classp; 345 struct device *classp;
366 346
367 classp = tty_register_device(&brd->SerialDriver, i, 347 classp = tty_register_device(brd->serial_driver, i,
368 &ch->ch_bd->pdev->dev); 348 &ch->ch_bd->pdev->dev);
369 ch->ch_tun.un_sysfs = classp; 349 ch->ch_tun.un_sysfs = classp;
370 dgnc_create_tty_sysfs(&ch->ch_tun, classp); 350 dgnc_create_tty_sysfs(&ch->ch_tun, classp);
371 351
372 classp = tty_register_device(&brd->PrintDriver, i, 352 classp = tty_register_device(brd->print_driver, i,
373 &ch->ch_bd->pdev->dev); 353 &ch->ch_bd->pdev->dev);
374 ch->ch_pun.un_sysfs = classp; 354 ch->ch_pun.un_sysfs = classp;
375 dgnc_create_tty_sysfs(&ch->ch_pun, classp); 355 dgnc_create_tty_sysfs(&ch->ch_pun, classp);
@@ -407,40 +387,32 @@ void dgnc_tty_uninit(struct dgnc_board *brd)
407{ 387{
408 int i = 0; 388 int i = 0;
409 389
410 if (brd->dgnc_Major_Serial_Registered) { 390 if (brd->dgnc_major_serial_registered) {
411 dgnc_BoardsByMajor[brd->SerialDriver.major] = NULL; 391 dgnc_BoardsByMajor[brd->serial_driver->major] = NULL;
412 brd->dgnc_Serial_Major = 0;
413 for (i = 0; i < brd->nasync; i++) { 392 for (i = 0; i < brd->nasync; i++) {
414 if (brd->channels[i]) 393 if (brd->channels[i])
415 dgnc_remove_tty_sysfs(brd->channels[i]-> 394 dgnc_remove_tty_sysfs(brd->channels[i]->
416 ch_tun.un_sysfs); 395 ch_tun.un_sysfs);
417 tty_unregister_device(&brd->SerialDriver, i); 396 tty_unregister_device(brd->serial_driver, i);
418 } 397 }
419 tty_unregister_driver(&brd->SerialDriver); 398 tty_unregister_driver(brd->serial_driver);
420 brd->dgnc_Major_Serial_Registered = false; 399 brd->dgnc_major_serial_registered = false;
421 } 400 }
422 401
423 if (brd->dgnc_Major_TransparentPrint_Registered) { 402 if (brd->dgnc_major_transparent_print_registered) {
424 dgnc_BoardsByMajor[brd->PrintDriver.major] = NULL; 403 dgnc_BoardsByMajor[brd->print_driver->major] = NULL;
425 brd->dgnc_TransparentPrint_Major = 0;
426 for (i = 0; i < brd->nasync; i++) { 404 for (i = 0; i < brd->nasync; i++) {
427 if (brd->channels[i]) 405 if (brd->channels[i])
428 dgnc_remove_tty_sysfs(brd->channels[i]-> 406 dgnc_remove_tty_sysfs(brd->channels[i]->
429 ch_pun.un_sysfs); 407 ch_pun.un_sysfs);
430 tty_unregister_device(&brd->PrintDriver, i); 408 tty_unregister_device(brd->print_driver, i);
431 } 409 }
432 tty_unregister_driver(&brd->PrintDriver); 410 tty_unregister_driver(brd->print_driver);
433 brd->dgnc_Major_TransparentPrint_Registered = false; 411 brd->dgnc_major_transparent_print_registered = false;
434 } 412 }
435 413
436 kfree(brd->SerialDriver.ttys); 414 put_tty_driver(brd->serial_driver);
437 brd->SerialDriver.ttys = NULL; 415 put_tty_driver(brd->print_driver);
438 kfree(brd->SerialDriver.termios);
439 brd->SerialDriver.termios = NULL;
440 kfree(brd->PrintDriver.ttys);
441 brd->PrintDriver.ttys = NULL;
442 kfree(brd->PrintDriver.termios);
443 brd->PrintDriver.termios = NULL;
444} 416}
445 417
446/* 418/*
@@ -606,6 +578,8 @@ void dgnc_input(struct channel_t *ch)
606 * or the amount of data the card actually has pending... 578 * or the amount of data the card actually has pending...
607 */ 579 */
608 while (n) { 580 while (n) {
581 unsigned char *ch_pos = ch->ch_equeue + tail;
582
609 s = ((head >= tail) ? head : RQUEUESIZE) - tail; 583 s = ((head >= tail) ? head : RQUEUESIZE) - tail;
610 s = min(s, n); 584 s = min(s, n);
611 585
@@ -620,29 +594,20 @@ void dgnc_input(struct channel_t *ch)
620 */ 594 */
621 if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) { 595 if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
622 for (i = 0; i < s; i++) { 596 for (i = 0; i < s; i++) {
623 if (*(ch->ch_equeue + tail + i) & UART_LSR_BI) 597 unsigned char ch = *(ch_pos + i);
624 tty_insert_flip_char(tp->port, 598 char flag = TTY_NORMAL;
625 *(ch->ch_rqueue + tail + i), 599
626 TTY_BREAK); 600 if (ch & UART_LSR_BI)
627 else if (*(ch->ch_equeue + tail + i) & 601 flag = TTY_BREAK;
628 UART_LSR_PE) 602 else if (ch & UART_LSR_PE)
629 tty_insert_flip_char(tp->port, 603 flag = TTY_PARITY;
630 *(ch->ch_rqueue + tail + i), 604 else if (ch & UART_LSR_FE)
631 TTY_PARITY); 605 flag = TTY_FRAME;
632 else if (*(ch->ch_equeue + tail + i) & 606
633 UART_LSR_FE) 607 tty_insert_flip_char(tp->port, ch, flag);
634 tty_insert_flip_char(tp->port,
635 *(ch->ch_rqueue + tail + i),
636 TTY_FRAME);
637 else
638 tty_insert_flip_char(tp->port,
639 *(ch->ch_rqueue + tail + i),
640 TTY_NORMAL);
641 } 608 }
642 } else { 609 } else {
643 tty_insert_flip_string(tp->port, 610 tty_insert_flip_string(tp->port, ch_pos, s);
644 ch->ch_rqueue + tail,
645 s);
646 } 611 }
647 612
648 tail += s; 613 tail += s;
@@ -1117,6 +1082,14 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
1117 if (!ch->ch_wqueue) 1082 if (!ch->ch_wqueue)
1118 ch->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL); 1083 ch->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL);
1119 1084
1085 if (!ch->ch_rqueue || !ch->ch_equeue || !ch->ch_wqueue) {
1086 kfree(ch->ch_rqueue);
1087 kfree(ch->ch_equeue);
1088 kfree(ch->ch_wqueue);
1089
1090 return -ENOMEM;
1091 }
1092
1120 spin_lock_irqsave(&ch->ch_lock, flags); 1093 spin_lock_irqsave(&ch->ch_lock, flags);
1121 1094
1122 ch->ch_flags &= ~(CH_OPENING); 1095 ch->ch_flags &= ~(CH_OPENING);
@@ -1539,19 +1512,8 @@ static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
1539 */ 1512 */
1540static int dgnc_maxcps_room(struct tty_struct *tty, int bytes_available) 1513static int dgnc_maxcps_room(struct tty_struct *tty, int bytes_available)
1541{ 1514{
1542 struct channel_t *ch = NULL; 1515 struct un_t *un = tty->driver_data;
1543 struct un_t *un = NULL; 1516 struct channel_t *ch = un->un_ch;
1544
1545 if (!tty)
1546 return bytes_available;
1547
1548 un = tty->driver_data;
1549 if (!un || un->magic != DGNC_UNIT_MAGIC)
1550 return bytes_available;
1551
1552 ch = un->un_ch;
1553 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
1554 return bytes_available;
1555 1517
1556 /* 1518 /*
1557 * If its not the Transparent print device, return 1519 * If its not the Transparent print device, return
@@ -2058,17 +2020,7 @@ static inline int dgnc_get_mstat(struct channel_t *ch)
2058static int dgnc_get_modem_info(struct channel_t *ch, 2020static int dgnc_get_modem_info(struct channel_t *ch,
2059 unsigned int __user *value) 2021 unsigned int __user *value)
2060{ 2022{
2061 int result; 2023 return put_user(dgnc_get_mstat(ch), value);
2062
2063 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2064 return -ENXIO;
2065
2066 result = dgnc_get_mstat(ch);
2067
2068 if (result < 0)
2069 return -ENXIO;
2070
2071 return put_user(result, value);
2072} 2024}
2073 2025
2074/* 2026/*
@@ -2529,6 +2481,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2529 unsigned long arg) 2481 unsigned long arg)
2530{ 2482{
2531 struct dgnc_board *bd; 2483 struct dgnc_board *bd;
2484 struct board_ops *ch_bd_ops;
2532 struct channel_t *ch; 2485 struct channel_t *ch;
2533 struct un_t *un; 2486 struct un_t *un;
2534 int rc; 2487 int rc;
@@ -2550,6 +2503,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2550 if (!bd || bd->magic != DGNC_BOARD_MAGIC) 2503 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
2551 return -ENODEV; 2504 return -ENODEV;
2552 2505
2506 ch_bd_ops = bd->bd_ops;
2507
2553 spin_lock_irqsave(&ch->ch_lock, flags); 2508 spin_lock_irqsave(&ch->ch_lock, flags);
2554 2509
2555 if (un->un_open_count <= 0) { 2510 if (un->un_open_count <= 0) {
@@ -2574,7 +2529,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2574 if (rc) 2529 if (rc)
2575 return rc; 2530 return rc;
2576 2531
2577 rc = ch->ch_bd->bd_ops->drain(tty, 0); 2532 rc = ch_bd_ops->drain(tty, 0);
2578 2533
2579 if (rc) 2534 if (rc)
2580 return -EINTR; 2535 return -EINTR;
@@ -2582,7 +2537,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2582 spin_lock_irqsave(&ch->ch_lock, flags); 2537 spin_lock_irqsave(&ch->ch_lock, flags);
2583 2538
2584 if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP)) 2539 if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP))
2585 ch->ch_bd->bd_ops->send_break(ch, 250); 2540 ch_bd_ops->send_break(ch, 250);
2586 2541
2587 spin_unlock_irqrestore(&ch->ch_lock, flags); 2542 spin_unlock_irqrestore(&ch->ch_lock, flags);
2588 2543
@@ -2599,13 +2554,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2599 if (rc) 2554 if (rc)
2600 return rc; 2555 return rc;
2601 2556
2602 rc = ch->ch_bd->bd_ops->drain(tty, 0); 2557 rc = ch_bd_ops->drain(tty, 0);
2603 if (rc) 2558 if (rc)
2604 return -EINTR; 2559 return -EINTR;
2605 2560
2606 spin_lock_irqsave(&ch->ch_lock, flags); 2561 spin_lock_irqsave(&ch->ch_lock, flags);
2607 2562
2608 ch->ch_bd->bd_ops->send_break(ch, 250); 2563 ch_bd_ops->send_break(ch, 250);
2609 2564
2610 spin_unlock_irqrestore(&ch->ch_lock, flags); 2565 spin_unlock_irqrestore(&ch->ch_lock, flags);
2611 2566
@@ -2617,13 +2572,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2617 if (rc) 2572 if (rc)
2618 return rc; 2573 return rc;
2619 2574
2620 rc = ch->ch_bd->bd_ops->drain(tty, 0); 2575 rc = ch_bd_ops->drain(tty, 0);
2621 if (rc) 2576 if (rc)
2622 return -EINTR; 2577 return -EINTR;
2623 2578
2624 spin_lock_irqsave(&ch->ch_lock, flags); 2579 spin_lock_irqsave(&ch->ch_lock, flags);
2625 2580
2626 ch->ch_bd->bd_ops->send_break(ch, 250); 2581 ch_bd_ops->send_break(ch, 250);
2627 2582
2628 spin_unlock_irqrestore(&ch->ch_lock, flags); 2583 spin_unlock_irqrestore(&ch->ch_lock, flags);
2629 2584
@@ -2652,7 +2607,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2652 spin_lock_irqsave(&ch->ch_lock, flags); 2607 spin_lock_irqsave(&ch->ch_lock, flags);
2653 tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) | 2608 tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) |
2654 (arg ? CLOCAL : 0)); 2609 (arg ? CLOCAL : 0));
2655 ch->ch_bd->bd_ops->param(tty); 2610 ch_bd_ops->param(tty);
2656 spin_unlock_irqrestore(&ch->ch_lock, flags); 2611 spin_unlock_irqrestore(&ch->ch_lock, flags);
2657 2612
2658 return 0; 2613 return 0;
@@ -2689,7 +2644,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2689 2644
2690 if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) { 2645 if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
2691 ch->ch_r_head = ch->ch_r_tail; 2646 ch->ch_r_head = ch->ch_r_tail;
2692 ch->ch_bd->bd_ops->flush_uart_read(ch); 2647 ch_bd_ops->flush_uart_read(ch);
2693 /* Force queue flow control to be released, if needed */ 2648 /* Force queue flow control to be released, if needed */
2694 dgnc_check_queue_flow_control(ch); 2649 dgnc_check_queue_flow_control(ch);
2695 } 2650 }
@@ -2697,9 +2652,9 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2697 if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) { 2652 if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) {
2698 if (!(un->un_type == DGNC_PRINT)) { 2653 if (!(un->un_type == DGNC_PRINT)) {
2699 ch->ch_w_head = ch->ch_w_tail; 2654 ch->ch_w_head = ch->ch_w_tail;
2700 ch->ch_bd->bd_ops->flush_uart_write(ch); 2655 ch_bd_ops->flush_uart_write(ch);
2701 2656
2702 if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) { 2657 if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) {
2703 ch->ch_tun.un_flags &= 2658 ch->ch_tun.un_flags &=
2704 ~(UN_LOW | UN_EMPTY); 2659 ~(UN_LOW | UN_EMPTY);
2705 wake_up_interruptible(&ch->ch_tun.un_flags_wait); 2660 wake_up_interruptible(&ch->ch_tun.un_flags_wait);
@@ -2731,14 +2686,14 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2731 /* flush rx */ 2686 /* flush rx */
2732 ch->ch_flags &= ~CH_STOP; 2687 ch->ch_flags &= ~CH_STOP;
2733 ch->ch_r_head = ch->ch_r_tail; 2688 ch->ch_r_head = ch->ch_r_tail;
2734 ch->ch_bd->bd_ops->flush_uart_read(ch); 2689 ch_bd_ops->flush_uart_read(ch);
2735 /* Force queue flow control to be released, if needed */ 2690 /* Force queue flow control to be released, if needed */
2736 dgnc_check_queue_flow_control(ch); 2691 dgnc_check_queue_flow_control(ch);
2737 } 2692 }
2738 2693
2739 /* now wait for all the output to drain */ 2694 /* now wait for all the output to drain */
2740 spin_unlock_irqrestore(&ch->ch_lock, flags); 2695 spin_unlock_irqrestore(&ch->ch_lock, flags);
2741 rc = ch->ch_bd->bd_ops->drain(tty, 0); 2696 rc = ch_bd_ops->drain(tty, 0);
2742 if (rc) 2697 if (rc)
2743 return -EINTR; 2698 return -EINTR;
2744 2699
@@ -2748,7 +2703,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2748 case TCSETAW: 2703 case TCSETAW:
2749 2704
2750 spin_unlock_irqrestore(&ch->ch_lock, flags); 2705 spin_unlock_irqrestore(&ch->ch_lock, flags);
2751 rc = ch->ch_bd->bd_ops->drain(tty, 0); 2706 rc = ch_bd_ops->drain(tty, 0);
2752 if (rc) 2707 if (rc)
2753 return -EINTR; 2708 return -EINTR;
2754 2709
@@ -2771,7 +2726,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2771 /* set information for ditty */ 2726 /* set information for ditty */
2772 if (cmd == (DIGI_SETAW)) { 2727 if (cmd == (DIGI_SETAW)) {
2773 spin_unlock_irqrestore(&ch->ch_lock, flags); 2728 spin_unlock_irqrestore(&ch->ch_lock, flags);
2774 rc = ch->ch_bd->bd_ops->drain(tty, 0); 2729 rc = ch_bd_ops->drain(tty, 0);
2775 2730
2776 if (rc) 2731 if (rc)
2777 return -EINTR; 2732 return -EINTR;
@@ -2804,7 +2759,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2804 else 2759 else
2805 ch->ch_flags &= ~(CH_LOOPBACK); 2760 ch->ch_flags &= ~(CH_LOOPBACK);
2806 2761
2807 ch->ch_bd->bd_ops->param(tty); 2762 ch_bd_ops->param(tty);
2808 spin_unlock_irqrestore(&ch->ch_lock, flags); 2763 spin_unlock_irqrestore(&ch->ch_lock, flags);
2809 return 0; 2764 return 0;
2810 } 2765 }
@@ -2824,7 +2779,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2824 return rc; 2779 return rc;
2825 spin_lock_irqsave(&ch->ch_lock, flags); 2780 spin_lock_irqsave(&ch->ch_lock, flags);
2826 dgnc_set_custom_speed(ch, new_rate); 2781 dgnc_set_custom_speed(ch, new_rate);
2827 ch->ch_bd->bd_ops->param(tty); 2782 ch_bd_ops->param(tty);
2828 spin_unlock_irqrestore(&ch->ch_lock, flags); 2783 spin_unlock_irqrestore(&ch->ch_lock, flags);
2829 return 0; 2784 return 0;
2830 } 2785 }
@@ -2845,7 +2800,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2845 if (rc) 2800 if (rc)
2846 return rc; 2801 return rc;
2847 spin_lock_irqsave(&ch->ch_lock, flags); 2802 spin_lock_irqsave(&ch->ch_lock, flags);
2848 ch->ch_bd->bd_ops->send_immediate_char(ch, c); 2803 ch_bd_ops->send_immediate_char(ch, c);
2849 spin_unlock_irqrestore(&ch->ch_lock, flags); 2804 spin_unlock_irqrestore(&ch->ch_lock, flags);
2850 return 0; 2805 return 0;
2851 } 2806 }
@@ -2933,13 +2888,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2933 /* 2888 /*
2934 * Is the UART empty? Add that value to whats in our TX queue. 2889 * Is the UART empty? Add that value to whats in our TX queue.
2935 */ 2890 */
2936 count = buf.txbuf + ch->ch_bd->bd_ops->get_uart_bytes_left(ch); 2891 count = buf.txbuf + ch_bd_ops->get_uart_bytes_left(ch);
2937 2892
2938 /* 2893 /*
2939 * Figure out how much data the RealPort Server believes should 2894 * Figure out how much data the RealPort Server believes should
2940 * be in our TX queue. 2895 * be in our TX queue.
2941 */ 2896 */
2942 tdist = (buf.tIn - buf.tOut) & 0xffff; 2897 tdist = (buf.tx_in - buf.tx_out) & 0xffff;
2943 2898
2944 /* 2899 /*
2945 * If we have more data than the RealPort Server believes we 2900 * If we have more data than the RealPort Server believes we
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
index 523a2d34f747..5b983e6f5ee2 100644
--- a/drivers/staging/dgnc/digi.h
+++ b/drivers/staging/dgnc/digi.h
@@ -109,8 +109,8 @@ struct digi_info {
109 109
110struct digi_getbuffer /* Struct for holding buffer use counts */ 110struct digi_getbuffer /* Struct for holding buffer use counts */
111{ 111{
112 unsigned long tIn; 112 unsigned long tx_in;
113 unsigned long tOut; 113 unsigned long tx_out;
114 unsigned long rxbuf; 114 unsigned long rxbuf;
115 unsigned long txbuf; 115 unsigned long txbuf;
116 unsigned long txdone; 116 unsigned long txdone;
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index e8cacaecf9ad..3bd91758b2da 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -418,9 +418,9 @@ static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
418{ 418{
419 struct fc_regs *preg = udc->p_regs; 419 struct fc_regs *preg = udc->p_regs;
420 420
421 _nbu2ss_bitclr(&preg->EP_DCR[ep->epnum-1].EP_DCR1, DCR1_EPn_REQEN); 421 _nbu2ss_bitclr(&preg->EP_DCR[ep->epnum - 1].EP_DCR1, DCR1_EPn_REQEN);
422 mdelay(DMA_DISABLE_TIME); /* DCR1_EPn_REQEN Clear */ 422 mdelay(DMA_DISABLE_TIME); /* DCR1_EPn_REQEN Clear */
423 _nbu2ss_bitclr(&preg->EP_REGS[ep->epnum-1].EP_DMA_CTRL, EPn_DMA_EN); 423 _nbu2ss_bitclr(&preg->EP_REGS[ep->epnum - 1].EP_DMA_CTRL, EPn_DMA_EN);
424} 424}
425 425
426/*-------------------------------------------------------------------------*/ 426/*-------------------------------------------------------------------------*/
@@ -909,7 +909,7 @@ static int _nbu2ss_epn_out_pio(
909 /* Copy of every four bytes */ 909 /* Copy of every four bytes */
910 for (i = 0; i < iWordLength; i++) { 910 for (i = 0; i < iWordLength; i++) {
911 pBuf32->dw = 911 pBuf32->dw =
912 _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_READ); 912 _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
913 pBuf32++; 913 pBuf32++;
914 } 914 }
915 result = iWordLength * sizeof(u32); 915 result = iWordLength * sizeof(u32);
@@ -919,7 +919,7 @@ static int _nbu2ss_epn_out_pio(
919 if (data > 0) { 919 if (data > 0) {
920 /*---------------------------------------------------------*/ 920 /*---------------------------------------------------------*/
921 /* Copy of fraction byte */ 921 /* Copy of fraction byte */
922 Temp32.dw = _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_READ); 922 Temp32.dw = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
923 for (i = 0 ; i < data ; i++) 923 for (i = 0 ; i < data ; i++)
924 pBuf32->byte.DATA[i] = Temp32.byte.DATA[i]; 924 pBuf32->byte.DATA[i] = Temp32.byte.DATA[i];
925 result += data; 925 result += data;
@@ -1128,7 +1128,7 @@ static int _nbu2ss_epn_in_pio(
1128 if (iWordLength > 0) { 1128 if (iWordLength > 0) {
1129 for (i = 0; i < iWordLength; i++) { 1129 for (i = 0; i < iWordLength; i++) {
1130 _nbu2ss_writel( 1130 _nbu2ss_writel(
1131 &preg->EP_REGS[ep->epnum-1].EP_WRITE 1131 &preg->EP_REGS[ep->epnum - 1].EP_WRITE
1132 , pBuf32->dw 1132 , pBuf32->dw
1133 ); 1133 );
1134 1134
@@ -1290,7 +1290,7 @@ static void _nbu2ss_restert_transfer(struct nbu2ss_ep *ep)
1290 1290
1291 if (ep->epnum > 0) { 1291 if (ep->epnum > 0) {
1292 length = _nbu2ss_readl( 1292 length = _nbu2ss_readl(
1293 &ep->udc->p_regs->EP_REGS[ep->epnum-1].EP_LEN_DCNT); 1293 &ep->udc->p_regs->EP_REGS[ep->epnum - 1].EP_LEN_DCNT);
1294 1294
1295 length &= EPn_LDATA; 1295 length &= EPn_LDATA;
1296 if (length < ep->ep.maxpacket) 1296 if (length < ep->ep.maxpacket)
@@ -1463,7 +1463,7 @@ static int _nbu2ss_get_ep_stall(struct nbu2ss_udc *udc, u8 ep_adrs)
1463 bit_data = EP0_STL; 1463 bit_data = EP0_STL;
1464 1464
1465 } else { 1465 } else {
1466 data = _nbu2ss_readl(&preg->EP_REGS[epnum-1].EP_CONTROL); 1466 data = _nbu2ss_readl(&preg->EP_REGS[epnum - 1].EP_CONTROL);
1467 if ((data & EPn_EN) == 0) 1467 if ((data & EPn_EN) == 0)
1468 return -1; 1468 return -1;
1469 1469
@@ -1558,7 +1558,7 @@ static void _nbu2ss_epn_set_stall(
1558 ; limit_cnt++) { 1558 ; limit_cnt++) {
1559 1559
1560 regdata = _nbu2ss_readl( 1560 regdata = _nbu2ss_readl(
1561 &preg->EP_REGS[ep->epnum-1].EP_STATUS); 1561 &preg->EP_REGS[ep->epnum - 1].EP_STATUS);
1562 1562
1563 if ((regdata & EPn_IN_DATA) == 0) 1563 if ((regdata & EPn_IN_DATA) == 0)
1564 break; 1564 break;
@@ -1983,7 +1983,7 @@ static inline void _nbu2ss_epn_in_int(
1983 if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) { 1983 if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) {
1984 1984
1985 status = 1985 status =
1986 _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_STATUS); 1986 _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
1987 1987
1988 if ((status & EPn_IN_FULL) == 0) { 1988 if ((status & EPn_IN_FULL) == 0) {
1989 /*-----------------------------------------*/ 1989 /*-----------------------------------------*/
@@ -2894,7 +2894,7 @@ static int nbu2ss_ep_fifo_status(struct usb_ep *_ep)
2894 data = _nbu2ss_readl(&preg->EP0_LENGTH) & EP0_LDATA; 2894 data = _nbu2ss_readl(&preg->EP0_LENGTH) & EP0_LDATA;
2895 2895
2896 } else { 2896 } else {
2897 data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_LEN_DCNT) 2897 data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_LEN_DCNT)
2898 & EPn_LDATA; 2898 & EPn_LDATA;
2899 } 2899 }
2900 2900
@@ -3051,7 +3051,7 @@ static int nbu2ss_gad_vbus_session(struct usb_gadget *pgadget, int is_active)
3051} 3051}
3052 3052
3053/*-------------------------------------------------------------------------*/ 3053/*-------------------------------------------------------------------------*/
3054static int nbu2ss_gad_vbus_draw(struct usb_gadget *pgadget, unsigned mA) 3054static int nbu2ss_gad_vbus_draw(struct usb_gadget *pgadget, unsigned int mA)
3055{ 3055{
3056 struct nbu2ss_udc *udc; 3056 struct nbu2ss_udc *udc;
3057 unsigned long flags; 3057 unsigned long flags;
@@ -3101,7 +3101,7 @@ static int nbu2ss_gad_pullup(struct usb_gadget *pgadget, int is_on)
3101/*-------------------------------------------------------------------------*/ 3101/*-------------------------------------------------------------------------*/
3102static int nbu2ss_gad_ioctl( 3102static int nbu2ss_gad_ioctl(
3103 struct usb_gadget *pgadget, 3103 struct usb_gadget *pgadget,
3104 unsigned code, 3104 unsigned int code,
3105 unsigned long param) 3105 unsigned long param)
3106{ 3106{
3107 return 0; 3107 return 0;
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
index 4a2cc38de7b3..39769e3a801c 100644
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ b/drivers/staging/emxx_udc/emxx_udc.h
@@ -97,7 +97,7 @@
97#define BIT30 0x40000000 97#define BIT30 0x40000000
98#define BIT31 0x80000000 98#define BIT31 0x80000000
99 99
100#define TEST_FORCE_ENABLE (BIT18+BIT16) 100#define TEST_FORCE_ENABLE (BIT18 + BIT16)
101 101
102#define INT_SEL BIT10 102#define INT_SEL BIT10
103#define CONSTFS BIT09 103#define CONSTFS BIT09
@@ -125,15 +125,15 @@
125/*------- (0x0008) USB Address Register */ 125/*------- (0x0008) USB Address Register */
126#define USB_ADDR 0x007F0000 126#define USB_ADDR 0x007F0000
127#define SOF_STATUS BIT15 127#define SOF_STATUS BIT15
128#define UFRAME (BIT14+BIT13+BIT12) 128#define UFRAME (BIT14 + BIT13 + BIT12)
129#define FRAME 0x000007FF 129#define FRAME 0x000007FF
130 130
131#define USB_ADRS_SHIFT 16 131#define USB_ADRS_SHIFT 16
132 132
133/*------- (0x000C) UTMI Characteristic 1 Register */ 133/*------- (0x000C) UTMI Characteristic 1 Register */
134#define SQUSET (BIT07+BIT06+BIT05+BIT04) 134#define SQUSET (BIT07 + BIT06 + BIT05 + BIT04)
135 135
136#define USB_SQUSET (BIT06+BIT05+BIT04) 136#define USB_SQUSET (BIT06 + BIT05 + BIT04)
137 137
138/*------- (0x0010) TEST Control Register */ 138/*------- (0x0010) TEST Control Register */
139#define FORCEHS BIT02 139#define FORCEHS BIT02
@@ -196,7 +196,7 @@
196#define RSUM_EN BIT01 196#define RSUM_EN BIT01
197 197
198#define USB_INT_EN_BIT \ 198#define USB_INT_EN_BIT \
199 (EP0_EN|SPEED_MODE_EN|USB_RST_EN|SPND_EN|RSUM_EN) 199 (EP0_EN | SPEED_MODE_EN | USB_RST_EN | SPND_EN | RSUM_EN)
200 200
201/*------- (0x0028) EP0 Control Register */ 201/*------- (0x0028) EP0 Control Register */
202#define EP0_STGSEL BIT18 202#define EP0_STGSEL BIT18
@@ -205,9 +205,9 @@
205#define EP0_PIDCLR BIT09 205#define EP0_PIDCLR BIT09
206#define EP0_BCLR BIT08 206#define EP0_BCLR BIT08
207#define EP0_DEND BIT07 207#define EP0_DEND BIT07
208#define EP0_DW (BIT06+BIT05) 208#define EP0_DW (BIT06 + BIT05)
209#define EP0_DW4 0 209#define EP0_DW4 0
210#define EP0_DW3 (BIT06+BIT05) 210#define EP0_DW3 (BIT06 + BIT05)
211#define EP0_DW2 BIT06 211#define EP0_DW2 BIT06
212#define EP0_DW1 BIT05 212#define EP0_DW1 BIT05
213 213
@@ -238,7 +238,7 @@
238#define STG_START_INT BIT01 238#define STG_START_INT BIT01
239#define SETUP_INT BIT00 239#define SETUP_INT BIT00
240 240
241#define EP0_STATUS_RW_BIT (BIT16|BIT15|BIT11|0xFF) 241#define EP0_STATUS_RW_BIT (BIT16 | BIT15 | BIT11 | 0xFF)
242 242
243/*------- (0x0030) EP0 Interrupt Enable Register */ 243/*------- (0x0030) EP0 Interrupt Enable Register */
244#define EP0_PERR_NAK_EN BIT16 244#define EP0_PERR_NAK_EN BIT16
@@ -256,7 +256,7 @@
256#define SETUP_EN BIT00 256#define SETUP_EN BIT00
257 257
258#define EP0_INT_EN_BIT \ 258#define EP0_INT_EN_BIT \
259 (EP0_OUT_OR_EN|EP0_OUT_EN|EP0_IN_EN|STG_END_EN|SETUP_EN) 259 (EP0_OUT_OR_EN | EP0_OUT_EN | EP0_IN_EN | STG_END_EN | SETUP_EN)
260 260
261/*------- (0x0034) EP0 Length Register */ 261/*------- (0x0034) EP0 Length Register */
262#define EP0_LDATA 0x0000007F 262#define EP0_LDATA 0x0000007F
@@ -270,7 +270,7 @@
270#define EPn_BUF_SINGLE BIT30 270#define EPn_BUF_SINGLE BIT30
271 271
272#define EPn_DIR0 BIT26 272#define EPn_DIR0 BIT26
273#define EPn_MODE (BIT25+BIT24) 273#define EPn_MODE (BIT25 + BIT24)
274#define EPn_BULK 0 274#define EPn_BULK 0
275#define EPn_INTERRUPT BIT24 275#define EPn_INTERRUPT BIT24
276#define EPn_ISO BIT25 276#define EPn_ISO BIT25
@@ -283,9 +283,9 @@
283#define EPn_BCLR BIT09 283#define EPn_BCLR BIT09
284#define EPn_CBCLR BIT08 284#define EPn_CBCLR BIT08
285#define EPn_DEND BIT07 285#define EPn_DEND BIT07
286#define EPn_DW (BIT06+BIT05) 286#define EPn_DW (BIT06 + BIT05)
287#define EPn_DW4 0 287#define EPn_DW4 0
288#define EPn_DW3 (BIT06+BIT05) 288#define EPn_DW3 (BIT06 + BIT05)
289#define EPn_DW2 BIT06 289#define EPn_DW2 BIT06
290#define EPn_DW1 BIT05 290#define EPn_DW1 BIT05
291 291
@@ -324,7 +324,7 @@
324#define EPn_IN_EMPTY BIT00 /* R */ 324#define EPn_IN_EMPTY BIT00 /* R */
325 325
326#define EPn_INT_EN \ 326#define EPn_INT_EN \
327 (EPn_OUT_END_INT|EPn_OUT_INT|EPn_IN_END_INT|EPn_IN_INT) 327 (EPn_OUT_END_INT | EPn_OUT_INT | EPn_IN_END_INT | EPn_IN_INT)
328 328
329/*------- (0x0048:) EPn Interrupt Enable Register */ 329/*------- (0x0048:) EPn Interrupt Enable Register */
330#define EPn_OUT_END_EN BIT23 /* RW */ 330#define EPn_OUT_END_EN BIT23 /* RW */
@@ -368,7 +368,7 @@
368#define ARBITER_CTR BIT31 /* RW */ 368#define ARBITER_CTR BIT31 /* RW */
369#define MCYCLE_RST BIT12 /* RW */ 369#define MCYCLE_RST BIT12 /* RW */
370 370
371#define ENDIAN_CTR (BIT09+BIT08) /* RW */ 371#define ENDIAN_CTR (BIT09 + BIT08) /* RW */
372#define ENDIAN_BYTE_SWAP BIT09 372#define ENDIAN_BYTE_SWAP BIT09
373#define ENDIAN_HALF_WORD_SWAP ENDIAN_CTR 373#define ENDIAN_HALF_WORD_SWAP ENDIAN_CTR
374 374
@@ -376,7 +376,7 @@
376#define HTRANS_MODE BIT04 /* RW */ 376#define HTRANS_MODE BIT04 /* RW */
377 377
378#define WBURST_TYPE BIT02 /* RW */ 378#define WBURST_TYPE BIT02 /* RW */
379#define BURST_TYPE (BIT01+BIT00) /* RW */ 379#define BURST_TYPE (BIT01 + BIT00) /* RW */
380#define BURST_MAX_16 0 380#define BURST_MAX_16 0
381#define BURST_MAX_8 BIT00 381#define BURST_MAX_8 BIT00
382#define BURST_MAX_4 BIT01 382#define BURST_MAX_4 BIT01
@@ -412,7 +412,7 @@
412#define EPC_RST BIT00 /* RW */ 412#define EPC_RST BIT00 /* RW */
413 413
414/*------- (0x1014) USBF_EPTEST Register */ 414/*------- (0x1014) USBF_EPTEST Register */
415#define LINESTATE (BIT09+BIT08) /* R */ 415#define LINESTATE (BIT09 + BIT08) /* R */
416#define DM_LEVEL BIT09 /* R */ 416#define DM_LEVEL BIT09 /* R */
417#define DP_LEVEL BIT08 /* R */ 417#define DP_LEVEL BIT08 /* R */
418 418
@@ -485,7 +485,7 @@ struct fc_regs {
485 485
486 struct ep_regs EP_REGS[REG_EP_NUM]; /* Endpoint Register */ 486 struct ep_regs EP_REGS[REG_EP_NUM]; /* Endpoint Register */
487 487
488 u8 Reserved220[0x1000-0x220]; /* (0x0220:0x0FFF) Reserved */ 488 u8 Reserved220[0x1000 - 0x220]; /* (0x0220:0x0FFF) Reserved */
489 489
490 u32 AHBSCTR; /* (0x1000) AHBSCTR */ 490 u32 AHBSCTR; /* (0x1000) AHBSCTR */
491 u32 AHBMCTR; /* (0x1004) AHBMCTR */ 491 u32 AHBMCTR; /* (0x1004) AHBMCTR */
@@ -494,16 +494,16 @@ struct fc_regs {
494 u32 EPCTR; /* (0x1010) EPCTR */ 494 u32 EPCTR; /* (0x1010) EPCTR */
495 u32 USBF_EPTEST; /* (0x1014) USBF_EPTEST */ 495 u32 USBF_EPTEST; /* (0x1014) USBF_EPTEST */
496 496
497 u8 Reserved1018[0x20-0x18]; /* (0x1018:0x101F) Reserved */ 497 u8 Reserved1018[0x20 - 0x18]; /* (0x1018:0x101F) Reserved */
498 498
499 u32 USBSSVER; /* (0x1020) USBSSVER */ 499 u32 USBSSVER; /* (0x1020) USBSSVER */
500 u32 USBSSCONF; /* (0x1024) USBSSCONF */ 500 u32 USBSSCONF; /* (0x1024) USBSSCONF */
501 501
502 u8 Reserved1028[0x110-0x28]; /* (0x1028:0x110F) Reserved */ 502 u8 Reserved1028[0x110 - 0x28]; /* (0x1028:0x110F) Reserved */
503 503
504 struct ep_dcr EP_DCR[REG_EP_NUM]; /* */ 504 struct ep_dcr EP_DCR[REG_EP_NUM]; /* */
505 505
506 u8 Reserved1200[0x1000-0x200]; /* Reserved */ 506 u8 Reserved1200[0x1000 - 0x200]; /* Reserved */
507} __aligned(32); 507} __aligned(32);
508 508
509#define EP0_PACKETSIZE 64 509#define EP0_PACKETSIZE 64
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index ba9fc444b848..82b46cd27ca7 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -414,7 +414,7 @@ static int write(struct fbtft_par *par, void *buf, size_t len)
414 while (len--) { 414 while (len--) {
415 u8 i, data; 415 u8 i, data;
416 416
417 data = *(u8 *) buf++; 417 data = *(u8 *)buf++;
418 418
419 /* set data bus */ 419 /* set data bus */
420 for (i = 0; i < 8; ++i) 420 for (i = 0; i < 8; ++i)
diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
index a6f091fb975c..4dcea2e0b3ae 100644
--- a/drivers/staging/fbtft/fbtft-io.c
+++ b/drivers/staging/fbtft/fbtft-io.c
@@ -141,7 +141,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
141 "%s(len=%d): ", __func__, len); 141 "%s(len=%d): ", __func__, len);
142 142
143 while (len--) { 143 while (len--) {
144 data = *(u8 *) buf; 144 data = *(u8 *)buf;
145 145
146 /* Start writing by pulling down /WR */ 146 /* Start writing by pulling down /WR */
147 gpio_set_value(par->gpio.wr, 0); 147 gpio_set_value(par->gpio.wr, 0);
@@ -170,7 +170,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
170 gpio_set_value(par->gpio.wr, 1); 170 gpio_set_value(par->gpio.wr, 1);
171 171
172#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO 172#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
173 prev_data = *(u8 *) buf; 173 prev_data = *(u8 *)buf;
174#endif 174#endif
175 buf++; 175 buf++;
176 } 176 }
@@ -191,7 +191,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
191 "%s(len=%d): ", __func__, len); 191 "%s(len=%d): ", __func__, len);
192 192
193 while (len) { 193 while (len) {
194 data = *(u16 *) buf; 194 data = *(u16 *)buf;
195 195
196 /* Start writing by pulling down /WR */ 196 /* Start writing by pulling down /WR */
197 gpio_set_value(par->gpio.wr, 0); 197 gpio_set_value(par->gpio.wr, 0);
@@ -220,7 +220,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
220 gpio_set_value(par->gpio.wr, 1); 220 gpio_set_value(par->gpio.wr, 1);
221 221
222#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO 222#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
223 prev_data = *(u16 *) buf; 223 prev_data = *(u16 *)buf;
224#endif 224#endif
225 buf += 2; 225 buf += 2;
226 len -= 2; 226 len -= 2;
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index 241d7c6bebde..e4a355aefb25 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -1254,7 +1254,7 @@ static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
1254 "%s(len=%d): ", __func__, len); 1254 "%s(len=%d): ", __func__, len);
1255 1255
1256 while (len) { 1256 while (len) {
1257 data = *(u16 *) buf; 1257 data = *(u16 *)buf;
1258 1258
1259 /* Start writing by pulling down /WR */ 1259 /* Start writing by pulling down /WR */
1260 gpio_set_value(par->gpio.wr, 0); 1260 gpio_set_value(par->gpio.wr, 0);
@@ -1283,7 +1283,7 @@ static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
1283 gpio_set_value(par->gpio.wr, 1); 1283 gpio_set_value(par->gpio.wr, 1);
1284 1284
1285#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO 1285#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
1286 prev_data = *(u16 *) buf; 1286 prev_data = *(u16 *)buf;
1287#endif 1287#endif
1288 buf += 2; 1288 buf += 2;
1289 len -= 2; 1289 len -= 2;
@@ -1436,7 +1436,7 @@ static int __init fbtft_device_init(void)
1436 } 1436 }
1437 strncpy(fbtft_device_param_gpios[i].name, p_name, 1437 strncpy(fbtft_device_param_gpios[i].name, p_name,
1438 FBTFT_GPIO_NAME_SIZE - 1); 1438 FBTFT_GPIO_NAME_SIZE - 1);
1439 fbtft_device_param_gpios[i++].gpio = (int) val; 1439 fbtft_device_param_gpios[i++].gpio = (int)val;
1440 if (i == MAX_GPIOS) { 1440 if (i == MAX_GPIOS) {
1441 pr_err("gpios parameter: exceeded max array size: %d\n", 1441 pr_err("gpios parameter: exceeded max array size: %d\n",
1442 MAX_GPIOS); 1442 MAX_GPIOS);
diff --git a/drivers/staging/fsl-mc/README.txt b/drivers/staging/fsl-mc/README.txt
index 8214102f104b..179536a9b7a1 100644
--- a/drivers/staging/fsl-mc/README.txt
+++ b/drivers/staging/fsl-mc/README.txt
@@ -11,11 +11,11 @@ Contents summary
11 -Overview of DPAA2 objects 11 -Overview of DPAA2 objects
12 -DPAA2 Linux driver architecture overview 12 -DPAA2 Linux driver architecture overview
13 -bus driver 13 -bus driver
14 -dprc driver 14 -DPRC driver
15 -allocator 15 -allocator
16 -dpio driver 16 -DPIO driver
17 -Ethernet 17 -Ethernet
18 -mac 18 -MAC
19 19
20DPAA2 Overview 20DPAA2 Overview
21-------------- 21--------------
@@ -37,6 +37,9 @@ interfaces, an L2 switch, or accelerator instances.
37The MC provides memory-mapped I/O command interfaces (MC portals) 37The MC provides memory-mapped I/O command interfaces (MC portals)
38which DPAA2 software drivers use to operate on DPAA2 objects: 38which DPAA2 software drivers use to operate on DPAA2 objects:
39 39
40The diagram below shows an overview of the DPAA2 resource management
41architecture:
42
40 +--------------------------------------+ 43 +--------------------------------------+
41 | OS | 44 | OS |
42 | DPAA2 drivers | 45 | DPAA2 drivers |
@@ -77,13 +80,13 @@ DPIO objects.
77 80
78Overview of DPAA2 Objects 81Overview of DPAA2 Objects
79------------------------- 82-------------------------
 80This section provides a brief overview of some key objects 83This section provides a brief overview of some key DPAA2 objects.
81in the DPAA2 hardware. A simple scenario is described illustrating 84A simple scenario is described illustrating the objects involved
 82the objects involved in creating a network interface. 85in creating a network interface.
83 86
84-DPRC (Datapath Resource Container) 87-DPRC (Datapath Resource Container)
85 88
86 A DPRC is an container object that holds all the other 89 A DPRC is a container object that holds all the other
87 types of DPAA2 objects. In the example diagram below there 90 types of DPAA2 objects. In the example diagram below there
88 are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC) 91 are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC)
89 in the container. 92 in the container.
@@ -101,23 +104,23 @@ the objects involved in creating a network interfaces.
101 | | 104 | |
102 +---------------------------------------------------------+ 105 +---------------------------------------------------------+
103 106
104 From the point of view of an OS, a DPRC is bus-like. Like 107 From the point of view of an OS, a DPRC behaves similar to a plug and
105 a plug-and-play bus, such as PCI, DPRC commands can be used to 108 play bus, like PCI. DPRC commands can be used to enumerate the contents
106 enumerate the contents of the DPRC, discover the hardware 109 of the DPRC, discover the hardware objects present (including mappable
107 objects present (including mappable regions and interrupts). 110 regions and interrupts).
108 111
109 dprc.1 (bus) 112 DPRC.1 (bus)
110 | 113 |
111 +--+--------+-------+-------+-------+ 114 +--+--------+-------+-------+-------+
112 | | | | | 115 | | | | |
113 dpmcp.1 dpio.1 dpbp.1 dpni.1 dpmac.1 116 DPMCP.1 DPIO.1 DPBP.1 DPNI.1 DPMAC.1
114 dpmcp.2 dpio.2 117 DPMCP.2 DPIO.2
115 dpmcp.3 118 DPMCP.3
116 119
117 Hardware objects can be created and destroyed dynamically, providing 120 Hardware objects can be created and destroyed dynamically, providing
118 the ability to hot plug/unplug objects in and out of the DPRC. 121 the ability to hot plug/unplug objects in and out of the DPRC.
119 122
120 A DPRC has a mappable mmio region (an MC portal) that can be used 123 A DPRC has a mappable MMIO region (an MC portal) that can be used
121 to send MC commands. It has an interrupt for status events (like 124 to send MC commands. It has an interrupt for status events (like
122 hotplug). 125 hotplug).
123 126
@@ -137,10 +140,11 @@ the objects involved in creating a network interfaces.
137 A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX 140 A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX
138 queuing mechanisms, configuration mechanisms, buffer management, 141 queuing mechanisms, configuration mechanisms, buffer management,
139 physical ports, and interrupts. DPAA2 uses a more granular approach 142 physical ports, and interrupts. DPAA2 uses a more granular approach
140 utilizing multiple hardware objects. Each object has specialized 143 utilizing multiple hardware objects. Each object provides specialized
141 functions, and are used together by software to provide Ethernet network 144 functions. Groups of these objects are used by software to provide
142 interface functionality. This approach provides efficient use of finite 145 Ethernet network interface functionality. This approach provides
143 hardware resources, flexibility, and performance advantages. 146 efficient use of finite hardware resources, flexibility, and
147 performance advantages.
144 148
145 The diagram below shows the objects needed for a simple 149 The diagram below shows the objects needed for a simple
146 network interface configuration on a system with 2 CPUs. 150 network interface configuration on a system with 2 CPUs.
@@ -168,46 +172,52 @@ the objects involved in creating a network interfaces.
168 172
169 Below the objects are described. For each object a brief description 173 Below the objects are described. For each object a brief description
170 is provided along with a summary of the kinds of operations the object 174 is provided along with a summary of the kinds of operations the object
171 supports and a summary of key resources of the object (mmio regions 175 supports and a summary of key resources of the object (MMIO regions
172 and irqs). 176 and IRQs).
173 177
174 -DPMAC (Datapath Ethernet MAC): represents an Ethernet MAC, a 178 -DPMAC (Datapath Ethernet MAC): represents an Ethernet MAC, a
175 hardware device that connects to an Ethernet PHY and allows 179 hardware device that connects to an Ethernet PHY and allows
176 physical transmission and reception of Ethernet frames. 180 physical transmission and reception of Ethernet frames.
177 -mmio regions: none 181 -MMIO regions: none
178 -irqs: dpni link change 182 -IRQs: DPNI link change
179 -commands: set link up/down, link config, get stats, 183 -commands: set link up/down, link config, get stats,
180 irq config, enable, reset 184 IRQ config, enable, reset
181 185
182 -DPNI (Datapath Network Interface): contains TX/RX queues, 186 -DPNI (Datapath Network Interface): contains TX/RX queues,
183 network interface configuration, and rx buffer pool configuration 187 network interface configuration, and RX buffer pool configuration
184 mechanisms. 188 mechanisms. The TX/RX queues are in memory and are identified by
185 -mmio regions: none 189 queue number.
186 -irqs: link state 190 -MMIO regions: none
191 -IRQs: link state
187 -commands: port config, offload config, queue config, 192 -commands: port config, offload config, queue config,
188 parse/classify config, irq config, enable, reset 193 parse/classify config, IRQ config, enable, reset
189 194
190 -DPIO (Datapath I/O): provides interfaces to enqueue and dequeue 195 -DPIO (Datapath I/O): provides interfaces to enqueue and dequeue
191 packets and do hardware buffer pool management operations. For 196 packets and do hardware buffer pool management operations. The DPAA2
192 optimum performance there is typically DPIO per CPU. This allows 197 architecture separates the mechanism to access queues (the DPIO object)
193 each CPU to perform simultaneous enqueue/dequeue operations. 198 from the queues themselves. The DPIO provides an MMIO interface to
194 -mmio regions: queue operations, buffer mgmt 199 enqueue/dequeue packets. To enqueue something a descriptor is written
195 -irqs: data availability, congestion notification, buffer 200 to the DPIO MMIO region, which includes the target queue number.
201 There will typically be one DPIO assigned to each CPU. This allows all
 202 CPUs to simultaneously perform enqueue/dequeue operations. DPIOs are
203 expected to be shared by different DPAA2 drivers.
204 -MMIO regions: queue operations, buffer management
205 -IRQs: data availability, congestion notification, buffer
196 pool depletion 206 pool depletion
197 -commands: irq config, enable, reset 207 -commands: IRQ config, enable, reset
198 208
199 -DPBP (Datapath Buffer Pool): represents a hardware buffer 209 -DPBP (Datapath Buffer Pool): represents a hardware buffer
200 pool. 210 pool.
201 -mmio regions: none 211 -MMIO regions: none
202 -irqs: none 212 -IRQs: none
203 -commands: enable, reset 213 -commands: enable, reset
204 214
205 -DPMCP (Datapath MC Portal): provides an MC command portal. 215 -DPMCP (Datapath MC Portal): provides an MC command portal.
206 Used by drivers to send commands to the MC to manage 216 Used by drivers to send commands to the MC to manage
207 objects. 217 objects.
208 -mmio regions: MC command portal 218 -MMIO regions: MC command portal
209 -irqs: command completion 219 -IRQs: command completion
210 -commands: irq config, enable, reset 220 -commands: IRQ config, enable, reset
211 221
212 Object Connections 222 Object Connections
213 ------------------ 223 ------------------
@@ -268,22 +278,22 @@ of each driver follows.
268 | Stack | 278 | Stack |
269 +------------+ +------------+ 279 +------------+ +------------+
270 | Allocator |. . . . . . . | Ethernet | 280 | Allocator |. . . . . . . | Ethernet |
271 |(dpmcp,dpbp)| | (dpni) | 281 |(DPMCP,DPBP)| | (DPNI) |
272 +-.----------+ +---+---+----+ 282 +-.----------+ +---+---+----+
273 . . ^ | 283 . . ^ |
274 . . <data avail, | |<enqueue, 284 . . <data avail, | |<enqueue,
275 . . tx confirm> | | dequeue> 285 . . tx confirm> | | dequeue>
276 +-------------+ . | | 286 +-------------+ . | |
277 | DPRC driver | . +---+---V----+ +---------+ 287 | DPRC driver | . +---+---V----+ +---------+
278 | (dprc) | . . . . . .| DPIO driver| | MAC | 288 | (DPRC) | . . . . . .| DPIO driver| | MAC |
279 +----------+--+ | (dpio) | | (dpmac) | 289 +----------+--+ | (DPIO) | | (DPMAC) |
280 | +------+-----+ +-----+---+ 290 | +------+-----+ +-----+---+
281 |<dev add/remove> | | 291 |<dev add/remove> | |
282 | | | 292 | | |
283 +----+--------------+ | +--+---+ 293 +----+--------------+ | +--+---+
284 | mc-bus driver | | | PHY | 294 | MC-bus driver | | | PHY |
285 | | | |driver| 295 | | | |driver|
286 | /fsl-mc@80c000000 | | +--+---+ 296 | /soc/fsl-mc | | +--+---+
287 +-------------------+ | | 297 +-------------------+ | |
288 | | 298 | |
289 ================================ HARDWARE =========|=================|====== 299 ================================ HARDWARE =========|=================|======
@@ -298,25 +308,27 @@ of each driver follows.
298 308
299A brief description of each driver is provided below. 309A brief description of each driver is provided below.
300 310
301 mc-bus driver 311 MC-bus driver
302 ------------- 312 -------------
303 The mc-bus driver is a platform driver and is probed from an 313 The MC-bus driver is a platform driver and is probed from a
304 "/fsl-mc@xxxx" node in the device tree passed in by boot firmware. 314 node in the device tree (compatible "fsl,qoriq-mc") passed in by boot
305 It is responsible for bootstrapping the DPAA2 kernel infrastructure. 315 firmware. It is responsible for bootstrapping the DPAA2 kernel
316 infrastructure.
306 Key functions include: 317 Key functions include:
307 -registering a new bus type named "fsl-mc" with the kernel, 318 -registering a new bus type named "fsl-mc" with the kernel,
308 and implementing bus call-backs (e.g. match/uevent/dev_groups) 319 and implementing bus call-backs (e.g. match/uevent/dev_groups)
309 -implemeting APIs for DPAA2 driver registration and for device 320 -implementing APIs for DPAA2 driver registration and for device
310 add/remove 321 add/remove
311 -creates an MSI irq domain 322 -creates an MSI IRQ domain
312 -do a device add of the 'root' DPRC device, which is needed 323 -doing a 'device add' to expose the 'root' DPRC, in turn triggering
313 to bootstrap things 324 a bind of the root DPRC to the DPRC driver
314 325
315 DPRC driver 326 DPRC driver
316 ----------- 327 -----------
317 The dprc-driver is bound DPRC objects and does runtime management 328 The DPRC driver is bound to DPRC objects and does runtime management
318 of a bus instance. It performs the initial bus scan of the DPRC 329 of a bus instance. It performs the initial bus scan of the DPRC
319 and handles interrupts for container events such as hot plug. 330 and handles interrupts for container events such as hot plug by
331 re-scanning the DPRC.
320 332
321 Allocator 333 Allocator
322 ---------- 334 ----------
@@ -334,14 +346,20 @@ A brief description of each driver is provided below.
334 DPIO driver 346 DPIO driver
335 ----------- 347 -----------
336 The DPIO driver is bound to DPIO objects and provides services that allow 348 The DPIO driver is bound to DPIO objects and provides services that allow
337 other drivers such as the Ethernet driver to receive and transmit data. 349 other drivers such as the Ethernet driver to enqueue and dequeue data for
350 their respective objects.
338 Key services include: 351 Key services include:
339 -data availability notifications 352 -data availability notifications
340 -hardware queuing operations (enqueue and dequeue of data) 353 -hardware queuing operations (enqueue and dequeue of data)
341 -hardware buffer pool management 354 -hardware buffer pool management
342 355
356 To transmit a packet the Ethernet driver puts data on a queue and
357 invokes a DPIO API. For receive, the Ethernet driver registers
358 a data availability notification callback. To dequeue a packet
359 a DPIO API is used.
360
343 There is typically one DPIO object per physical CPU for optimum 361 There is typically one DPIO object per physical CPU for optimum
344 performance, allowing each CPU to simultaneously enqueue 362 performance, allowing different CPUs to simultaneously enqueue
345 and dequeue data. 363 and dequeue data.
346 364
347 The DPIO driver operates on behalf of all DPAA2 drivers 365 The DPIO driver operates on behalf of all DPAA2 drivers
@@ -362,3 +380,7 @@ A brief description of each driver is provided below.
362 by the appropriate PHY driver via an mdio bus. The MAC driver 380 by the appropriate PHY driver via an mdio bus. The MAC driver
363 plays a role of being a proxy between the PHY driver and the 381 plays a role of being a proxy between the PHY driver and the
364 MC. It does this proxy via the MC commands to a DPMAC object. 382 MC. It does this proxy via the MC commands to a DPMAC object.
383 If the PHY driver signals a link change, the MAC driver notifies
384 the MC via a DPMAC command. If a network interface is brought
385 up or down, the MC notifies the DPMAC driver via an interrupt and
386 the driver can take appropriate action.
diff --git a/drivers/staging/fsl-mc/TODO b/drivers/staging/fsl-mc/TODO
index 389436891b93..54a8bc69222e 100644
--- a/drivers/staging/fsl-mc/TODO
+++ b/drivers/staging/fsl-mc/TODO
@@ -1,21 +1,8 @@
1* Decide if multiple root fsl-mc buses will be supported per Linux instance,
2 and if so add support for this.
3
4* Add at least one device driver for a DPAA2 object (child device of the 1* Add at least one device driver for a DPAA2 object (child device of the
5 fsl-mc bus). Most likely candidate for this is adding DPAA2 Ethernet 2 fsl-mc bus). Most likely candidate for this is adding DPAA2 Ethernet
6 driver support, which depends on drivers for several objects: DPNI, 3 driver support, which depends on drivers for several objects: DPNI,
7 DPIO, DPMAC. Other pre-requisites include: 4 DPIO, DPMAC. Other pre-requisites include:
8 5
9 * interrupt support. for meaningful driver support we need
10 interrupts, and thus need message interrupt support by the bus
11 driver.
12 -Note: this has dependencies on generic MSI support work
13 in process upstream, see [1] and [2].
14
15 * Management Complex (MC) command serialization. locking mechanisms
16 are needed by drivers to serialize commands sent to the MC, including
17 from atomic context.
18
19 * MC firmware uprev. The MC firmware upon which the fsl-mc 6 * MC firmware uprev. The MC firmware upon which the fsl-mc
20 bus driver and DPAA2 object drivers are based is continuing 7 bus driver and DPAA2 object drivers are based is continuing
21 to evolve, so minor updates are needed to keep in sync with binary 8 to evolve, so minor updates are needed to keep in sync with binary
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
index 2d97173f8e91..c31fe1bca191 100644
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ b/drivers/staging/fsl-mc/bus/dpbp.c
@@ -293,7 +293,7 @@ int dpbp_set_irq(struct fsl_mc_io *mc_io,
293 cmd.params[0] |= mc_enc(0, 8, irq_index); 293 cmd.params[0] |= mc_enc(0, 8, irq_index);
294 cmd.params[0] |= mc_enc(32, 32, irq_cfg->val); 294 cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
295 cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr); 295 cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr);
296 cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id); 296 cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
297 297
298 /* send command to mc*/ 298 /* send command to mc*/
299 return mc_send_command(mc_io, &cmd); 299 return mc_send_command(mc_io, &cmd);
@@ -334,7 +334,7 @@ int dpbp_get_irq(struct fsl_mc_io *mc_io,
334 /* retrieve response parameters */ 334 /* retrieve response parameters */
335 irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32); 335 irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
336 irq_cfg->addr = (u64)mc_dec(cmd.params[1], 0, 64); 336 irq_cfg->addr = (u64)mc_dec(cmd.params[1], 0, 64);
337 irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32); 337 irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
338 *type = (int)mc_dec(cmd.params[2], 32, 32); 338 *type = (int)mc_dec(cmd.params[2], 32, 32);
339 return 0; 339 return 0;
340} 340}
@@ -502,6 +502,7 @@ int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
502 /* prepare command */ 502 /* prepare command */
503 cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS, 503 cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
504 cmd_flags, token); 504 cmd_flags, token);
505 cmd.params[0] |= mc_enc(0, 32, *status);
505 cmd.params[0] |= mc_enc(32, 8, irq_index); 506 cmd.params[0] |= mc_enc(32, 8, irq_index);
506 507
507 /* send command to mc*/ 508 /* send command to mc*/
@@ -580,3 +581,75 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
580 return 0; 581 return 0;
581} 582}
582EXPORT_SYMBOL(dpbp_get_attributes); 583EXPORT_SYMBOL(dpbp_get_attributes);
584
585/**
586 * dpbp_set_notifications() - Set notifications towards software
587 * @mc_io: Pointer to MC portal's I/O object
588 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
589 * @token: Token of DPBP object
590 * @cfg: notifications configuration
591 *
592 * Return: '0' on Success; Error code otherwise.
593 */
594int dpbp_set_notifications(struct fsl_mc_io *mc_io,
595 u32 cmd_flags,
596 u16 token,
597 struct dpbp_notification_cfg *cfg)
598{
599 struct mc_command cmd = { 0 };
600
601 /* prepare command */
602 cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
603 cmd_flags,
604 token);
605
606 cmd.params[0] |= mc_enc(0, 32, cfg->depletion_entry);
607 cmd.params[0] |= mc_enc(32, 32, cfg->depletion_exit);
608 cmd.params[1] |= mc_enc(0, 32, cfg->surplus_entry);
609 cmd.params[1] |= mc_enc(32, 32, cfg->surplus_exit);
610 cmd.params[2] |= mc_enc(0, 16, cfg->options);
611 cmd.params[3] |= mc_enc(0, 64, cfg->message_ctx);
612 cmd.params[4] |= mc_enc(0, 64, cfg->message_iova);
613
614 /* send command to mc*/
615 return mc_send_command(mc_io, &cmd);
616}
617
618/**
619 * dpbp_get_notifications() - Get the notifications configuration
620 * @mc_io: Pointer to MC portal's I/O object
621 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
622 * @token: Token of DPBP object
623 * @cfg: notifications configuration
624 *
625 * Return: '0' on Success; Error code otherwise.
626 */
627int dpbp_get_notifications(struct fsl_mc_io *mc_io,
628 u32 cmd_flags,
629 u16 token,
630 struct dpbp_notification_cfg *cfg)
631{
632 struct mc_command cmd = { 0 };
633 int err;
634
635 /* prepare command */
636 cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS,
637 cmd_flags,
638 token);
639
640 /* send command to mc*/
641 err = mc_send_command(mc_io, &cmd);
642 if (err)
643 return err;
644
645 /* retrieve response parameters */
646 cfg->depletion_entry = (u32)mc_dec(cmd.params[0], 0, 32);
647 cfg->depletion_exit = (u32)mc_dec(cmd.params[0], 32, 32);
648 cfg->surplus_entry = (u32)mc_dec(cmd.params[1], 0, 32);
649 cfg->surplus_exit = (u32)mc_dec(cmd.params[1], 32, 32);
650 cfg->options = (u16)mc_dec(cmd.params[2], 0, 16);
651 cfg->message_ctx = (u64)mc_dec(cmd.params[3], 0, 64);
652 cfg->message_iova = (u64)mc_dec(cmd.params[4], 0, 64);
653
654 return 0;
655}
diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
index a87e9f84fa42..c9b52dd7ba31 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
@@ -32,9 +32,9 @@
32#ifndef _FSL_DPMCP_CMD_H 32#ifndef _FSL_DPMCP_CMD_H
33#define _FSL_DPMCP_CMD_H 33#define _FSL_DPMCP_CMD_H
34 34
35/* DPMCP Version */ 35/* Minimal supported DPMCP Version */
36#define DPMCP_VER_MAJOR 2 36#define DPMCP_MIN_VER_MAJOR 3
37#define DPMCP_VER_MINOR 1 37#define DPMCP_MIN_VER_MINOR 0
38 38
39/* Command IDs */ 39/* Command IDs */
40#define DPMCP_CMDID_CLOSE 0x800 40#define DPMCP_CMDID_CLOSE 0x800
@@ -52,6 +52,5 @@
52#define DPMCP_CMDID_SET_IRQ_MASK 0x014 52#define DPMCP_CMDID_SET_IRQ_MASK 0x014
53#define DPMCP_CMDID_GET_IRQ_MASK 0x015 53#define DPMCP_CMDID_GET_IRQ_MASK 0x015
54#define DPMCP_CMDID_GET_IRQ_STATUS 0x016 54#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
55#define DPMCP_CMDID_CLEAR_IRQ_STATUS 0x017
56 55
57#endif /* _FSL_DPMCP_CMD_H */ 56#endif /* _FSL_DPMCP_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
index b0248f574619..fd6dd4e07b87 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
@@ -213,7 +213,7 @@ int dpmcp_set_irq(struct fsl_mc_io *mc_io,
213 cmd.params[0] |= mc_enc(0, 8, irq_index); 213 cmd.params[0] |= mc_enc(0, 8, irq_index);
214 cmd.params[0] |= mc_enc(32, 32, irq_cfg->val); 214 cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
215 cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); 215 cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
216 cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id); 216 cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
217 217
218 /* send command to mc*/ 218 /* send command to mc*/
219 return mc_send_command(mc_io, &cmd); 219 return mc_send_command(mc_io, &cmd);
@@ -254,7 +254,7 @@ int dpmcp_get_irq(struct fsl_mc_io *mc_io,
254 /* retrieve response parameters */ 254 /* retrieve response parameters */
255 irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32); 255 irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
256 irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64); 256 irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
257 irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32); 257 irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
258 *type = (int)mc_dec(cmd.params[2], 32, 32); 258 *type = (int)mc_dec(cmd.params[2], 32, 32);
259 return 0; 259 return 0;
260} 260}
@@ -435,37 +435,6 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
435} 435}
436 436
437/** 437/**
438 * dpmcp_clear_irq_status() - Clear a pending interrupt's status
439 *
440 * @mc_io: Pointer to MC portal's I/O object
441 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
442 * @token: Token of DPMCP object
443 * @irq_index: The interrupt index to configure
444 * @status: Bits to clear (W1C) - one bit per cause:
445 * 0 = don't change
446 * 1 = clear status bit
447 *
448 * Return: '0' on Success; Error code otherwise.
449 */
450int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io,
451 u32 cmd_flags,
452 u16 token,
453 u8 irq_index,
454 u32 status)
455{
456 struct mc_command cmd = { 0 };
457
458 /* prepare command */
459 cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLEAR_IRQ_STATUS,
460 cmd_flags, token);
461 cmd.params[0] |= mc_enc(0, 32, status);
462 cmd.params[0] |= mc_enc(32, 8, irq_index);
463
464 /* send command to mc*/
465 return mc_send_command(mc_io, &cmd);
466}
467
468/**
469 * dpmcp_get_attributes - Retrieve DPMCP attributes. 438 * dpmcp_get_attributes - Retrieve DPMCP attributes.
470 * 439 *
471 * @mc_io: Pointer to MC portal's I/O object 440 * @mc_io: Pointer to MC portal's I/O object
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h
index 6df351f0caa5..fe79d4d9293d 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp.h
@@ -82,12 +82,12 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
82 * struct dpmcp_irq_cfg - IRQ configuration 82 * struct dpmcp_irq_cfg - IRQ configuration
83 * @paddr: Address that must be written to signal a message-based interrupt 83 * @paddr: Address that must be written to signal a message-based interrupt
84 * @val: Value to write into irq_addr address 84 * @val: Value to write into irq_addr address
85 * @user_irq_id: A user defined number associated with this IRQ 85 * @irq_num: A user defined number associated with this IRQ
86 */ 86 */
87struct dpmcp_irq_cfg { 87struct dpmcp_irq_cfg {
88 uint64_t paddr; 88 uint64_t paddr;
89 uint32_t val; 89 uint32_t val;
90 int user_irq_id; 90 int irq_num;
91}; 91};
92 92
93int dpmcp_set_irq(struct fsl_mc_io *mc_io, 93int dpmcp_set_irq(struct fsl_mc_io *mc_io,
@@ -133,12 +133,6 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
133 uint8_t irq_index, 133 uint8_t irq_index,
134 uint32_t *status); 134 uint32_t *status);
135 135
136int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io,
137 uint32_t cmd_flags,
138 uint16_t token,
139 uint8_t irq_index,
140 uint32_t status);
141
142/** 136/**
143 * struct dpmcp_attr - Structure representing DPMCP attributes 137 * struct dpmcp_attr - Structure representing DPMCP attributes
144 * @id: DPMCP object ID 138 * @id: DPMCP object ID
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
index 6552c2034947..9b854fa8e84d 100644
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
@@ -40,9 +40,9 @@
40#ifndef _FSL_DPRC_CMD_H 40#ifndef _FSL_DPRC_CMD_H
41#define _FSL_DPRC_CMD_H 41#define _FSL_DPRC_CMD_H
42 42
43/* DPRC Version */ 43/* Minimal supported DPRC Version */
44#define DPRC_VER_MAJOR 4 44#define DPRC_MIN_VER_MAJOR 5
45#define DPRC_VER_MINOR 0 45#define DPRC_MIN_VER_MINOR 0
46 46
47/* Command IDs */ 47/* Command IDs */
48#define DPRC_CMDID_CLOSE 0x800 48#define DPRC_CMDID_CLOSE 0x800
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index 31488a7b9e86..7fc47173c164 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -312,6 +312,15 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
312 continue; 312 continue;
313 } 313 }
314 314
315 /*
316 * add a quirk for all versions of dpsec < 4.0...none
317 * are coherent regardless of what the MC reports.
318 */
319 if ((strcmp(obj_desc->type, "dpseci") == 0) &&
320 (obj_desc->ver_major < 4))
321 obj_desc->flags |=
322 DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY;
323
315 irq_count += obj_desc->irq_count; 324 irq_count += obj_desc->irq_count;
316 dev_dbg(&mc_bus_dev->dev, 325 dev_dbg(&mc_bus_dev->dev,
317 "Discovered object: type %s, id %d\n", 326 "Discovered object: type %s, id %d\n",
@@ -423,6 +432,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
423 if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num)) 432 if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num))
424 goto out; 433 goto out;
425 434
435 status = 0;
426 error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0, 436 error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
427 &status); 437 &status);
428 if (error < 0) { 438 if (error < 0) {
@@ -692,6 +702,25 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
692 goto error_cleanup_msi_domain; 702 goto error_cleanup_msi_domain;
693 } 703 }
694 704
705 error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
706 &mc_bus->dprc_attr);
707 if (error < 0) {
708 dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
709 error);
710 goto error_cleanup_open;
711 }
712
713 if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR ||
714 (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR &&
715 mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) {
716 dev_err(&mc_dev->dev,
717 "ERROR: DPRC version %d.%d not supported\n",
718 mc_bus->dprc_attr.version.major,
719 mc_bus->dprc_attr.version.minor);
720 error = -ENOTSUPP;
721 goto error_cleanup_open;
722 }
723
695 mutex_init(&mc_bus->scan_mutex); 724 mutex_init(&mc_bus->scan_mutex);
696 725
697 /* 726 /*
@@ -779,9 +808,7 @@ static int dprc_remove(struct fsl_mc_device *mc_dev)
779static const struct fsl_mc_device_match_id match_id_table[] = { 808static const struct fsl_mc_device_match_id match_id_table[] = {
780 { 809 {
781 .vendor = FSL_MC_VENDOR_FREESCALE, 810 .vendor = FSL_MC_VENDOR_FREESCALE,
782 .obj_type = "dprc", 811 .obj_type = "dprc"},
783 .ver_major = DPRC_VER_MAJOR,
784 .ver_minor = DPRC_VER_MINOR},
785 {.vendor = 0x0}, 812 {.vendor = 0x0},
786}; 813};
787 814
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
index 381b9a96a14b..a2c47377cc4e 100644
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ b/drivers/staging/fsl-mc/bus/dprc.c
@@ -265,7 +265,7 @@ int dprc_get_irq(struct fsl_mc_io *mc_io,
265 /* retrieve response parameters */ 265 /* retrieve response parameters */
266 irq_cfg->val = mc_dec(cmd.params[0], 0, 32); 266 irq_cfg->val = mc_dec(cmd.params[0], 0, 32);
267 irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64); 267 irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64);
268 irq_cfg->user_irq_id = mc_dec(cmd.params[2], 0, 32); 268 irq_cfg->irq_num = mc_dec(cmd.params[2], 0, 32);
269 *type = mc_dec(cmd.params[2], 32, 32); 269 *type = mc_dec(cmd.params[2], 32, 32);
270 270
271 return 0; 271 return 0;
@@ -296,7 +296,7 @@ int dprc_set_irq(struct fsl_mc_io *mc_io,
296 cmd.params[0] |= mc_enc(32, 8, irq_index); 296 cmd.params[0] |= mc_enc(32, 8, irq_index);
297 cmd.params[0] |= mc_enc(0, 32, irq_cfg->val); 297 cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
298 cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); 298 cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
299 cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id); 299 cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
300 300
301 /* send command to mc*/ 301 /* send command to mc*/
302 return mc_send_command(mc_io, &cmd); 302 return mc_send_command(mc_io, &cmd);
@@ -466,6 +466,7 @@ int dprc_get_irq_status(struct fsl_mc_io *mc_io,
466 /* prepare command */ 466 /* prepare command */
467 cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS, 467 cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
468 cmd_flags, token); 468 cmd_flags, token);
469 cmd.params[0] |= mc_enc(0, 32, *status);
469 cmd.params[0] |= mc_enc(32, 8, irq_index); 470 cmd.params[0] |= mc_enc(32, 8, irq_index);
470 471
471 /* send command to mc*/ 472 /* send command to mc*/
@@ -948,6 +949,7 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
948 obj_desc->state = mc_dec(cmd.params[1], 32, 32); 949 obj_desc->state = mc_dec(cmd.params[1], 32, 32);
949 obj_desc->ver_major = mc_dec(cmd.params[2], 0, 16); 950 obj_desc->ver_major = mc_dec(cmd.params[2], 0, 16);
950 obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16); 951 obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16);
952 obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
951 obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8); 953 obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8);
952 obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8); 954 obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8);
953 obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8); 955 obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8);
@@ -1042,6 +1044,7 @@ int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
1042 obj_desc->state = (u32)mc_dec(cmd.params[1], 32, 32); 1044 obj_desc->state = (u32)mc_dec(cmd.params[1], 32, 32);
1043 obj_desc->ver_major = (u16)mc_dec(cmd.params[2], 0, 16); 1045 obj_desc->ver_major = (u16)mc_dec(cmd.params[2], 0, 16);
1044 obj_desc->ver_minor = (u16)mc_dec(cmd.params[2], 16, 16); 1046 obj_desc->ver_minor = (u16)mc_dec(cmd.params[2], 16, 16);
1047 obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
1045 obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8); 1048 obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8);
1046 obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8); 1049 obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8);
1047 obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8); 1050 obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8);
@@ -1108,7 +1111,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
1108 cmd.params[0] |= mc_enc(32, 8, irq_index); 1111 cmd.params[0] |= mc_enc(32, 8, irq_index);
1109 cmd.params[0] |= mc_enc(0, 32, irq_cfg->val); 1112 cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
1110 cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); 1113 cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
1111 cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id); 1114 cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
1112 cmd.params[2] |= mc_enc(32, 32, obj_id); 1115 cmd.params[2] |= mc_enc(32, 32, obj_id);
1113 cmd.params[3] |= mc_enc(0, 8, obj_type[0]); 1116 cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
1114 cmd.params[3] |= mc_enc(8, 8, obj_type[1]); 1117 cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
@@ -1189,7 +1192,7 @@ int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
1189 /* retrieve response parameters */ 1192 /* retrieve response parameters */
1190 irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32); 1193 irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
1191 irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64); 1194 irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
1192 irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32); 1195 irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
1193 *type = (int)mc_dec(cmd.params[2], 32, 32); 1196 *type = (int)mc_dec(cmd.params[2], 32, 32);
1194 1197
1195 return 0; 1198 return 0;
@@ -1437,14 +1440,8 @@ EXPORT_SYMBOL(dprc_set_obj_label);
1437 * @endpoint1: Endpoint 1 configuration parameters 1440 * @endpoint1: Endpoint 1 configuration parameters
1438 * @endpoint2: Endpoint 2 configuration parameters 1441 * @endpoint2: Endpoint 2 configuration parameters
1439 * @cfg: Connection configuration. The connection configuration is ignored for 1442 * @cfg: Connection configuration. The connection configuration is ignored for
1440 * connections made to DPMAC objects, where rate is set according to 1443 * connections made to DPMAC objects, where rate is retrieved from the
1441 * MAC configuration. 1444 * MAC configuration.
1442 * The committed rate is the guaranteed rate for the connection.
1443 * The maximum rate is an upper limit allowed for the connection; it is
1444 * expected to be equal or higher than the committed rate.
1445 * When committed and maximum rates are both zero, the connection is set
1446 * to "best effort" mode, having lower priority compared to connections
1447 * with committed or maximum rates.
1448 * 1445 *
1449 * Return: '0' on Success; Error code otherwise. 1446 * Return: '0' on Success; Error code otherwise.
1450 */ 1447 */
@@ -1555,7 +1552,10 @@ int dprc_disconnect(struct fsl_mc_io *mc_io,
1555* @token: Token of DPRC object 1552* @token: Token of DPRC object
1556* @endpoint1: Endpoint 1 configuration parameters 1553* @endpoint1: Endpoint 1 configuration parameters
1557* @endpoint2: Returned endpoint 2 configuration parameters 1554* @endpoint2: Returned endpoint 2 configuration parameters
1558* @state: Returned link state: 1 - link is up, 0 - link is down 1555* @state: Returned link state:
1556* 1 - link is up;
1557* 0 - link is down;
1558* -1 - no connection (endpoint2 information is irrelevant)
1559* 1559*
1560* Return: '0' on Success; -ENAVAIL if connection does not exist. 1560* Return: '0' on Success; -ENAVAIL if connection does not exist.
1561*/ 1561*/
diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c
index 86f8543c2b9a..fb08f22a7f9c 100644
--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
@@ -39,7 +39,6 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
39 struct fsl_mc_resource *resource; 39 struct fsl_mc_resource *resource;
40 struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; 40 struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
41 int error = -EINVAL; 41 int error = -EINVAL;
42 bool mutex_locked = false;
43 42
44 if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)) 43 if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
45 goto out; 44 goto out;
@@ -55,13 +54,12 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
55 goto out; 54 goto out;
56 55
57 mutex_lock(&res_pool->mutex); 56 mutex_lock(&res_pool->mutex);
58 mutex_locked = true;
59 57
60 if (WARN_ON(res_pool->max_count < 0)) 58 if (WARN_ON(res_pool->max_count < 0))
61 goto out; 59 goto out_unlock;
62 if (WARN_ON(res_pool->free_count < 0 || 60 if (WARN_ON(res_pool->free_count < 0 ||
63 res_pool->free_count > res_pool->max_count)) 61 res_pool->free_count > res_pool->max_count))
64 goto out; 62 goto out_unlock;
65 63
66 resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource), 64 resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
67 GFP_KERNEL); 65 GFP_KERNEL);
@@ -69,7 +67,7 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
69 error = -ENOMEM; 67 error = -ENOMEM;
70 dev_err(&mc_bus_dev->dev, 68 dev_err(&mc_bus_dev->dev,
71 "Failed to allocate memory for fsl_mc_resource\n"); 69 "Failed to allocate memory for fsl_mc_resource\n");
72 goto out; 70 goto out_unlock;
73 } 71 }
74 72
75 resource->type = pool_type; 73 resource->type = pool_type;
@@ -82,10 +80,9 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
82 res_pool->free_count++; 80 res_pool->free_count++;
83 res_pool->max_count++; 81 res_pool->max_count++;
84 error = 0; 82 error = 0;
83out_unlock:
84 mutex_unlock(&res_pool->mutex);
85out: 85out:
86 if (mutex_locked)
87 mutex_unlock(&res_pool->mutex);
88
89 return error; 86 return error;
90} 87}
91 88
@@ -106,7 +103,6 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
106 struct fsl_mc_resource_pool *res_pool; 103 struct fsl_mc_resource_pool *res_pool;
107 struct fsl_mc_resource *resource; 104 struct fsl_mc_resource *resource;
108 int error = -EINVAL; 105 int error = -EINVAL;
109 bool mutex_locked = false;
110 106
111 if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) 107 if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
112 goto out; 108 goto out;
@@ -122,13 +118,12 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
122 goto out; 118 goto out;
123 119
124 mutex_lock(&res_pool->mutex); 120 mutex_lock(&res_pool->mutex);
125 mutex_locked = true;
126 121
127 if (WARN_ON(res_pool->max_count <= 0)) 122 if (WARN_ON(res_pool->max_count <= 0))
128 goto out; 123 goto out_unlock;
129 if (WARN_ON(res_pool->free_count <= 0 || 124 if (WARN_ON(res_pool->free_count <= 0 ||
130 res_pool->free_count > res_pool->max_count)) 125 res_pool->free_count > res_pool->max_count))
131 goto out; 126 goto out_unlock;
132 127
133 /* 128 /*
134 * If the device is currently allocated, its resource is not 129 * If the device is currently allocated, its resource is not
@@ -139,7 +134,7 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
139 dev_err(&mc_bus_dev->dev, 134 dev_err(&mc_bus_dev->dev,
140 "Device %s cannot be removed from resource pool\n", 135 "Device %s cannot be removed from resource pool\n",
141 dev_name(&mc_dev->dev)); 136 dev_name(&mc_dev->dev));
142 goto out; 137 goto out_unlock;
143 } 138 }
144 139
145 list_del(&resource->node); 140 list_del(&resource->node);
@@ -150,10 +145,9 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
150 devm_kfree(&mc_bus_dev->dev, resource); 145 devm_kfree(&mc_bus_dev->dev, resource);
151 mc_dev->resource = NULL; 146 mc_dev->resource = NULL;
152 error = 0; 147 error = 0;
148out_unlock:
149 mutex_unlock(&res_pool->mutex);
153out: 150out:
154 if (mutex_locked)
155 mutex_unlock(&res_pool->mutex);
156
157 return error; 151 return error;
158} 152}
159 153
@@ -188,21 +182,19 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
188 struct fsl_mc_resource *resource; 182 struct fsl_mc_resource *resource;
189 struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; 183 struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
190 int error = -EINVAL; 184 int error = -EINVAL;
191 bool mutex_locked = false;
192 185
193 BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) != 186 BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
194 FSL_MC_NUM_POOL_TYPES); 187 FSL_MC_NUM_POOL_TYPES);
195 188
196 *new_resource = NULL; 189 *new_resource = NULL;
197 if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)) 190 if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
198 goto error; 191 goto out;
199 192
200 res_pool = &mc_bus->resource_pools[pool_type]; 193 res_pool = &mc_bus->resource_pools[pool_type];
201 if (WARN_ON(res_pool->mc_bus != mc_bus)) 194 if (WARN_ON(res_pool->mc_bus != mc_bus))
202 goto error; 195 goto out;
203 196
204 mutex_lock(&res_pool->mutex); 197 mutex_lock(&res_pool->mutex);
205 mutex_locked = true;
206 resource = list_first_entry_or_null(&res_pool->free_list, 198 resource = list_first_entry_or_null(&res_pool->free_list,
207 struct fsl_mc_resource, node); 199 struct fsl_mc_resource, node);
208 200
@@ -212,28 +204,26 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
212 dev_err(&mc_bus_dev->dev, 204 dev_err(&mc_bus_dev->dev,
213 "No more resources of type %s left\n", 205 "No more resources of type %s left\n",
214 fsl_mc_pool_type_strings[pool_type]); 206 fsl_mc_pool_type_strings[pool_type]);
215 goto error; 207 goto out_unlock;
216 } 208 }
217 209
218 if (WARN_ON(resource->type != pool_type)) 210 if (WARN_ON(resource->type != pool_type))
219 goto error; 211 goto out_unlock;
220 if (WARN_ON(resource->parent_pool != res_pool)) 212 if (WARN_ON(resource->parent_pool != res_pool))
221 goto error; 213 goto out_unlock;
222 if (WARN_ON(res_pool->free_count <= 0 || 214 if (WARN_ON(res_pool->free_count <= 0 ||
223 res_pool->free_count > res_pool->max_count)) 215 res_pool->free_count > res_pool->max_count))
224 goto error; 216 goto out_unlock;
225 217
226 list_del(&resource->node); 218 list_del(&resource->node);
227 INIT_LIST_HEAD(&resource->node); 219 INIT_LIST_HEAD(&resource->node);
228 220
229 res_pool->free_count--; 221 res_pool->free_count--;
222 error = 0;
223out_unlock:
230 mutex_unlock(&res_pool->mutex); 224 mutex_unlock(&res_pool->mutex);
231 *new_resource = resource; 225 *new_resource = resource;
232 return 0; 226out:
233error:
234 if (mutex_locked)
235 mutex_unlock(&res_pool->mutex);
236
237 return error; 227 return error;
238} 228}
239EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate); 229EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
@@ -241,26 +231,23 @@ EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
241void fsl_mc_resource_free(struct fsl_mc_resource *resource) 231void fsl_mc_resource_free(struct fsl_mc_resource *resource)
242{ 232{
243 struct fsl_mc_resource_pool *res_pool; 233 struct fsl_mc_resource_pool *res_pool;
244 bool mutex_locked = false;
245 234
246 res_pool = resource->parent_pool; 235 res_pool = resource->parent_pool;
247 if (WARN_ON(resource->type != res_pool->type)) 236 if (WARN_ON(resource->type != res_pool->type))
248 goto out; 237 return;
249 238
250 mutex_lock(&res_pool->mutex); 239 mutex_lock(&res_pool->mutex);
251 mutex_locked = true;
252 if (WARN_ON(res_pool->free_count < 0 || 240 if (WARN_ON(res_pool->free_count < 0 ||
253 res_pool->free_count >= res_pool->max_count)) 241 res_pool->free_count >= res_pool->max_count))
254 goto out; 242 goto out_unlock;
255 243
256 if (WARN_ON(!list_empty(&resource->node))) 244 if (WARN_ON(!list_empty(&resource->node)))
257 goto out; 245 goto out_unlock;
258 246
259 list_add_tail(&resource->node, &res_pool->free_list); 247 list_add_tail(&resource->node, &res_pool->free_list);
260 res_pool->free_count++; 248 res_pool->free_count++;
261out: 249out_unlock:
262 if (mutex_locked) 250 mutex_unlock(&res_pool->mutex);
263 mutex_unlock(&res_pool->mutex);
264} 251}
265EXPORT_SYMBOL_GPL(fsl_mc_resource_free); 252EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
266 253
@@ -306,10 +293,22 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
306 if (error < 0) 293 if (error < 0)
307 return error; 294 return error;
308 295
296 error = -EINVAL;
309 dpmcp_dev = resource->data; 297 dpmcp_dev = resource->data;
310 if (WARN_ON(!dpmcp_dev)) 298 if (WARN_ON(!dpmcp_dev))
311 goto error_cleanup_resource; 299 goto error_cleanup_resource;
312 300
301 if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
302 (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
303 dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
304 dev_err(&dpmcp_dev->dev,
305 "ERROR: Version %d.%d of DPMCP not supported.\n",
306 dpmcp_dev->obj_desc.ver_major,
307 dpmcp_dev->obj_desc.ver_minor);
308 error = -ENOTSUPP;
309 goto error_cleanup_resource;
310 }
311
313 if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0)) 312 if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0))
314 goto error_cleanup_resource; 313 goto error_cleanup_resource;
315 314
@@ -722,20 +721,14 @@ static const struct fsl_mc_device_match_id match_id_table[] = {
722 { 721 {
723 .vendor = FSL_MC_VENDOR_FREESCALE, 722 .vendor = FSL_MC_VENDOR_FREESCALE,
724 .obj_type = "dpbp", 723 .obj_type = "dpbp",
725 .ver_major = DPBP_VER_MAJOR,
726 .ver_minor = DPBP_VER_MINOR
727 }, 724 },
728 { 725 {
729 .vendor = FSL_MC_VENDOR_FREESCALE, 726 .vendor = FSL_MC_VENDOR_FREESCALE,
730 .obj_type = "dpmcp", 727 .obj_type = "dpmcp",
731 .ver_major = DPMCP_VER_MAJOR,
732 .ver_minor = DPMCP_VER_MINOR
733 }, 728 },
734 { 729 {
735 .vendor = FSL_MC_VENDOR_FREESCALE, 730 .vendor = FSL_MC_VENDOR_FREESCALE,
736 .obj_type = "dpcon", 731 .obj_type = "dpcon",
737 .ver_major = DPCON_VER_MAJOR,
738 .ver_minor = DPCON_VER_MINOR
739 }, 732 },
740 {.vendor = 0x0}, 733 {.vendor = 0x0},
741}; 734};
diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c
index b59455661f4d..405364307561 100644
--- a/drivers/staging/fsl-mc/bus/mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/mc-bus.c
@@ -40,8 +40,6 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
40 struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); 40 struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
41 struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv); 41 struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
42 bool found = false; 42 bool found = false;
43 bool major_version_mismatch = false;
44 bool minor_version_mismatch = false;
45 43
46 if (WARN_ON(!fsl_mc_bus_exists())) 44 if (WARN_ON(!fsl_mc_bus_exists()))
47 goto out; 45 goto out;
@@ -64,32 +62,12 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
64 for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) { 62 for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
65 if (id->vendor == mc_dev->obj_desc.vendor && 63 if (id->vendor == mc_dev->obj_desc.vendor &&
66 strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) { 64 strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
67 if (id->ver_major == mc_dev->obj_desc.ver_major) { 65 found = true;
68 found = true;
69 if (id->ver_minor != mc_dev->obj_desc.ver_minor)
70 minor_version_mismatch = true;
71 } else {
72 major_version_mismatch = true;
73 }
74 66
75 break; 67 break;
76 } 68 }
77 } 69 }
78 70
79 if (major_version_mismatch) {
80 dev_warn(dev,
81 "Major version mismatch: driver version %u.%u, MC object version %u.%u\n",
82 id->ver_major, id->ver_minor,
83 mc_dev->obj_desc.ver_major,
84 mc_dev->obj_desc.ver_minor);
85 } else if (minor_version_mismatch) {
86 dev_warn(dev,
87 "Minor version mismatch: driver version %u.%u, MC object version %u.%u\n",
88 id->ver_major, id->ver_minor,
89 mc_dev->obj_desc.ver_major,
90 mc_dev->obj_desc.ver_minor);
91 }
92
93out: 71out:
94 dev_dbg(dev, "%smatched\n", found ? "" : "not "); 72 dev_dbg(dev, "%smatched\n", found ? "" : "not ");
95 return found; 73 return found;
@@ -251,11 +229,10 @@ static bool fsl_mc_is_root_dprc(struct device *dev)
251 return dev == root_dprc_dev; 229 return dev == root_dprc_dev;
252} 230}
253 231
254static int get_dprc_icid(struct fsl_mc_io *mc_io, 232static int get_dprc_attr(struct fsl_mc_io *mc_io,
255 int container_id, u16 *icid) 233 int container_id, struct dprc_attributes *attr)
256{ 234{
257 u16 dprc_handle; 235 u16 dprc_handle;
258 struct dprc_attributes attr;
259 int error; 236 int error;
260 237
261 error = dprc_open(mc_io, 0, container_id, &dprc_handle); 238 error = dprc_open(mc_io, 0, container_id, &dprc_handle);
@@ -264,15 +241,14 @@ static int get_dprc_icid(struct fsl_mc_io *mc_io,
264 return error; 241 return error;
265 } 242 }
266 243
267 memset(&attr, 0, sizeof(attr)); 244 memset(attr, 0, sizeof(struct dprc_attributes));
268 error = dprc_get_attributes(mc_io, 0, dprc_handle, &attr); 245 error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
269 if (error < 0) { 246 if (error < 0) {
270 dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n", 247 dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
271 error); 248 error);
272 goto common_cleanup; 249 goto common_cleanup;
273 } 250 }
274 251
275 *icid = attr.icid;
276 error = 0; 252 error = 0;
277 253
278common_cleanup: 254common_cleanup:
@@ -280,6 +256,34 @@ common_cleanup:
280 return error; 256 return error;
281} 257}
282 258
259static int get_dprc_icid(struct fsl_mc_io *mc_io,
260 int container_id, u16 *icid)
261{
262 struct dprc_attributes attr;
263 int error;
264
265 error = get_dprc_attr(mc_io, container_id, &attr);
266 if (error == 0)
267 *icid = attr.icid;
268
269 return error;
270}
271
272static int get_dprc_version(struct fsl_mc_io *mc_io,
273 int container_id, u16 *major, u16 *minor)
274{
275 struct dprc_attributes attr;
276 int error;
277
278 error = get_dprc_attr(mc_io, container_id, &attr);
279 if (error == 0) {
280 *major = attr.version.major;
281 *minor = attr.version.minor;
282 }
283
284 return error;
285}
286
283static int translate_mc_addr(struct fsl_mc_device *mc_dev, 287static int translate_mc_addr(struct fsl_mc_device *mc_dev,
284 enum dprc_region_type mc_region_type, 288 enum dprc_region_type mc_region_type,
285 u64 mc_offset, phys_addr_t *phys_addr) 289 u64 mc_offset, phys_addr_t *phys_addr)
@@ -376,6 +380,8 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
376 regions[i].end = regions[i].start + region_desc.size - 1; 380 regions[i].end = regions[i].start + region_desc.size - 1;
377 regions[i].name = "fsl-mc object MMIO region"; 381 regions[i].name = "fsl-mc object MMIO region";
378 regions[i].flags = IORESOURCE_IO; 382 regions[i].flags = IORESOURCE_IO;
383 if (region_desc.flags & DPRC_REGION_CACHEABLE)
384 regions[i].flags |= IORESOURCE_CACHEABLE;
379 } 385 }
380 386
381 mc_dev->regions = regions; 387 mc_dev->regions = regions;
@@ -491,6 +497,10 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
491 goto error_cleanup_dev; 497 goto error_cleanup_dev;
492 } 498 }
493 499
500 /* Objects are coherent, unless 'no shareability' flag set. */
501 if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY))
502 arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
503
494 /* 504 /*
495 * The device-specific probe callback will get invoked by device_add() 505 * The device-specific probe callback will get invoked by device_add()
496 */ 506 */
@@ -722,20 +732,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
722 "Freescale Management Complex Firmware version: %u.%u.%u\n", 732 "Freescale Management Complex Firmware version: %u.%u.%u\n",
723 mc_version.major, mc_version.minor, mc_version.revision); 733 mc_version.major, mc_version.minor, mc_version.revision);
724 734
725 if (mc_version.major < MC_VER_MAJOR) {
726 dev_err(&pdev->dev,
727 "ERROR: MC firmware version not supported by driver (driver version: %u.%u)\n",
728 MC_VER_MAJOR, MC_VER_MINOR);
729 error = -ENOTSUPP;
730 goto error_cleanup_mc_io;
731 }
732
733 if (mc_version.major > MC_VER_MAJOR) {
734 dev_warn(&pdev->dev,
735 "WARNING: driver may not support newer MC firmware features (driver version: %u.%u)\n",
736 MC_VER_MAJOR, MC_VER_MINOR);
737 }
738
739 error = get_mc_addr_translation_ranges(&pdev->dev, 735 error = get_mc_addr_translation_ranges(&pdev->dev,
740 &mc->translation_ranges, 736 &mc->translation_ranges,
741 &mc->num_translation_ranges); 737 &mc->num_translation_ranges);
@@ -749,11 +745,15 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
749 goto error_cleanup_mc_io; 745 goto error_cleanup_mc_io;
750 } 746 }
751 747
748 memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
749 error = get_dprc_version(mc_io, container_id,
750 &obj_desc.ver_major, &obj_desc.ver_minor);
751 if (error < 0)
752 goto error_cleanup_mc_io;
753
752 obj_desc.vendor = FSL_MC_VENDOR_FREESCALE; 754 obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
753 strcpy(obj_desc.type, "dprc"); 755 strcpy(obj_desc.type, "dprc");
754 obj_desc.id = container_id; 756 obj_desc.id = container_id;
755 obj_desc.ver_major = DPRC_VER_MAJOR;
756 obj_desc.ver_minor = DPRC_VER_MINOR;
757 obj_desc.irq_count = 1; 757 obj_desc.irq_count = 1;
758 obj_desc.region_count = 0; 758 obj_desc.region_count = 0;
759 759
diff --git a/drivers/staging/fsl-mc/bus/mc-msi.c b/drivers/staging/fsl-mc/bus/mc-msi.c
index 3a8258ff4426..e202b2b88c63 100644
--- a/drivers/staging/fsl-mc/bus/mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/mc-msi.c
@@ -37,10 +37,8 @@ static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
37 /* 37 /*
38 * set_desc should not be set by the caller 38 * set_desc should not be set by the caller
39 */ 39 */
40 if (WARN_ON(ops->set_desc)) 40 if (ops->set_desc == NULL)
41 return; 41 ops->set_desc = fsl_mc_msi_set_desc;
42
43 ops->set_desc = fsl_mc_msi_set_desc;
44} 42}
45 43
46static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev, 44static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
@@ -65,7 +63,7 @@ static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
65 irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) | 63 irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
66 msi_desc->msg.address_lo; 64 msi_desc->msg.address_lo;
67 irq_cfg.val = msi_desc->msg.data; 65 irq_cfg.val = msi_desc->msg.data;
68 irq_cfg.user_irq_id = msi_desc->irq; 66 irq_cfg.irq_num = msi_desc->irq;
69 67
70 if (owner_mc_dev == mc_bus_dev) { 68 if (owner_mc_dev == mc_bus_dev) {
71 /* 69 /*
@@ -129,10 +127,8 @@ static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
129 /* 127 /*
130 * irq_write_msi_msg should not be set by the caller 128 * irq_write_msi_msg should not be set by the caller
131 */ 129 */
132 if (WARN_ON(chip->irq_write_msi_msg)) 130 if (chip->irq_write_msi_msg == NULL)
133 return; 131 chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
134
135 chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
136} 132}
137 133
138/** 134/**
diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h
index efa9bf33c1a5..c57b454a2912 100644
--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
+++ b/drivers/staging/fsl-mc/include/dpbp-cmd.h
@@ -34,7 +34,7 @@
34 34
35/* DPBP Version */ 35/* DPBP Version */
36#define DPBP_VER_MAJOR 2 36#define DPBP_VER_MAJOR 2
37#define DPBP_VER_MINOR 1 37#define DPBP_VER_MINOR 2
38 38
39/* Command IDs */ 39/* Command IDs */
40#define DPBP_CMDID_CLOSE 0x800 40#define DPBP_CMDID_CLOSE 0x800
@@ -57,4 +57,6 @@
57#define DPBP_CMDID_GET_IRQ_STATUS 0x016 57#define DPBP_CMDID_GET_IRQ_STATUS 0x016
58#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017 58#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
59 59
60#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
61#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
60#endif /* _FSL_DPBP_CMD_H */ 62#endif /* _FSL_DPBP_CMD_H */
diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h
index 37ed951436d5..e14e85a5d6df 100644
--- a/drivers/staging/fsl-mc/include/dpbp.h
+++ b/drivers/staging/fsl-mc/include/dpbp.h
@@ -85,12 +85,12 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
85 * struct dpbp_irq_cfg - IRQ configuration 85 * struct dpbp_irq_cfg - IRQ configuration
86 * @addr: Address that must be written to signal a message-based interrupt 86 * @addr: Address that must be written to signal a message-based interrupt
87 * @val: Value to write into irq_addr address 87 * @val: Value to write into irq_addr address
88 * @user_irq_id: A user defined number associated with this IRQ 88 * @irq_num: A user defined number associated with this IRQ
89 */ 89 */
90struct dpbp_irq_cfg { 90struct dpbp_irq_cfg {
91 u64 addr; 91 u64 addr;
92 u32 val; 92 u32 val;
93 int user_irq_id; 93 int irq_num;
94}; 94};
95 95
96int dpbp_set_irq(struct fsl_mc_io *mc_io, 96int dpbp_set_irq(struct fsl_mc_io *mc_io,
@@ -168,6 +168,53 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
168 u16 token, 168 u16 token,
169 struct dpbp_attr *attr); 169 struct dpbp_attr *attr);
170 170
171/**
172 * DPBP notifications options
173 */
174
175/**
176 * BPSCN write will attempt to allocate into a cache (coherent write)
177 */
178#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001
179
180/**
181 * struct dpbp_notification_cfg - Structure representing DPBP notifications
182 * towards software
183 * @depletion_entry: below this threshold the pool is "depleted";
184 * set it to '0' to disable it
185 * @depletion_exit: greater than or equal to this threshold the pool exit its
186 * "depleted" state
187 * @surplus_entry: above this threshold the pool is in "surplus" state;
188 * set it to '0' to disable it
189 * @surplus_exit: less than or equal to this threshold the pool exit its
190 * "surplus" state
191 * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry'
192 * is not '0' (enable); I/O virtual address (must be in DMA-able memory),
193 * must be 16B aligned.
194 * @message_ctx: The context that will be part of the BPSCN message and will
195 * be written to 'message_iova'
196 * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
197 */
198struct dpbp_notification_cfg {
199 u32 depletion_entry;
200 u32 depletion_exit;
201 u32 surplus_entry;
202 u32 surplus_exit;
203 u64 message_iova;
204 u64 message_ctx;
205 u16 options;
206};
207
208int dpbp_set_notifications(struct fsl_mc_io *mc_io,
209 u32 cmd_flags,
210 u16 token,
211 struct dpbp_notification_cfg *cfg);
212
213int dpbp_get_notifications(struct fsl_mc_io *mc_io,
214 u32 cmd_flags,
215 u16 token,
216 struct dpbp_notification_cfg *cfg);
217
171/** @} */ 218/** @} */
172 219
173#endif /* __FSL_DPBP_H */ 220#endif /* __FSL_DPBP_H */
diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h
index 94c492706315..593b2bbe7f71 100644
--- a/drivers/staging/fsl-mc/include/dprc.h
+++ b/drivers/staging/fsl-mc/include/dprc.h
@@ -94,11 +94,6 @@ int dprc_close(struct fsl_mc_io *mc_io,
94 */ 94 */
95#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008 95#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
96 96
97/* IOMMU bypass - indicates whether objects of this container are permitted
98 * to bypass the IOMMU.
99 */
100#define DPRC_CFG_OPT_IOMMU_BYPASS 0x00000010
101
102/* AIOP - Indicates that container belongs to AIOP. */ 97/* AIOP - Indicates that container belongs to AIOP. */
103#define DPRC_CFG_OPT_AIOP 0x00000020 98#define DPRC_CFG_OPT_AIOP 0x00000020
104 99
@@ -173,12 +168,12 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
173 * struct dprc_irq_cfg - IRQ configuration 168 * struct dprc_irq_cfg - IRQ configuration
174 * @paddr: Address that must be written to signal a message-based interrupt 169 * @paddr: Address that must be written to signal a message-based interrupt
175 * @val: Value to write into irq_addr address 170 * @val: Value to write into irq_addr address
176 * @user_irq_id: A user defined number associated with this IRQ 171 * @irq_num: A user defined number associated with this IRQ
177 */ 172 */
178struct dprc_irq_cfg { 173struct dprc_irq_cfg {
179 phys_addr_t paddr; 174 phys_addr_t paddr;
180 u32 val; 175 u32 val;
181 int user_irq_id; 176 int irq_num;
182}; 177};
183 178
184int dprc_set_irq(struct fsl_mc_io *mc_io, 179int dprc_set_irq(struct fsl_mc_io *mc_io,
@@ -353,6 +348,14 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
353#define DPRC_OBJ_STATE_PLUGGED 0x00000002 348#define DPRC_OBJ_STATE_PLUGGED 0x00000002
354 349
355/** 350/**
351 * Shareability flag - Object flag indicating no memory shareability.
352 * the object generates memory accesses that are non coherent with other
353 * masters;
354 * user is responsible for proper memory handling through IOMMU configuration.
355 */
356#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
357
358/**
356 * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj() 359 * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
357 * @type: Type of object: NULL terminated string 360 * @type: Type of object: NULL terminated string
358 * @id: ID of logical object resource 361 * @id: ID of logical object resource
@@ -363,6 +366,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
363 * @region_count: Number of mappable regions supported by the object 366 * @region_count: Number of mappable regions supported by the object
364 * @state: Object state: combination of DPRC_OBJ_STATE_ states 367 * @state: Object state: combination of DPRC_OBJ_STATE_ states
365 * @label: Object label 368 * @label: Object label
369 * @flags: Object's flags
366 */ 370 */
367struct dprc_obj_desc { 371struct dprc_obj_desc {
368 char type[16]; 372 char type[16];
@@ -374,6 +378,7 @@ struct dprc_obj_desc {
374 u8 region_count; 378 u8 region_count;
375 u32 state; 379 u32 state;
376 char label[16]; 380 char label[16];
381 u16 flags;
377}; 382};
378 383
379int dprc_get_obj(struct fsl_mc_io *mc_io, 384int dprc_get_obj(struct fsl_mc_io *mc_io,
diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h
index ee5f1d2bf604..cab1ae90f09e 100644
--- a/drivers/staging/fsl-mc/include/mc-private.h
+++ b/drivers/staging/fsl-mc/include/mc-private.h
@@ -94,12 +94,14 @@ struct fsl_mc_resource_pool {
94 * from the physical DPRC. 94 * from the physical DPRC.
95 * @irq_resources: Pointer to array of IRQ objects for the IRQ pool 95 * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
96 * @scan_mutex: Serializes bus scanning 96 * @scan_mutex: Serializes bus scanning
97 * @dprc_attr: DPRC attributes
97 */ 98 */
98struct fsl_mc_bus { 99struct fsl_mc_bus {
99 struct fsl_mc_device mc_dev; 100 struct fsl_mc_device mc_dev;
100 struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES]; 101 struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
101 struct fsl_mc_device_irq *irq_resources; 102 struct fsl_mc_device_irq *irq_resources;
102 struct mutex scan_mutex; /* serializes bus scanning */ 103 struct mutex scan_mutex; /* serializes bus scanning */
104 struct dprc_attributes dprc_attr;
103}; 105};
104 106
105#define to_fsl_mc_bus(_mc_dev) \ 107#define to_fsl_mc_bus(_mc_dev) \
diff --git a/drivers/staging/fwserial/dma_fifo.c b/drivers/staging/fwserial/dma_fifo.c
index 4cd3ed3ee141..8b23a553fd4a 100644
--- a/drivers/staging/fwserial/dma_fifo.c
+++ b/drivers/staging/fwserial/dma_fifo.c
@@ -35,7 +35,7 @@
35/* 35/*
36 * private helper fn to determine if check is in open interval (lo,hi) 36 * private helper fn to determine if check is in open interval (lo,hi)
37 */ 37 */
38static bool addr_check(unsigned check, unsigned lo, unsigned hi) 38static bool addr_check(unsigned int check, unsigned int lo, unsigned int hi)
39{ 39{
40 return check - (lo + 1) < (hi - 1) - lo; 40 return check - (lo + 1) < (hi - 1) - lo;
41} 41}
@@ -64,7 +64,7 @@ void dma_fifo_init(struct dma_fifo *fifo)
64 * The 'apparent' size will be rounded up to next greater aligned size. 64 * The 'apparent' size will be rounded up to next greater aligned size.
65 * Returns 0 if no error, otherwise an error code 65 * Returns 0 if no error, otherwise an error code
66 */ 66 */
67int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned align, 67int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned int align,
68 int tx_limit, int open_limit, gfp_t gfp_mask) 68 int tx_limit, int open_limit, gfp_t gfp_mask)
69{ 69{
70 int capacity; 70 int capacity;
@@ -190,7 +190,7 @@ int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n)
190 */ 190 */
191int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended) 191int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended)
192{ 192{
193 unsigned len, n, ofs, l, limit; 193 unsigned int len, n, ofs, l, limit;
194 194
195 if (!fifo->data) 195 if (!fifo->data)
196 return -ENOENT; 196 return -ENOENT;
@@ -210,7 +210,7 @@ int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended)
210 n = len; 210 n = len;
211 ofs = fifo->out % fifo->capacity; 211 ofs = fifo->out % fifo->capacity;
212 l = fifo->capacity - ofs; 212 l = fifo->capacity - ofs;
213 limit = min_t(unsigned, l, fifo->tx_limit); 213 limit = min_t(unsigned int, l, fifo->tx_limit);
214 if (n > limit) { 214 if (n > limit) {
215 n = limit; 215 n = limit;
216 fifo->out += limit; 216 fifo->out += limit;
diff --git a/drivers/staging/fwserial/dma_fifo.h b/drivers/staging/fwserial/dma_fifo.h
index 410988224f89..37a91c6a1709 100644
--- a/drivers/staging/fwserial/dma_fifo.h
+++ b/drivers/staging/fwserial/dma_fifo.h
@@ -45,9 +45,9 @@
45#define DMA_FIFO_GUARD 3 /* # of cache lines to reserve for the guard area */ 45#define DMA_FIFO_GUARD 3 /* # of cache lines to reserve for the guard area */
46 46
47struct dma_fifo { 47struct dma_fifo {
48 unsigned in; 48 unsigned int in;
49 unsigned out; /* updated when dma is pended */ 49 unsigned int out; /* updated when dma is pended */
50 unsigned done; /* updated upon dma completion */ 50 unsigned int done; /* updated upon dma completion */
51 struct { 51 struct {
52 unsigned corrupt:1; 52 unsigned corrupt:1;
53 }; 53 };
@@ -55,7 +55,7 @@ struct dma_fifo {
55 int guard; /* ofs of guard area */ 55 int guard; /* ofs of guard area */
56 int capacity; /* size + reserved */ 56 int capacity; /* size + reserved */
57 int avail; /* # of unused bytes in fifo */ 57 int avail; /* # of unused bytes in fifo */
58 unsigned align; /* must be power of 2 */ 58 unsigned int align; /* must be power of 2 */
59 int tx_limit; /* max # of bytes per dma transaction */ 59 int tx_limit; /* max # of bytes per dma transaction */
60 int open_limit; /* max # of outstanding allowed */ 60 int open_limit; /* max # of outstanding allowed */
61 int open; /* # of outstanding dma transactions */ 61 int open; /* # of outstanding dma transactions */
@@ -66,9 +66,9 @@ struct dma_fifo {
66struct dma_pending { 66struct dma_pending {
67 struct list_head link; 67 struct list_head link;
68 void *data; 68 void *data;
69 unsigned len; 69 unsigned int len;
70 unsigned next; 70 unsigned int next;
71 unsigned out; 71 unsigned int out;
72}; 72};
73 73
74static inline void dp_mark_completed(struct dma_pending *dp) 74static inline void dp_mark_completed(struct dma_pending *dp)
@@ -82,7 +82,7 @@ static inline bool dp_is_completed(struct dma_pending *dp)
82} 82}
83 83
84void dma_fifo_init(struct dma_fifo *fifo); 84void dma_fifo_init(struct dma_fifo *fifo);
85int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned align, 85int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned int align,
86 int tx_limit, int open_limit, gfp_t gfp_mask); 86 int tx_limit, int open_limit, gfp_t gfp_mask);
87void dma_fifo_free(struct dma_fifo *fifo); 87void dma_fifo_free(struct dma_fifo *fifo);
88void dma_fifo_reset(struct dma_fifo *fifo); 88void dma_fifo_reset(struct dma_fifo *fifo);
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 1f9389d8c152..c241c0ae3f20 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -132,7 +132,7 @@ static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
132 132
133#ifdef FWTTY_PROFILING 133#ifdef FWTTY_PROFILING
134 134
135static void fwtty_profile_fifo(struct fwtty_port *port, unsigned *stat) 135static void fwtty_profile_fifo(struct fwtty_port *port, unsigned int *stat)
136{ 136{
137 spin_lock_bh(&port->lock); 137 spin_lock_bh(&port->lock);
138 fwtty_profile_data(stat, dma_fifo_avail(&port->tx_fifo)); 138 fwtty_profile_data(stat, dma_fifo_avail(&port->tx_fifo));
@@ -143,7 +143,7 @@ static void fwtty_dump_profile(struct seq_file *m, struct stats *stats)
143{ 143{
144 /* for each stat, print sum of 0 to 2^k, then individually */ 144 /* for each stat, print sum of 0 to 2^k, then individually */
145 int k = 4; 145 int k = 4;
146 unsigned sum; 146 unsigned int sum;
147 int j; 147 int j;
148 char t[10]; 148 char t[10];
149 149
@@ -303,9 +303,10 @@ static void fwtty_restart_tx(struct fwtty_port *port)
303 * Note: in loopback, the port->lock is being held. Only use functions that 303 * Note: in loopback, the port->lock is being held. Only use functions that
304 * don't attempt to reclaim the port->lock. 304 * don't attempt to reclaim the port->lock.
305 */ 305 */
306static void fwtty_update_port_status(struct fwtty_port *port, unsigned status) 306static void fwtty_update_port_status(struct fwtty_port *port,
307 unsigned int status)
307{ 308{
308 unsigned delta; 309 unsigned int delta;
309 struct tty_struct *tty; 310 struct tty_struct *tty;
310 311
311 /* simulated LSR/MSR status from remote */ 312 /* simulated LSR/MSR status from remote */
@@ -396,9 +397,9 @@ static void fwtty_update_port_status(struct fwtty_port *port, unsigned status)
396 * 397 *
397 * Note: caller must be holding port lock 398 * Note: caller must be holding port lock
398 */ 399 */
399static unsigned __fwtty_port_line_status(struct fwtty_port *port) 400static unsigned int __fwtty_port_line_status(struct fwtty_port *port)
400{ 401{
401 unsigned status = 0; 402 unsigned int status = 0;
402 403
403 /* TODO: add module param to tie RNG to DTR as well */ 404 /* TODO: add module param to tie RNG to DTR as well */
404 405
@@ -424,7 +425,7 @@ static int __fwtty_write_port_status(struct fwtty_port *port)
424{ 425{
425 struct fwtty_peer *peer; 426 struct fwtty_peer *peer;
426 int err = -ENOENT; 427 int err = -ENOENT;
427 unsigned status = __fwtty_port_line_status(port); 428 unsigned int status = __fwtty_port_line_status(port);
428 429
429 rcu_read_lock(); 430 rcu_read_lock();
430 peer = rcu_dereference(port->peer); 431 peer = rcu_dereference(port->peer);
@@ -454,7 +455,7 @@ static int fwtty_write_port_status(struct fwtty_port *port)
454static void fwtty_throttle_port(struct fwtty_port *port) 455static void fwtty_throttle_port(struct fwtty_port *port)
455{ 456{
456 struct tty_struct *tty; 457 struct tty_struct *tty;
457 unsigned old; 458 unsigned int old;
458 459
459 tty = tty_port_tty_get(&port->port); 460 tty = tty_port_tty_get(&port->port);
460 if (!tty) 461 if (!tty)
@@ -540,7 +541,7 @@ static void fwtty_emit_breaks(struct work_struct *work)
540static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len) 541static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
541{ 542{
542 int c, n = len; 543 int c, n = len;
543 unsigned lsr; 544 unsigned int lsr;
544 int err = 0; 545 int err = 0;
545 546
546 fwtty_dbg(port, "%d\n", n); 547 fwtty_dbg(port, "%d\n", n);
@@ -635,7 +636,7 @@ static void fwtty_port_handler(struct fw_card *card,
635 if (addr != port->rx_handler.offset || len != 4) { 636 if (addr != port->rx_handler.offset || len != 4) {
636 rcode = RCODE_ADDRESS_ERROR; 637 rcode = RCODE_ADDRESS_ERROR;
637 } else { 638 } else {
638 fwtty_update_port_status(port, *(unsigned *)data); 639 fwtty_update_port_status(port, *(unsigned int *)data);
639 rcode = RCODE_COMPLETE; 640 rcode = RCODE_COMPLETE;
640 } 641 }
641 break; 642 break;
@@ -828,7 +829,7 @@ static void fwtty_write_xchar(struct fwtty_port *port, char ch)
828 rcu_read_unlock(); 829 rcu_read_unlock();
829} 830}
830 831
831static struct fwtty_port *fwtty_port_get(unsigned index) 832static struct fwtty_port *fwtty_port_get(unsigned int index)
832{ 833{
833 struct fwtty_port *port; 834 struct fwtty_port *port;
834 835
@@ -934,9 +935,9 @@ static int fwtty_port_carrier_raised(struct tty_port *tty_port)
934 return rc; 935 return rc;
935} 936}
936 937
937static unsigned set_termios(struct fwtty_port *port, struct tty_struct *tty) 938static unsigned int set_termios(struct fwtty_port *port, struct tty_struct *tty)
938{ 939{
939 unsigned baud, frame; 940 unsigned int baud, frame;
940 941
941 baud = tty_termios_baud_rate(&tty->termios); 942 baud = tty_termios_baud_rate(&tty->termios);
942 tty_termios_encode_baud_rate(&tty->termios, baud, baud); 943 tty_termios_encode_baud_rate(&tty->termios, baud, baud);
@@ -988,7 +989,7 @@ static int fwtty_port_activate(struct tty_port *tty_port,
988 struct tty_struct *tty) 989 struct tty_struct *tty)
989{ 990{
990 struct fwtty_port *port = to_port(tty_port, port); 991 struct fwtty_port *port = to_port(tty_port, port);
991 unsigned baud; 992 unsigned int baud;
992 int err; 993 int err;
993 994
994 set_bit(TTY_IO_ERROR, &tty->flags); 995 set_bit(TTY_IO_ERROR, &tty->flags);
@@ -1264,7 +1265,7 @@ static int set_serial_info(struct fwtty_port *port,
1264 return 0; 1265 return 0;
1265} 1266}
1266 1267
1267static int fwtty_ioctl(struct tty_struct *tty, unsigned cmd, 1268static int fwtty_ioctl(struct tty_struct *tty, unsigned int cmd,
1268 unsigned long arg) 1269 unsigned long arg)
1269{ 1270{
1270 struct fwtty_port *port = tty->driver_data; 1271 struct fwtty_port *port = tty->driver_data;
@@ -1297,7 +1298,7 @@ static int fwtty_ioctl(struct tty_struct *tty, unsigned cmd,
1297static void fwtty_set_termios(struct tty_struct *tty, struct ktermios *old) 1298static void fwtty_set_termios(struct tty_struct *tty, struct ktermios *old)
1298{ 1299{
1299 struct fwtty_port *port = tty->driver_data; 1300 struct fwtty_port *port = tty->driver_data;
1300 unsigned baud; 1301 unsigned int baud;
1301 1302
1302 spin_lock_bh(&port->lock); 1303 spin_lock_bh(&port->lock);
1303 baud = set_termios(port, tty); 1304 baud = set_termios(port, tty);
@@ -1369,7 +1370,7 @@ static int fwtty_break_ctl(struct tty_struct *tty, int state)
1369static int fwtty_tiocmget(struct tty_struct *tty) 1370static int fwtty_tiocmget(struct tty_struct *tty)
1370{ 1371{
1371 struct fwtty_port *port = tty->driver_data; 1372 struct fwtty_port *port = tty->driver_data;
1372 unsigned tiocm; 1373 unsigned int tiocm;
1373 1374
1374 spin_lock_bh(&port->lock); 1375 spin_lock_bh(&port->lock);
1375 tiocm = (port->mctrl & MCTRL_MASK) | (port->mstatus & ~MCTRL_MASK); 1376 tiocm = (port->mctrl & MCTRL_MASK) | (port->mstatus & ~MCTRL_MASK);
@@ -1380,7 +1381,8 @@ static int fwtty_tiocmget(struct tty_struct *tty)
1380 return tiocm; 1381 return tiocm;
1381} 1382}
1382 1383
1383static int fwtty_tiocmset(struct tty_struct *tty, unsigned set, unsigned clear) 1384static int fwtty_tiocmset(struct tty_struct *tty,
1385 unsigned int set, unsigned int clear)
1384{ 1386{
1385 struct fwtty_port *port = tty->driver_data; 1387 struct fwtty_port *port = tty->driver_data;
1386 1388
@@ -1699,7 +1701,7 @@ static void fwserial_virt_plug_complete(struct fwtty_peer *peer,
1699 dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload); 1701 dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload);
1700 spin_unlock_bh(&peer->port->lock); 1702 spin_unlock_bh(&peer->port->lock);
1701 1703
1702 if (port->port.console && port->fwcon_ops->notify != NULL) 1704 if (port->port.console && port->fwcon_ops->notify)
1703 (*port->fwcon_ops->notify)(FWCON_NOTIFY_ATTACH, port->con_data); 1705 (*port->fwcon_ops->notify)(FWCON_NOTIFY_ATTACH, port->con_data);
1704 1706
1705 fwtty_info(&peer->unit, "peer (guid:%016llx) connected on %s\n", 1707 fwtty_info(&peer->unit, "peer (guid:%016llx) connected on %s\n",
@@ -1806,7 +1808,7 @@ static void fwserial_release_port(struct fwtty_port *port, bool reset)
1806 RCU_INIT_POINTER(port->peer, NULL); 1808 RCU_INIT_POINTER(port->peer, NULL);
1807 spin_unlock_bh(&port->lock); 1809 spin_unlock_bh(&port->lock);
1808 1810
1809 if (port->port.console && port->fwcon_ops->notify != NULL) 1811 if (port->port.console && port->fwcon_ops->notify)
1810 (*port->fwcon_ops->notify)(FWCON_NOTIFY_DETACH, port->con_data); 1812 (*port->fwcon_ops->notify)(FWCON_NOTIFY_DETACH, port->con_data);
1811} 1813}
1812 1814
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index 6fa936501b3f..30b2481fe32b 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -22,7 +22,7 @@
22#ifdef FWTTY_PROFILING 22#ifdef FWTTY_PROFILING
23#define DISTRIBUTION_MAX_SIZE 8192 23#define DISTRIBUTION_MAX_SIZE 8192
24#define DISTRIBUTION_MAX_INDEX (ilog2(DISTRIBUTION_MAX_SIZE) + 1) 24#define DISTRIBUTION_MAX_INDEX (ilog2(DISTRIBUTION_MAX_SIZE) + 1)
25static inline void fwtty_profile_data(unsigned stat[], unsigned val) 25static inline void fwtty_profile_data(unsigned int stat[], unsigned int val)
26{ 26{
27 int n = (val) ? min(ilog2(val) + 1, DISTRIBUTION_MAX_INDEX) : 0; 27 int n = (val) ? min(ilog2(val) + 1, DISTRIBUTION_MAX_INDEX) : 0;
28 ++stat[n]; 28 ++stat[n];
@@ -78,7 +78,7 @@ struct fwtty_peer {
78 u64 guid; 78 u64 guid;
79 int generation; 79 int generation;
80 int node_id; 80 int node_id;
81 unsigned speed; 81 unsigned int speed;
82 int max_payload; 82 int max_payload;
83 u64 mgmt_addr; 83 u64 mgmt_addr;
84 84
@@ -160,17 +160,17 @@ struct fwserial_mgmt_pkt {
160#define VIRT_CABLE_PLUG_TIMEOUT (60 * HZ) 160#define VIRT_CABLE_PLUG_TIMEOUT (60 * HZ)
161 161
162struct stats { 162struct stats {
163 unsigned xchars; 163 unsigned int xchars;
164 unsigned dropped; 164 unsigned int dropped;
165 unsigned tx_stall; 165 unsigned int tx_stall;
166 unsigned fifo_errs; 166 unsigned int fifo_errs;
167 unsigned sent; 167 unsigned int sent;
168 unsigned lost; 168 unsigned int lost;
169 unsigned throttled; 169 unsigned int throttled;
170 unsigned reads[DISTRIBUTION_MAX_INDEX + 1]; 170 unsigned int reads[DISTRIBUTION_MAX_INDEX + 1];
171 unsigned writes[DISTRIBUTION_MAX_INDEX + 1]; 171 unsigned int writes[DISTRIBUTION_MAX_INDEX + 1];
172 unsigned txns[DISTRIBUTION_MAX_INDEX + 1]; 172 unsigned int txns[DISTRIBUTION_MAX_INDEX + 1];
173 unsigned unthrottle[DISTRIBUTION_MAX_INDEX + 1]; 173 unsigned int unthrottle[DISTRIBUTION_MAX_INDEX + 1];
174}; 174};
175 175
176struct fwconsole_ops { 176struct fwconsole_ops {
@@ -237,7 +237,7 @@ struct fwconsole_ops {
237struct fwtty_port { 237struct fwtty_port {
238 struct tty_port port; 238 struct tty_port port;
239 struct device *device; 239 struct device *device;
240 unsigned index; 240 unsigned int index;
241 struct fw_serial *serial; 241 struct fw_serial *serial;
242 struct fw_address_handler rx_handler; 242 struct fw_address_handler rx_handler;
243 243
@@ -246,21 +246,21 @@ struct fwtty_port {
246 246
247 wait_queue_head_t wait_tx; 247 wait_queue_head_t wait_tx;
248 struct delayed_work emit_breaks; 248 struct delayed_work emit_breaks;
249 unsigned cps; 249 unsigned int cps;
250 unsigned long break_last; 250 unsigned long break_last;
251 251
252 struct work_struct hangup; 252 struct work_struct hangup;
253 253
254 unsigned mstatus; 254 unsigned int mstatus;
255 255
256 spinlock_t lock; 256 spinlock_t lock;
257 unsigned mctrl; 257 unsigned int mctrl;
258 struct delayed_work drain; 258 struct delayed_work drain;
259 struct dma_fifo tx_fifo; 259 struct dma_fifo tx_fifo;
260 int max_payload; 260 int max_payload;
261 unsigned status_mask; 261 unsigned int status_mask;
262 unsigned ignore_mask; 262 unsigned int ignore_mask;
263 unsigned break_ctl:1, 263 unsigned int break_ctl:1,
264 write_only:1, 264 write_only:1,
265 overrun:1, 265 overrun:1,
266 loopback:1; 266 loopback:1;
@@ -349,7 +349,7 @@ extern struct tty_driver *fwtty_driver;
349 * being used for isochronous traffic) 349 * being used for isochronous traffic)
350 * 2) isochronous arbitration always wins. 350 * 2) isochronous arbitration always wins.
351 */ 351 */
352static inline int link_speed_to_max_payload(unsigned speed) 352static inline int link_speed_to_max_payload(unsigned int speed)
353{ 353{
354 /* Max async payload is 4096 - see IEEE 1394-2008 tables 6-4, 16-18 */ 354 /* Max async payload is 4096 - see IEEE 1394-2008 tables 6-4, 16-18 */
355 return min(512 << speed, 4096); 355 return min(512 << speed, 4096);
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 6bedd668324c..400969170d1c 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -278,8 +278,9 @@ static void gdm_mux_rcv_complete(struct urb *urb)
278 } 278 }
279} 279}
280 280
281static int gdm_mux_recv(void *priv_dev, int (*cb)(void *data, int len, 281static int gdm_mux_recv(void *priv_dev,
282 int tty_index, struct tty_dev *tty_dev, int complete)) 282 int (*cb)(void *data, int len, int tty_index,
283 struct tty_dev *tty_dev, int complete))
283{ 284{
284 struct mux_dev *mux_dev = priv_dev; 285 struct mux_dev *mux_dev = priv_dev;
285 struct usb_device *usbdev = mux_dev->usbdev; 286 struct usb_device *usbdev = mux_dev->usbdev;
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index 9db9b903f1db..d650d772095b 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -708,7 +708,7 @@ static void do_tx(struct work_struct *work)
708 708
709#define SDU_PARAM_LEN 12 709#define SDU_PARAM_LEN 12
710static int gdm_usb_sdu_send(void *priv_dev, void *data, int len, 710static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
711 unsigned int dftEpsId, unsigned int epsId, 711 unsigned int dft_eps_ID, unsigned int eps_ID,
712 void (*cb)(void *data), void *cb_data, 712 void (*cb)(void *data), void *cb_data,
713 int dev_idx, int nic_type) 713 int dev_idx, int nic_type)
714{ 714{
@@ -746,8 +746,8 @@ static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
746 } 746 }
747 747
748 sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len); 748 sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
749 sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId); 749 sdu->dft_eps_ID = gdm_cpu_to_dev32(&udev->gdm_ed, dft_eps_ID);
750 sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId); 750 sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, eps_ID);
751 sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type); 751 sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);
752 752
753 t_sdu->len = send_len + HCI_HEADER_SIZE; 753 t_sdu->len = send_len + HCI_HEADER_SIZE;
diff --git a/drivers/staging/gdm724x/hci_packet.h b/drivers/staging/gdm724x/hci_packet.h
index 7fba8a687faf..dbc4446cf78d 100644
--- a/drivers/staging/gdm724x/hci_packet.h
+++ b/drivers/staging/gdm724x/hci_packet.h
@@ -58,7 +58,7 @@ struct sdu_header {
58struct sdu { 58struct sdu {
59 u16 cmd_evt; 59 u16 cmd_evt;
60 u16 len; 60 u16 len;
61 u32 dftEpsId; 61 u32 dft_eps_ID;
62 u32 bearer_ID; 62 u32 bearer_ID;
63 u32 nic_type; 63 u32 nic_type;
64 u8 data[0]; 64 u8 data[0];
diff --git a/drivers/staging/gdm724x/netlink_k.c b/drivers/staging/gdm724x/netlink_k.c
index 9d8347769e88..a0232e8aec10 100644
--- a/drivers/staging/gdm724x/netlink_k.c
+++ b/drivers/staging/gdm724x/netlink_k.c
@@ -88,7 +88,8 @@ static void netlink_rcv(struct sk_buff *skb)
88} 88}
89 89
90struct sock *netlink_init(int unit, 90struct sock *netlink_init(int unit,
91 void (*cb)(struct net_device *dev, u16 type, void *msg, int len)) 91 void (*cb)(struct net_device *dev, u16 type,
92 void *msg, int len))
92{ 93{
93 struct sock *sock; 94 struct sock *sock;
94 struct netlink_kernel_cfg cfg = { 95 struct netlink_kernel_cfg cfg = {
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index 7b7c9786c162..a221f261c3d3 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -93,7 +93,6 @@ static int readlength_bitstream(char *bitdata, int *lendata, int *offset)
93 return 0; 93 return 0;
94} 94}
95 95
96
97/* 96/*
98 * read first 13 bytes to check bitstream magic number 97 * read first 13 bytes to check bitstream magic number
99 */ 98 */
@@ -201,7 +200,7 @@ static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes)
201#endif /* DEBUG_FPGA */ 200#endif /* DEBUG_FPGA */
202 if (!xl_supported_prog_bus_width(bus_bytes)) { 201 if (!xl_supported_prog_bus_width(bus_bytes)) {
203 pr_err("unsupported program bus width %d\n", 202 pr_err("unsupported program bus width %d\n",
204 bus_bytes); 203 bus_bytes);
205 return -1; 204 return -1;
206 } 205 }
207 206
@@ -222,7 +221,7 @@ static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes)
222 pr_info("device init done\n"); 221 pr_info("device init done\n");
223 222
224 for (i = 0; i < size; i += bus_bytes) 223 for (i = 0; i < size; i += bus_bytes)
225 xl_shift_bytes_out(bus_bytes, bitdata+i); 224 xl_shift_bytes_out(bus_bytes, bitdata + i);
226 225
227 pr_info("program done\n"); 226 pr_info("program done\n");
228 227
@@ -277,7 +276,7 @@ static int gs_set_download_method(struct fpgaimage *fimage)
277static int init_driver(void) 276static int init_driver(void)
278{ 277{
279 firmware_pdev = platform_device_register_simple("fpgaboot", -1, 278 firmware_pdev = platform_device_register_simple("fpgaboot", -1,
280 NULL, 0); 279 NULL, 0);
281 return PTR_ERR_OR_ZERO(firmware_pdev); 280 return PTR_ERR_OR_ZERO(firmware_pdev);
282} 281}
283 282
@@ -331,7 +330,6 @@ err_out1:
331 kfree(fimage); 330 kfree(fimage);
332 331
333 return -1; 332 return -1;
334
335} 333}
336 334
337static int __init gs_fpgaboot_init(void) 335static int __init gs_fpgaboot_init(void)
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
index f41f4cc798cc..8cc32555dbf3 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
@@ -51,6 +51,6 @@ struct fpgaimage {
51 char part[MAX_STR]; 51 char part[MAX_STR];
52 char date[MAX_STR]; 52 char date[MAX_STR];
53 char time[MAX_STR]; 53 char time[MAX_STR];
54 int32_t lendata; 54 int lendata;
55 char *fpgadata; 55 char *fpgadata;
56}; 56};
diff --git a/drivers/staging/gs_fpgaboot/io.c b/drivers/staging/gs_fpgaboot/io.c
index 819db53da64d..c9391198fbfb 100644
--- a/drivers/staging/gs_fpgaboot/io.c
+++ b/drivers/staging/gs_fpgaboot/io.c
@@ -35,7 +35,6 @@ static inline void byte0_out(unsigned char data);
35static inline void byte1_out(unsigned char data); 35static inline void byte1_out(unsigned char data);
36static inline void xl_cclk_b(int32_t i); 36static inline void xl_cclk_b(int32_t i);
37 37
38
39/* Assert and Deassert CCLK */ 38/* Assert and Deassert CCLK */
40void xl_shift_cclk(int count) 39void xl_shift_cclk(int count)
41{ 40{
diff --git a/drivers/staging/i4l/act2000/act2000_isa.c b/drivers/staging/i4l/act2000/act2000_isa.c
index b5fad29a9ba6..f0eb8441deed 100644
--- a/drivers/staging/i4l/act2000/act2000_isa.c
+++ b/drivers/staging/i4l/act2000/act2000_isa.c
@@ -31,7 +31,8 @@ act2000_isa_reset(unsigned short portbase)
31 int serial = 0; 31 int serial = 0;
32 32
33 found = 0; 33 found = 0;
34 if ((reg = inb(portbase + ISA_COR)) != 0xff) { 34 reg = inb(portbase + ISA_COR);
35 if (reg != 0xff) {
35 outb(reg | ISA_COR_RESET, portbase + ISA_COR); 36 outb(reg | ISA_COR_RESET, portbase + ISA_COR);
36 mdelay(10); 37 mdelay(10);
37 outb(reg, portbase + ISA_COR); 38 outb(reg, portbase + ISA_COR);
@@ -232,7 +233,7 @@ act2000_isa_receive(act2000_card *card)
232{ 233{
233 u_char c; 234 u_char c;
234 235
235 if (test_and_set_bit(ACT2000_LOCK_RX, (void *) &card->ilock) != 0) 236 if (test_and_set_bit(ACT2000_LOCK_RX, (void *)&card->ilock) != 0)
236 return; 237 return;
237 while (!act2000_isa_readb(card, &c)) { 238 while (!act2000_isa_readb(card, &c)) {
238 if (card->idat.isa.rcvidx < 8) { 239 if (card->idat.isa.rcvidx < 8) {
@@ -247,7 +248,7 @@ act2000_isa_receive(act2000_card *card)
247 card->idat.isa.rcvignore = 1; 248 card->idat.isa.rcvignore = 1;
248 printk(KERN_WARNING 249 printk(KERN_WARNING
249 "act2000_isa_receive: no memory\n"); 250 "act2000_isa_receive: no memory\n");
250 test_and_clear_bit(ACT2000_LOCK_RX, (void *) &card->ilock); 251 test_and_clear_bit(ACT2000_LOCK_RX, (void *)&card->ilock);
251 return; 252 return;
252 } 253 }
253 memcpy(skb_put(card->idat.isa.rcvskb, 8), card->idat.isa.rcvhdr, 8); 254 memcpy(skb_put(card->idat.isa.rcvskb, 8), card->idat.isa.rcvhdr, 8);
@@ -287,7 +288,7 @@ act2000_isa_receive(act2000_card *card)
287 (card->idat.isa.rcvidx < card->idat.isa.rcvlen))) 288 (card->idat.isa.rcvidx < card->idat.isa.rcvlen)))
288 act2000_schedule_poll(card); 289 act2000_schedule_poll(card);
289 } 290 }
290 test_and_clear_bit(ACT2000_LOCK_RX, (void *) &card->ilock); 291 test_and_clear_bit(ACT2000_LOCK_RX, (void *)&card->ilock);
291} 292}
292 293
293void 294void
@@ -298,12 +299,13 @@ act2000_isa_send(act2000_card *card)
298 actcapi_msg *msg; 299 actcapi_msg *msg;
299 int l; 300 int l;
300 301
301 if (test_and_set_bit(ACT2000_LOCK_TX, (void *) &card->ilock) != 0) 302 if (test_and_set_bit(ACT2000_LOCK_TX, (void *)&card->ilock) != 0)
302 return; 303 return;
303 while (1) { 304 while (1) {
304 spin_lock_irqsave(&card->lock, flags); 305 spin_lock_irqsave(&card->lock, flags);
305 if (!(card->sbuf)) { 306 if (!(card->sbuf)) {
306 if ((card->sbuf = skb_dequeue(&card->sndq))) { 307 card->sbuf = skb_dequeue(&card->sndq);
308 if (card->sbuf) {
307 card->ack_msg = card->sbuf->data; 309 card->ack_msg = card->sbuf->data;
308 msg = (actcapi_msg *)card->sbuf->data; 310 msg = (actcapi_msg *)card->sbuf->data;
309 if ((msg->hdr.cmd.cmd == 0x86) && 311 if ((msg->hdr.cmd.cmd == 0x86) &&
@@ -317,7 +319,7 @@ act2000_isa_send(act2000_card *card)
317 spin_unlock_irqrestore(&card->lock, flags); 319 spin_unlock_irqrestore(&card->lock, flags);
318 if (!(card->sbuf)) { 320 if (!(card->sbuf)) {
319 /* No more data to send */ 321 /* No more data to send */
320 test_and_clear_bit(ACT2000_LOCK_TX, (void *) &card->ilock); 322 test_and_clear_bit(ACT2000_LOCK_TX, (void *)&card->ilock);
321 return; 323 return;
322 } 324 }
323 skb = card->sbuf; 325 skb = card->sbuf;
@@ -325,7 +327,7 @@ act2000_isa_send(act2000_card *card)
325 while (skb->len) { 327 while (skb->len) {
326 if (act2000_isa_writeb(card, *(skb->data))) { 328 if (act2000_isa_writeb(card, *(skb->data))) {
327 /* Fifo is full, but more data to send */ 329 /* Fifo is full, but more data to send */
328 test_and_clear_bit(ACT2000_LOCK_TX, (void *) &card->ilock); 330 test_and_clear_bit(ACT2000_LOCK_TX, (void *)&card->ilock);
329 /* Schedule myself */ 331 /* Schedule myself */
330 act2000_schedule_tx(card); 332 act2000_schedule_tx(card);
331 return; 333 return;
@@ -356,7 +358,6 @@ act2000_isa_send(act2000_card *card)
356static int 358static int
357act2000_isa_getid(act2000_card *card) 359act2000_isa_getid(act2000_card *card)
358{ 360{
359
360 act2000_fwid fid; 361 act2000_fwid fid;
361 u_char *p = (u_char *)&fid; 362 u_char *p = (u_char *)&fid;
362 int count = 0; 363 int count = 0;
@@ -378,7 +379,8 @@ act2000_isa_getid(act2000_card *card)
378 printk(KERN_WARNING "act2000: Wrong Firmware-ID!\n"); 379 printk(KERN_WARNING "act2000: Wrong Firmware-ID!\n");
379 return -EPROTO; 380 return -EPROTO;
380 } 381 }
381 if ((p = strchr(fid.revision, '\n'))) 382 p = strchr(fid.revision, '\n');
383 if (p)
382 *p = '\0'; 384 *p = '\0';
383 printk(KERN_INFO "act2000: Firmware-ID: %s\n", fid.revision); 385 printk(KERN_INFO "act2000: Firmware-ID: %s\n", fid.revision);
384 if (card->flags & ACT2000_FLAGS_IVALID) { 386 if (card->flags & ACT2000_FLAGS_IVALID) {
@@ -439,5 +441,5 @@ act2000_isa_download(act2000_card *card, act2000_ddef __user *cb)
439 } 441 }
440 kfree(buf); 442 kfree(buf);
441 msleep_interruptible(500); 443 msleep_interruptible(500);
442 return (act2000_isa_getid(card)); 444 return act2000_isa_getid(card);
443} 445}
diff --git a/drivers/staging/i4l/pcbit/capi.h b/drivers/staging/i4l/pcbit/capi.h
index 635f63476944..6f6f4dd0714e 100644
--- a/drivers/staging/i4l/pcbit/capi.h
+++ b/drivers/staging/i4l/pcbit/capi.h
@@ -17,7 +17,7 @@
17#define REQ_DISPLAY 0x04 17#define REQ_DISPLAY 0x04
18#define REQ_USER_TO_USER 0x08 18#define REQ_USER_TO_USER 0x08
19 19
20#define AppInfoMask REQ_CAUSE | REQ_DISPLAY | REQ_USER_TO_USER 20#define AppInfoMask (REQ_CAUSE | REQ_DISPLAY | REQ_USER_TO_USER)
21 21
22/* Connection Setup */ 22/* Connection Setup */
23extern int capi_conn_req(const char *calledPN, struct sk_buff **buf, 23extern int capi_conn_req(const char *calledPN, struct sk_buff **buf,
diff --git a/drivers/staging/i4l/pcbit/drv.c b/drivers/staging/i4l/pcbit/drv.c
index 4172e22ae7ed..c5270e229efb 100644
--- a/drivers/staging/i4l/pcbit/drv.c
+++ b/drivers/staging/i4l/pcbit/drv.c
@@ -284,7 +284,7 @@ static int pcbit_command(isdn_ctrl *ctl)
284 default: 284 default:
285 printk(KERN_DEBUG "pcbit_command: unknown command\n"); 285 printk(KERN_DEBUG "pcbit_command: unknown command\n");
286 break; 286 break;
287 }; 287 }
288 288
289 return 0; 289 return 0;
290} 290}
@@ -699,8 +699,8 @@ void pcbit_l3_receive(struct pcbit_dev *dev, ulong msg,
699 */ 699 */
700 700
701static char statbuf[STATBUF_LEN]; 701static char statbuf[STATBUF_LEN];
702static int stat_st = 0; 702static int stat_st;
703static int stat_end = 0; 703static int stat_end;
704 704
705static int pcbit_stat(u_char __user *buf, int len, int driver, int channel) 705static int pcbit_stat(u_char __user *buf, int len, int driver, int channel)
706{ 706{
@@ -968,7 +968,7 @@ static int pcbit_ioctl(isdn_ctrl *ctl)
968 default: 968 default:
969 printk("error: unknown ioctl\n"); 969 printk("error: unknown ioctl\n");
970 break; 970 break;
971 }; 971 }
972 return 0; 972 return 0;
973} 973}
974 974
diff --git a/drivers/staging/i4l/pcbit/edss1.c b/drivers/staging/i4l/pcbit/edss1.c
index b2262ba6f0c9..e72c16420712 100644
--- a/drivers/staging/i4l/pcbit/edss1.c
+++ b/drivers/staging/i4l/pcbit/edss1.c
@@ -254,7 +254,7 @@ static void pcbit_fsm_timer(unsigned long data)
254 254
255 dev = chan2dev(chan); 255 dev = chan2dev(chan);
256 256
257 if (dev == NULL) { 257 if (!dev) {
258 printk(KERN_WARNING "pcbit: timer for unknown device\n"); 258 printk(KERN_WARNING "pcbit: timer for unknown device\n");
259 return; 259 return;
260 } 260 }
diff --git a/drivers/staging/i4l/pcbit/layer2.h b/drivers/staging/i4l/pcbit/layer2.h
index be1327bc162a..6b9063e388cd 100644
--- a/drivers/staging/i4l/pcbit/layer2.h
+++ b/drivers/staging/i4l/pcbit/layer2.h
@@ -109,7 +109,7 @@
109#define SCHED_READ 0x01 109#define SCHED_READ 0x01
110#define SCHED_WRITE 0x02 110#define SCHED_WRITE 0x02
111 111
112#define SET_RUN_TIMEOUT 2 * HZ /* 2 seconds */ 112#define SET_RUN_TIMEOUT (2 * HZ) /* 2 seconds */
113 113
114struct frame_buf { 114struct frame_buf {
115 ulong msg; 115 ulong msg;
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index fa67da9408b6..f066aa30f0ac 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -27,18 +27,6 @@ config ADIS16203
27 To compile this driver as a module, say M here: the module will be 27 To compile this driver as a module, say M here: the module will be
28 called adis16203. 28 called adis16203.
29 29
30config ADIS16204
31 tristate "Analog Devices ADIS16204 Programmable High-g Digital Impact Sensor and Recorder"
32 depends on SPI
33 select IIO_ADIS_LIB
34 select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
35 help
36 Say Y here to build support for Analog Devices adis16204 Programmable
37 High-g Digital Impact Sensor and Recorder.
38
39 To compile this driver as a module, say M here: the module will be
40 called adis16204.
41
42config ADIS16209 30config ADIS16209
43 tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer" 31 tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
44 depends on SPI 32 depends on SPI
@@ -51,17 +39,6 @@ config ADIS16209
51 To compile this driver as a module, say M here: the module will be 39 To compile this driver as a module, say M here: the module will be
52 called adis16209. 40 called adis16209.
53 41
54config ADIS16220
55 tristate "Analog Devices ADIS16220 Programmable Digital Vibration Sensor"
56 depends on SPI
57 select IIO_ADIS_LIB
58 help
59 Say Y here to build support for Analog Devices adis16220 programmable
60 digital vibration sensor.
61
62 To compile this driver as a module, say M here: the module will be
63 called adis16220.
64
65config ADIS16240 42config ADIS16240
66 tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder" 43 tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
67 depends on SPI 44 depends on SPI
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 1ed137f1a506..415329c96f0c 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -8,15 +8,9 @@ obj-$(CONFIG_ADIS16201) += adis16201.o
8adis16203-y := adis16203_core.o 8adis16203-y := adis16203_core.o
9obj-$(CONFIG_ADIS16203) += adis16203.o 9obj-$(CONFIG_ADIS16203) += adis16203.o
10 10
11adis16204-y := adis16204_core.o
12obj-$(CONFIG_ADIS16204) += adis16204.o
13
14adis16209-y := adis16209_core.o 11adis16209-y := adis16209_core.o
15obj-$(CONFIG_ADIS16209) += adis16209.o 12obj-$(CONFIG_ADIS16209) += adis16209.o
16 13
17adis16220-y := adis16220_core.o
18obj-$(CONFIG_ADIS16220) += adis16220.o
19
20adis16240-y := adis16240_core.o 14adis16240-y := adis16240_core.o
21obj-$(CONFIG_ADIS16240) += adis16240.o 15obj-$(CONFIG_ADIS16240) += adis16240.o
22 16
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
index e6b8c9af6e22..64844adcaacd 100644
--- a/drivers/staging/iio/accel/adis16201.h
+++ b/drivers/staging/iio/accel/adis16201.h
@@ -3,51 +3,129 @@
3 3
4#define ADIS16201_STARTUP_DELAY 220 /* ms */ 4#define ADIS16201_STARTUP_DELAY 220 /* ms */
5 5
6#define ADIS16201_FLASH_CNT 0x00 /* Flash memory write count */ 6/* Flash memory write count */
7#define ADIS16201_SUPPLY_OUT 0x02 /* Output, power supply */ 7#define ADIS16201_FLASH_CNT 0x00
8#define ADIS16201_XACCL_OUT 0x04 /* Output, x-axis accelerometer */ 8
9#define ADIS16201_YACCL_OUT 0x06 /* Output, y-axis accelerometer */ 9/* Output, power supply */
10#define ADIS16201_AUX_ADC 0x08 /* Output, auxiliary ADC input */ 10#define ADIS16201_SUPPLY_OUT 0x02
11#define ADIS16201_TEMP_OUT 0x0A /* Output, temperature */ 11
12#define ADIS16201_XINCL_OUT 0x0C /* Output, x-axis inclination */ 12/* Output, x-axis accelerometer */
13#define ADIS16201_YINCL_OUT 0x0E /* Output, y-axis inclination */ 13#define ADIS16201_XACCL_OUT 0x04
14#define ADIS16201_XACCL_OFFS 0x10 /* Calibration, x-axis acceleration offset */ 14
15#define ADIS16201_YACCL_OFFS 0x12 /* Calibration, y-axis acceleration offset */ 15/* Output, y-axis accelerometer */
16#define ADIS16201_XACCL_SCALE 0x14 /* x-axis acceleration scale factor */ 16#define ADIS16201_YACCL_OUT 0x06
17#define ADIS16201_YACCL_SCALE 0x16 /* y-axis acceleration scale factor */ 17
18#define ADIS16201_XINCL_OFFS 0x18 /* Calibration, x-axis inclination offset */ 18/* Output, auxiliary ADC input */
19#define ADIS16201_YINCL_OFFS 0x1A /* Calibration, y-axis inclination offset */ 19#define ADIS16201_AUX_ADC 0x08
20#define ADIS16201_XINCL_SCALE 0x1C /* x-axis inclination scale factor */ 20
21#define ADIS16201_YINCL_SCALE 0x1E /* y-axis inclination scale factor */ 21/* Output, temperature */
22#define ADIS16201_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */ 22#define ADIS16201_TEMP_OUT 0x0A
23#define ADIS16201_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */ 23
24#define ADIS16201_ALM_SMPL1 0x24 /* Alarm 1, sample period */ 24/* Output, x-axis inclination */
25#define ADIS16201_ALM_SMPL2 0x26 /* Alarm 2, sample period */ 25#define ADIS16201_XINCL_OUT 0x0C
26#define ADIS16201_ALM_CTRL 0x28 /* Alarm control */ 26
27#define ADIS16201_AUX_DAC 0x30 /* Auxiliary DAC data */ 27/* Output, y-axis inclination */
28#define ADIS16201_GPIO_CTRL 0x32 /* General-purpose digital input/output control */ 28#define ADIS16201_YINCL_OUT 0x0E
29#define ADIS16201_MSC_CTRL 0x34 /* Miscellaneous control */ 29
30#define ADIS16201_SMPL_PRD 0x36 /* Internal sample period (rate) control */ 30/* Calibration, x-axis acceleration offset */
31#define ADIS16201_AVG_CNT 0x38 /* Operation, filter configuration */ 31#define ADIS16201_XACCL_OFFS 0x10
32#define ADIS16201_SLP_CNT 0x3A /* Operation, sleep mode control */ 32
33#define ADIS16201_DIAG_STAT 0x3C /* Diagnostics, system status register */ 33/* Calibration, y-axis acceleration offset */
34#define ADIS16201_GLOB_CMD 0x3E /* Operation, system command register */ 34#define ADIS16201_YACCL_OFFS 0x12
35
36/* x-axis acceleration scale factor */
37#define ADIS16201_XACCL_SCALE 0x14
38
39/* y-axis acceleration scale factor */
40#define ADIS16201_YACCL_SCALE 0x16
41
42/* Calibration, x-axis inclination offset */
43#define ADIS16201_XINCL_OFFS 0x18
44
45/* Calibration, y-axis inclination offset */
46#define ADIS16201_YINCL_OFFS 0x1A
47
48/* x-axis inclination scale factor */
49#define ADIS16201_XINCL_SCALE 0x1C
50
51/* y-axis inclination scale factor */
52#define ADIS16201_YINCL_SCALE 0x1E
53
54/* Alarm 1 amplitude threshold */
55#define ADIS16201_ALM_MAG1 0x20
56
57/* Alarm 2 amplitude threshold */
58#define ADIS16201_ALM_MAG2 0x22
59
60/* Alarm 1, sample period */
61#define ADIS16201_ALM_SMPL1 0x24
62
63/* Alarm 2, sample period */
64#define ADIS16201_ALM_SMPL2 0x26
65
66/* Alarm control */
67#define ADIS16201_ALM_CTRL 0x28
68
69/* Auxiliary DAC data */
70#define ADIS16201_AUX_DAC 0x30
71
72/* General-purpose digital input/output control */
73#define ADIS16201_GPIO_CTRL 0x32
74
75/* Miscellaneous control */
76#define ADIS16201_MSC_CTRL 0x34
77
78/* Internal sample period (rate) control */
79#define ADIS16201_SMPL_PRD 0x36
80
81/* Operation, filter configuration */
82#define ADIS16201_AVG_CNT 0x38
83
84/* Operation, sleep mode control */
85#define ADIS16201_SLP_CNT 0x3A
86
87/* Diagnostics, system status register */
88#define ADIS16201_DIAG_STAT 0x3C
89
90/* Operation, system command register */
91#define ADIS16201_GLOB_CMD 0x3E
35 92
36/* MSC_CTRL */ 93/* MSC_CTRL */
37#define ADIS16201_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */ 94
38#define ADIS16201_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */ 95/* Self-test enable */
39#define ADIS16201_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */ 96#define ADIS16201_MSC_CTRL_SELF_TEST_EN BIT(8)
40#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 BIT(0) /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */ 97
98/* Data-ready enable: 1 = enabled, 0 = disabled */
99#define ADIS16201_MSC_CTRL_DATA_RDY_EN BIT(2)
100
101/* Data-ready polarity: 1 = active high, 0 = active low */
102#define ADIS16201_MSC_CTRL_ACTIVE_HIGH BIT(1)
103
104/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
105#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
41 106
42/* DIAG_STAT */ 107/* DIAG_STAT */
43#define ADIS16201_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */ 108
44#define ADIS16201_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */ 109/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
45#define ADIS16201_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */ 110#define ADIS16201_DIAG_STAT_ALARM2 BIT(9)
46#define ADIS16201_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */ 111
47#define ADIS16201_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */ 112/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
48#define ADIS16201_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 3.15 V */ 113#define ADIS16201_DIAG_STAT_ALARM1 BIT(8)
114
115/* SPI communications failure */
116#define ADIS16201_DIAG_STAT_SPI_FAIL_BIT 3
117
118/* Flash update failure */
119#define ADIS16201_DIAG_STAT_FLASH_UPT_BIT 2
120
121/* Power supply above 3.625 V */
122#define ADIS16201_DIAG_STAT_POWER_HIGH_BIT 1
123
124/* Power supply below 3.15 V */
125#define ADIS16201_DIAG_STAT_POWER_LOW_BIT 0
49 126
50/* GLOB_CMD */ 127/* GLOB_CMD */
128
51#define ADIS16201_GLOB_CMD_SW_RESET BIT(7) 129#define ADIS16201_GLOB_CMD_SW_RESET BIT(7)
52#define ADIS16201_GLOB_CMD_FACTORY_CAL BIT(1) 130#define ADIS16201_GLOB_CMD_FACTORY_CAL BIT(1)
53 131
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index 06c0b75ed26a..6f3f8ff2a066 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -167,6 +167,7 @@ static const struct adis_data adis16201_data = {
167 .diag_stat_reg = ADIS16201_DIAG_STAT, 167 .diag_stat_reg = ADIS16201_DIAG_STAT,
168 168
169 .self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN, 169 .self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN,
170 .self_test_no_autoclear = true,
170 .startup_delay = ADIS16201_STARTUP_DELAY, 171 .startup_delay = ADIS16201_STARTUP_DELAY,
171 172
172 .status_error_msgs = adis16201_status_error_msgs, 173 .status_error_msgs = adis16201_status_error_msgs,
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
index 6426e38bf006..b483e4e6475b 100644
--- a/drivers/staging/iio/accel/adis16203.h
+++ b/drivers/staging/iio/accel/adis16203.h
@@ -3,45 +3,111 @@
3 3
4#define ADIS16203_STARTUP_DELAY 220 /* ms */ 4#define ADIS16203_STARTUP_DELAY 220 /* ms */
5 5
6#define ADIS16203_FLASH_CNT 0x00 /* Flash memory write count */ 6/* Flash memory write count */
7#define ADIS16203_SUPPLY_OUT 0x02 /* Output, power supply */ 7#define ADIS16203_FLASH_CNT 0x00
8#define ADIS16203_AUX_ADC 0x08 /* Output, auxiliary ADC input */ 8
9#define ADIS16203_TEMP_OUT 0x0A /* Output, temperature */ 9/* Output, power supply */
10#define ADIS16203_XINCL_OUT 0x0C /* Output, x-axis inclination */ 10#define ADIS16203_SUPPLY_OUT 0x02
11#define ADIS16203_YINCL_OUT 0x0E /* Output, y-axis inclination */ 11
12#define ADIS16203_INCL_NULL 0x18 /* Incline null calibration */ 12/* Output, auxiliary ADC input */
13#define ADIS16203_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */ 13#define ADIS16203_AUX_ADC 0x08
14#define ADIS16203_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */ 14
15#define ADIS16203_ALM_SMPL1 0x24 /* Alarm 1, sample period */ 15/* Output, temperature */
16#define ADIS16203_ALM_SMPL2 0x26 /* Alarm 2, sample period */ 16#define ADIS16203_TEMP_OUT 0x0A
17#define ADIS16203_ALM_CTRL 0x28 /* Alarm control */ 17
18#define ADIS16203_AUX_DAC 0x30 /* Auxiliary DAC data */ 18/* Output, x-axis inclination */
19#define ADIS16203_GPIO_CTRL 0x32 /* General-purpose digital input/output control */ 19#define ADIS16203_XINCL_OUT 0x0C
20#define ADIS16203_MSC_CTRL 0x34 /* Miscellaneous control */ 20
21#define ADIS16203_SMPL_PRD 0x36 /* Internal sample period (rate) control */ 21/* Output, y-axis inclination */
22#define ADIS16203_AVG_CNT 0x38 /* Operation, filter configuration */ 22#define ADIS16203_YINCL_OUT 0x0E
23#define ADIS16203_SLP_CNT 0x3A /* Operation, sleep mode control */ 23
24#define ADIS16203_DIAG_STAT 0x3C /* Diagnostics, system status register */ 24/* Incline null calibration */
25#define ADIS16203_GLOB_CMD 0x3E /* Operation, system command register */ 25#define ADIS16203_INCL_NULL 0x18
26
27/* Alarm 1 amplitude threshold */
28#define ADIS16203_ALM_MAG1 0x20
29
30/* Alarm 2 amplitude threshold */
31#define ADIS16203_ALM_MAG2 0x22
32
33/* Alarm 1, sample period */
34#define ADIS16203_ALM_SMPL1 0x24
35
36/* Alarm 2, sample period */
37#define ADIS16203_ALM_SMPL2 0x26
38
39/* Alarm control */
40#define ADIS16203_ALM_CTRL 0x28
41
42/* Auxiliary DAC data */
43#define ADIS16203_AUX_DAC 0x30
44
45/* General-purpose digital input/output control */
46#define ADIS16203_GPIO_CTRL 0x32
47
48/* Miscellaneous control */
49#define ADIS16203_MSC_CTRL 0x34
50
51/* Internal sample period (rate) control */
52#define ADIS16203_SMPL_PRD 0x36
53
54/* Operation, filter configuration */
55#define ADIS16203_AVG_CNT 0x38
56
57/* Operation, sleep mode control */
58#define ADIS16203_SLP_CNT 0x3A
59
60/* Diagnostics, system status register */
61#define ADIS16203_DIAG_STAT 0x3C
62
63/* Operation, system command register */
64#define ADIS16203_GLOB_CMD 0x3E
26 65
27/* MSC_CTRL */ 66/* MSC_CTRL */
28#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST BIT(10) /* Self-test at power-on: 1 = disabled, 0 = enabled */ 67
29#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN BIT(9) /* Reverses rotation of both inclination outputs */ 68/* Self-test at power-on: 1 = disabled, 0 = enabled */
30#define ADIS16203_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */ 69#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
31#define ADIS16203_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */ 70
32#define ADIS16203_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */ 71/* Reverses rotation of both inclination outputs */
33#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1 BIT(0) /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */ 72#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN BIT(9)
73
74/* Self-test enable */
75#define ADIS16203_MSC_CTRL_SELF_TEST_EN BIT(8)
76
77/* Data-ready enable: 1 = enabled, 0 = disabled */
78#define ADIS16203_MSC_CTRL_DATA_RDY_EN BIT(2)
79
80/* Data-ready polarity: 1 = active high, 0 = active low */
81#define ADIS16203_MSC_CTRL_ACTIVE_HIGH BIT(1)
82
83/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
84#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
34 85
35/* DIAG_STAT */ 86/* DIAG_STAT */
36#define ADIS16203_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */ 87
37#define ADIS16203_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */ 88/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
38#define ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT 5 /* Self-test diagnostic error flag */ 89#define ADIS16203_DIAG_STAT_ALARM2 BIT(9)
39#define ADIS16203_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */ 90
40#define ADIS16203_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */ 91/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
41#define ADIS16203_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */ 92#define ADIS16203_DIAG_STAT_ALARM1 BIT(8)
42#define ADIS16203_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 3.15 V */ 93
94/* Self-test diagnostic error flag */
95#define ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT 5
96
97/* SPI communications failure */
98#define ADIS16203_DIAG_STAT_SPI_FAIL_BIT 3
99
100/* Flash update failure */
101#define ADIS16203_DIAG_STAT_FLASH_UPT_BIT 2
102
103/* Power supply above 3.625 V */
104#define ADIS16203_DIAG_STAT_POWER_HIGH_BIT 1
105
106/* Power supply below 3.15 V */
107#define ADIS16203_DIAG_STAT_POWER_LOW_BIT 0
43 108
44/* GLOB_CMD */ 109/* GLOB_CMD */
110
45#define ADIS16203_GLOB_CMD_SW_RESET BIT(7) 111#define ADIS16203_GLOB_CMD_SW_RESET BIT(7)
46#define ADIS16203_GLOB_CMD_CLEAR_STAT BIT(4) 112#define ADIS16203_GLOB_CMD_CLEAR_STAT BIT(4)
47#define ADIS16203_GLOB_CMD_FACTORY_CAL BIT(1) 113#define ADIS16203_GLOB_CMD_FACTORY_CAL BIT(1)
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index de5b84ac842b..c70671778bae 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -134,6 +134,7 @@ static const struct adis_data adis16203_data = {
134 .diag_stat_reg = ADIS16203_DIAG_STAT, 134 .diag_stat_reg = ADIS16203_DIAG_STAT,
135 135
136 .self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN, 136 .self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN,
137 .self_test_no_autoclear = true,
137 .startup_delay = ADIS16203_STARTUP_DELAY, 138 .startup_delay = ADIS16203_STARTUP_DELAY,
138 139
139 .status_error_msgs = adis16203_status_error_msgs, 140 .status_error_msgs = adis16203_status_error_msgs,
diff --git a/drivers/staging/iio/accel/adis16204.h b/drivers/staging/iio/accel/adis16204.h
deleted file mode 100644
index 0b23f0b5c52f..000000000000
--- a/drivers/staging/iio/accel/adis16204.h
+++ /dev/null
@@ -1,68 +0,0 @@
1#ifndef SPI_ADIS16204_H_
2#define SPI_ADIS16204_H_
3
4#define ADIS16204_STARTUP_DELAY 220 /* ms */
5
6#define ADIS16204_FLASH_CNT 0x00 /* Flash memory write count */
7#define ADIS16204_SUPPLY_OUT 0x02 /* Output, power supply */
8#define ADIS16204_XACCL_OUT 0x04 /* Output, x-axis accelerometer */
9#define ADIS16204_YACCL_OUT 0x06 /* Output, y-axis accelerometer */
10#define ADIS16204_AUX_ADC 0x08 /* Output, auxiliary ADC input */
11#define ADIS16204_TEMP_OUT 0x0A /* Output, temperature */
12#define ADIS16204_X_PEAK_OUT 0x0C /* Twos complement */
13#define ADIS16204_Y_PEAK_OUT 0x0E /* Twos complement */
14#define ADIS16204_XACCL_NULL 0x10 /* Calibration, x-axis acceleration offset null */
15#define ADIS16204_YACCL_NULL 0x12 /* Calibration, y-axis acceleration offset null */
16#define ADIS16204_XACCL_SCALE 0x14 /* X-axis scale factor calibration register */
17#define ADIS16204_YACCL_SCALE 0x16 /* Y-axis scale factor calibration register */
18#define ADIS16204_XY_RSS_OUT 0x18 /* XY combined acceleration (RSS) */
19#define ADIS16204_XY_PEAK_OUT 0x1A /* Peak, XY combined output (RSS) */
20#define ADIS16204_CAP_BUF_1 0x1C /* Capture buffer output register 1 */
21#define ADIS16204_CAP_BUF_2 0x1E /* Capture buffer output register 2 */
22#define ADIS16204_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
23#define ADIS16204_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
24#define ADIS16204_ALM_CTRL 0x28 /* Alarm control */
25#define ADIS16204_CAPT_PNTR 0x2A /* Capture register address pointer */
26#define ADIS16204_AUX_DAC 0x30 /* Auxiliary DAC data */
27#define ADIS16204_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
28#define ADIS16204_MSC_CTRL 0x34 /* Miscellaneous control */
29#define ADIS16204_SMPL_PRD 0x36 /* Internal sample period (rate) control */
30#define ADIS16204_AVG_CNT 0x38 /* Operation, filter configuration */
31#define ADIS16204_SLP_CNT 0x3A /* Operation, sleep mode control */
32#define ADIS16204_DIAG_STAT 0x3C /* Diagnostics, system status register */
33#define ADIS16204_GLOB_CMD 0x3E /* Operation, system command register */
34
35/* MSC_CTRL */
36#define ADIS16204_MSC_CTRL_PWRUP_SELF_TEST BIT(10) /* Self-test at power-on: 1 = disabled, 0 = enabled */
37#define ADIS16204_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */
38#define ADIS16204_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */
39#define ADIS16204_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */
40#define ADIS16204_MSC_CTRL_DATA_RDY_DIO2 BIT(0) /* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
41
42/* DIAG_STAT */
43#define ADIS16204_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
44#define ADIS16204_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
45#define ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT 5 /* Self-test diagnostic error flag: 1 = error condition,
46 0 = normal operation */
47#define ADIS16204_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */
48#define ADIS16204_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */
49#define ADIS16204_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */
50#define ADIS16204_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 2.975 V */
51
52/* GLOB_CMD */
53#define ADIS16204_GLOB_CMD_SW_RESET BIT(7)
54#define ADIS16204_GLOB_CMD_CLEAR_STAT BIT(4)
55#define ADIS16204_GLOB_CMD_FACTORY_CAL BIT(1)
56
57#define ADIS16204_ERROR_ACTIVE BIT(14)
58
59enum adis16204_scan {
60 ADIS16204_SCAN_ACC_X,
61 ADIS16204_SCAN_ACC_Y,
62 ADIS16204_SCAN_ACC_XY,
63 ADIS16204_SCAN_SUPPLY,
64 ADIS16204_SCAN_AUX_ADC,
65 ADIS16204_SCAN_TEMP,
66};
67
68#endif /* SPI_ADIS16204_H_ */
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
deleted file mode 100644
index 20a9df64f1ed..000000000000
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ /dev/null
@@ -1,253 +0,0 @@
1/*
2 * ADIS16204 Programmable High-g Digital Impact Sensor and Recorder
3 *
4 * Copyright 2010 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/interrupt.h>
10#include <linux/irq.h>
11#include <linux/delay.h>
12#include <linux/mutex.h>
13#include <linux/device.h>
14#include <linux/kernel.h>
15#include <linux/spi/spi.h>
16#include <linux/slab.h>
17#include <linux/sysfs.h>
18#include <linux/list.h>
19#include <linux/module.h>
20
21#include <linux/iio/iio.h>
22#include <linux/iio/sysfs.h>
23#include <linux/iio/buffer.h>
24#include <linux/iio/imu/adis.h>
25
26#include "adis16204.h"
27
28/* Unique to this driver currently */
29
30static const u8 adis16204_addresses[][2] = {
31 [ADIS16204_SCAN_ACC_X] = { ADIS16204_XACCL_NULL, ADIS16204_X_PEAK_OUT },
32 [ADIS16204_SCAN_ACC_Y] = { ADIS16204_YACCL_NULL, ADIS16204_Y_PEAK_OUT },
33 [ADIS16204_SCAN_ACC_XY] = { 0, ADIS16204_XY_PEAK_OUT },
34};
35
36static int adis16204_read_raw(struct iio_dev *indio_dev,
37 struct iio_chan_spec const *chan,
38 int *val, int *val2,
39 long mask)
40{
41 struct adis *st = iio_priv(indio_dev);
42 int ret;
43 int bits;
44 u8 addr;
45 s16 val16;
46 int addrind;
47
48 switch (mask) {
49 case IIO_CHAN_INFO_RAW:
50 return adis_single_conversion(indio_dev, chan,
51 ADIS16204_ERROR_ACTIVE, val);
52 case IIO_CHAN_INFO_SCALE:
53 switch (chan->type) {
54 case IIO_VOLTAGE:
55 if (chan->channel == 0) {
56 *val = 1;
57 *val2 = 220000; /* 1.22 mV */
58 } else {
59 *val = 0;
60 *val2 = 610000; /* 0.61 mV */
61 }
62 return IIO_VAL_INT_PLUS_MICRO;
63 case IIO_TEMP:
64 *val = -470; /* 0.47 C */
65 *val2 = 0;
66 return IIO_VAL_INT_PLUS_MICRO;
67 case IIO_ACCEL:
68 *val = 0;
69 switch (chan->channel2) {
70 case IIO_MOD_X:
71 case IIO_MOD_ROOT_SUM_SQUARED_X_Y:
72 *val2 = IIO_G_TO_M_S_2(17125); /* 17.125 mg */
73 break;
74 case IIO_MOD_Y:
75 case IIO_MOD_Z:
76 *val2 = IIO_G_TO_M_S_2(8407); /* 8.407 mg */
77 break;
78 }
79 return IIO_VAL_INT_PLUS_MICRO;
80 default:
81 return -EINVAL;
82 }
83 break;
84 case IIO_CHAN_INFO_OFFSET:
85 *val = 25000 / -470 - 1278; /* 25 C = 1278 */
86 return IIO_VAL_INT;
87 case IIO_CHAN_INFO_CALIBBIAS:
88 case IIO_CHAN_INFO_PEAK:
89 if (mask == IIO_CHAN_INFO_CALIBBIAS) {
90 bits = 12;
91 addrind = 0;
92 } else { /* PEAK_SEPARATE */
93 bits = 14;
94 addrind = 1;
95 }
96 mutex_lock(&indio_dev->mlock);
97 addr = adis16204_addresses[chan->scan_index][addrind];
98 ret = adis_read_reg_16(st, addr, &val16);
99 if (ret) {
100 mutex_unlock(&indio_dev->mlock);
101 return ret;
102 }
103 val16 &= (1 << bits) - 1;
104 val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
105 *val = val16;
106 mutex_unlock(&indio_dev->mlock);
107 return IIO_VAL_INT;
108 }
109 return -EINVAL;
110}
111
112static int adis16204_write_raw(struct iio_dev *indio_dev,
113 struct iio_chan_spec const *chan,
114 int val,
115 int val2,
116 long mask)
117{
118 struct adis *st = iio_priv(indio_dev);
119 int bits;
120 s16 val16;
121 u8 addr;
122
123 switch (mask) {
124 case IIO_CHAN_INFO_CALIBBIAS:
125 switch (chan->type) {
126 case IIO_ACCEL:
127 bits = 12;
128 break;
129 default:
130 return -EINVAL;
131 }
132 val16 = val & ((1 << bits) - 1);
133 addr = adis16204_addresses[chan->scan_index][1];
134 return adis_write_reg_16(st, addr, val16);
135 }
136 return -EINVAL;
137}
138
139static const struct iio_chan_spec adis16204_channels[] = {
140 ADIS_SUPPLY_CHAN(ADIS16204_SUPPLY_OUT, ADIS16204_SCAN_SUPPLY, 0, 12),
141 ADIS_AUX_ADC_CHAN(ADIS16204_AUX_ADC, ADIS16204_SCAN_AUX_ADC, 0, 12),
142 ADIS_TEMP_CHAN(ADIS16204_TEMP_OUT, ADIS16204_SCAN_TEMP, 0, 12),
143 ADIS_ACCEL_CHAN(X, ADIS16204_XACCL_OUT, ADIS16204_SCAN_ACC_X,
144 BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
145 0, 14),
146 ADIS_ACCEL_CHAN(Y, ADIS16204_YACCL_OUT, ADIS16204_SCAN_ACC_Y,
147 BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
148 0, 14),
149 ADIS_ACCEL_CHAN(ROOT_SUM_SQUARED_X_Y, ADIS16204_XY_RSS_OUT,
150 ADIS16204_SCAN_ACC_XY, BIT(IIO_CHAN_INFO_PEAK), 0, 14),
151 IIO_CHAN_SOFT_TIMESTAMP(5),
152};
153
154static const struct iio_info adis16204_info = {
155 .read_raw = &adis16204_read_raw,
156 .write_raw = &adis16204_write_raw,
157 .update_scan_mode = adis_update_scan_mode,
158 .driver_module = THIS_MODULE,
159};
160
161static const char * const adis16204_status_error_msgs[] = {
162 [ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
163 [ADIS16204_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
164 [ADIS16204_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
165 [ADIS16204_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
166 [ADIS16204_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
167};
168
169static const struct adis_data adis16204_data = {
170 .read_delay = 20,
171 .msc_ctrl_reg = ADIS16204_MSC_CTRL,
172 .glob_cmd_reg = ADIS16204_GLOB_CMD,
173 .diag_stat_reg = ADIS16204_DIAG_STAT,
174
175 .self_test_mask = ADIS16204_MSC_CTRL_SELF_TEST_EN,
176 .startup_delay = ADIS16204_STARTUP_DELAY,
177
178 .status_error_msgs = adis16204_status_error_msgs,
179 .status_error_mask = BIT(ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT) |
180 BIT(ADIS16204_DIAG_STAT_SPI_FAIL_BIT) |
181 BIT(ADIS16204_DIAG_STAT_FLASH_UPT_BIT) |
182 BIT(ADIS16204_DIAG_STAT_POWER_HIGH_BIT) |
183 BIT(ADIS16204_DIAG_STAT_POWER_LOW_BIT),
184};
185
186static int adis16204_probe(struct spi_device *spi)
187{
188 int ret;
189 struct adis *st;
190 struct iio_dev *indio_dev;
191
192 /* setup the industrialio driver allocated elements */
193 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
194 if (!indio_dev)
195 return -ENOMEM;
196 st = iio_priv(indio_dev);
197 /* this is only used for removal purposes */
198 spi_set_drvdata(spi, indio_dev);
199
200 indio_dev->name = spi->dev.driver->name;
201 indio_dev->dev.parent = &spi->dev;
202 indio_dev->info = &adis16204_info;
203 indio_dev->channels = adis16204_channels;
204 indio_dev->num_channels = ARRAY_SIZE(adis16204_channels);
205 indio_dev->modes = INDIO_DIRECT_MODE;
206
207 ret = adis_init(st, indio_dev, spi, &adis16204_data);
208 if (ret)
209 return ret;
210
211 ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
212 if (ret)
213 return ret;
214
215 /* Get the device into a sane initial state */
216 ret = adis_initial_startup(st);
217 if (ret)
218 goto error_cleanup_buffer_trigger;
219 ret = iio_device_register(indio_dev);
220 if (ret)
221 goto error_cleanup_buffer_trigger;
222
223 return 0;
224
225error_cleanup_buffer_trigger:
226 adis_cleanup_buffer_and_trigger(st, indio_dev);
227 return ret;
228}
229
230static int adis16204_remove(struct spi_device *spi)
231{
232 struct iio_dev *indio_dev = spi_get_drvdata(spi);
233 struct adis *st = iio_priv(indio_dev);
234
235 iio_device_unregister(indio_dev);
236 adis_cleanup_buffer_and_trigger(st, indio_dev);
237
238 return 0;
239}
240
241static struct spi_driver adis16204_driver = {
242 .driver = {
243 .name = "adis16204",
244 },
245 .probe = adis16204_probe,
246 .remove = adis16204_remove,
247};
248module_spi_driver(adis16204_driver);
249
250MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
251MODULE_DESCRIPTION("ADIS16204 High-g Digital Impact Sensor and Recorder");
252MODULE_LICENSE("GPL v2");
253MODULE_ALIAS("spi:adis16204");
diff --git a/drivers/staging/iio/accel/adis16209.h b/drivers/staging/iio/accel/adis16209.h
index 813698d18ec8..315f1c0c46e8 100644
--- a/drivers/staging/iio/accel/adis16209.h
+++ b/drivers/staging/iio/accel/adis16209.h
@@ -5,88 +5,127 @@
5 5
6/* Flash memory write count */ 6/* Flash memory write count */
7#define ADIS16209_FLASH_CNT 0x00 7#define ADIS16209_FLASH_CNT 0x00
8
8/* Output, power supply */ 9/* Output, power supply */
9#define ADIS16209_SUPPLY_OUT 0x02 10#define ADIS16209_SUPPLY_OUT 0x02
11
10/* Output, x-axis accelerometer */ 12/* Output, x-axis accelerometer */
11#define ADIS16209_XACCL_OUT 0x04 13#define ADIS16209_XACCL_OUT 0x04
14
12/* Output, y-axis accelerometer */ 15/* Output, y-axis accelerometer */
13#define ADIS16209_YACCL_OUT 0x06 16#define ADIS16209_YACCL_OUT 0x06
17
14/* Output, auxiliary ADC input */ 18/* Output, auxiliary ADC input */
15#define ADIS16209_AUX_ADC 0x08 19#define ADIS16209_AUX_ADC 0x08
20
16/* Output, temperature */ 21/* Output, temperature */
17#define ADIS16209_TEMP_OUT 0x0A 22#define ADIS16209_TEMP_OUT 0x0A
23
18/* Output, x-axis inclination */ 24/* Output, x-axis inclination */
19#define ADIS16209_XINCL_OUT 0x0C 25#define ADIS16209_XINCL_OUT 0x0C
26
20/* Output, y-axis inclination */ 27/* Output, y-axis inclination */
21#define ADIS16209_YINCL_OUT 0x0E 28#define ADIS16209_YINCL_OUT 0x0E
29
22/* Output, +/-180 vertical rotational position */ 30/* Output, +/-180 vertical rotational position */
23#define ADIS16209_ROT_OUT 0x10 31#define ADIS16209_ROT_OUT 0x10
32
24/* Calibration, x-axis acceleration offset null */ 33/* Calibration, x-axis acceleration offset null */
25#define ADIS16209_XACCL_NULL 0x12 34#define ADIS16209_XACCL_NULL 0x12
35
26/* Calibration, y-axis acceleration offset null */ 36/* Calibration, y-axis acceleration offset null */
27#define ADIS16209_YACCL_NULL 0x14 37#define ADIS16209_YACCL_NULL 0x14
38
28/* Calibration, x-axis inclination offset null */ 39/* Calibration, x-axis inclination offset null */
29#define ADIS16209_XINCL_NULL 0x16 40#define ADIS16209_XINCL_NULL 0x16
41
30/* Calibration, y-axis inclination offset null */ 42/* Calibration, y-axis inclination offset null */
31#define ADIS16209_YINCL_NULL 0x18 43#define ADIS16209_YINCL_NULL 0x18
44
32/* Calibration, vertical rotation offset null */ 45/* Calibration, vertical rotation offset null */
33#define ADIS16209_ROT_NULL 0x1A 46#define ADIS16209_ROT_NULL 0x1A
47
34/* Alarm 1 amplitude threshold */ 48/* Alarm 1 amplitude threshold */
35#define ADIS16209_ALM_MAG1 0x20 49#define ADIS16209_ALM_MAG1 0x20
50
36/* Alarm 2 amplitude threshold */ 51/* Alarm 2 amplitude threshold */
37#define ADIS16209_ALM_MAG2 0x22 52#define ADIS16209_ALM_MAG2 0x22
53
38/* Alarm 1, sample period */ 54/* Alarm 1, sample period */
39#define ADIS16209_ALM_SMPL1 0x24 55#define ADIS16209_ALM_SMPL1 0x24
56
40/* Alarm 2, sample period */ 57/* Alarm 2, sample period */
41#define ADIS16209_ALM_SMPL2 0x26 58#define ADIS16209_ALM_SMPL2 0x26
59
42/* Alarm control */ 60/* Alarm control */
43#define ADIS16209_ALM_CTRL 0x28 61#define ADIS16209_ALM_CTRL 0x28
62
44/* Auxiliary DAC data */ 63/* Auxiliary DAC data */
45#define ADIS16209_AUX_DAC 0x30 64#define ADIS16209_AUX_DAC 0x30
65
46/* General-purpose digital input/output control */ 66/* General-purpose digital input/output control */
47#define ADIS16209_GPIO_CTRL 0x32 67#define ADIS16209_GPIO_CTRL 0x32
68
48/* Miscellaneous control */ 69/* Miscellaneous control */
49#define ADIS16209_MSC_CTRL 0x34 70#define ADIS16209_MSC_CTRL 0x34
71
50/* Internal sample period (rate) control */ 72/* Internal sample period (rate) control */
51#define ADIS16209_SMPL_PRD 0x36 73#define ADIS16209_SMPL_PRD 0x36
74
52/* Operation, filter configuration */ 75/* Operation, filter configuration */
53#define ADIS16209_AVG_CNT 0x38 76#define ADIS16209_AVG_CNT 0x38
77
54/* Operation, sleep mode control */ 78/* Operation, sleep mode control */
55#define ADIS16209_SLP_CNT 0x3A 79#define ADIS16209_SLP_CNT 0x3A
80
56/* Diagnostics, system status register */ 81/* Diagnostics, system status register */
57#define ADIS16209_DIAG_STAT 0x3C 82#define ADIS16209_DIAG_STAT 0x3C
83
58/* Operation, system command register */ 84/* Operation, system command register */
59#define ADIS16209_GLOB_CMD 0x3E 85#define ADIS16209_GLOB_CMD 0x3E
60 86
61/* MSC_CTRL */ 87/* MSC_CTRL */
88
62/* Self-test at power-on: 1 = disabled, 0 = enabled */ 89/* Self-test at power-on: 1 = disabled, 0 = enabled */
63#define ADIS16209_MSC_CTRL_PWRUP_SELF_TEST BIT(10) 90#define ADIS16209_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
91
64/* Self-test enable */ 92/* Self-test enable */
65#define ADIS16209_MSC_CTRL_SELF_TEST_EN BIT(8) 93#define ADIS16209_MSC_CTRL_SELF_TEST_EN BIT(8)
94
66/* Data-ready enable: 1 = enabled, 0 = disabled */ 95/* Data-ready enable: 1 = enabled, 0 = disabled */
67#define ADIS16209_MSC_CTRL_DATA_RDY_EN BIT(2) 96#define ADIS16209_MSC_CTRL_DATA_RDY_EN BIT(2)
97
68/* Data-ready polarity: 1 = active high, 0 = active low */ 98/* Data-ready polarity: 1 = active high, 0 = active low */
69#define ADIS16209_MSC_CTRL_ACTIVE_HIGH BIT(1) 99#define ADIS16209_MSC_CTRL_ACTIVE_HIGH BIT(1)
100
70/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */ 101/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
71#define ADIS16209_MSC_CTRL_DATA_RDY_DIO2 BIT(0) 102#define ADIS16209_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
72 103
73/* DIAG_STAT */ 104/* DIAG_STAT */
105
74/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */ 106/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
75#define ADIS16209_DIAG_STAT_ALARM2 BIT(9) 107#define ADIS16209_DIAG_STAT_ALARM2 BIT(9)
108
76/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */ 109/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
77#define ADIS16209_DIAG_STAT_ALARM1 BIT(8) 110#define ADIS16209_DIAG_STAT_ALARM1 BIT(8)
111
78/* Self-test diagnostic error flag: 1 = error condition, 0 = normal operation */ 112/* Self-test diagnostic error flag: 1 = error condition, 0 = normal operation */
79#define ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT 5 113#define ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT 5
114
80/* SPI communications failure */ 115/* SPI communications failure */
81#define ADIS16209_DIAG_STAT_SPI_FAIL_BIT 3 116#define ADIS16209_DIAG_STAT_SPI_FAIL_BIT 3
117
82/* Flash update failure */ 118/* Flash update failure */
83#define ADIS16209_DIAG_STAT_FLASH_UPT_BIT 2 119#define ADIS16209_DIAG_STAT_FLASH_UPT_BIT 2
120
84/* Power supply above 3.625 V */ 121/* Power supply above 3.625 V */
85#define ADIS16209_DIAG_STAT_POWER_HIGH_BIT 1 122#define ADIS16209_DIAG_STAT_POWER_HIGH_BIT 1
123
86/* Power supply below 3.15 V */ 124/* Power supply below 3.15 V */
87#define ADIS16209_DIAG_STAT_POWER_LOW_BIT 0 125#define ADIS16209_DIAG_STAT_POWER_LOW_BIT 0
88 126
89/* GLOB_CMD */ 127/* GLOB_CMD */
128
90#define ADIS16209_GLOB_CMD_SW_RESET BIT(7) 129#define ADIS16209_GLOB_CMD_SW_RESET BIT(7)
91#define ADIS16209_GLOB_CMD_CLEAR_STAT BIT(4) 130#define ADIS16209_GLOB_CMD_CLEAR_STAT BIT(4)
92#define ADIS16209_GLOB_CMD_FACTORY_CAL BIT(1) 131#define ADIS16209_GLOB_CMD_FACTORY_CAL BIT(1)
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index 8b42bf8c3f60..8dbad58628a1 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -168,6 +168,7 @@ static const struct adis_data adis16209_data = {
168 .diag_stat_reg = ADIS16209_DIAG_STAT, 168 .diag_stat_reg = ADIS16209_DIAG_STAT,
169 169
170 .self_test_mask = ADIS16209_MSC_CTRL_SELF_TEST_EN, 170 .self_test_mask = ADIS16209_MSC_CTRL_SELF_TEST_EN,
171 .self_test_no_autoclear = true,
171 .startup_delay = ADIS16209_STARTUP_DELAY, 172 .startup_delay = ADIS16209_STARTUP_DELAY,
172 173
173 .status_error_msgs = adis16209_status_error_msgs, 174 .status_error_msgs = adis16209_status_error_msgs,
diff --git a/drivers/staging/iio/accel/adis16220.h b/drivers/staging/iio/accel/adis16220.h
deleted file mode 100644
index eab86331124f..000000000000
--- a/drivers/staging/iio/accel/adis16220.h
+++ /dev/null
@@ -1,140 +0,0 @@
1#ifndef SPI_ADIS16220_H_
2#define SPI_ADIS16220_H_
3
4#include <linux/iio/imu/adis.h>
5
6#define ADIS16220_STARTUP_DELAY 220 /* ms */
7
8/* Flash memory write count */
9#define ADIS16220_FLASH_CNT 0x00
10/* Control, acceleration offset adjustment control */
11#define ADIS16220_ACCL_NULL 0x02
12/* Control, AIN1 offset adjustment control */
13#define ADIS16220_AIN1_NULL 0x04
14/* Control, AIN2 offset adjustment control */
15#define ADIS16220_AIN2_NULL 0x06
16/* Output, power supply during capture */
17#define ADIS16220_CAPT_SUPPLY 0x0A
18/* Output, temperature during capture */
19#define ADIS16220_CAPT_TEMP 0x0C
20/* Output, peak acceleration during capture */
21#define ADIS16220_CAPT_PEAKA 0x0E
22/* Output, peak AIN1 level during capture */
23#define ADIS16220_CAPT_PEAK1 0x10
24/* Output, peak AIN2 level during capture */
25#define ADIS16220_CAPT_PEAK2 0x12
26/* Output, capture buffer for acceleration */
27#define ADIS16220_CAPT_BUFA 0x14
28/* Output, capture buffer for AIN1 */
29#define ADIS16220_CAPT_BUF1 0x16
30/* Output, capture buffer for AIN2 */
31#define ADIS16220_CAPT_BUF2 0x18
32/* Control, capture buffer address pointer */
33#define ADIS16220_CAPT_PNTR 0x1A
34/* Control, capture control register */
35#define ADIS16220_CAPT_CTRL 0x1C
36/* Control, capture period (automatic mode) */
37#define ADIS16220_CAPT_PRD 0x1E
38/* Control, Alarm A, acceleration peak threshold */
39#define ADIS16220_ALM_MAGA 0x20
40/* Control, Alarm 1, AIN1 peak threshold */
41#define ADIS16220_ALM_MAG1 0x22
42/* Control, Alarm 2, AIN2 peak threshold */
43#define ADIS16220_ALM_MAG2 0x24
44/* Control, Alarm S, peak threshold */
45#define ADIS16220_ALM_MAGS 0x26
46/* Control, alarm configuration register */
47#define ADIS16220_ALM_CTRL 0x28
48/* Control, general I/O configuration */
49#define ADIS16220_GPIO_CTRL 0x32
50/* Control, self-test control, AIN configuration */
51#define ADIS16220_MSC_CTRL 0x34
52/* Control, digital I/O configuration */
53#define ADIS16220_DIO_CTRL 0x36
54/* Control, filter configuration */
55#define ADIS16220_AVG_CNT 0x38
56/* Status, system status */
57#define ADIS16220_DIAG_STAT 0x3C
58/* Control, system commands */
59#define ADIS16220_GLOB_CMD 0x3E
60/* Status, self-test response */
61#define ADIS16220_ST_DELTA 0x40
62/* Lot Identification Code 1 */
63#define ADIS16220_LOT_ID1 0x52
64/* Lot Identification Code 2 */
65#define ADIS16220_LOT_ID2 0x54
66/* Product identifier; convert to decimal = 16220 */
67#define ADIS16220_PROD_ID 0x56
68/* Serial number */
69#define ADIS16220_SERIAL_NUM 0x58
70
71#define ADIS16220_CAPTURE_SIZE 2048
72
73/* MSC_CTRL */
74#define ADIS16220_MSC_CTRL_SELF_TEST_EN BIT(8)
75#define ADIS16220_MSC_CTRL_POWER_SUP_COM_AIN1 BIT(1)
76#define ADIS16220_MSC_CTRL_POWER_SUP_COM_AIN2 BIT(0)
77
78/* DIO_CTRL */
79#define ADIS16220_MSC_CTRL_DIO2_BUSY_IND (BIT(5) | BIT(4))
80#define ADIS16220_MSC_CTRL_DIO1_BUSY_IND (BIT(3) | BIT(2))
81#define ADIS16220_MSC_CTRL_DIO2_ACT_HIGH BIT(1)
82#define ADIS16220_MSC_CTRL_DIO1_ACT_HIGH BIT(0)
83
84/* DIAG_STAT */
85/* AIN2 sample > ALM_MAG2 */
86#define ADIS16220_DIAG_STAT_ALM_MAG2 BIT(14)
87/* AIN1 sample > ALM_MAG1 */
88#define ADIS16220_DIAG_STAT_ALM_MAG1 BIT(13)
89/* Acceleration sample > ALM_MAGA */
90#define ADIS16220_DIAG_STAT_ALM_MAGA BIT(12)
91/* Error condition programmed into ALM_MAGS[11:0] and ALM_CTRL[5:4] is true */
92#define ADIS16220_DIAG_STAT_ALM_MAGS BIT(11)
93/* |Peak value in AIN2 data capture| > ALM_MAG2 */
94#define ADIS16220_DIAG_STAT_PEAK_AIN2 BIT(10)
95/* |Peak value in AIN1 data capture| > ALM_MAG1 */
96#define ADIS16220_DIAG_STAT_PEAK_AIN1 BIT(9)
97/* |Peak value in acceleration data capture| > ALM_MAGA */
98#define ADIS16220_DIAG_STAT_PEAK_ACCEL BIT(8)
99/* Data ready, capture complete */
100#define ADIS16220_DIAG_STAT_DATA_RDY BIT(7)
101#define ADIS16220_DIAG_STAT_FLASH_CHK BIT(6)
102#define ADIS16220_DIAG_STAT_SELF_TEST BIT(5)
103/* Capture period violation/interruption */
104#define ADIS16220_DIAG_STAT_VIOLATION_BIT 4
105/* SPI communications failure */
106#define ADIS16220_DIAG_STAT_SPI_FAIL_BIT 3
107/* Flash update failure */
108#define ADIS16220_DIAG_STAT_FLASH_UPT_BIT 2
109/* Power supply above 3.625 V */
110#define ADIS16220_DIAG_STAT_POWER_HIGH_BIT 1
111/* Power supply below 3.15 V */
112#define ADIS16220_DIAG_STAT_POWER_LOW_BIT 0
113
114/* GLOB_CMD */
115#define ADIS16220_GLOB_CMD_SW_RESET BIT(7)
116#define ADIS16220_GLOB_CMD_SELF_TEST BIT(2)
117#define ADIS16220_GLOB_CMD_PWR_DOWN BIT(1)
118
119#define ADIS16220_MAX_TX 2048
120#define ADIS16220_MAX_RX 2048
121
122#define ADIS16220_SPI_BURST (u32)(1000 * 1000)
123#define ADIS16220_SPI_FAST (u32)(2000 * 1000)
124
125/**
126 * struct adis16220_state - device instance specific data
127 * @adis: adis device
128 * @tx: transmit buffer
129 * @rx: receive buffer
130 * @buf_lock: mutex to protect tx and rx
131 **/
132struct adis16220_state {
133 struct adis adis;
134
135 struct mutex buf_lock;
136 u8 tx[ADIS16220_MAX_TX] ____cacheline_aligned;
137 u8 rx[ADIS16220_MAX_RX];
138};
139
140#endif /* SPI_ADIS16220_H_ */
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
deleted file mode 100644
index d0165218b60c..000000000000
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ /dev/null
@@ -1,494 +0,0 @@
1/*
2 * ADIS16220 Programmable Digital Vibration Sensor driver
3 *
4 * Copyright 2010 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/delay.h>
10#include <linux/mutex.h>
11#include <linux/device.h>
12#include <linux/kernel.h>
13#include <linux/spi/spi.h>
14#include <linux/slab.h>
15#include <linux/sysfs.h>
16#include <linux/module.h>
17
18#include <linux/iio/iio.h>
19#include <linux/iio/sysfs.h>
20
21#include "adis16220.h"
22
23static ssize_t adis16220_read_16bit(struct device *dev,
24 struct device_attribute *attr,
25 char *buf)
26{
27 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
28 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
29 struct adis16220_state *st = iio_priv(indio_dev);
30 ssize_t ret;
31 u16 val;
32
33 /* Take the iio_dev status lock */
34 mutex_lock(&indio_dev->mlock);
35 ret = adis_read_reg_16(&st->adis, this_attr->address, &val);
36 mutex_unlock(&indio_dev->mlock);
37 if (ret)
38 return ret;
39 return sprintf(buf, "%u\n", val);
40}
41
42static ssize_t adis16220_write_16bit(struct device *dev,
43 struct device_attribute *attr,
44 const char *buf,
45 size_t len)
46{
47 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
48 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
49 struct adis16220_state *st = iio_priv(indio_dev);
50 int ret;
51 u16 val;
52
53 ret = kstrtou16(buf, 10, &val);
54 if (ret)
55 goto error_ret;
56 ret = adis_write_reg_16(&st->adis, this_attr->address, val);
57
58error_ret:
59 return ret ? ret : len;
60}
61
62static int adis16220_capture(struct iio_dev *indio_dev)
63{
64 struct adis16220_state *st = iio_priv(indio_dev);
65 int ret;
66
67 /* initiates a manual data capture */
68 ret = adis_write_reg_16(&st->adis, ADIS16220_GLOB_CMD, 0xBF08);
69 if (ret)
70 dev_err(&indio_dev->dev, "problem beginning capture");
71
72 usleep_range(10000, 11000); /* delay for capture to finish */
73
74 return ret;
75}
76
77static ssize_t adis16220_write_capture(struct device *dev,
78 struct device_attribute *attr,
79 const char *buf, size_t len)
80{
81 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
82 bool val;
83 int ret;
84
85 ret = strtobool(buf, &val);
86 if (ret)
87 return ret;
88 if (!val)
89 return -EINVAL;
90 ret = adis16220_capture(indio_dev);
91 if (ret)
92 return ret;
93
94 return len;
95}
96
97static ssize_t adis16220_capture_buffer_read(struct iio_dev *indio_dev,
98 char *buf,
99 loff_t off,
100 size_t count,
101 int addr)
102{
103 struct adis16220_state *st = iio_priv(indio_dev);
104 struct spi_transfer xfers[] = {
105 {
106 .tx_buf = st->tx,
107 .bits_per_word = 8,
108 .len = 2,
109 .cs_change = 1,
110 .delay_usecs = 25,
111 }, {
112 .tx_buf = st->tx,
113 .rx_buf = st->rx,
114 .bits_per_word = 8,
115 .cs_change = 1,
116 .delay_usecs = 25,
117 },
118 };
119 int ret;
120 int i;
121
122 if (unlikely(!count))
123 return count;
124
125 if ((off >= ADIS16220_CAPTURE_SIZE) || (count & 1) || (off & 1))
126 return -EINVAL;
127
128 if (off + count > ADIS16220_CAPTURE_SIZE)
129 count = ADIS16220_CAPTURE_SIZE - off;
130
131 /* write the begin position of capture buffer */
132 ret = adis_write_reg_16(&st->adis,
133 ADIS16220_CAPT_PNTR,
134 off > 1);
135 if (ret)
136 return -EIO;
137
138 /* read count/2 values from capture buffer */
139 mutex_lock(&st->buf_lock);
140
141 for (i = 0; i < count; i += 2) {
142 st->tx[i] = ADIS_READ_REG(addr);
143 st->tx[i + 1] = 0;
144 }
145 xfers[1].len = count;
146
147 ret = spi_sync_transfer(st->adis.spi, xfers, ARRAY_SIZE(xfers));
148 if (ret) {
149 mutex_unlock(&st->buf_lock);
150 return -EIO;
151 }
152
153 memcpy(buf, st->rx, count);
154
155 mutex_unlock(&st->buf_lock);
156 return count;
157}
158
159static ssize_t adis16220_accel_bin_read(struct file *filp, struct kobject *kobj,
160 struct bin_attribute *attr,
161 char *buf,
162 loff_t off,
163 size_t count)
164{
165 struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
166
167 return adis16220_capture_buffer_read(indio_dev, buf,
168 off, count,
169 ADIS16220_CAPT_BUFA);
170}
171
172static struct bin_attribute accel_bin = {
173 .attr = {
174 .name = "accel_bin",
175 .mode = S_IRUGO,
176 },
177 .read = adis16220_accel_bin_read,
178 .size = ADIS16220_CAPTURE_SIZE,
179};
180
181static ssize_t adis16220_adc1_bin_read(struct file *filp, struct kobject *kobj,
182 struct bin_attribute *attr,
183 char *buf, loff_t off,
184 size_t count)
185{
186 struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
187
188 return adis16220_capture_buffer_read(indio_dev, buf,
189 off, count,
190 ADIS16220_CAPT_BUF1);
191}
192
193static struct bin_attribute adc1_bin = {
194 .attr = {
195 .name = "in0_bin",
196 .mode = S_IRUGO,
197 },
198 .read = adis16220_adc1_bin_read,
199 .size = ADIS16220_CAPTURE_SIZE,
200};
201
202static ssize_t adis16220_adc2_bin_read(struct file *filp, struct kobject *kobj,
203 struct bin_attribute *attr,
204 char *buf, loff_t off,
205 size_t count)
206{
207 struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
208
209 return adis16220_capture_buffer_read(indio_dev, buf,
210 off, count,
211 ADIS16220_CAPT_BUF2);
212}
213
214static struct bin_attribute adc2_bin = {
215 .attr = {
216 .name = "in1_bin",
217 .mode = S_IRUGO,
218 },
219 .read = adis16220_adc2_bin_read,
220 .size = ADIS16220_CAPTURE_SIZE,
221};
222
223#define IIO_DEV_ATTR_CAPTURE(_store) \
224 IIO_DEVICE_ATTR(capture, S_IWUSR, NULL, _store, 0)
225
226static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture);
227
228#define IIO_DEV_ATTR_CAPTURE_COUNT(_mode, _show, _store, _addr) \
229 IIO_DEVICE_ATTR(capture_count, _mode, _show, _store, _addr)
230
231static IIO_DEV_ATTR_CAPTURE_COUNT(S_IWUSR | S_IRUGO,
232 adis16220_read_16bit,
233 adis16220_write_16bit,
234 ADIS16220_CAPT_PNTR);
235
236enum adis16220_channel {
237 in_supply, in_1, in_2, accel, temp
238};
239
240struct adis16220_address_spec {
241 u8 addr;
242 u8 bits;
243 bool sign;
244};
245
246/* Address / bits / signed */
247static const struct adis16220_address_spec adis16220_addresses[][3] = {
248 [in_supply] = { { ADIS16220_CAPT_SUPPLY, 12, 0 }, },
249 [in_1] = { { ADIS16220_CAPT_BUF1, 16, 1 },
250 { ADIS16220_AIN1_NULL, 16, 1 },
251 { ADIS16220_CAPT_PEAK1, 16, 1 }, },
252 [in_2] = { { ADIS16220_CAPT_BUF2, 16, 1 },
253 { ADIS16220_AIN2_NULL, 16, 1 },
254 { ADIS16220_CAPT_PEAK2, 16, 1 }, },
255 [accel] = { { ADIS16220_CAPT_BUFA, 16, 1 },
256 { ADIS16220_ACCL_NULL, 16, 1 },
257 { ADIS16220_CAPT_PEAKA, 16, 1 }, },
258 [temp] = { { ADIS16220_CAPT_TEMP, 12, 0 }, }
259};
260
261static int adis16220_read_raw(struct iio_dev *indio_dev,
262 struct iio_chan_spec const *chan,
263 int *val, int *val2,
264 long mask)
265{
266 struct adis16220_state *st = iio_priv(indio_dev);
267 const struct adis16220_address_spec *addr;
268 int ret = -EINVAL;
269 int addrind = 0;
270 u16 uval;
271 s16 sval;
272 u8 bits;
273
274 switch (mask) {
275 case IIO_CHAN_INFO_RAW:
276 addrind = 0;
277 break;
278 case IIO_CHAN_INFO_OFFSET:
279 if (chan->type == IIO_TEMP) {
280 *val = 25000 / -470 - 1278; /* 25 C = 1278 */
281 return IIO_VAL_INT;
282 }
283 addrind = 1;
284 break;
285 case IIO_CHAN_INFO_PEAK:
286 addrind = 2;
287 break;
288 case IIO_CHAN_INFO_SCALE:
289 switch (chan->type) {
290 case IIO_TEMP:
291 *val = -470; /* -0.47 C */
292 *val2 = 0;
293 return IIO_VAL_INT_PLUS_MICRO;
294 case IIO_ACCEL:
295 *val2 = IIO_G_TO_M_S_2(19073); /* 19.073 g */
296 return IIO_VAL_INT_PLUS_MICRO;
297 case IIO_VOLTAGE:
298 if (chan->channel == 0) {
299 *val = 1;
300 *val2 = 220700; /* 1.2207 mV */
301 } else {
302 /* Should really be dependent on VDD */
303 *val2 = 305180; /* 305.18 uV */
304 }
305 return IIO_VAL_INT_PLUS_MICRO;
306 default:
307 return -EINVAL;
308 }
309 default:
310 return -EINVAL;
311 }
312 addr = &adis16220_addresses[chan->address][addrind];
313 if (addr->sign) {
314 ret = adis_read_reg_16(&st->adis, addr->addr, &sval);
315 if (ret)
316 return ret;
317 bits = addr->bits;
318 sval &= (1 << bits) - 1;
319 sval = (s16)(sval << (16 - bits)) >> (16 - bits);
320 *val = sval;
321 return IIO_VAL_INT;
322 }
323 ret = adis_read_reg_16(&st->adis, addr->addr, &uval);
324 if (ret)
325 return ret;
326 bits = addr->bits;
327 uval &= (1 << bits) - 1;
328 *val = uval;
329 return IIO_VAL_INT;
330}
331
332static const struct iio_chan_spec adis16220_channels[] = {
333 {
334 .type = IIO_VOLTAGE,
335 .indexed = 1,
336 .channel = 0,
337 .extend_name = "supply",
338 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
339 BIT(IIO_CHAN_INFO_SCALE),
340 .address = in_supply,
341 }, {
342 .type = IIO_ACCEL,
343 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
344 BIT(IIO_CHAN_INFO_OFFSET) |
345 BIT(IIO_CHAN_INFO_SCALE) |
346 BIT(IIO_CHAN_INFO_PEAK),
347 .address = accel,
348 }, {
349 .type = IIO_TEMP,
350 .indexed = 1,
351 .channel = 0,
352 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
353 BIT(IIO_CHAN_INFO_OFFSET) |
354 BIT(IIO_CHAN_INFO_SCALE),
355 .address = temp,
356 }, {
357 .type = IIO_VOLTAGE,
358 .indexed = 1,
359 .channel = 1,
360 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
361 BIT(IIO_CHAN_INFO_OFFSET) |
362 BIT(IIO_CHAN_INFO_SCALE),
363 .address = in_1,
364 }, {
365 .type = IIO_VOLTAGE,
366 .indexed = 1,
367 .channel = 2,
368 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
369 .address = in_2,
370 }
371};
372
373static struct attribute *adis16220_attributes[] = {
374 &iio_dev_attr_capture.dev_attr.attr,
375 &iio_dev_attr_capture_count.dev_attr.attr,
376 NULL
377};
378
379static const struct attribute_group adis16220_attribute_group = {
380 .attrs = adis16220_attributes,
381};
382
383static const struct iio_info adis16220_info = {
384 .attrs = &adis16220_attribute_group,
385 .driver_module = THIS_MODULE,
386 .read_raw = &adis16220_read_raw,
387};
388
389static const char * const adis16220_status_error_msgs[] = {
390 [ADIS16220_DIAG_STAT_VIOLATION_BIT] = "Capture period violation/interruption",
391 [ADIS16220_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
392 [ADIS16220_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
393 [ADIS16220_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
394 [ADIS16220_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
395};
396
397static const struct adis_data adis16220_data = {
398 .read_delay = 35,
399 .write_delay = 35,
400 .msc_ctrl_reg = ADIS16220_MSC_CTRL,
401 .glob_cmd_reg = ADIS16220_GLOB_CMD,
402 .diag_stat_reg = ADIS16220_DIAG_STAT,
403
404 .self_test_mask = ADIS16220_MSC_CTRL_SELF_TEST_EN,
405 .startup_delay = ADIS16220_STARTUP_DELAY,
406
407 .status_error_msgs = adis16220_status_error_msgs,
408 .status_error_mask = BIT(ADIS16220_DIAG_STAT_VIOLATION_BIT) |
409 BIT(ADIS16220_DIAG_STAT_SPI_FAIL_BIT) |
410 BIT(ADIS16220_DIAG_STAT_FLASH_UPT_BIT) |
411 BIT(ADIS16220_DIAG_STAT_POWER_HIGH_BIT) |
412 BIT(ADIS16220_DIAG_STAT_POWER_LOW_BIT),
413};
414
415static int adis16220_probe(struct spi_device *spi)
416{
417 int ret;
418 struct adis16220_state *st;
419 struct iio_dev *indio_dev;
420
421 /* setup the industrialio driver allocated elements */
422 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
423 if (!indio_dev)
424 return -ENOMEM;
425
426 st = iio_priv(indio_dev);
427 /* this is only used for removal purposes */
428 spi_set_drvdata(spi, indio_dev);
429
430 indio_dev->name = spi->dev.driver->name;
431 indio_dev->dev.parent = &spi->dev;
432 indio_dev->info = &adis16220_info;
433 indio_dev->modes = INDIO_DIRECT_MODE;
434 indio_dev->channels = adis16220_channels;
435 indio_dev->num_channels = ARRAY_SIZE(adis16220_channels);
436
437 ret = devm_iio_device_register(&spi->dev, indio_dev);
438 if (ret)
439 return ret;
440
441 ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &accel_bin);
442 if (ret)
443 return ret;
444
445 ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc1_bin);
446 if (ret)
447 goto error_rm_accel_bin;
448
449 ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc2_bin);
450 if (ret)
451 goto error_rm_adc1_bin;
452
453 ret = adis_init(&st->adis, indio_dev, spi, &adis16220_data);
454 if (ret)
455 goto error_rm_adc2_bin;
456 /* Get the device into a sane initial state */
457 ret = adis_initial_startup(&st->adis);
458 if (ret)
459 goto error_rm_adc2_bin;
460 return 0;
461
462error_rm_adc2_bin:
463 sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
464error_rm_adc1_bin:
465 sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
466error_rm_accel_bin:
467 sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
468 return ret;
469}
470
471static int adis16220_remove(struct spi_device *spi)
472{
473 struct iio_dev *indio_dev = spi_get_drvdata(spi);
474
475 sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
476 sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
477 sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
478
479 return 0;
480}
481
482static struct spi_driver adis16220_driver = {
483 .driver = {
484 .name = "adis16220",
485 },
486 .probe = adis16220_probe,
487 .remove = adis16220_remove,
488};
489module_spi_driver(adis16220_driver);
490
491MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
492MODULE_DESCRIPTION("Analog Devices ADIS16220 Digital Vibration Sensor");
493MODULE_LICENSE("GPL v2");
494MODULE_ALIAS("spi:adis16220");
diff --git a/drivers/staging/iio/accel/adis16240.h b/drivers/staging/iio/accel/adis16240.h
index 66b5ad2f42c5..b2cb37b95913 100644
--- a/drivers/staging/iio/accel/adis16240.h
+++ b/drivers/staging/iio/accel/adis16240.h
@@ -5,110 +5,160 @@
5 5
6/* Flash memory write count */ 6/* Flash memory write count */
7#define ADIS16240_FLASH_CNT 0x00 7#define ADIS16240_FLASH_CNT 0x00
8
8/* Output, power supply */ 9/* Output, power supply */
9#define ADIS16240_SUPPLY_OUT 0x02 10#define ADIS16240_SUPPLY_OUT 0x02
11
10/* Output, x-axis accelerometer */ 12/* Output, x-axis accelerometer */
11#define ADIS16240_XACCL_OUT 0x04 13#define ADIS16240_XACCL_OUT 0x04
14
12/* Output, y-axis accelerometer */ 15/* Output, y-axis accelerometer */
13#define ADIS16240_YACCL_OUT 0x06 16#define ADIS16240_YACCL_OUT 0x06
17
14/* Output, z-axis accelerometer */ 18/* Output, z-axis accelerometer */
15#define ADIS16240_ZACCL_OUT 0x08 19#define ADIS16240_ZACCL_OUT 0x08
20
16/* Output, auxiliary ADC input */ 21/* Output, auxiliary ADC input */
17#define ADIS16240_AUX_ADC 0x0A 22#define ADIS16240_AUX_ADC 0x0A
23
18/* Output, temperature */ 24/* Output, temperature */
19#define ADIS16240_TEMP_OUT 0x0C 25#define ADIS16240_TEMP_OUT 0x0C
26
20/* Output, x-axis acceleration peak */ 27/* Output, x-axis acceleration peak */
21#define ADIS16240_XPEAK_OUT 0x0E 28#define ADIS16240_XPEAK_OUT 0x0E
29
22/* Output, y-axis acceleration peak */ 30/* Output, y-axis acceleration peak */
23#define ADIS16240_YPEAK_OUT 0x10 31#define ADIS16240_YPEAK_OUT 0x10
32
24/* Output, z-axis acceleration peak */ 33/* Output, z-axis acceleration peak */
25#define ADIS16240_ZPEAK_OUT 0x12 34#define ADIS16240_ZPEAK_OUT 0x12
35
26/* Output, sum-of-squares acceleration peak */ 36/* Output, sum-of-squares acceleration peak */
27#define ADIS16240_XYZPEAK_OUT 0x14 37#define ADIS16240_XYZPEAK_OUT 0x14
38
28/* Output, Capture Buffer 1, X and Y acceleration */ 39/* Output, Capture Buffer 1, X and Y acceleration */
29#define ADIS16240_CAPT_BUF1 0x16 40#define ADIS16240_CAPT_BUF1 0x16
41
30/* Output, Capture Buffer 2, Z acceleration */ 42/* Output, Capture Buffer 2, Z acceleration */
31#define ADIS16240_CAPT_BUF2 0x18 43#define ADIS16240_CAPT_BUF2 0x18
44
32/* Diagnostic, error flags */ 45/* Diagnostic, error flags */
33#define ADIS16240_DIAG_STAT 0x1A 46#define ADIS16240_DIAG_STAT 0x1A
47
34/* Diagnostic, event counter */ 48/* Diagnostic, event counter */
35#define ADIS16240_EVNT_CNTR 0x1C 49#define ADIS16240_EVNT_CNTR 0x1C
50
36/* Diagnostic, check sum value from firmware test */ 51/* Diagnostic, check sum value from firmware test */
37#define ADIS16240_CHK_SUM 0x1E 52#define ADIS16240_CHK_SUM 0x1E
53
38/* Calibration, x-axis acceleration offset adjustment */ 54/* Calibration, x-axis acceleration offset adjustment */
39#define ADIS16240_XACCL_OFF 0x20 55#define ADIS16240_XACCL_OFF 0x20
56
40/* Calibration, y-axis acceleration offset adjustment */ 57/* Calibration, y-axis acceleration offset adjustment */
41#define ADIS16240_YACCL_OFF 0x22 58#define ADIS16240_YACCL_OFF 0x22
59
42/* Calibration, z-axis acceleration offset adjustment */ 60/* Calibration, z-axis acceleration offset adjustment */
43#define ADIS16240_ZACCL_OFF 0x24 61#define ADIS16240_ZACCL_OFF 0x24
62
44/* Clock, hour and minute */ 63/* Clock, hour and minute */
45#define ADIS16240_CLK_TIME 0x2E 64#define ADIS16240_CLK_TIME 0x2E
65
46/* Clock, month and day */ 66/* Clock, month and day */
47#define ADIS16240_CLK_DATE 0x30 67#define ADIS16240_CLK_DATE 0x30
68
48/* Clock, year */ 69/* Clock, year */
49#define ADIS16240_CLK_YEAR 0x32 70#define ADIS16240_CLK_YEAR 0x32
71
50/* Wake-up setting, hour and minute */ 72/* Wake-up setting, hour and minute */
51#define ADIS16240_WAKE_TIME 0x34 73#define ADIS16240_WAKE_TIME 0x34
74
52/* Wake-up setting, month and day */ 75/* Wake-up setting, month and day */
53#define ADIS16240_WAKE_DATE 0x36 76#define ADIS16240_WAKE_DATE 0x36
77
54/* Alarm 1 amplitude threshold */ 78/* Alarm 1 amplitude threshold */
55#define ADIS16240_ALM_MAG1 0x38 79#define ADIS16240_ALM_MAG1 0x38
80
56/* Alarm 2 amplitude threshold */ 81/* Alarm 2 amplitude threshold */
57#define ADIS16240_ALM_MAG2 0x3A 82#define ADIS16240_ALM_MAG2 0x3A
83
58/* Alarm control */ 84/* Alarm control */
59#define ADIS16240_ALM_CTRL 0x3C 85#define ADIS16240_ALM_CTRL 0x3C
86
60/* Capture, external trigger control */ 87/* Capture, external trigger control */
61#define ADIS16240_XTRIG_CTRL 0x3E 88#define ADIS16240_XTRIG_CTRL 0x3E
89
62/* Capture, address pointer */ 90/* Capture, address pointer */
63#define ADIS16240_CAPT_PNTR 0x40 91#define ADIS16240_CAPT_PNTR 0x40
92
64/* Capture, configuration and control */ 93/* Capture, configuration and control */
65#define ADIS16240_CAPT_CTRL 0x42 94#define ADIS16240_CAPT_CTRL 0x42
95
66/* General-purpose digital input/output control */ 96/* General-purpose digital input/output control */
67#define ADIS16240_GPIO_CTRL 0x44 97#define ADIS16240_GPIO_CTRL 0x44
98
68/* Miscellaneous control */ 99/* Miscellaneous control */
69#define ADIS16240_MSC_CTRL 0x46 100#define ADIS16240_MSC_CTRL 0x46
101
70/* Internal sample period (rate) control */ 102/* Internal sample period (rate) control */
71#define ADIS16240_SMPL_PRD 0x48 103#define ADIS16240_SMPL_PRD 0x48
104
72/* System command */ 105/* System command */
73#define ADIS16240_GLOB_CMD 0x4A 106#define ADIS16240_GLOB_CMD 0x4A
74 107
75/* MSC_CTRL */ 108/* MSC_CTRL */
109
76/* Enables sum-of-squares output (XYZPEAK_OUT) */ 110/* Enables sum-of-squares output (XYZPEAK_OUT) */
77#define ADIS16240_MSC_CTRL_XYZPEAK_OUT_EN BIT(15) 111#define ADIS16240_MSC_CTRL_XYZPEAK_OUT_EN BIT(15)
112
78/* Enables peak tracking output (XPEAK_OUT, YPEAK_OUT, and ZPEAK_OUT) */ 113/* Enables peak tracking output (XPEAK_OUT, YPEAK_OUT, and ZPEAK_OUT) */
79#define ADIS16240_MSC_CTRL_X_Y_ZPEAK_OUT_EN BIT(14) 114#define ADIS16240_MSC_CTRL_X_Y_ZPEAK_OUT_EN BIT(14)
115
80/* Self-test enable: 1 = apply electrostatic force, 0 = disabled */ 116/* Self-test enable: 1 = apply electrostatic force, 0 = disabled */
81#define ADIS16240_MSC_CTRL_SELF_TEST_EN BIT(8) 117#define ADIS16240_MSC_CTRL_SELF_TEST_EN BIT(8)
118
82/* Data-ready enable: 1 = enabled, 0 = disabled */ 119/* Data-ready enable: 1 = enabled, 0 = disabled */
83#define ADIS16240_MSC_CTRL_DATA_RDY_EN BIT(2) 120#define ADIS16240_MSC_CTRL_DATA_RDY_EN BIT(2)
121
84/* Data-ready polarity: 1 = active high, 0 = active low */ 122/* Data-ready polarity: 1 = active high, 0 = active low */
85#define ADIS16240_MSC_CTRL_ACTIVE_HIGH BIT(1) 123#define ADIS16240_MSC_CTRL_ACTIVE_HIGH BIT(1)
124
86/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */ 125/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
87#define ADIS16240_MSC_CTRL_DATA_RDY_DIO2 BIT(0) 126#define ADIS16240_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
88 127
89/* DIAG_STAT */ 128/* DIAG_STAT */
129
90/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */ 130/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
91#define ADIS16240_DIAG_STAT_ALARM2 BIT(9) 131#define ADIS16240_DIAG_STAT_ALARM2 BIT(9)
132
92/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */ 133/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
93#define ADIS16240_DIAG_STAT_ALARM1 BIT(8) 134#define ADIS16240_DIAG_STAT_ALARM1 BIT(8)
135
94/* Capture buffer full: 1 = capture buffer is full */ 136/* Capture buffer full: 1 = capture buffer is full */
95#define ADIS16240_DIAG_STAT_CPT_BUF_FUL BIT(7) 137#define ADIS16240_DIAG_STAT_CPT_BUF_FUL BIT(7)
138
96/* Flash test, checksum flag: 1 = mismatch, 0 = match */ 139/* Flash test, checksum flag: 1 = mismatch, 0 = match */
97#define ADIS16240_DIAG_STAT_CHKSUM BIT(6) 140#define ADIS16240_DIAG_STAT_CHKSUM BIT(6)
141
98/* Power-on, self-test flag: 1 = failure, 0 = pass */ 142/* Power-on, self-test flag: 1 = failure, 0 = pass */
99#define ADIS16240_DIAG_STAT_PWRON_FAIL_BIT 5 143#define ADIS16240_DIAG_STAT_PWRON_FAIL_BIT 5
144
100/* Power-on self-test: 1 = in-progress, 0 = complete */ 145/* Power-on self-test: 1 = in-progress, 0 = complete */
101#define ADIS16240_DIAG_STAT_PWRON_BUSY BIT(4) 146#define ADIS16240_DIAG_STAT_PWRON_BUSY BIT(4)
147
102/* SPI communications failure */ 148/* SPI communications failure */
103#define ADIS16240_DIAG_STAT_SPI_FAIL_BIT 3 149#define ADIS16240_DIAG_STAT_SPI_FAIL_BIT 3
150
104/* Flash update failure */ 151/* Flash update failure */
105#define ADIS16240_DIAG_STAT_FLASH_UPT_BIT 2 152#define ADIS16240_DIAG_STAT_FLASH_UPT_BIT 2
153
106/* Power supply above 3.625 V */ 154/* Power supply above 3.625 V */
107#define ADIS16240_DIAG_STAT_POWER_HIGH_BIT 1 155#define ADIS16240_DIAG_STAT_POWER_HIGH_BIT 1
156
108 /* Power supply below 3.15 V */ 157 /* Power supply below 3.15 V */
109#define ADIS16240_DIAG_STAT_POWER_LOW_BIT 0 158#define ADIS16240_DIAG_STAT_POWER_LOW_BIT 0
110 159
111/* GLOB_CMD */ 160/* GLOB_CMD */
161
112#define ADIS16240_GLOB_CMD_RESUME BIT(8) 162#define ADIS16240_GLOB_CMD_RESUME BIT(8)
113#define ADIS16240_GLOB_CMD_SW_RESET BIT(7) 163#define ADIS16240_GLOB_CMD_SW_RESET BIT(7)
114#define ADIS16240_GLOB_CMD_STANDBY BIT(2) 164#define ADIS16240_GLOB_CMD_STANDBY BIT(2)
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index 1b5b685a8691..d5b99e610d08 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -29,13 +29,13 @@
29static ssize_t adis16240_spi_read_signed(struct device *dev, 29static ssize_t adis16240_spi_read_signed(struct device *dev,
30 struct device_attribute *attr, 30 struct device_attribute *attr,
31 char *buf, 31 char *buf,
32 unsigned bits) 32 unsigned int bits)
33{ 33{
34 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 34 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
35 struct adis *st = iio_priv(indio_dev); 35 struct adis *st = iio_priv(indio_dev);
36 int ret; 36 int ret;
37 s16 val = 0; 37 s16 val = 0;
38 unsigned shift = 16 - bits; 38 unsigned int shift = 16 - bits;
39 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 39 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
40 40
41 ret = adis_read_reg_16(st, 41 ret = adis_read_reg_16(st,
@@ -222,6 +222,7 @@ static const struct adis_data adis16240_data = {
222 .diag_stat_reg = ADIS16240_DIAG_STAT, 222 .diag_stat_reg = ADIS16240_DIAG_STAT,
223 223
224 .self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN, 224 .self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN,
225 .self_test_no_autoclear = true,
225 .startup_delay = ADIS16240_STARTUP_DELAY, 226 .startup_delay = ADIS16240_STARTUP_DELAY,
226 227
227 .status_error_msgs = adis16240_status_error_msgs, 228 .status_error_msgs = adis16240_status_error_msgs,
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index f843f19cf675..1cf6b79801a9 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -35,10 +35,10 @@
35#define AD7192_REG_DATA 3 /* Data Register (RO, 24/32-bit) */ 35#define AD7192_REG_DATA 3 /* Data Register (RO, 24/32-bit) */
36#define AD7192_REG_ID 4 /* ID Register (RO, 8-bit) */ 36#define AD7192_REG_ID 4 /* ID Register (RO, 8-bit) */
37#define AD7192_REG_GPOCON 5 /* GPOCON Register (RO, 8-bit) */ 37#define AD7192_REG_GPOCON 5 /* GPOCON Register (RO, 8-bit) */
38#define AD7192_REG_OFFSET 6 /* Offset Register (RW, 16-bit 38#define AD7192_REG_OFFSET 6 /* Offset Register (RW, 16-bit */
39 * (AD7792)/24-bit (AD7192)) */ 39 /* (AD7792)/24-bit (AD7192)) */
40#define AD7192_REG_FULLSALE 7 /* Full-Scale Register 40#define AD7192_REG_FULLSALE 7 /* Full-Scale Register */
41 * (RW, 16-bit (AD7792)/24-bit (AD7192)) */ 41 /* (RW, 16-bit (AD7792)/24-bit (AD7192)) */
42 42
43/* Communications Register Bit Designations (AD7192_REG_COMM) */ 43/* Communications Register Bit Designations (AD7192_REG_COMM) */
44#define AD7192_COMM_WEN BIT(7) /* Write Enable */ 44#define AD7192_COMM_WEN BIT(7) /* Write Enable */
@@ -80,13 +80,13 @@
80#define AD7192_MODE_CAL_SYS_FULL 7 /* System Full-Scale Calibration */ 80#define AD7192_MODE_CAL_SYS_FULL 7 /* System Full-Scale Calibration */
81 81
82/* Mode Register: AD7192_MODE_CLKSRC options */ 82/* Mode Register: AD7192_MODE_CLKSRC options */
83#define AD7192_CLK_EXT_MCLK1_2 0 /* External 4.92 MHz Clock connected 83#define AD7192_CLK_EXT_MCLK1_2 0 /* External 4.92 MHz Clock connected*/
84 * from MCLK1 to MCLK2 */ 84 /* from MCLK1 to MCLK2 */
85#define AD7192_CLK_EXT_MCLK2 1 /* External Clock applied to MCLK2 */ 85#define AD7192_CLK_EXT_MCLK2 1 /* External Clock applied to MCLK2 */
86#define AD7192_CLK_INT 2 /* Internal 4.92 MHz Clock not 86#define AD7192_CLK_INT 2 /* Internal 4.92 MHz Clock not */
87 * available at the MCLK2 pin */ 87 /* available at the MCLK2 pin */
88#define AD7192_CLK_INT_CO 3 /* Internal 4.92 MHz Clock available 88#define AD7192_CLK_INT_CO 3 /* Internal 4.92 MHz Clock available*/
89 * at the MCLK2 pin */ 89 /* at the MCLK2 pin */
90 90
91/* Configuration Register Bit Designations (AD7192_REG_CONF) */ 91/* Configuration Register Bit Designations (AD7192_REG_CONF) */
92 92
@@ -349,11 +349,9 @@ static ssize_t ad7192_write_frequency(struct device *dev,
349 if (lval == 0) 349 if (lval == 0)
350 return -EINVAL; 350 return -EINVAL;
351 351
352 mutex_lock(&indio_dev->mlock); 352 ret = iio_device_claim_direct_mode(indio_dev);
353 if (iio_buffer_enabled(indio_dev)) { 353 if (ret)
354 mutex_unlock(&indio_dev->mlock); 354 return ret;
355 return -EBUSY;
356 }
357 355
358 div = st->mclk / (lval * st->f_order * 1024); 356 div = st->mclk / (lval * st->f_order * 1024);
359 if (div < 1 || div > 1023) { 357 if (div < 1 || div > 1023) {
@@ -366,7 +364,7 @@ static ssize_t ad7192_write_frequency(struct device *dev,
366 ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode); 364 ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
367 365
368out: 366out:
369 mutex_unlock(&indio_dev->mlock); 367 iio_device_release_direct_mode(indio_dev);
370 368
371 return ret ? ret : len; 369 return ret ? ret : len;
372} 370}
@@ -434,11 +432,9 @@ static ssize_t ad7192_set(struct device *dev,
434 if (ret < 0) 432 if (ret < 0)
435 return ret; 433 return ret;
436 434
437 mutex_lock(&indio_dev->mlock); 435 ret = iio_device_claim_direct_mode(indio_dev);
438 if (iio_buffer_enabled(indio_dev)) { 436 if (ret)
439 mutex_unlock(&indio_dev->mlock); 437 return ret;
440 return -EBUSY;
441 }
442 438
443 switch ((u32)this_attr->address) { 439 switch ((u32)this_attr->address) {
444 case AD7192_REG_GPOCON: 440 case AD7192_REG_GPOCON:
@@ -461,7 +457,7 @@ static ssize_t ad7192_set(struct device *dev,
461 ret = -EINVAL; 457 ret = -EINVAL;
462 } 458 }
463 459
464 mutex_unlock(&indio_dev->mlock); 460 iio_device_release_direct_mode(indio_dev);
465 461
466 return ret ? ret : len; 462 return ret ? ret : len;
467} 463}
@@ -555,11 +551,9 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
555 int ret, i; 551 int ret, i;
556 unsigned int tmp; 552 unsigned int tmp;
557 553
558 mutex_lock(&indio_dev->mlock); 554 ret = iio_device_claim_direct_mode(indio_dev);
559 if (iio_buffer_enabled(indio_dev)) { 555 if (ret)
560 mutex_unlock(&indio_dev->mlock); 556 return ret;
561 return -EBUSY;
562 }
563 557
564 switch (mask) { 558 switch (mask) {
565 case IIO_CHAN_INFO_SCALE: 559 case IIO_CHAN_INFO_SCALE:
@@ -582,7 +576,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
582 ret = -EINVAL; 576 ret = -EINVAL;
583 } 577 }
584 578
585 mutex_unlock(&indio_dev->mlock); 579 iio_device_release_direct_mode(indio_dev);
586 580
587 return ret; 581 return ret;
588} 582}
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 62e5ecacf634..a06b46cb81ca 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -155,7 +155,7 @@ static void ad7280_crc8_build_table(unsigned char *crc_tab)
155 } 155 }
156} 156}
157 157
158static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned val) 158static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned int val)
159{ 159{
160 unsigned char crc; 160 unsigned char crc;
161 161
@@ -165,7 +165,7 @@ static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned val)
165 return crc ^ (val & 0xFF); 165 return crc ^ (val & 0xFF);
166} 166}
167 167
168static int ad7280_check_crc(struct ad7280_state *st, unsigned val) 168static int ad7280_check_crc(struct ad7280_state *st, unsigned int val)
169{ 169{
170 unsigned char crc = ad7280_calc_crc8(st->crc_tab, val >> 10); 170 unsigned char crc = ad7280_calc_crc8(st->crc_tab, val >> 10);
171 171
@@ -191,7 +191,7 @@ static void ad7280_delay(struct ad7280_state *st)
191 usleep_range(250, 500); 191 usleep_range(250, 500);
192} 192}
193 193
194static int __ad7280_read32(struct ad7280_state *st, unsigned *val) 194static int __ad7280_read32(struct ad7280_state *st, unsigned int *val)
195{ 195{
196 int ret; 196 int ret;
197 struct spi_transfer t = { 197 struct spi_transfer t = {
@@ -211,10 +211,10 @@ static int __ad7280_read32(struct ad7280_state *st, unsigned *val)
211 return 0; 211 return 0;
212} 212}
213 213
214static int ad7280_write(struct ad7280_state *st, unsigned devaddr, 214static int ad7280_write(struct ad7280_state *st, unsigned int devaddr,
215 unsigned addr, bool all, unsigned val) 215 unsigned int addr, bool all, unsigned int val)
216{ 216{
217 unsigned reg = devaddr << 27 | addr << 21 | 217 unsigned int reg = devaddr << 27 | addr << 21 |
218 (val & 0xFF) << 13 | all << 12; 218 (val & 0xFF) << 13 | all << 12;
219 219
220 reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2; 220 reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2;
@@ -223,11 +223,11 @@ static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
223 return spi_write(st->spi, &st->buf[0], 4); 223 return spi_write(st->spi, &st->buf[0], 4);
224} 224}
225 225
226static int ad7280_read(struct ad7280_state *st, unsigned devaddr, 226static int ad7280_read(struct ad7280_state *st, unsigned int devaddr,
227 unsigned addr) 227 unsigned int addr)
228{ 228{
229 int ret; 229 int ret;
230 unsigned tmp; 230 unsigned int tmp;
231 231
232 /* turns off the read operation on all parts */ 232 /* turns off the read operation on all parts */
233 ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1, 233 ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
@@ -261,11 +261,11 @@ static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
261 return (tmp >> 13) & 0xFF; 261 return (tmp >> 13) & 0xFF;
262} 262}
263 263
264static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr, 264static int ad7280_read_channel(struct ad7280_state *st, unsigned int devaddr,
265 unsigned addr) 265 unsigned int addr)
266{ 266{
267 int ret; 267 int ret;
268 unsigned tmp; 268 unsigned int tmp;
269 269
270 ret = ad7280_write(st, devaddr, AD7280A_READ, 0, addr << 2); 270 ret = ad7280_write(st, devaddr, AD7280A_READ, 0, addr << 2);
271 if (ret) 271 if (ret)
@@ -299,11 +299,11 @@ static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,
299 return (tmp >> 11) & 0xFFF; 299 return (tmp >> 11) & 0xFFF;
300} 300}
301 301
302static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt, 302static int ad7280_read_all_channels(struct ad7280_state *st, unsigned int cnt,
303 unsigned *array) 303 unsigned int *array)
304{ 304{
305 int i, ret; 305 int i, ret;
306 unsigned tmp, sum = 0; 306 unsigned int tmp, sum = 0;
307 307
308 ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_READ, 1, 308 ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_READ, 1,
309 AD7280A_CELL_VOLTAGE_1 << 2); 309 AD7280A_CELL_VOLTAGE_1 << 2);
@@ -338,7 +338,7 @@ static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
338 338
339static int ad7280_chain_setup(struct ad7280_state *st) 339static int ad7280_chain_setup(struct ad7280_state *st)
340{ 340{
341 unsigned val, n; 341 unsigned int val, n;
342 int ret; 342 int ret;
343 343
344 ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_LB, 1, 344 ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_LB, 1,
@@ -401,7 +401,7 @@ static ssize_t ad7280_store_balance_sw(struct device *dev,
401 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 401 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
402 bool readin; 402 bool readin;
403 int ret; 403 int ret;
404 unsigned devaddr, ch; 404 unsigned int devaddr, ch;
405 405
406 ret = strtobool(buf, &readin); 406 ret = strtobool(buf, &readin);
407 if (ret) 407 if (ret)
@@ -431,7 +431,7 @@ static ssize_t ad7280_show_balance_timer(struct device *dev,
431 struct ad7280_state *st = iio_priv(indio_dev); 431 struct ad7280_state *st = iio_priv(indio_dev);
432 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 432 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
433 int ret; 433 int ret;
434 unsigned msecs; 434 unsigned int msecs;
435 435
436 mutex_lock(&indio_dev->mlock); 436 mutex_lock(&indio_dev->mlock);
437 ret = ad7280_read(st, this_attr->address >> 8, 437 ret = ad7280_read(st, this_attr->address >> 8,
@@ -602,7 +602,7 @@ static ssize_t ad7280_read_channel_config(struct device *dev,
602 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 602 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
603 struct ad7280_state *st = iio_priv(indio_dev); 603 struct ad7280_state *st = iio_priv(indio_dev);
604 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 604 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
605 unsigned val; 605 unsigned int val;
606 606
607 switch ((u32)this_attr->address) { 607 switch ((u32)this_attr->address) {
608 case AD7280A_CELL_OVERVOLTAGE: 608 case AD7280A_CELL_OVERVOLTAGE:
@@ -683,7 +683,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
683{ 683{
684 struct iio_dev *indio_dev = private; 684 struct iio_dev *indio_dev = private;
685 struct ad7280_state *st = iio_priv(indio_dev); 685 struct ad7280_state *st = iio_priv(indio_dev);
686 unsigned *channels; 686 unsigned int *channels;
687 int i, ret; 687 int i, ret;
688 688
689 channels = kcalloc(st->scan_cnt, sizeof(*channels), GFP_KERNEL); 689 channels = kcalloc(st->scan_cnt, sizeof(*channels), GFP_KERNEL);
diff --git a/drivers/staging/iio/adc/ad7280a.h b/drivers/staging/iio/adc/ad7280a.h
index 732347a9bce4..ccfb90d20e71 100644
--- a/drivers/staging/iio/adc/ad7280a.h
+++ b/drivers/staging/iio/adc/ad7280a.h
@@ -29,10 +29,10 @@
29#define AD7280A_ALERT_REMOVE_AUX4_AUX5 BIT(1) 29#define AD7280A_ALERT_REMOVE_AUX4_AUX5 BIT(1)
30 30
31struct ad7280_platform_data { 31struct ad7280_platform_data {
32 unsigned acquisition_time; 32 unsigned int acquisition_time;
33 unsigned conversion_averaging; 33 unsigned int conversion_averaging;
34 unsigned chain_last_alert_ignore; 34 unsigned int chain_last_alert_ignore;
35 bool thermistor_term_en; 35 bool thermistor_term_en;
36}; 36};
37 37
38#endif /* IIO_ADC_AD7280_H_ */ 38#endif /* IIO_ADC_AD7280_H_ */
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index cca946924c58..39f50440d915 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -28,16 +28,16 @@
28 */ 28 */
29 29
30struct ad7606_platform_data { 30struct ad7606_platform_data {
31 unsigned default_os; 31 unsigned int default_os;
32 unsigned default_range; 32 unsigned int default_range;
33 unsigned gpio_convst; 33 unsigned int gpio_convst;
34 unsigned gpio_reset; 34 unsigned int gpio_reset;
35 unsigned gpio_range; 35 unsigned int gpio_range;
36 unsigned gpio_os0; 36 unsigned int gpio_os0;
37 unsigned gpio_os1; 37 unsigned int gpio_os1;
38 unsigned gpio_os2; 38 unsigned int gpio_os2;
39 unsigned gpio_frstdata; 39 unsigned int gpio_frstdata;
40 unsigned gpio_stby; 40 unsigned int gpio_stby;
41}; 41};
42 42
43/** 43/**
@@ -52,7 +52,7 @@ struct ad7606_chip_info {
52 const char *name; 52 const char *name;
53 u16 int_vref_mv; 53 u16 int_vref_mv;
54 const struct iio_chan_spec *channels; 54 const struct iio_chan_spec *channels;
55 unsigned num_channels; 55 unsigned int num_channels;
56}; 56};
57 57
58/** 58/**
@@ -67,8 +67,8 @@ struct ad7606_state {
67 struct work_struct poll_work; 67 struct work_struct poll_work;
68 wait_queue_head_t wq_data_avail; 68 wait_queue_head_t wq_data_avail;
69 const struct ad7606_bus_ops *bops; 69 const struct ad7606_bus_ops *bops;
70 unsigned range; 70 unsigned int range;
71 unsigned oversampling; 71 unsigned int oversampling;
72 bool done; 72 bool done;
73 void __iomem *base_address; 73 void __iomem *base_address;
74 74
@@ -86,7 +86,7 @@ struct ad7606_bus_ops {
86}; 86};
87 87
88struct iio_dev *ad7606_probe(struct device *dev, int irq, 88struct iio_dev *ad7606_probe(struct device *dev, int irq,
89 void __iomem *base_address, unsigned id, 89 void __iomem *base_address, unsigned int id,
90 const struct ad7606_bus_ops *bops); 90 const struct ad7606_bus_ops *bops);
91int ad7606_remove(struct iio_dev *indio_dev, int irq); 91int ad7606_remove(struct iio_dev *indio_dev, int irq);
92int ad7606_reset(struct ad7606_state *st); 92int ad7606_reset(struct ad7606_state *st);
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index fe6caeee0843..f79ee61851f6 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -36,7 +36,7 @@ int ad7606_reset(struct ad7606_state *st)
36 return -ENODEV; 36 return -ENODEV;
37} 37}
38 38
39static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned ch) 39static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
40{ 40{
41 struct ad7606_state *st = iio_priv(indio_dev); 41 struct ad7606_state *st = iio_priv(indio_dev);
42 int ret; 42 int ret;
@@ -88,12 +88,12 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
88 88
89 switch (m) { 89 switch (m) {
90 case IIO_CHAN_INFO_RAW: 90 case IIO_CHAN_INFO_RAW:
91 mutex_lock(&indio_dev->mlock); 91 ret = iio_device_claim_direct_mode(indio_dev);
92 if (iio_buffer_enabled(indio_dev)) 92 if (ret)
93 ret = -EBUSY; 93 return ret;
94 else 94
95 ret = ad7606_scan_direct(indio_dev, chan->address); 95 ret = ad7606_scan_direct(indio_dev, chan->address);
96 mutex_unlock(&indio_dev->mlock); 96 iio_device_release_direct_mode(indio_dev);
97 97
98 if (ret < 0) 98 if (ret < 0)
99 return ret; 99 return ret;
@@ -155,7 +155,7 @@ static ssize_t ad7606_show_oversampling_ratio(struct device *dev,
155 return sprintf(buf, "%u\n", st->oversampling); 155 return sprintf(buf, "%u\n", st->oversampling);
156} 156}
157 157
158static int ad7606_oversampling_get_index(unsigned val) 158static int ad7606_oversampling_get_index(unsigned int val)
159{ 159{
160 unsigned char supported[] = {0, 2, 4, 8, 16, 32, 64}; 160 unsigned char supported[] = {0, 2, 4, 8, 16, 32, 64};
161 int i; 161 int i;
@@ -446,7 +446,7 @@ static const struct iio_info ad7606_info_range = {
446 446
447struct iio_dev *ad7606_probe(struct device *dev, int irq, 447struct iio_dev *ad7606_probe(struct device *dev, int irq,
448 void __iomem *base_address, 448 void __iomem *base_address,
449 unsigned id, 449 unsigned int id,
450 const struct ad7606_bus_ops *bops) 450 const struct ad7606_bus_ops *bops)
451{ 451{
452 struct ad7606_platform_data *pdata = dev->platform_data; 452 struct ad7606_platform_data *pdata = dev->platform_data;
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index d873a5164595..825da0769936 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -21,7 +21,8 @@ static int ad7606_spi_read_block(struct device *dev,
21{ 21{
22 struct spi_device *spi = to_spi_device(dev); 22 struct spi_device *spi = to_spi_device(dev);
23 int i, ret; 23 int i, ret;
24 unsigned short *data = buf; 24 unsigned short *data;
25 __be16 *bdata = buf;
25 26
26 ret = spi_read(spi, buf, count * 2); 27 ret = spi_read(spi, buf, count * 2);
27 if (ret < 0) { 28 if (ret < 0) {
@@ -30,7 +31,7 @@ static int ad7606_spi_read_block(struct device *dev,
30 } 31 }
31 32
32 for (i = 0; i < count; i++) 33 for (i = 0; i < count; i++)
33 data[i] = be16_to_cpu(data[i]); 34 data[i] = be16_to_cpu(bdata[i]);
34 35
35 return 0; 36 return 0;
36} 37}
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index 1439cfdbb09c..c9a0c2aa602f 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -63,7 +63,7 @@ static int ad7780_set_mode(struct ad_sigma_delta *sigma_delta,
63 enum ad_sigma_delta_mode mode) 63 enum ad_sigma_delta_mode mode)
64{ 64{
65 struct ad7780_state *st = ad_sigma_delta_to_ad7780(sigma_delta); 65 struct ad7780_state *st = ad_sigma_delta_to_ad7780(sigma_delta);
66 unsigned val; 66 unsigned int val;
67 67
68 switch (mode) { 68 switch (mode) {
69 case AD_SD_MODE_SINGLE: 69 case AD_SD_MODE_SINGLE:
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 18b27a1984b2..358400b22d33 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -31,7 +31,7 @@ static unsigned long ad9832_calc_freqreg(unsigned long mclk, unsigned long fout)
31} 31}
32 32
33static int ad9832_write_frequency(struct ad9832_state *st, 33static int ad9832_write_frequency(struct ad9832_state *st,
34 unsigned addr, unsigned long fout) 34 unsigned int addr, unsigned long fout)
35{ 35{
36 unsigned long regval; 36 unsigned long regval;
37 37
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index d1218d896725..9f43976f4ef2 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -12,20 +12,16 @@
12#include <linux/sysfs.h> 12#include <linux/sysfs.h>
13#include <linux/i2c.h> 13#include <linux/i2c.h>
14#include <linux/regulator/consumer.h> 14#include <linux/regulator/consumer.h>
15#include <linux/slab.h>
16#include <linux/types.h> 15#include <linux/types.h>
17#include <linux/err.h> 16#include <linux/err.h>
18#include <linux/delay.h> 17#include <linux/delay.h>
19#include <linux/module.h> 18#include <linux/module.h>
20#include <asm/div64.h>
21 19
22#include <linux/iio/iio.h> 20#include <linux/iio/iio.h>
23#include <linux/iio/sysfs.h> 21#include <linux/iio/sysfs.h>
24#include <linux/iio/buffer.h> 22#include <linux/iio/buffer.h>
25#include <linux/iio/kfifo_buf.h> 23#include <linux/iio/kfifo_buf.h>
26 24
27#include "ad5933.h"
28
29/* AD5933/AD5934 Registers */ 25/* AD5933/AD5934 Registers */
30#define AD5933_REG_CONTROL_HB 0x80 /* R/W, 2 bytes */ 26#define AD5933_REG_CONTROL_HB 0x80 /* R/W, 2 bytes */
31#define AD5933_REG_CONTROL_LB 0x81 /* R/W, 2 bytes */ 27#define AD5933_REG_CONTROL_LB 0x81 /* R/W, 2 bytes */
@@ -86,6 +82,18 @@
86#define AD5933_POLL_TIME_ms 10 82#define AD5933_POLL_TIME_ms 10
87#define AD5933_INIT_EXCITATION_TIME_ms 100 83#define AD5933_INIT_EXCITATION_TIME_ms 100
88 84
85/**
86 * struct ad5933_platform_data - platform specific data
87 * @ext_clk_Hz: the external clock frequency in Hz, if not set
88 * the driver uses the internal clock (16.776 MHz)
89 * @vref_mv: the external reference voltage in millivolt
90 */
91
92struct ad5933_platform_data {
93 unsigned long ext_clk_Hz;
94 unsigned short vref_mv;
95};
96
89struct ad5933_state { 97struct ad5933_state {
90 struct i2c_client *client; 98 struct i2c_client *client;
91 struct regulator *reg; 99 struct regulator *reg;
@@ -93,14 +101,14 @@ struct ad5933_state {
93 unsigned long mclk_hz; 101 unsigned long mclk_hz;
94 unsigned char ctrl_hb; 102 unsigned char ctrl_hb;
95 unsigned char ctrl_lb; 103 unsigned char ctrl_lb;
96 unsigned range_avail[4]; 104 unsigned int range_avail[4];
97 unsigned short vref_mv; 105 unsigned short vref_mv;
98 unsigned short settling_cycles; 106 unsigned short settling_cycles;
99 unsigned short freq_points; 107 unsigned short freq_points;
100 unsigned freq_start; 108 unsigned int freq_start;
101 unsigned freq_inc; 109 unsigned int freq_inc;
102 unsigned state; 110 unsigned int state;
103 unsigned poll_time_jiffies; 111 unsigned int poll_time_jiffies;
104}; 112};
105 113
106static struct ad5933_platform_data ad5933_default_pdata = { 114static struct ad5933_platform_data ad5933_default_pdata = {
@@ -214,7 +222,7 @@ static int ad5933_wait_busy(struct ad5933_state *st, unsigned char event)
214} 222}
215 223
216static int ad5933_set_freq(struct ad5933_state *st, 224static int ad5933_set_freq(struct ad5933_state *st,
217 unsigned reg, unsigned long freq) 225 unsigned int reg, unsigned long freq)
218{ 226{
219 unsigned long long freqreg; 227 unsigned long long freqreg;
220 union { 228 union {
@@ -274,7 +282,7 @@ static int ad5933_setup(struct ad5933_state *st)
274static void ad5933_calc_out_ranges(struct ad5933_state *st) 282static void ad5933_calc_out_ranges(struct ad5933_state *st)
275{ 283{
276 int i; 284 int i;
277 unsigned normalized_3v3[4] = {1980, 198, 383, 970}; 285 unsigned int normalized_3v3[4] = {1980, 198, 383, 970};
278 286
279 for (i = 0; i < 4; i++) 287 for (i = 0; i < 4; i++)
280 st->range_avail[i] = normalized_3v3[i] * st->vref_mv / 3300; 288 st->range_avail[i] = normalized_3v3[i] * st->vref_mv / 3300;
@@ -307,10 +315,10 @@ static ssize_t ad5933_show_frequency(struct device *dev,
307 315
308 freqreg = be32_to_cpu(dat.d32) & 0xFFFFFF; 316 freqreg = be32_to_cpu(dat.d32) & 0xFFFFFF;
309 317
310 freqreg = (u64) freqreg * (u64) (st->mclk_hz / 4); 318 freqreg = (u64)freqreg * (u64)(st->mclk_hz / 4);
311 do_div(freqreg, 1 << 27); 319 do_div(freqreg, 1 << 27);
312 320
313 return sprintf(buf, "%d\n", (int) freqreg); 321 return sprintf(buf, "%d\n", (int)freqreg);
314} 322}
315 323
316static ssize_t ad5933_store_frequency(struct device *dev, 324static ssize_t ad5933_store_frequency(struct device *dev,
@@ -358,7 +366,7 @@ static ssize_t ad5933_show(struct device *dev,
358 int ret = 0, len = 0; 366 int ret = 0, len = 0;
359 367
360 mutex_lock(&indio_dev->mlock); 368 mutex_lock(&indio_dev->mlock);
361 switch ((u32) this_attr->address) { 369 switch ((u32)this_attr->address) {
362 case AD5933_OUT_RANGE: 370 case AD5933_OUT_RANGE:
363 len = sprintf(buf, "%u\n", 371 len = sprintf(buf, "%u\n",
364 st->range_avail[(st->ctrl_hb >> 1) & 0x3]); 372 st->range_avail[(st->ctrl_hb >> 1) & 0x3]);
@@ -409,7 +417,7 @@ static ssize_t ad5933_store(struct device *dev,
409 } 417 }
410 418
411 mutex_lock(&indio_dev->mlock); 419 mutex_lock(&indio_dev->mlock);
412 switch ((u32) this_attr->address) { 420 switch ((u32)this_attr->address) {
413 case AD5933_OUT_RANGE: 421 case AD5933_OUT_RANGE:
414 for (i = 0; i < 4; i++) 422 for (i = 0; i < 4; i++)
415 if (val == st->range_avail[i]) { 423 if (val == st->range_avail[i]) {
@@ -683,8 +691,9 @@ static void ad5933_work(struct work_struct *work)
683 } 691 }
684 692
685 if (status & AD5933_STAT_SWEEP_DONE) { 693 if (status & AD5933_STAT_SWEEP_DONE) {
686 /* last sample received - power down do nothing until 694 /* last sample received - power down do
687 * the ring enable is toggled */ 695 * nothing until the ring enable is toggled
696 */
688 ad5933_cmd(st, AD5933_CTRL_POWER_DOWN); 697 ad5933_cmd(st, AD5933_CTRL_POWER_DOWN);
689 } else { 698 } else {
690 /* we just received a valid datum, move on to the next */ 699 /* we just received a valid datum, move on to the next */
@@ -699,7 +708,7 @@ static int ad5933_probe(struct i2c_client *client,
699 const struct i2c_device_id *id) 708 const struct i2c_device_id *id)
700{ 709{
701 int ret, voltage_uv = 0; 710 int ret, voltage_uv = 0;
702 struct ad5933_platform_data *pdata = client->dev.platform_data; 711 struct ad5933_platform_data *pdata = dev_get_platdata(&client->dev);
703 struct ad5933_state *st; 712 struct ad5933_state *st;
704 struct iio_dev *indio_dev; 713 struct iio_dev *indio_dev;
705 714
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.h b/drivers/staging/iio/impedance-analyzer/ad5933.h
deleted file mode 100644
index b140e42d67cf..000000000000
--- a/drivers/staging/iio/impedance-analyzer/ad5933.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * AD5933 AD5934 Impedance Converter, Network Analyzer
3 *
4 * Copyright 2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2.
7 */
8
9#ifndef IIO_ADC_AD5933_H_
10#define IIO_ADC_AD5933_H_
11
12/*
13 * TODO: struct ad5933_platform_data needs to go into include/linux/iio
14 */
15
16/**
17 * struct ad5933_platform_data - platform specific data
18 * @ext_clk_Hz: the external clock frequency in Hz, if not set
19 * the driver uses the internal clock (16.776 MHz)
20 * @vref_mv: the external reference voltage in millivolt
21 */
22
23struct ad5933_platform_data {
24 unsigned long ext_clk_Hz;
25 unsigned short vref_mv;
26};
27
28#endif /* IIO_ADC_AD5933_H_ */
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index 6e2ba458c24d..2e3b1d64e32a 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -69,7 +69,6 @@ enum als_ir_mode {
69}; 69};
70 70
71struct isl29028_chip { 71struct isl29028_chip {
72 struct device *dev;
73 struct mutex lock; 72 struct mutex lock;
74 struct regmap *regmap; 73 struct regmap *regmap;
75 74
@@ -166,20 +165,21 @@ static int isl29028_set_als_ir_mode(struct isl29028_chip *chip,
166 165
167static int isl29028_read_als_ir(struct isl29028_chip *chip, int *als_ir) 166static int isl29028_read_als_ir(struct isl29028_chip *chip, int *als_ir)
168{ 167{
168 struct device *dev = regmap_get_device(chip->regmap);
169 unsigned int lsb; 169 unsigned int lsb;
170 unsigned int msb; 170 unsigned int msb;
171 int ret; 171 int ret;
172 172
173 ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_L, &lsb); 173 ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_L, &lsb);
174 if (ret < 0) { 174 if (ret < 0) {
175 dev_err(chip->dev, 175 dev_err(dev,
176 "Error in reading register ALSIR_L err %d\n", ret); 176 "Error in reading register ALSIR_L err %d\n", ret);
177 return ret; 177 return ret;
178 } 178 }
179 179
180 ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_U, &msb); 180 ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_U, &msb);
181 if (ret < 0) { 181 if (ret < 0) {
182 dev_err(chip->dev, 182 dev_err(dev,
183 "Error in reading register ALSIR_U err %d\n", ret); 183 "Error in reading register ALSIR_U err %d\n", ret);
184 return ret; 184 return ret;
185 } 185 }
@@ -190,12 +190,13 @@ static int isl29028_read_als_ir(struct isl29028_chip *chip, int *als_ir)
190 190
191static int isl29028_read_proxim(struct isl29028_chip *chip, int *prox) 191static int isl29028_read_proxim(struct isl29028_chip *chip, int *prox)
192{ 192{
193 struct device *dev = regmap_get_device(chip->regmap);
193 unsigned int data; 194 unsigned int data;
194 int ret; 195 int ret;
195 196
196 ret = regmap_read(chip->regmap, ISL29028_REG_PROX_DATA, &data); 197 ret = regmap_read(chip->regmap, ISL29028_REG_PROX_DATA, &data);
197 if (ret < 0) { 198 if (ret < 0) {
198 dev_err(chip->dev, "Error in reading register %d, error %d\n", 199 dev_err(dev, "Error in reading register %d, error %d\n",
199 ISL29028_REG_PROX_DATA, ret); 200 ISL29028_REG_PROX_DATA, ret);
200 return ret; 201 return ret;
201 } 202 }
@@ -218,13 +219,14 @@ static int isl29028_proxim_get(struct isl29028_chip *chip, int *prox_data)
218 219
219static int isl29028_als_get(struct isl29028_chip *chip, int *als_data) 220static int isl29028_als_get(struct isl29028_chip *chip, int *als_data)
220{ 221{
222 struct device *dev = regmap_get_device(chip->regmap);
221 int ret; 223 int ret;
222 int als_ir_data; 224 int als_ir_data;
223 225
224 if (chip->als_ir_mode != MODE_ALS) { 226 if (chip->als_ir_mode != MODE_ALS) {
225 ret = isl29028_set_als_ir_mode(chip, MODE_ALS); 227 ret = isl29028_set_als_ir_mode(chip, MODE_ALS);
226 if (ret < 0) { 228 if (ret < 0) {
227 dev_err(chip->dev, 229 dev_err(dev,
228 "Error in enabling ALS mode err %d\n", ret); 230 "Error in enabling ALS mode err %d\n", ret);
229 return ret; 231 return ret;
230 } 232 }
@@ -251,12 +253,13 @@ static int isl29028_als_get(struct isl29028_chip *chip, int *als_data)
251 253
252static int isl29028_ir_get(struct isl29028_chip *chip, int *ir_data) 254static int isl29028_ir_get(struct isl29028_chip *chip, int *ir_data)
253{ 255{
256 struct device *dev = regmap_get_device(chip->regmap);
254 int ret; 257 int ret;
255 258
256 if (chip->als_ir_mode != MODE_IR) { 259 if (chip->als_ir_mode != MODE_IR) {
257 ret = isl29028_set_als_ir_mode(chip, MODE_IR); 260 ret = isl29028_set_als_ir_mode(chip, MODE_IR);
258 if (ret < 0) { 261 if (ret < 0) {
259 dev_err(chip->dev, 262 dev_err(dev,
260 "Error in enabling IR mode err %d\n", ret); 263 "Error in enabling IR mode err %d\n", ret);
261 return ret; 264 return ret;
262 } 265 }
@@ -271,25 +274,26 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
271 int val, int val2, long mask) 274 int val, int val2, long mask)
272{ 275{
273 struct isl29028_chip *chip = iio_priv(indio_dev); 276 struct isl29028_chip *chip = iio_priv(indio_dev);
277 struct device *dev = regmap_get_device(chip->regmap);
274 int ret = -EINVAL; 278 int ret = -EINVAL;
275 279
276 mutex_lock(&chip->lock); 280 mutex_lock(&chip->lock);
277 switch (chan->type) { 281 switch (chan->type) {
278 case IIO_PROXIMITY: 282 case IIO_PROXIMITY:
279 if (mask != IIO_CHAN_INFO_SAMP_FREQ) { 283 if (mask != IIO_CHAN_INFO_SAMP_FREQ) {
280 dev_err(chip->dev, 284 dev_err(dev,
281 "proximity: mask value 0x%08lx not supported\n", 285 "proximity: mask value 0x%08lx not supported\n",
282 mask); 286 mask);
283 break; 287 break;
284 } 288 }
285 if (val < 1 || val > 100) { 289 if (val < 1 || val > 100) {
286 dev_err(chip->dev, 290 dev_err(dev,
287 "Samp_freq %d is not in range[1:100]\n", val); 291 "Samp_freq %d is not in range[1:100]\n", val);
288 break; 292 break;
289 } 293 }
290 ret = isl29028_set_proxim_sampling(chip, val); 294 ret = isl29028_set_proxim_sampling(chip, val);
291 if (ret < 0) { 295 if (ret < 0) {
292 dev_err(chip->dev, 296 dev_err(dev,
293 "Setting proximity samp_freq fail, err %d\n", 297 "Setting proximity samp_freq fail, err %d\n",
294 ret); 298 ret);
295 break; 299 break;
@@ -299,19 +303,19 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
299 303
300 case IIO_LIGHT: 304 case IIO_LIGHT:
301 if (mask != IIO_CHAN_INFO_SCALE) { 305 if (mask != IIO_CHAN_INFO_SCALE) {
302 dev_err(chip->dev, 306 dev_err(dev,
303 "light: mask value 0x%08lx not supported\n", 307 "light: mask value 0x%08lx not supported\n",
304 mask); 308 mask);
305 break; 309 break;
306 } 310 }
307 if ((val != 125) && (val != 2000)) { 311 if ((val != 125) && (val != 2000)) {
308 dev_err(chip->dev, 312 dev_err(dev,
309 "lux scale %d is invalid [125, 2000]\n", val); 313 "lux scale %d is invalid [125, 2000]\n", val);
310 break; 314 break;
311 } 315 }
312 ret = isl29028_set_als_scale(chip, val); 316 ret = isl29028_set_als_scale(chip, val);
313 if (ret < 0) { 317 if (ret < 0) {
314 dev_err(chip->dev, 318 dev_err(dev,
315 "Setting lux scale fail with error %d\n", ret); 319 "Setting lux scale fail with error %d\n", ret);
316 break; 320 break;
317 } 321 }
@@ -319,7 +323,7 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
319 break; 323 break;
320 324
321 default: 325 default:
322 dev_err(chip->dev, "Unsupported channel type\n"); 326 dev_err(dev, "Unsupported channel type\n");
323 break; 327 break;
324 } 328 }
325 mutex_unlock(&chip->lock); 329 mutex_unlock(&chip->lock);
@@ -331,6 +335,7 @@ static int isl29028_read_raw(struct iio_dev *indio_dev,
331 int *val, int *val2, long mask) 335 int *val, int *val2, long mask)
332{ 336{
333 struct isl29028_chip *chip = iio_priv(indio_dev); 337 struct isl29028_chip *chip = iio_priv(indio_dev);
338 struct device *dev = regmap_get_device(chip->regmap);
334 int ret = -EINVAL; 339 int ret = -EINVAL;
335 340
336 mutex_lock(&chip->lock); 341 mutex_lock(&chip->lock);
@@ -370,7 +375,7 @@ static int isl29028_read_raw(struct iio_dev *indio_dev,
370 break; 375 break;
371 376
372 default: 377 default:
373 dev_err(chip->dev, "mask value 0x%08lx not supported\n", mask); 378 dev_err(dev, "mask value 0x%08lx not supported\n", mask);
374 break; 379 break;
375 } 380 }
376 mutex_unlock(&chip->lock); 381 mutex_unlock(&chip->lock);
@@ -417,6 +422,7 @@ static const struct iio_info isl29028_info = {
417 422
418static int isl29028_chip_init(struct isl29028_chip *chip) 423static int isl29028_chip_init(struct isl29028_chip *chip)
419{ 424{
425 struct device *dev = regmap_get_device(chip->regmap);
420 int ret; 426 int ret;
421 427
422 chip->enable_prox = false; 428 chip->enable_prox = false;
@@ -426,35 +432,33 @@ static int isl29028_chip_init(struct isl29028_chip *chip)
426 432
427 ret = regmap_write(chip->regmap, ISL29028_REG_TEST1_MODE, 0x0); 433 ret = regmap_write(chip->regmap, ISL29028_REG_TEST1_MODE, 0x0);
428 if (ret < 0) { 434 if (ret < 0) {
429 dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n", 435 dev_err(dev, "%s(): write to reg %d failed, err = %d\n",
430 __func__, ISL29028_REG_TEST1_MODE, ret); 436 __func__, ISL29028_REG_TEST1_MODE, ret);
431 return ret; 437 return ret;
432 } 438 }
433 ret = regmap_write(chip->regmap, ISL29028_REG_TEST2_MODE, 0x0); 439 ret = regmap_write(chip->regmap, ISL29028_REG_TEST2_MODE, 0x0);
434 if (ret < 0) { 440 if (ret < 0) {
435 dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n", 441 dev_err(dev, "%s(): write to reg %d failed, err = %d\n",
436 __func__, ISL29028_REG_TEST2_MODE, ret); 442 __func__, ISL29028_REG_TEST2_MODE, ret);
437 return ret; 443 return ret;
438 } 444 }
439 445
440 ret = regmap_write(chip->regmap, ISL29028_REG_CONFIGURE, 0x0); 446 ret = regmap_write(chip->regmap, ISL29028_REG_CONFIGURE, 0x0);
441 if (ret < 0) { 447 if (ret < 0) {
442 dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n", 448 dev_err(dev, "%s(): write to reg %d failed, err = %d\n",
443 __func__, ISL29028_REG_CONFIGURE, ret); 449 __func__, ISL29028_REG_CONFIGURE, ret);
444 return ret; 450 return ret;
445 } 451 }
446 452
447 ret = isl29028_set_proxim_sampling(chip, chip->prox_sampling); 453 ret = isl29028_set_proxim_sampling(chip, chip->prox_sampling);
448 if (ret < 0) { 454 if (ret < 0) {
449 dev_err(chip->dev, "setting the proximity, err = %d\n", 455 dev_err(dev, "setting the proximity, err = %d\n", ret);
450 ret);
451 return ret; 456 return ret;
452 } 457 }
453 458
454 ret = isl29028_set_als_scale(chip, chip->lux_scale); 459 ret = isl29028_set_als_scale(chip, chip->lux_scale);
455 if (ret < 0) 460 if (ret < 0)
456 dev_err(chip->dev, 461 dev_err(dev, "setting als scale failed, err = %d\n", ret);
457 "setting als scale failed, err = %d\n", ret);
458 return ret; 462 return ret;
459} 463}
460 464
@@ -496,19 +500,19 @@ static int isl29028_probe(struct i2c_client *client,
496 chip = iio_priv(indio_dev); 500 chip = iio_priv(indio_dev);
497 501
498 i2c_set_clientdata(client, indio_dev); 502 i2c_set_clientdata(client, indio_dev);
499 chip->dev = &client->dev;
500 mutex_init(&chip->lock); 503 mutex_init(&chip->lock);
501 504
502 chip->regmap = devm_regmap_init_i2c(client, &isl29028_regmap_config); 505 chip->regmap = devm_regmap_init_i2c(client, &isl29028_regmap_config);
503 if (IS_ERR(chip->regmap)) { 506 if (IS_ERR(chip->regmap)) {
504 ret = PTR_ERR(chip->regmap); 507 ret = PTR_ERR(chip->regmap);
505 dev_err(chip->dev, "regmap initialization failed: %d\n", ret); 508 dev_err(&client->dev, "regmap initialization failed: %d\n",
509 ret);
506 return ret; 510 return ret;
507 } 511 }
508 512
509 ret = isl29028_chip_init(chip); 513 ret = isl29028_chip_init(chip);
510 if (ret < 0) { 514 if (ret < 0) {
511 dev_err(chip->dev, "chip initialization failed: %d\n", ret); 515 dev_err(&client->dev, "chip initialization failed: %d\n", ret);
512 return ret; 516 return ret;
513 } 517 }
514 518
@@ -520,7 +524,8 @@ static int isl29028_probe(struct i2c_client *client,
520 indio_dev->modes = INDIO_DIRECT_MODE; 524 indio_dev->modes = INDIO_DIRECT_MODE;
521 ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev); 525 ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
522 if (ret < 0) { 526 if (ret < 0) {
523 dev_err(chip->dev, "iio registration fails with error %d\n", 527 dev_err(&client->dev,
528 "iio registration fails with error %d\n",
524 ret); 529 ret);
525 return ret; 530 return ret;
526 } 531 }
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 5f308bae41b9..d553c8e18fcc 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -187,9 +187,11 @@ struct tsl2X7X_chip {
187 const struct tsl2x7x_chip_info *chip_info; 187 const struct tsl2x7x_chip_info *chip_info;
188 const struct iio_info *info; 188 const struct iio_info *info;
189 s64 event_timestamp; 189 s64 event_timestamp;
190 /* This structure is intentionally large to accommodate 190 /*
191 * updates via sysfs. */ 191 * This structure is intentionally large to accommodate
192 /* Sized to 9 = max 8 segments + 1 termination segment */ 192 * updates via sysfs.
193 * Sized to 9 = max 8 segments + 1 termination segment
194 */
193 struct tsl2x7x_lux tsl2x7x_device_lux[TSL2X7X_MAX_LUX_TABLE_SIZE]; 195 struct tsl2x7x_lux tsl2x7x_device_lux[TSL2X7X_MAX_LUX_TABLE_SIZE];
194}; 196};
195 197
@@ -349,13 +351,13 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
349 if (chip->tsl2x7x_chip_status != TSL2X7X_CHIP_WORKING) { 351 if (chip->tsl2x7x_chip_status != TSL2X7X_CHIP_WORKING) {
350 /* device is not enabled */ 352 /* device is not enabled */
351 dev_err(&chip->client->dev, "%s: device is not enabled\n", 353 dev_err(&chip->client->dev, "%s: device is not enabled\n",
352 __func__); 354 __func__);
353 ret = -EBUSY; 355 ret = -EBUSY;
354 goto out_unlock; 356 goto out_unlock;
355 } 357 }
356 358
357 ret = tsl2x7x_i2c_read(chip->client, 359 ret = tsl2x7x_i2c_read(chip->client,
358 (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &buf[0]); 360 (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &buf[0]);
359 if (ret < 0) { 361 if (ret < 0) {
360 dev_err(&chip->client->dev, 362 dev_err(&chip->client->dev,
361 "%s: Failed to read STATUS Reg\n", __func__); 363 "%s: Failed to read STATUS Reg\n", __func__);
@@ -371,8 +373,8 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
371 373
372 for (i = 0; i < 4; i++) { 374 for (i = 0; i < 4; i++) {
373 ret = tsl2x7x_i2c_read(chip->client, 375 ret = tsl2x7x_i2c_read(chip->client,
374 (TSL2X7X_CMD_REG | (TSL2X7X_ALS_CHAN0LO + i)), 376 (TSL2X7X_CMD_REG |
375 &buf[i]); 377 (TSL2X7X_ALS_CHAN0LO + i)), &buf[i]);
376 if (ret < 0) { 378 if (ret < 0) {
377 dev_err(&chip->client->dev, 379 dev_err(&chip->client->dev,
378 "failed to read. err=%x\n", ret); 380 "failed to read. err=%x\n", ret);
@@ -382,9 +384,9 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
382 384
383 /* clear any existing interrupt status */ 385 /* clear any existing interrupt status */
384 ret = i2c_smbus_write_byte(chip->client, 386 ret = i2c_smbus_write_byte(chip->client,
385 (TSL2X7X_CMD_REG | 387 (TSL2X7X_CMD_REG |
386 TSL2X7X_CMD_SPL_FN | 388 TSL2X7X_CMD_SPL_FN |
387 TSL2X7X_CMD_ALS_INT_CLR)); 389 TSL2X7X_CMD_ALS_INT_CLR));
388 if (ret < 0) { 390 if (ret < 0) {
389 dev_err(&chip->client->dev, 391 dev_err(&chip->client->dev,
390 "i2c_write_command failed - err = %d\n", ret); 392 "i2c_write_command failed - err = %d\n", ret);
@@ -411,7 +413,7 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
411 /* calculate ratio */ 413 /* calculate ratio */
412 ratio = (ch1 << 15) / ch0; 414 ratio = (ch1 << 15) / ch0;
413 /* convert to unscaled lux using the pointer to the table */ 415 /* convert to unscaled lux using the pointer to the table */
414 p = (struct tsl2x7x_lux *) chip->tsl2x7x_device_lux; 416 p = (struct tsl2x7x_lux *)chip->tsl2x7x_device_lux;
415 while (p->ratio != 0 && p->ratio < ratio) 417 while (p->ratio != 0 && p->ratio < ratio)
416 p++; 418 p++;
417 419
@@ -488,7 +490,7 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
488 } 490 }
489 491
490 ret = tsl2x7x_i2c_read(chip->client, 492 ret = tsl2x7x_i2c_read(chip->client,
491 (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &status); 493 (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &status);
492 if (ret < 0) { 494 if (ret < 0) {
493 dev_err(&chip->client->dev, "i2c err=%d\n", ret); 495 dev_err(&chip->client->dev, "i2c err=%d\n", ret);
494 goto prox_poll_err; 496 goto prox_poll_err;
@@ -515,8 +517,8 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
515 517
516 for (i = 0; i < 2; i++) { 518 for (i = 0; i < 2; i++) {
517 ret = tsl2x7x_i2c_read(chip->client, 519 ret = tsl2x7x_i2c_read(chip->client,
518 (TSL2X7X_CMD_REG | 520 (TSL2X7X_CMD_REG |
519 (TSL2X7X_PRX_LO + i)), &chdata[i]); 521 (TSL2X7X_PRX_LO + i)), &chdata[i]);
520 if (ret < 0) 522 if (ret < 0)
521 goto prox_poll_err; 523 goto prox_poll_err;
522 } 524 }
@@ -542,19 +544,19 @@ static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
542{ 544{
543 /* If Operational settings defined elsewhere.. */ 545 /* If Operational settings defined elsewhere.. */
544 if (chip->pdata && chip->pdata->platform_default_settings) 546 if (chip->pdata && chip->pdata->platform_default_settings)
545 memcpy(&(chip->tsl2x7x_settings), 547 memcpy(&chip->tsl2x7x_settings,
546 chip->pdata->platform_default_settings, 548 chip->pdata->platform_default_settings,
547 sizeof(tsl2x7x_default_settings)); 549 sizeof(tsl2x7x_default_settings));
548 else 550 else
549 memcpy(&(chip->tsl2x7x_settings), 551 memcpy(&chip->tsl2x7x_settings,
550 &tsl2x7x_default_settings, 552 &tsl2x7x_default_settings,
551 sizeof(tsl2x7x_default_settings)); 553 sizeof(tsl2x7x_default_settings));
552 554
553 /* Load up the proper lux table. */ 555 /* Load up the proper lux table. */
554 if (chip->pdata && chip->pdata->platform_lux_table[0].ratio != 0) 556 if (chip->pdata && chip->pdata->platform_lux_table[0].ratio != 0)
555 memcpy(chip->tsl2x7x_device_lux, 557 memcpy(chip->tsl2x7x_device_lux,
556 chip->pdata->platform_lux_table, 558 chip->pdata->platform_lux_table,
557 sizeof(chip->pdata->platform_lux_table)); 559 sizeof(chip->pdata->platform_lux_table));
558 else 560 else
559 memcpy(chip->tsl2x7x_device_lux, 561 memcpy(chip->tsl2x7x_device_lux,
560 (struct tsl2x7x_lux *)tsl2x7x_default_lux_table_group[chip->id], 562 (struct tsl2x7x_lux *)tsl2x7x_default_lux_table_group[chip->id],
@@ -576,7 +578,7 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
576 int lux_val; 578 int lux_val;
577 579
578 ret = i2c_smbus_write_byte(chip->client, 580 ret = i2c_smbus_write_byte(chip->client,
579 (TSL2X7X_CMD_REG | TSL2X7X_CNTRL)); 581 (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
580 if (ret < 0) { 582 if (ret < 0) {
581 dev_err(&chip->client->dev, 583 dev_err(&chip->client->dev,
582 "failed to write CNTRL register, ret=%d\n", ret); 584 "failed to write CNTRL register, ret=%d\n", ret);
@@ -592,7 +594,7 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
592 } 594 }
593 595
594 ret = i2c_smbus_write_byte(chip->client, 596 ret = i2c_smbus_write_byte(chip->client,
595 (TSL2X7X_CMD_REG | TSL2X7X_CNTRL)); 597 (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
596 if (ret < 0) { 598 if (ret < 0) {
597 dev_err(&chip->client->dev, 599 dev_err(&chip->client->dev,
598 "failed to write ctrl reg: ret=%d\n", ret); 600 "failed to write ctrl reg: ret=%d\n", ret);
@@ -609,7 +611,7 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
609 lux_val = tsl2x7x_get_lux(indio_dev); 611 lux_val = tsl2x7x_get_lux(indio_dev);
610 if (lux_val < 0) { 612 if (lux_val < 0) {
611 dev_err(&chip->client->dev, 613 dev_err(&chip->client->dev,
612 "%s: failed to get lux\n", __func__); 614 "%s: failed to get lux\n", __func__);
613 return lux_val; 615 return lux_val;
614 } 616 }
615 617
@@ -620,9 +622,9 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
620 622
621 chip->tsl2x7x_settings.als_gain_trim = gain_trim_val; 623 chip->tsl2x7x_settings.als_gain_trim = gain_trim_val;
622 dev_info(&chip->client->dev, 624 dev_info(&chip->client->dev,
623 "%s als_calibrate completed\n", chip->client->name); 625 "%s als_calibrate completed\n", chip->client->name);
624 626
625 return (int) gain_trim_val; 627 return (int)gain_trim_val;
626} 628}
627 629
628static int tsl2x7x_chip_on(struct iio_dev *indio_dev) 630static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
@@ -695,23 +697,28 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
695 chip->als_saturation = als_count * 922; /* 90% of full scale */ 697 chip->als_saturation = als_count * 922; /* 90% of full scale */
696 chip->als_time_scale = (als_time + 25) / 50; 698 chip->als_time_scale = (als_time + 25) / 50;
697 699
698 /* TSL2X7X Specific power-on / adc enable sequence 700 /*
699 * Power on the device 1st. */ 701 * TSL2X7X Specific power-on / adc enable sequence
702 * Power on the device 1st.
703 */
700 utmp = TSL2X7X_CNTL_PWR_ON; 704 utmp = TSL2X7X_CNTL_PWR_ON;
701 ret = i2c_smbus_write_byte_data(chip->client, 705 ret = i2c_smbus_write_byte_data(chip->client,
702 TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp); 706 TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
703 if (ret < 0) { 707 if (ret < 0) {
704 dev_err(&chip->client->dev, 708 dev_err(&chip->client->dev,
705 "%s: failed on CNTRL reg.\n", __func__); 709 "%s: failed on CNTRL reg.\n", __func__);
706 return ret; 710 return ret;
707 } 711 }
708 712
709 /* Use the following shadow copy for our delay before enabling ADC. 713 /*
710 * Write all the registers. */ 714 * Use the following shadow copy for our delay before enabling ADC.
715 * Write all the registers.
716 */
711 for (i = 0, dev_reg = chip->tsl2x7x_config; 717 for (i = 0, dev_reg = chip->tsl2x7x_config;
712 i < TSL2X7X_MAX_CONFIG_REG; i++) { 718 i < TSL2X7X_MAX_CONFIG_REG; i++) {
713 ret = i2c_smbus_write_byte_data(chip->client, 719 ret = i2c_smbus_write_byte_data(chip->client,
714 TSL2X7X_CMD_REG + i, *dev_reg++); 720 TSL2X7X_CMD_REG + i,
721 *dev_reg++);
715 if (ret < 0) { 722 if (ret < 0) {
716 dev_err(&chip->client->dev, 723 dev_err(&chip->client->dev,
717 "failed on write to reg %d.\n", i); 724 "failed on write to reg %d.\n", i);
@@ -721,13 +728,15 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
721 728
722 mdelay(3); /* Power-on settling time */ 729 mdelay(3); /* Power-on settling time */
723 730
724 /* NOW enable the ADC 731 /*
725 * initialize the desired mode of operation */ 732 * NOW enable the ADC
733 * initialize the desired mode of operation
734 */
726 utmp = TSL2X7X_CNTL_PWR_ON | 735 utmp = TSL2X7X_CNTL_PWR_ON |
727 TSL2X7X_CNTL_ADC_ENBL | 736 TSL2X7X_CNTL_ADC_ENBL |
728 TSL2X7X_CNTL_PROX_DET_ENBL; 737 TSL2X7X_CNTL_PROX_DET_ENBL;
729 ret = i2c_smbus_write_byte_data(chip->client, 738 ret = i2c_smbus_write_byte_data(chip->client,
730 TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp); 739 TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
731 if (ret < 0) { 740 if (ret < 0) {
732 dev_err(&chip->client->dev, 741 dev_err(&chip->client->dev,
733 "%s: failed on 2nd CTRL reg.\n", __func__); 742 "%s: failed on 2nd CTRL reg.\n", __func__);
@@ -741,12 +750,13 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
741 750
742 reg_val = TSL2X7X_CNTL_PWR_ON | TSL2X7X_CNTL_ADC_ENBL; 751 reg_val = TSL2X7X_CNTL_PWR_ON | TSL2X7X_CNTL_ADC_ENBL;
743 if ((chip->tsl2x7x_settings.interrupts_en == 0x20) || 752 if ((chip->tsl2x7x_settings.interrupts_en == 0x20) ||
744 (chip->tsl2x7x_settings.interrupts_en == 0x30)) 753 (chip->tsl2x7x_settings.interrupts_en == 0x30))
745 reg_val |= TSL2X7X_CNTL_PROX_DET_ENBL; 754 reg_val |= TSL2X7X_CNTL_PROX_DET_ENBL;
746 755
747 reg_val |= chip->tsl2x7x_settings.interrupts_en; 756 reg_val |= chip->tsl2x7x_settings.interrupts_en;
748 ret = i2c_smbus_write_byte_data(chip->client, 757 ret = i2c_smbus_write_byte_data(chip->client,
749 (TSL2X7X_CMD_REG | TSL2X7X_CNTRL), reg_val); 758 (TSL2X7X_CMD_REG |
759 TSL2X7X_CNTRL), reg_val);
750 if (ret < 0) 760 if (ret < 0)
751 dev_err(&chip->client->dev, 761 dev_err(&chip->client->dev,
752 "%s: failed in tsl2x7x_IOCTL_INT_SET.\n", 762 "%s: failed in tsl2x7x_IOCTL_INT_SET.\n",
@@ -754,8 +764,9 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
754 764
755 /* Clear out any initial interrupts */ 765 /* Clear out any initial interrupts */
756 ret = i2c_smbus_write_byte(chip->client, 766 ret = i2c_smbus_write_byte(chip->client,
757 TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN | 767 TSL2X7X_CMD_REG |
758 TSL2X7X_CMD_PROXALS_INT_CLR); 768 TSL2X7X_CMD_SPL_FN |
769 TSL2X7X_CMD_PROXALS_INT_CLR);
759 if (ret < 0) { 770 if (ret < 0) {
760 dev_err(&chip->client->dev, 771 dev_err(&chip->client->dev,
761 "%s: Failed to clear Int status\n", 772 "%s: Failed to clear Int status\n",
@@ -776,7 +787,7 @@ static int tsl2x7x_chip_off(struct iio_dev *indio_dev)
776 chip->tsl2x7x_chip_status = TSL2X7X_CHIP_SUSPENDED; 787 chip->tsl2x7x_chip_status = TSL2X7X_CHIP_SUSPENDED;
777 788
778 ret = i2c_smbus_write_byte_data(chip->client, 789 ret = i2c_smbus_write_byte_data(chip->client,
779 TSL2X7X_CMD_REG | TSL2X7X_CNTRL, 0x00); 790 TSL2X7X_CMD_REG | TSL2X7X_CNTRL, 0x00);
780 791
781 if (chip->pdata && chip->pdata->power_off) 792 if (chip->pdata && chip->pdata->power_off)
782 chip->pdata->power_off(chip->client); 793 chip->pdata->power_off(chip->client);
@@ -819,7 +830,7 @@ int tsl2x7x_invoke_change(struct iio_dev *indio_dev)
819 830
820static 831static
821void tsl2x7x_prox_calculate(int *data, int length, 832void tsl2x7x_prox_calculate(int *data, int length,
822 struct tsl2x7x_prox_stat *statP) 833 struct tsl2x7x_prox_stat *statP)
823{ 834{
824 int i; 835 int i;
825 int sample_sum; 836 int sample_sum;
@@ -843,7 +854,7 @@ void tsl2x7x_prox_calculate(int *data, int length,
843 tmp = data[i] - statP->mean; 854 tmp = data[i] - statP->mean;
844 sample_sum += tmp * tmp; 855 sample_sum += tmp * tmp;
845 } 856 }
846 statP->stddev = int_sqrt((long)sample_sum)/length; 857 statP->stddev = int_sqrt((long)sample_sum) / length;
847} 858}
848 859
849/** 860/**
@@ -886,20 +897,21 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
886 tsl2x7x_get_prox(indio_dev); 897 tsl2x7x_get_prox(indio_dev);
887 prox_history[i] = chip->prox_data; 898 prox_history[i] = chip->prox_data;
888 dev_info(&chip->client->dev, "2 i=%d prox data= %d\n", 899 dev_info(&chip->client->dev, "2 i=%d prox data= %d\n",
889 i, chip->prox_data); 900 i, chip->prox_data);
890 } 901 }
891 902
892 tsl2x7x_chip_off(indio_dev); 903 tsl2x7x_chip_off(indio_dev);
893 calP = &prox_stat_data[PROX_STAT_CAL]; 904 calP = &prox_stat_data[PROX_STAT_CAL];
894 tsl2x7x_prox_calculate(prox_history, 905 tsl2x7x_prox_calculate(prox_history,
895 chip->tsl2x7x_settings.prox_max_samples_cal, calP); 906 chip->tsl2x7x_settings.prox_max_samples_cal,
907 calP);
896 chip->tsl2x7x_settings.prox_thres_high = (calP->max << 1) - calP->mean; 908 chip->tsl2x7x_settings.prox_thres_high = (calP->max << 1) - calP->mean;
897 909
898 dev_info(&chip->client->dev, " cal min=%d mean=%d max=%d\n", 910 dev_info(&chip->client->dev, " cal min=%d mean=%d max=%d\n",
899 calP->min, calP->mean, calP->max); 911 calP->min, calP->mean, calP->max);
900 dev_info(&chip->client->dev, 912 dev_info(&chip->client->dev,
901 "%s proximity threshold set to %d\n", 913 "%s proximity threshold set to %d\n",
902 chip->client->name, chip->tsl2x7x_settings.prox_thres_high); 914 chip->client->name, chip->tsl2x7x_settings.prox_thres_high);
903 915
904 /* back to the way they were */ 916 /* back to the way they were */
905 chip->tsl2x7x_settings.interrupts_en = tmp_irq_settings; 917 chip->tsl2x7x_settings.interrupts_en = tmp_irq_settings;
@@ -908,7 +920,8 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
908} 920}
909 921
910static ssize_t tsl2x7x_power_state_show(struct device *dev, 922static ssize_t tsl2x7x_power_state_show(struct device *dev,
911 struct device_attribute *attr, char *buf) 923 struct device_attribute *attr,
924 char *buf)
912{ 925{
913 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev)); 926 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
914 927
@@ -916,7 +929,8 @@ static ssize_t tsl2x7x_power_state_show(struct device *dev,
916} 929}
917 930
918static ssize_t tsl2x7x_power_state_store(struct device *dev, 931static ssize_t tsl2x7x_power_state_store(struct device *dev,
919 struct device_attribute *attr, const char *buf, size_t len) 932 struct device_attribute *attr,
933 const char *buf, size_t len)
920{ 934{
921 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 935 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
922 bool value; 936 bool value;
@@ -933,7 +947,8 @@ static ssize_t tsl2x7x_power_state_store(struct device *dev,
933} 947}
934 948
935static ssize_t tsl2x7x_gain_available_show(struct device *dev, 949static ssize_t tsl2x7x_gain_available_show(struct device *dev,
936 struct device_attribute *attr, char *buf) 950 struct device_attribute *attr,
951 char *buf)
937{ 952{
938 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev)); 953 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
939 954
@@ -950,13 +965,15 @@ static ssize_t tsl2x7x_gain_available_show(struct device *dev,
950} 965}
951 966
952static ssize_t tsl2x7x_prox_gain_available_show(struct device *dev, 967static ssize_t tsl2x7x_prox_gain_available_show(struct device *dev,
953 struct device_attribute *attr, char *buf) 968 struct device_attribute *attr,
969 char *buf)
954{ 970{
955 return snprintf(buf, PAGE_SIZE, "%s\n", "1 2 4 8"); 971 return snprintf(buf, PAGE_SIZE, "%s\n", "1 2 4 8");
956} 972}
957 973
958static ssize_t tsl2x7x_als_time_show(struct device *dev, 974static ssize_t tsl2x7x_als_time_show(struct device *dev,
959 struct device_attribute *attr, char *buf) 975 struct device_attribute *attr,
976 char *buf)
960{ 977{
961 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev)); 978 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
962 int y, z; 979 int y, z;
@@ -970,7 +987,8 @@ static ssize_t tsl2x7x_als_time_show(struct device *dev,
970} 987}
971 988
972static ssize_t tsl2x7x_als_time_store(struct device *dev, 989static ssize_t tsl2x7x_als_time_store(struct device *dev,
973 struct device_attribute *attr, const char *buf, size_t len) 990 struct device_attribute *attr,
991 const char *buf, size_t len)
974{ 992{
975 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 993 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
976 struct tsl2X7X_chip *chip = iio_priv(indio_dev); 994 struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -986,7 +1004,7 @@ static ssize_t tsl2x7x_als_time_store(struct device *dev,
986 TSL2X7X_MAX_TIMER_CNT - (u8)result.fract; 1004 TSL2X7X_MAX_TIMER_CNT - (u8)result.fract;
987 1005
988 dev_info(&chip->client->dev, "%s: als time = %d", 1006 dev_info(&chip->client->dev, "%s: als time = %d",
989 __func__, chip->tsl2x7x_settings.als_time); 1007 __func__, chip->tsl2x7x_settings.als_time);
990 1008
991 tsl2x7x_invoke_change(indio_dev); 1009 tsl2x7x_invoke_change(indio_dev);
992 1010
@@ -997,7 +1015,8 @@ static IIO_CONST_ATTR(in_illuminance0_integration_time_available,
997 ".00272 - .696"); 1015 ".00272 - .696");
998 1016
999static ssize_t tsl2x7x_als_cal_target_show(struct device *dev, 1017static ssize_t tsl2x7x_als_cal_target_show(struct device *dev,
1000 struct device_attribute *attr, char *buf) 1018 struct device_attribute *attr,
1019 char *buf)
1001{ 1020{
1002 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev)); 1021 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
1003 1022
@@ -1006,7 +1025,8 @@ static ssize_t tsl2x7x_als_cal_target_show(struct device *dev,
1006} 1025}
1007 1026
1008static ssize_t tsl2x7x_als_cal_target_store(struct device *dev, 1027static ssize_t tsl2x7x_als_cal_target_store(struct device *dev,
1009 struct device_attribute *attr, const char *buf, size_t len) 1028 struct device_attribute *attr,
1029 const char *buf, size_t len)
1010{ 1030{
1011 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1031 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1012 struct tsl2X7X_chip *chip = iio_priv(indio_dev); 1032 struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1025,7 +1045,8 @@ static ssize_t tsl2x7x_als_cal_target_store(struct device *dev,
1025 1045
1026/* persistence settings */ 1046/* persistence settings */
1027static ssize_t tsl2x7x_als_persistence_show(struct device *dev, 1047static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
1028 struct device_attribute *attr, char *buf) 1048 struct device_attribute *attr,
1049 char *buf)
1029{ 1050{
1030 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev)); 1051 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
1031 int y, z, filter_delay; 1052 int y, z, filter_delay;
@@ -1041,7 +1062,8 @@ static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
1041} 1062}
1042 1063
1043static ssize_t tsl2x7x_als_persistence_store(struct device *dev, 1064static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
1044 struct device_attribute *attr, const char *buf, size_t len) 1065 struct device_attribute *attr,
1066 const char *buf, size_t len)
1045{ 1067{
1046 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1068 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1047 struct tsl2X7X_chip *chip = iio_priv(indio_dev); 1069 struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1063,7 +1085,7 @@ static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
1063 chip->tsl2x7x_settings.persistence |= (filter_delay & 0x0F); 1085 chip->tsl2x7x_settings.persistence |= (filter_delay & 0x0F);
1064 1086
1065 dev_info(&chip->client->dev, "%s: als persistence = %d", 1087 dev_info(&chip->client->dev, "%s: als persistence = %d",
1066 __func__, filter_delay); 1088 __func__, filter_delay);
1067 1089
1068 tsl2x7x_invoke_change(indio_dev); 1090 tsl2x7x_invoke_change(indio_dev);
1069 1091
@@ -1071,7 +1093,8 @@ static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
1071} 1093}
1072 1094
1073static ssize_t tsl2x7x_prox_persistence_show(struct device *dev, 1095static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
1074 struct device_attribute *attr, char *buf) 1096 struct device_attribute *attr,
1097 char *buf)
1075{ 1098{
1076 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev)); 1099 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
1077 int y, z, filter_delay; 1100 int y, z, filter_delay;
@@ -1087,7 +1110,8 @@ static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
1087} 1110}
1088 1111
1089static ssize_t tsl2x7x_prox_persistence_store(struct device *dev, 1112static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
1090 struct device_attribute *attr, const char *buf, size_t len) 1113 struct device_attribute *attr,
1114 const char *buf, size_t len)
1091{ 1115{
1092 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1116 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1093 struct tsl2X7X_chip *chip = iio_priv(indio_dev); 1117 struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1109,7 +1133,7 @@ static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
1109 chip->tsl2x7x_settings.persistence |= ((filter_delay << 4) & 0xF0); 1133 chip->tsl2x7x_settings.persistence |= ((filter_delay << 4) & 0xF0);
1110 1134
1111 dev_info(&chip->client->dev, "%s: prox persistence = %d", 1135 dev_info(&chip->client->dev, "%s: prox persistence = %d",
1112 __func__, filter_delay); 1136 __func__, filter_delay);
1113 1137
1114 tsl2x7x_invoke_change(indio_dev); 1138 tsl2x7x_invoke_change(indio_dev);
1115 1139
@@ -1117,7 +1141,8 @@ static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
1117} 1141}
1118 1142
1119static ssize_t tsl2x7x_do_calibrate(struct device *dev, 1143static ssize_t tsl2x7x_do_calibrate(struct device *dev,
1120 struct device_attribute *attr, const char *buf, size_t len) 1144 struct device_attribute *attr,
1145 const char *buf, size_t len)
1121{ 1146{
1122 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1147 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1123 bool value; 1148 bool value;
@@ -1134,7 +1159,8 @@ static ssize_t tsl2x7x_do_calibrate(struct device *dev,
1134} 1159}
1135 1160
1136static ssize_t tsl2x7x_luxtable_show(struct device *dev, 1161static ssize_t tsl2x7x_luxtable_show(struct device *dev,
1137 struct device_attribute *attr, char *buf) 1162 struct device_attribute *attr,
1163 char *buf)
1138{ 1164{
1139 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev)); 1165 struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
1140 int i = 0; 1166 int i = 0;
@@ -1146,8 +1172,10 @@ static ssize_t tsl2x7x_luxtable_show(struct device *dev,
1146 chip->tsl2x7x_device_lux[i].ch0, 1172 chip->tsl2x7x_device_lux[i].ch0,
1147 chip->tsl2x7x_device_lux[i].ch1); 1173 chip->tsl2x7x_device_lux[i].ch1);
1148 if (chip->tsl2x7x_device_lux[i].ratio == 0) { 1174 if (chip->tsl2x7x_device_lux[i].ratio == 0) {
1149 /* We just printed the first "0" entry. 1175 /*
1150 * Now get rid of the extra "," and break. */ 1176 * We just printed the first "0" entry.
1177 * Now get rid of the extra "," and break.
1178 */
1151 offset--; 1179 offset--;
1152 break; 1180 break;
1153 } 1181 }
@@ -1159,11 +1187,12 @@ static ssize_t tsl2x7x_luxtable_show(struct device *dev,
1159} 1187}
1160 1188
1161static ssize_t tsl2x7x_luxtable_store(struct device *dev, 1189static ssize_t tsl2x7x_luxtable_store(struct device *dev,
1162 struct device_attribute *attr, const char *buf, size_t len) 1190 struct device_attribute *attr,
1191 const char *buf, size_t len)
1163{ 1192{
1164 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1193 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1165 struct tsl2X7X_chip *chip = iio_priv(indio_dev); 1194 struct tsl2X7X_chip *chip = iio_priv(indio_dev);
1166 int value[ARRAY_SIZE(chip->tsl2x7x_device_lux)*3 + 1]; 1195 int value[ARRAY_SIZE(chip->tsl2x7x_device_lux) * 3 + 1];
1167 int n; 1196 int n;
1168 1197
1169 get_options(buf, ARRAY_SIZE(value), value); 1198 get_options(buf, ARRAY_SIZE(value), value);
@@ -1175,7 +1204,7 @@ static ssize_t tsl2x7x_luxtable_store(struct device *dev,
1175 */ 1204 */
1176 n = value[0]; 1205 n = value[0];
1177 if ((n % 3) || n < 6 || 1206 if ((n % 3) || n < 6 ||
1178 n > ((ARRAY_SIZE(chip->tsl2x7x_device_lux) - 1) * 3)) { 1207 n > ((ARRAY_SIZE(chip->tsl2x7x_device_lux) - 1) * 3)) {
1179 dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n); 1208 dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n);
1180 return -EINVAL; 1209 return -EINVAL;
1181 } 1210 }
@@ -1198,7 +1227,8 @@ static ssize_t tsl2x7x_luxtable_store(struct device *dev,
1198} 1227}
1199 1228
1200static ssize_t tsl2x7x_do_prox_calibrate(struct device *dev, 1229static ssize_t tsl2x7x_do_prox_calibrate(struct device *dev,
1201 struct device_attribute *attr, const char *buf, size_t len) 1230 struct device_attribute *attr,
1231 const char *buf, size_t len)
1202{ 1232{
1203 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1233 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1204 bool value; 1234 bool value;
@@ -1391,10 +1421,10 @@ static int tsl2x7x_read_raw(struct iio_dev *indio_dev,
1391} 1421}
1392 1422
1393static int tsl2x7x_write_raw(struct iio_dev *indio_dev, 1423static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
1394 struct iio_chan_spec const *chan, 1424 struct iio_chan_spec const *chan,
1395 int val, 1425 int val,
1396 int val2, 1426 int val2,
1397 long mask) 1427 long mask)
1398{ 1428{
1399 struct tsl2X7X_chip *chip = iio_priv(indio_dev); 1429 struct tsl2X7X_chip *chip = iio_priv(indio_dev);
1400 1430
@@ -1529,7 +1559,7 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
1529 u8 value; 1559 u8 value;
1530 1560
1531 value = i2c_smbus_read_byte_data(chip->client, 1561 value = i2c_smbus_read_byte_data(chip->client,
1532 TSL2X7X_CMD_REG | TSL2X7X_STATUS); 1562 TSL2X7X_CMD_REG | TSL2X7X_STATUS);
1533 1563
1534 /* What type of interrupt do we need to process */ 1564 /* What type of interrupt do we need to process */
1535 if (value & TSL2X7X_STA_PRX_INTR) { 1565 if (value & TSL2X7X_STA_PRX_INTR) {
@@ -1545,16 +1575,16 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
1545 if (value & TSL2X7X_STA_ALS_INTR) { 1575 if (value & TSL2X7X_STA_ALS_INTR) {
1546 tsl2x7x_get_lux(indio_dev); /* freshen data for ABI */ 1576 tsl2x7x_get_lux(indio_dev); /* freshen data for ABI */
1547 iio_push_event(indio_dev, 1577 iio_push_event(indio_dev,
1548 IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 1578 IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
1549 0, 1579 0,
1550 IIO_EV_TYPE_THRESH, 1580 IIO_EV_TYPE_THRESH,
1551 IIO_EV_DIR_EITHER), 1581 IIO_EV_DIR_EITHER),
1552 timestamp); 1582 timestamp);
1553 } 1583 }
1554 /* Clear interrupt now that we have handled it. */ 1584 /* Clear interrupt now that we have handled it. */
1555 ret = i2c_smbus_write_byte(chip->client, 1585 ret = i2c_smbus_write_byte(chip->client,
1556 TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN | 1586 TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
1557 TSL2X7X_CMD_PROXALS_INT_CLR); 1587 TSL2X7X_CMD_PROXALS_INT_CLR);
1558 if (ret < 0) 1588 if (ret < 0)
1559 dev_err(&chip->client->dev, 1589 dev_err(&chip->client->dev,
1560 "Failed to clear irq from event handler. err = %d\n", 1590 "Failed to clear irq from event handler. err = %d\n",
@@ -1616,6 +1646,7 @@ static struct attribute *tsl2X7X_ALS_event_attrs[] = {
1616 &dev_attr_in_intensity0_thresh_period.attr, 1646 &dev_attr_in_intensity0_thresh_period.attr,
1617 NULL, 1647 NULL,
1618}; 1648};
1649
1619static struct attribute *tsl2X7X_PRX_event_attrs[] = { 1650static struct attribute *tsl2X7X_PRX_event_attrs[] = {
1620 &dev_attr_in_proximity0_thresh_period.attr, 1651 &dev_attr_in_proximity0_thresh_period.attr,
1621 NULL, 1652 NULL,
@@ -1857,7 +1888,7 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
1857}; 1888};
1858 1889
1859static int tsl2x7x_probe(struct i2c_client *clientp, 1890static int tsl2x7x_probe(struct i2c_client *clientp,
1860 const struct i2c_device_id *id) 1891 const struct i2c_device_id *id)
1861{ 1892{
1862 int ret; 1893 int ret;
1863 unsigned char device_id; 1894 unsigned char device_id;
@@ -1873,14 +1904,14 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
1873 i2c_set_clientdata(clientp, indio_dev); 1904 i2c_set_clientdata(clientp, indio_dev);
1874 1905
1875 ret = tsl2x7x_i2c_read(chip->client, 1906 ret = tsl2x7x_i2c_read(chip->client,
1876 TSL2X7X_CHIPID, &device_id); 1907 TSL2X7X_CHIPID, &device_id);
1877 if (ret < 0) 1908 if (ret < 0)
1878 return ret; 1909 return ret;
1879 1910
1880 if ((!tsl2x7x_device_id(&device_id, id->driver_data)) || 1911 if ((!tsl2x7x_device_id(&device_id, id->driver_data)) ||
1881 (tsl2x7x_device_id(&device_id, id->driver_data) == -EINVAL)) { 1912 (tsl2x7x_device_id(&device_id, id->driver_data) == -EINVAL)) {
1882 dev_info(&chip->client->dev, 1913 dev_info(&chip->client->dev,
1883 "%s: i2c device found does not match expected id\n", 1914 "%s: i2c device found does not match expected id\n",
1884 __func__); 1915 __func__);
1885 return -EINVAL; 1916 return -EINVAL;
1886 } 1917 }
@@ -1892,8 +1923,10 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
1892 return ret; 1923 return ret;
1893 } 1924 }
1894 1925
1895 /* ALS and PROX functions can be invoked via user space poll 1926 /*
1896 * or H/W interrupt. If busy return last sample. */ 1927 * ALS and PROX functions can be invoked via user space poll
1928 * or H/W interrupt. If busy return last sample.
1929 */
1897 mutex_init(&chip->als_mutex); 1930 mutex_init(&chip->als_mutex);
1898 mutex_init(&chip->prox_mutex); 1931 mutex_init(&chip->prox_mutex);
1899 1932
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 69287108f793..4b5f05fdadcd 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -333,7 +333,8 @@ static int ade7753_set_irq(struct device *dev, bool enable)
333 333
334 if (enable) 334 if (enable)
335 irqen |= BIT(3); /* Enables an interrupt when a data is 335 irqen |= BIT(3); /* Enables an interrupt when a data is
336 present in the waveform register */ 336 * present in the waveform register
337 */
337 else 338 else
338 irqen &= ~BIT(3); 339 irqen &= ~BIT(3);
339 340
@@ -528,7 +529,6 @@ static int ade7753_probe(struct spi_device *spi)
528 return iio_device_register(indio_dev); 529 return iio_device_register(indio_dev);
529} 530}
530 531
531/* fixme, confirm ordering in this function */
532static int ade7753_remove(struct spi_device *spi) 532static int ade7753_remove(struct spi_device *spi)
533{ 533{
534 struct iio_dev *indio_dev = spi_get_drvdata(spi); 534 struct iio_dev *indio_dev = spi_get_drvdata(spi);
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index f4188e17d30b..c46bef641613 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -351,7 +351,8 @@ static int ade7754_set_irq(struct device *dev, bool enable)
351 351
352 if (enable) 352 if (enable)
353 irqen |= BIT(14); /* Enables an interrupt when a data is 353 irqen |= BIT(14); /* Enables an interrupt when a data is
354 present in the waveform register */ 354 * present in the waveform register
355 */
355 else 356 else
356 irqen &= ~BIT(14); 357 irqen &= ~BIT(14);
357 358
@@ -558,7 +559,6 @@ powerdown_on_error:
558 return ret; 559 return ret;
559} 560}
560 561
561/* fixme, confirm ordering in this function */
562static int ade7754_remove(struct spi_device *spi) 562static int ade7754_remove(struct spi_device *spi)
563{ 563{
564 struct iio_dev *indio_dev = spi_get_drvdata(spi); 564 struct iio_dev *indio_dev = spi_get_drvdata(spi);
diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
index f6739e2c24b1..1d04ec9524c8 100644
--- a/drivers/staging/iio/meter/ade7758.h
+++ b/drivers/staging/iio/meter/ade7758.h
@@ -129,6 +129,7 @@ struct ade7758_state {
129 unsigned char tx_buf[8]; 129 unsigned char tx_buf[8];
130 130
131}; 131};
132
132#ifdef CONFIG_IIO_BUFFER 133#ifdef CONFIG_IIO_BUFFER
133/* At the moment triggers are only used for ring buffer 134/* At the moment triggers are only used for ring buffer
134 * filling. This may change! 135 * filling. This may change!
@@ -138,25 +139,22 @@ void ade7758_remove_trigger(struct iio_dev *indio_dev);
138int ade7758_probe_trigger(struct iio_dev *indio_dev); 139int ade7758_probe_trigger(struct iio_dev *indio_dev);
139 140
140ssize_t ade7758_read_data_from_ring(struct device *dev, 141ssize_t ade7758_read_data_from_ring(struct device *dev,
141 struct device_attribute *attr, 142 struct device_attribute *attr, char *buf);
142 char *buf);
143
144 143
145int ade7758_configure_ring(struct iio_dev *indio_dev); 144int ade7758_configure_ring(struct iio_dev *indio_dev);
146void ade7758_unconfigure_ring(struct iio_dev *indio_dev); 145void ade7758_unconfigure_ring(struct iio_dev *indio_dev);
147 146
148int ade7758_set_irq(struct device *dev, bool enable); 147int ade7758_set_irq(struct device *dev, bool enable);
149 148
150int ade7758_spi_write_reg_8(struct device *dev, 149int ade7758_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val);
151 u8 reg_address, u8 val); 150int ade7758_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val);
152int ade7758_spi_read_reg_8(struct device *dev,
153 u8 reg_address, u8 *val);
154 151
155#else /* CONFIG_IIO_BUFFER */ 152#else /* CONFIG_IIO_BUFFER */
156 153
157static inline void ade7758_remove_trigger(struct iio_dev *indio_dev) 154static inline void ade7758_remove_trigger(struct iio_dev *indio_dev)
158{ 155{
159} 156}
157
160static inline int ade7758_probe_trigger(struct iio_dev *indio_dev) 158static inline int ade7758_probe_trigger(struct iio_dev *indio_dev)
161{ 159{
162 return 0; 160 return 0;
@@ -166,16 +164,20 @@ static int ade7758_configure_ring(struct iio_dev *indio_dev)
166{ 164{
167 return 0; 165 return 0;
168} 166}
167
169static inline void ade7758_unconfigure_ring(struct iio_dev *indio_dev) 168static inline void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
170{ 169{
171} 170}
171
172static inline int ade7758_initialize_ring(struct iio_ring_buffer *ring) 172static inline int ade7758_initialize_ring(struct iio_ring_buffer *ring)
173{ 173{
174 return 0; 174 return 0;
175} 175}
176
176static inline void ade7758_uninitialize_ring(struct iio_dev *indio_dev) 177static inline void ade7758_uninitialize_ring(struct iio_dev *indio_dev)
177{ 178{
178} 179}
180
179#endif /* CONFIG_IIO_BUFFER */ 181#endif /* CONFIG_IIO_BUFFER */
180 182
181#endif 183#endif
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 40f5afaa984b..ebb8a1993303 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -24,9 +24,7 @@
24#include "meter.h" 24#include "meter.h"
25#include "ade7758.h" 25#include "ade7758.h"
26 26
27int ade7758_spi_write_reg_8(struct device *dev, 27int ade7758_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val)
28 u8 reg_address,
29 u8 val)
30{ 28{
31 int ret; 29 int ret;
32 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 30 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -42,9 +40,8 @@ int ade7758_spi_write_reg_8(struct device *dev,
42 return ret; 40 return ret;
43} 41}
44 42
45static int ade7758_spi_write_reg_16(struct device *dev, 43static int ade7758_spi_write_reg_16(struct device *dev, u8 reg_address,
46 u8 reg_address, 44 u16 value)
47 u16 value)
48{ 45{
49 int ret; 46 int ret;
50 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 47 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -68,9 +65,8 @@ static int ade7758_spi_write_reg_16(struct device *dev,
68 return ret; 65 return ret;
69} 66}
70 67
71static int ade7758_spi_write_reg_24(struct device *dev, 68static int ade7758_spi_write_reg_24(struct device *dev, u8 reg_address,
72 u8 reg_address, 69 u32 value)
73 u32 value)
74{ 70{
75 int ret; 71 int ret;
76 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 72 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -95,9 +91,7 @@ static int ade7758_spi_write_reg_24(struct device *dev,
95 return ret; 91 return ret;
96} 92}
97 93
98int ade7758_spi_read_reg_8(struct device *dev, 94int ade7758_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val)
99 u8 reg_address,
100 u8 *val)
101{ 95{
102 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 96 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
103 struct ade7758_state *st = iio_priv(indio_dev); 97 struct ade7758_state *st = iio_priv(indio_dev);
@@ -124,7 +118,7 @@ int ade7758_spi_read_reg_8(struct device *dev,
124 ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers)); 118 ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
125 if (ret) { 119 if (ret) {
126 dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X", 120 dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
127 reg_address); 121 reg_address);
128 goto error_ret; 122 goto error_ret;
129 } 123 }
130 *val = st->rx[0]; 124 *val = st->rx[0];
@@ -134,9 +128,8 @@ error_ret:
134 return ret; 128 return ret;
135} 129}
136 130
137static int ade7758_spi_read_reg_16(struct device *dev, 131static int ade7758_spi_read_reg_16(struct device *dev, u8 reg_address,
138 u8 reg_address, 132 u16 *val)
139 u16 *val)
140{ 133{
141 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 134 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
142 struct ade7758_state *st = iio_priv(indio_dev); 135 struct ade7758_state *st = iio_priv(indio_dev);
@@ -156,7 +149,6 @@ static int ade7758_spi_read_reg_16(struct device *dev,
156 }, 149 },
157 }; 150 };
158 151
159
160 mutex_lock(&st->buf_lock); 152 mutex_lock(&st->buf_lock);
161 st->tx[0] = ADE7758_READ_REG(reg_address); 153 st->tx[0] = ADE7758_READ_REG(reg_address);
162 st->tx[1] = 0; 154 st->tx[1] = 0;
@@ -165,7 +157,7 @@ static int ade7758_spi_read_reg_16(struct device *dev,
165 ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers)); 157 ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
166 if (ret) { 158 if (ret) {
167 dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X", 159 dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
168 reg_address); 160 reg_address);
169 goto error_ret; 161 goto error_ret;
170 } 162 }
171 163
@@ -176,9 +168,8 @@ error_ret:
176 return ret; 168 return ret;
177} 169}
178 170
179static int ade7758_spi_read_reg_24(struct device *dev, 171static int ade7758_spi_read_reg_24(struct device *dev, u8 reg_address,
180 u8 reg_address, 172 u32 *val)
181 u32 *val)
182{ 173{
183 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 174 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
184 struct ade7758_state *st = iio_priv(indio_dev); 175 struct ade7758_state *st = iio_priv(indio_dev);
@@ -207,7 +198,7 @@ static int ade7758_spi_read_reg_24(struct device *dev,
207 ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers)); 198 ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
208 if (ret) { 199 if (ret) {
209 dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X", 200 dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
210 reg_address); 201 reg_address);
211 goto error_ret; 202 goto error_ret;
212 } 203 }
213 *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2]; 204 *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
@@ -218,8 +209,7 @@ error_ret:
218} 209}
219 210
220static ssize_t ade7758_read_8bit(struct device *dev, 211static ssize_t ade7758_read_8bit(struct device *dev,
221 struct device_attribute *attr, 212 struct device_attribute *attr, char *buf)
222 char *buf)
223{ 213{
224 int ret; 214 int ret;
225 u8 val = 0; 215 u8 val = 0;
@@ -233,8 +223,7 @@ static ssize_t ade7758_read_8bit(struct device *dev,
233} 223}
234 224
235static ssize_t ade7758_read_16bit(struct device *dev, 225static ssize_t ade7758_read_16bit(struct device *dev,
236 struct device_attribute *attr, 226 struct device_attribute *attr, char *buf)
237 char *buf)
238{ 227{
239 int ret; 228 int ret;
240 u16 val = 0; 229 u16 val = 0;
@@ -248,8 +237,7 @@ static ssize_t ade7758_read_16bit(struct device *dev,
248} 237}
249 238
250static ssize_t ade7758_read_24bit(struct device *dev, 239static ssize_t ade7758_read_24bit(struct device *dev,
251 struct device_attribute *attr, 240 struct device_attribute *attr, char *buf)
252 char *buf)
253{ 241{
254 int ret; 242 int ret;
255 u32 val = 0; 243 u32 val = 0;
@@ -263,9 +251,8 @@ static ssize_t ade7758_read_24bit(struct device *dev,
263} 251}
264 252
265static ssize_t ade7758_write_8bit(struct device *dev, 253static ssize_t ade7758_write_8bit(struct device *dev,
266 struct device_attribute *attr, 254 struct device_attribute *attr,
267 const char *buf, 255 const char *buf, size_t len)
268 size_t len)
269{ 256{
270 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 257 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
271 int ret; 258 int ret;
@@ -281,9 +268,8 @@ error_ret:
281} 268}
282 269
283static ssize_t ade7758_write_16bit(struct device *dev, 270static ssize_t ade7758_write_16bit(struct device *dev,
284 struct device_attribute *attr, 271 struct device_attribute *attr,
285 const char *buf, 272 const char *buf, size_t len)
286 size_t len)
287{ 273{
288 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 274 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
289 int ret; 275 int ret;
@@ -427,7 +413,8 @@ int ade7758_set_irq(struct device *dev, bool enable)
427 413
428 if (enable) 414 if (enable)
429 irqen |= BIT(16); /* Enables an interrupt when a data is 415 irqen |= BIT(16); /* Enables an interrupt when a data is
430 present in the waveform register */ 416 * present in the waveform register
417 */
431 else 418 else
432 irqen &= ~BIT(16); 419 irqen &= ~BIT(16);
433 420
@@ -479,16 +466,13 @@ err_ret:
479} 466}
480 467
481static ssize_t ade7758_read_frequency(struct device *dev, 468static ssize_t ade7758_read_frequency(struct device *dev,
482 struct device_attribute *attr, 469 struct device_attribute *attr, char *buf)
483 char *buf)
484{ 470{
485 int ret; 471 int ret;
486 u8 t; 472 u8 t;
487 int sps; 473 int sps;
488 474
489 ret = ade7758_spi_read_reg_8(dev, 475 ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &t);
490 ADE7758_WAVMODE,
491 &t);
492 if (ret) 476 if (ret)
493 return ret; 477 return ret;
494 478
@@ -499,9 +483,8 @@ static ssize_t ade7758_read_frequency(struct device *dev,
499} 483}
500 484
501static ssize_t ade7758_write_frequency(struct device *dev, 485static ssize_t ade7758_write_frequency(struct device *dev,
502 struct device_attribute *attr, 486 struct device_attribute *attr,
503 const char *buf, 487 const char *buf, size_t len)
504 size_t len)
505{ 488{
506 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 489 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
507 u16 val; 490 u16 val;
@@ -532,18 +515,14 @@ static ssize_t ade7758_write_frequency(struct device *dev,
532 goto out; 515 goto out;
533 } 516 }
534 517
535 ret = ade7758_spi_read_reg_8(dev, 518 ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &reg);
536 ADE7758_WAVMODE,
537 &reg);
538 if (ret) 519 if (ret)
539 goto out; 520 goto out;
540 521
541 reg &= ~(5 << 3); 522 reg &= ~(5 << 3);
542 reg |= t << 5; 523 reg |= t << 5;
543 524
544 ret = ade7758_spi_write_reg_8(dev, 525 ret = ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg);
545 ADE7758_WAVMODE,
546 reg);
547 526
548out: 527out:
549 mutex_unlock(&indio_dev->mlock); 528 mutex_unlock(&indio_dev->mlock);
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index 9a24e0226f8b..a6b76d4b1c80 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -33,7 +33,7 @@ static int ade7758_spi_read_burst(struct iio_dev *indio_dev)
33 return ret; 33 return ret;
34} 34}
35 35
36static int ade7758_write_waveform_type(struct device *dev, unsigned type) 36static int ade7758_write_waveform_type(struct device *dev, unsigned int type)
37{ 37{
38 int ret; 38 int ret;
39 u8 reg; 39 u8 reg;
@@ -85,7 +85,7 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
85 **/ 85 **/
86static int ade7758_ring_preenable(struct iio_dev *indio_dev) 86static int ade7758_ring_preenable(struct iio_dev *indio_dev)
87{ 87{
88 unsigned channel; 88 unsigned int channel;
89 89
90 if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) 90 if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
91 return -EINVAL; 91 return -EINVAL;
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 684e612a88b9..80144d40d9ca 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -289,7 +289,8 @@ static int ade7759_set_irq(struct device *dev, bool enable)
289 289
290 if (enable) 290 if (enable)
291 irqen |= BIT(3); /* Enables an interrupt when a data is 291 irqen |= BIT(3); /* Enables an interrupt when a data is
292 present in the waveform register */ 292 * present in the waveform register
293 */
293 else 294 else
294 irqen &= ~BIT(3); 295 irqen &= ~BIT(3);
295 296
@@ -476,7 +477,6 @@ static int ade7759_probe(struct spi_device *spi)
476 return iio_device_register(indio_dev); 477 return iio_device_register(indio_dev);
477} 478}
478 479
479/* fixme, confirm ordering in this function */
480static int ade7759_remove(struct spi_device *spi) 480static int ade7759_remove(struct spi_device *spi)
481{ 481{
482 struct iio_dev *indio_dev = spi_get_drvdata(spi); 482 struct iio_dev *indio_dev = spi_get_drvdata(spi);
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index 9e439af7100d..75e8685e6df2 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -421,7 +421,8 @@ static int ade7854_set_irq(struct device *dev, bool enable)
421 421
422 if (enable) 422 if (enable)
423 irqen |= BIT(17); /* 1: interrupt enabled when all periodical 423 irqen |= BIT(17); /* 1: interrupt enabled when all periodical
424 (at 8 kHz rate) DSP computations finish. */ 424 * (at 8 kHz rate) DSP computations finish.
425 */
425 else 426 else
426 irqen &= ~BIT(17); 427 irqen &= ~BIT(17);
427 428
diff --git a/drivers/staging/iio/resolver/ad2s1210.h b/drivers/staging/iio/resolver/ad2s1210.h
index c7158f6e61c2..e9b2147701fc 100644
--- a/drivers/staging/iio/resolver/ad2s1210.h
+++ b/drivers/staging/iio/resolver/ad2s1210.h
@@ -12,9 +12,9 @@
12#define _AD2S1210_H 12#define _AD2S1210_H
13 13
14struct ad2s1210_platform_data { 14struct ad2s1210_platform_data {
15 unsigned sample; 15 unsigned int sample;
16 unsigned a[2]; 16 unsigned int a[2];
17 unsigned res[2]; 17 unsigned int res[2];
18 bool gpioin; 18 bool gpioin;
19}; 19};
20#endif /* _AD2S1210_H */ 20#endif /* _AD2S1210_H */
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 035dd456d7d6..38dca69a06eb 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -55,12 +55,12 @@ static struct bfin_timer iio_bfin_timer_code[MAX_BLACKFIN_GPTIMERS] = {
55}; 55};
56 56
57struct bfin_tmr_state { 57struct bfin_tmr_state {
58 struct iio_trigger *trig; 58 struct iio_trigger *trig;
59 struct bfin_timer *t; 59 struct bfin_timer *t;
60 unsigned timer_num; 60 unsigned int timer_num;
61 bool output_enable; 61 bool output_enable;
62 unsigned int duty; 62 unsigned int duty;
63 int irq; 63 int irq;
64}; 64};
65 65
66static int iio_bfin_tmr_set_state(struct iio_trigger *trig, bool state) 66static int iio_bfin_tmr_set_state(struct iio_trigger *trig, bool state)
@@ -178,7 +178,7 @@ static const struct iio_trigger_ops iio_bfin_tmr_trigger_ops = {
178 178
179static int iio_bfin_tmr_trigger_probe(struct platform_device *pdev) 179static int iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
180{ 180{
181 struct iio_bfin_timer_trigger_pdata *pdata = pdev->dev.platform_data; 181 struct iio_bfin_timer_trigger_pdata *pdata;
182 struct bfin_tmr_state *st; 182 struct bfin_tmr_state *st;
183 unsigned int config; 183 unsigned int config;
184 int ret; 184 int ret;
@@ -221,6 +221,7 @@ static int iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
221 221
222 config = PWM_OUT | PERIOD_CNT | IRQ_ENA; 222 config = PWM_OUT | PERIOD_CNT | IRQ_ENA;
223 223
224 pdata = dev_get_platdata(&pdev->dev);
224 if (pdata && pdata->output_enable) { 225 if (pdata && pdata->output_enable) {
225 unsigned long long val; 226 unsigned long long val;
226 227
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 40af75c4201a..4141afb101bb 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -60,41 +60,12 @@
60#define LNET_ACCEPTOR_MAX_RESERVED_PORT 1023 60#define LNET_ACCEPTOR_MAX_RESERVED_PORT 1023
61 61
62/* 62/*
63 * libcfs pseudo device operations
64 *
65 * It's just draft now.
66 */
67
68struct cfs_psdev_file {
69 unsigned long off;
70 void *private_data;
71 unsigned long reserved1;
72 unsigned long reserved2;
73};
74
75struct cfs_psdev_ops {
76 int (*p_open)(unsigned long, void *);
77 int (*p_close)(unsigned long, void *);
78 int (*p_read)(struct cfs_psdev_file *, char *, unsigned long);
79 int (*p_write)(struct cfs_psdev_file *, char *, unsigned long);
80 int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void __user *);
81};
82
83/*
84 * Drop into debugger, if possible. Implementation is provided by platform.
85 */
86
87void cfs_enter_debugger(void);
88
89/*
90 * Defined by platform 63 * Defined by platform
91 */ 64 */
92int unshare_fs_struct(void);
93sigset_t cfs_block_allsigs(void); 65sigset_t cfs_block_allsigs(void);
94sigset_t cfs_block_sigs(unsigned long sigs); 66sigset_t cfs_block_sigs(unsigned long sigs);
95sigset_t cfs_block_sigsinv(unsigned long sigs); 67sigset_t cfs_block_sigsinv(unsigned long sigs);
96void cfs_restore_sigs(sigset_t); 68void cfs_restore_sigs(sigset_t);
97int cfs_signal_pending(void);
98void cfs_clear_sigpending(void); 69void cfs_clear_sigpending(void);
99 70
100/* 71/*
@@ -117,7 +88,25 @@ void cfs_get_random_bytes(void *buf, int size);
117#include "libcfs_workitem.h" 88#include "libcfs_workitem.h"
118#include "libcfs_hash.h" 89#include "libcfs_hash.h"
119#include "libcfs_fail.h" 90#include "libcfs_fail.h"
120#include "libcfs_crypto.h" 91
92struct libcfs_ioctl_handler {
93 struct list_head item;
94 int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
95};
96
97#define DECLARE_IOCTL_HANDLER(ident, func) \
98 struct libcfs_ioctl_handler ident = { \
99 .item = LIST_HEAD_INIT(ident.item), \
100 .handle_ioctl = func \
101 }
102
103int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
104int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
105
106int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
107 const struct libcfs_ioctl_hdr __user *uparam);
108int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
109int libcfs_ioctl(unsigned long cmd, void __user *arg);
121 110
122/* container_of depends on "likely" which is defined in libcfs_private.h */ 111/* container_of depends on "likely" which is defined in libcfs_private.h */
123static inline void *__container_of(void *ptr, unsigned long shift) 112static inline void *__container_of(void *ptr, unsigned long shift)
@@ -143,8 +132,6 @@ extern struct miscdevice libcfs_dev;
143extern char lnet_upcall[1024]; 132extern char lnet_upcall[1024];
144extern char lnet_debug_log_upcall[1024]; 133extern char lnet_debug_log_upcall[1024];
145 134
146extern struct cfs_psdev_ops libcfs_psdev_ops;
147
148extern struct cfs_wi_sched *cfs_sched_rehash; 135extern struct cfs_wi_sched *cfs_sched_rehash;
149 136
150struct lnet_debugfs_symlink_def { 137struct lnet_debugfs_symlink_def {
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 9e62c59714b7..81d8079e3b5e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -203,6 +203,85 @@ int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
203 */ 203 */
204int cfs_cpu_ht_nsiblings(int cpu); 204int cfs_cpu_ht_nsiblings(int cpu);
205 205
206/*
207 * allocate per-cpu-partition data, returned value is an array of pointers,
208 * variable can be indexed by CPU ID.
209 * cptab != NULL: size of array is number of CPU partitions
210 * cptab == NULL: size of array is number of HW cores
211 */
212void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
213/*
214 * destory per-cpu-partition variable
215 */
216void cfs_percpt_free(void *vars);
217int cfs_percpt_number(void *vars);
218
219#define cfs_percpt_for_each(var, i, vars) \
220 for (i = 0; i < cfs_percpt_number(vars) && \
221 ((var) = (vars)[i]) != NULL; i++)
222
223/*
224 * percpu partition lock
225 *
226 * There are some use-cases like this in Lustre:
227 * . each CPU partition has it's own private data which is frequently changed,
228 * and mostly by the local CPU partition.
229 * . all CPU partitions share some global data, these data are rarely changed.
230 *
231 * LNet is typical example.
232 * CPU partition lock is designed for this kind of use-cases:
233 * . each CPU partition has it's own private lock
234 * . change on private data just needs to take the private lock
235 * . read on shared data just needs to take _any_ of private locks
236 * . change on shared data needs to take _all_ private locks,
237 * which is slow and should be really rare.
238 */
239enum {
240 CFS_PERCPT_LOCK_EX = -1, /* negative */
241};
242
243struct cfs_percpt_lock {
244 /* cpu-partition-table for this lock */
245 struct cfs_cpt_table *pcl_cptab;
246 /* exclusively locked */
247 unsigned int pcl_locked;
248 /* private lock table */
249 spinlock_t **pcl_locks;
250};
251
252/* return number of private locks */
253#define cfs_percpt_lock_num(pcl) cfs_cpt_number(pcl->pcl_cptab)
254
255/*
256 * create a cpu-partition lock based on CPU partition table \a cptab,
257 * each private lock has extra \a psize bytes padding data
258 */
259struct cfs_percpt_lock *cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
260 struct lock_class_key *keys);
261/* destroy a cpu-partition lock */
262void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
263
264/* lock private lock \a index of \a pcl */
265void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
266
267/* unlock private lock \a index of \a pcl */
268void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
269
270#define CFS_PERCPT_LOCK_KEYS 256
271
272/* NB: don't allocate keys dynamically, lockdep needs them to be in ".data" */
273#define cfs_percpt_lock_alloc(cptab) \
274({ \
275 static struct lock_class_key ___keys[CFS_PERCPT_LOCK_KEYS]; \
276 struct cfs_percpt_lock *___lk; \
277 \
278 if (cfs_cpt_number(cptab) > CFS_PERCPT_LOCK_KEYS) \
279 ___lk = cfs_percpt_lock_create(cptab, NULL); \
280 else \
281 ___lk = cfs_percpt_lock_create(cptab, ___keys); \
282 ___lk; \
283})
284
206/** 285/**
207 * iterate over all CPU partitions in \a cptab 286 * iterate over all CPU partitions in \a cptab
208 */ 287 */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
index e8663697e7a6..02be7d7608a5 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
@@ -46,7 +46,8 @@ enum cfs_crypto_hash_alg {
46 CFS_HASH_ALG_SHA384, 46 CFS_HASH_ALG_SHA384,
47 CFS_HASH_ALG_SHA512, 47 CFS_HASH_ALG_SHA512,
48 CFS_HASH_ALG_CRC32C, 48 CFS_HASH_ALG_CRC32C,
49 CFS_HASH_ALG_MAX 49 CFS_HASH_ALG_MAX,
50 CFS_HASH_ALG_UNKNOWN = 0xff
50}; 51};
51 52
52static struct cfs_crypto_hash_type hash_types[] = { 53static struct cfs_crypto_hash_type hash_types[] = {
@@ -59,11 +60,22 @@ static struct cfs_crypto_hash_type hash_types[] = {
59 [CFS_HASH_ALG_SHA256] = { "sha256", 0, 32 }, 60 [CFS_HASH_ALG_SHA256] = { "sha256", 0, 32 },
60 [CFS_HASH_ALG_SHA384] = { "sha384", 0, 48 }, 61 [CFS_HASH_ALG_SHA384] = { "sha384", 0, 48 },
61 [CFS_HASH_ALG_SHA512] = { "sha512", 0, 64 }, 62 [CFS_HASH_ALG_SHA512] = { "sha512", 0, 64 },
63 [CFS_HASH_ALG_MAX] = { NULL, 0, 64 },
62}; 64};
63 65
64/** Return pointer to type of hash for valid hash algorithm identifier */ 66/* Maximum size of hash_types[].cht_size */
67#define CFS_CRYPTO_HASH_DIGESTSIZE_MAX 64
68
69/**
70 * Return hash algorithm information for the specified algorithm identifier
71 *
72 * Hash information includes algorithm name, initial seed, hash size.
73 *
74 * \retval cfs_crypto_hash_type for valid ID (CFS_HASH_ALG_*)
75 * \retval NULL for unknown algorithm identifier
76 */
65static inline const struct cfs_crypto_hash_type * 77static inline const struct cfs_crypto_hash_type *
66 cfs_crypto_hash_type(unsigned char hash_alg) 78cfs_crypto_hash_type(enum cfs_crypto_hash_alg hash_alg)
67{ 79{
68 struct cfs_crypto_hash_type *ht; 80 struct cfs_crypto_hash_type *ht;
69 81
@@ -75,8 +87,16 @@ static inline const struct cfs_crypto_hash_type *
75 return NULL; 87 return NULL;
76} 88}
77 89
78/** Return hash name for valid hash algorithm identifier or "unknown" */ 90/**
79static inline const char *cfs_crypto_hash_name(unsigned char hash_alg) 91 * Return hash name for hash algorithm identifier
92 *
93 * \param[in] hash_alg hash alrgorithm id (CFS_HASH_ALG_*)
94 *
95 * \retval string name of known hash algorithm
96 * \retval "unknown" if hash algorithm is unknown
97 */
98static inline const char *
99cfs_crypto_hash_name(enum cfs_crypto_hash_alg hash_alg)
80{ 100{
81 const struct cfs_crypto_hash_type *ht; 101 const struct cfs_crypto_hash_type *ht;
82 102
@@ -86,8 +106,15 @@ static inline const char *cfs_crypto_hash_name(unsigned char hash_alg)
86 return "unknown"; 106 return "unknown";
87} 107}
88 108
89/** Return digest size for valid algorithm identifier or 0 */ 109/**
90static inline int cfs_crypto_hash_digestsize(unsigned char hash_alg) 110 * Return digest size for hash algorithm type
111 *
112 * \param[in] hash_alg hash alrgorithm id (CFS_HASH_ALG_*)
113 *
114 * \retval hash algorithm digest size in bytes
115 * \retval 0 if hash algorithm type is unknown
116 */
117static inline int cfs_crypto_hash_digestsize(enum cfs_crypto_hash_alg hash_alg)
91{ 118{
92 const struct cfs_crypto_hash_type *ht; 119 const struct cfs_crypto_hash_type *ht;
93 120
@@ -97,36 +124,24 @@ static inline int cfs_crypto_hash_digestsize(unsigned char hash_alg)
97 return 0; 124 return 0;
98} 125}
99 126
100/** Return hash identifier for valid hash algorithm name or 0xFF */ 127/**
128 * Find hash algorithm ID for the specified algorithm name
129 *
130 * \retval hash algorithm ID for valid ID (CFS_HASH_ALG_*)
131 * \retval CFS_HASH_ALG_UNKNOWN for unknown algorithm name
132 */
101static inline unsigned char cfs_crypto_hash_alg(const char *algname) 133static inline unsigned char cfs_crypto_hash_alg(const char *algname)
102{ 134{
103 unsigned char i; 135 enum cfs_crypto_hash_alg hash_alg;
104 136
105 for (i = 0; i < CFS_HASH_ALG_MAX; i++) 137 for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
106 if (!strcmp(hash_types[i].cht_name, algname)) 138 if (strcmp(hash_types[hash_alg].cht_name, algname) == 0)
107 break; 139 return hash_alg;
108 return (i == CFS_HASH_ALG_MAX ? 0xFF : i); 140
141 return CFS_HASH_ALG_UNKNOWN;
109} 142}
110 143
111/** Calculate hash digest for buffer. 144int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
112 * @param alg id of hash algorithm
113 * @param buf buffer of data
114 * @param buf_len buffer len
115 * @param key initial value for algorithm, if it is NULL,
116 * default initial value should be used.
117 * @param key_len len of initial value
118 * @param hash [out] pointer to hash, if it is NULL, hash_len is
119 * set to valid digest size in bytes, retval -ENOSPC.
120 * @param hash_len [in,out] size of hash buffer
121 * @returns status of operation
122 * @retval -EINVAL if buf, buf_len, hash_len or alg_id is invalid
123 * @retval -ENODEV if this algorithm is unsupported
124 * @retval -ENOSPC if pointer to hash is NULL, or hash_len less than
125 * digest size
126 * @retval 0 for success
127 * @retval < 0 other errors from lower layers.
128 */
129int cfs_crypto_hash_digest(unsigned char alg,
130 const void *buf, unsigned int buf_len, 145 const void *buf, unsigned int buf_len,
131 unsigned char *key, unsigned int key_len, 146 unsigned char *key, unsigned int key_len,
132 unsigned char *hash, unsigned int *hash_len); 147 unsigned char *hash, unsigned int *hash_len);
@@ -134,66 +149,17 @@ int cfs_crypto_hash_digest(unsigned char alg,
134/* cfs crypto hash descriptor */ 149/* cfs crypto hash descriptor */
135struct cfs_crypto_hash_desc; 150struct cfs_crypto_hash_desc;
136 151
137/** Allocate and initialize descriptor for hash algorithm. 152struct cfs_crypto_hash_desc *
138 * @param alg algorithm id 153cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
139 * @param key initial value for algorithm, if it is NULL, 154 unsigned char *key, unsigned int key_len);
140 * default initial value should be used.
141 * @param key_len len of initial value
142 * @returns pointer to descriptor of hash instance
143 * @retval ERR_PTR(error) when errors occurred.
144 */
145struct cfs_crypto_hash_desc*
146 cfs_crypto_hash_init(unsigned char alg,
147 unsigned char *key, unsigned int key_len);
148
149/** Update digest by part of data.
150 * @param desc hash descriptor
151 * @param page data page
152 * @param offset data offset
153 * @param len data len
154 * @returns status of operation
155 * @retval 0 for success.
156 */
157int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc, 155int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc,
158 struct page *page, unsigned int offset, 156 struct page *page, unsigned int offset,
159 unsigned int len); 157 unsigned int len);
160
161/** Update digest by part of data.
162 * @param desc hash descriptor
163 * @param buf pointer to data buffer
164 * @param buf_len size of data at buffer
165 * @returns status of operation
166 * @retval 0 for success.
167 */
168int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *desc, const void *buf, 158int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *desc, const void *buf,
169 unsigned int buf_len); 159 unsigned int buf_len);
170
171/** Finalize hash calculation, copy hash digest to buffer, destroy hash
172 * descriptor.
173 * @param desc hash descriptor
174 * @param hash buffer pointer to store hash digest
175 * @param hash_len pointer to hash buffer size, if NULL
176 * destroy hash descriptor
177 * @returns status of operation
178 * @retval -ENOSPC if hash is NULL, or *hash_len less than
179 * digest size
180 * @retval 0 for success
181 * @retval < 0 other errors from lower layers.
182 */
183int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *desc, 160int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *desc,
184 unsigned char *hash, unsigned int *hash_len); 161 unsigned char *hash, unsigned int *hash_len);
185/**
186 * Register crypto hash algorithms
187 */
188int cfs_crypto_register(void); 162int cfs_crypto_register(void);
189
190/**
191 * Unregister
192 */
193void cfs_crypto_unregister(void); 163void cfs_crypto_unregister(void);
194 164int cfs_crypto_hash_speed(enum cfs_crypto_hash_alg hash_alg);
195/** Return hash speed in Mbytes per second for valid hash algorithm
196 * identifier. If test was unsuccessful -1 would be returned.
197 */
198int cfs_crypto_hash_speed(unsigned char hash_alg);
199#endif 165#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 98430e7108c1..455c54d0d17c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -85,7 +85,6 @@ struct ptldebug_header {
85#define PH_FLAG_FIRST_RECORD 1 85#define PH_FLAG_FIRST_RECORD 1
86 86
87/* Debugging subsystems (32 bits, non-overlapping) */ 87/* Debugging subsystems (32 bits, non-overlapping) */
88/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
89#define S_UNDEFINED 0x00000001 88#define S_UNDEFINED 0x00000001
90#define S_MDC 0x00000002 89#define S_MDC 0x00000002
91#define S_MDS 0x00000004 90#define S_MDS 0x00000004
@@ -118,10 +117,14 @@ struct ptldebug_header {
118#define S_MGS 0x20000000 117#define S_MGS 0x20000000
119#define S_FID 0x40000000 /* b_new_cmd */ 118#define S_FID 0x40000000 /* b_new_cmd */
120#define S_FLD 0x80000000 /* b_new_cmd */ 119#define S_FLD 0x80000000 /* b_new_cmd */
121/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */ 120
121#define LIBCFS_DEBUG_SUBSYS_NAMES { \
122 "undefined", "mdc", "mds", "osc", "ost", "class", "log", \
123 "llite", "rpc", "mgmt", "lnet", "lnd", "pinger", "filter", "", \
124 "echo", "ldlm", "lov", "lquota", "osd", "lfsck", "", "", "lmv", \
125 "", "sec", "gss", "", "mgc", "mgs", "fid", "fld", NULL }
122 126
123/* Debugging masks (32 bits, non-overlapping) */ 127/* Debugging masks (32 bits, non-overlapping) */
124/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
125#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */ 128#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */
126#define D_INODE 0x00000002 129#define D_INODE 0x00000002
127#define D_SUPER 0x00000004 130#define D_SUPER 0x00000004
@@ -151,9 +154,14 @@ struct ptldebug_header {
151#define D_QUOTA 0x04000000 154#define D_QUOTA 0x04000000
152#define D_SEC 0x08000000 155#define D_SEC 0x08000000
153#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */ 156#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */
154/* keep these in sync with lnet/{utils,libcfs}/debug.c */ 157#define D_HSM 0x20000000
155 158
156#define D_HSM D_TRACE 159#define LIBCFS_DEBUG_MASKS_NAMES { \
160 "trace", "inode", "super", "ext2", "malloc", "cache", "info", \
161 "ioctl", "neterror", "net", "warning", "buffs", "other", \
162 "dentry", "nettrace", "page", "dlmtrace", "error", "emerg", \
163 "ha", "rpctrace", "vfstrace", "reada", "mmap", "config", \
164 "console", "quota", "sec", "lfsck", "hsm", NULL }
157 165
158#define D_CANTMASK (D_ERROR | D_EMERG | D_WARNING | D_CONSOLE) 166#define D_CANTMASK (D_ERROR | D_EMERG | D_WARNING | D_CONSOLE)
159 167
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index aa69c6a33d19..2e008bffc89a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -38,6 +38,7 @@
38 38
39extern unsigned long cfs_fail_loc; 39extern unsigned long cfs_fail_loc;
40extern unsigned int cfs_fail_val; 40extern unsigned int cfs_fail_val;
41extern int cfs_fail_err;
41 42
42extern wait_queue_head_t cfs_race_waitq; 43extern wait_queue_head_t cfs_race_waitq;
43extern int cfs_race_state; 44extern int cfs_race_state;
@@ -70,9 +71,14 @@ enum {
70#define CFS_FAIL_RAND 0x08000000 /* fail 1/N of the times */ 71#define CFS_FAIL_RAND 0x08000000 /* fail 1/N of the times */
71#define CFS_FAIL_USR1 0x04000000 /* user flag */ 72#define CFS_FAIL_USR1 0x04000000 /* user flag */
72 73
73#define CFS_FAIL_PRECHECK(id) (cfs_fail_loc && \ 74#define CFS_FAULT 0x02000000 /* match any CFS_FAULT_CHECK */
74 (cfs_fail_loc & CFS_FAIL_MASK_LOC) == \ 75
75 ((id) & CFS_FAIL_MASK_LOC)) 76static inline bool CFS_FAIL_PRECHECK(__u32 id)
77{
78 return cfs_fail_loc != 0 &&
79 ((cfs_fail_loc & CFS_FAIL_MASK_LOC) == (id & CFS_FAIL_MASK_LOC) ||
80 (cfs_fail_loc & id & CFS_FAULT));
81}
76 82
77static inline int cfs_fail_check_set(__u32 id, __u32 value, 83static inline int cfs_fail_check_set(__u32 id, __u32 value,
78 int set, int quiet) 84 int set, int quiet)
@@ -144,6 +150,9 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
144#define CFS_FAIL_TIMEOUT_MS_ORSET(id, value, ms) \ 150#define CFS_FAIL_TIMEOUT_MS_ORSET(id, value, ms) \
145 cfs_fail_timeout_set(id, value, ms, CFS_FAIL_LOC_ORSET) 151 cfs_fail_timeout_set(id, value, ms, CFS_FAIL_LOC_ORSET)
146 152
153#define CFS_FAULT_CHECK(id) \
154 CFS_FAIL_CHECK(CFS_FAULT | (id))
155
147/* The idea here is to synchronise two threads to force a race. The 156/* The idea here is to synchronise two threads to force a race. The
148 * first thread that calls this with a matching fail_loc is put to 157 * first thread that calls this with a matching fail_loc is put to
149 * sleep. The next thread that calls with the same fail_loc wakes up 158 * sleep. The next thread that calls with the same fail_loc wakes up
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index c3f2332fa043..119986bc7961 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -245,7 +245,7 @@ struct cfs_hash {
245 /** # of iterators (caller of cfs_hash_for_each_*) */ 245 /** # of iterators (caller of cfs_hash_for_each_*) */
246 __u32 hs_iterators; 246 __u32 hs_iterators;
247 /** rehash workitem */ 247 /** rehash workitem */
248 cfs_workitem_t hs_rehash_wi; 248 struct cfs_workitem hs_rehash_wi;
249 /** refcount on this hash table */ 249 /** refcount on this hash table */
250 atomic_t hs_refcount; 250 atomic_t hs_refcount;
251 /** rehash buckets-table */ 251 /** rehash buckets-table */
@@ -262,7 +262,7 @@ struct cfs_hash {
262 /** bits when we found the max depth */ 262 /** bits when we found the max depth */
263 unsigned int hs_dep_bits; 263 unsigned int hs_dep_bits;
264 /** workitem to output max depth */ 264 /** workitem to output max depth */
265 cfs_workitem_t hs_dep_wi; 265 struct cfs_workitem hs_dep_wi;
266#endif 266#endif
267 /** name of htable */ 267 /** name of htable */
268 char hs_name[0]; 268 char hs_name[0];
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index 5ca99bd6f4e9..4b9102bd95d5 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -34,13 +34,16 @@
34 * libcfs/include/libcfs/libcfs_ioctl.h 34 * libcfs/include/libcfs/libcfs_ioctl.h
35 * 35 *
36 * Low-level ioctl data structures. Kernel ioctl functions declared here, 36 * Low-level ioctl data structures. Kernel ioctl functions declared here,
37 * and user space functions are in libcfsutil_ioctl.h. 37 * and user space functions are in libcfs/util/ioctl.h.
38 * 38 *
39 */ 39 */
40 40
41#ifndef __LIBCFS_IOCTL_H__ 41#ifndef __LIBCFS_IOCTL_H__
42#define __LIBCFS_IOCTL_H__ 42#define __LIBCFS_IOCTL_H__
43 43
44#include <linux/types.h>
45#include <linux/ioctl.h>
46
44#define LIBCFS_IOCTL_VERSION 0x0001000a 47#define LIBCFS_IOCTL_VERSION 0x0001000a
45#define LIBCFS_IOCTL_VERSION2 0x0001000b 48#define LIBCFS_IOCTL_VERSION2 0x0001000b
46 49
@@ -49,6 +52,9 @@ struct libcfs_ioctl_hdr {
49 __u32 ioc_version; 52 __u32 ioc_version;
50}; 53};
51 54
55/** max size to copy from userspace */
56#define LIBCFS_IOC_DATA_MAX (128 * 1024)
57
52struct libcfs_ioctl_data { 58struct libcfs_ioctl_data {
53 struct libcfs_ioctl_hdr ioc_hdr; 59 struct libcfs_ioctl_hdr ioc_hdr;
54 60
@@ -73,67 +79,48 @@ struct libcfs_ioctl_data {
73 char ioc_bulk[0]; 79 char ioc_bulk[0];
74}; 80};
75 81
76#define ioc_priority ioc_u32[0]
77
78struct libcfs_debug_ioctl_data { 82struct libcfs_debug_ioctl_data {
79 struct libcfs_ioctl_hdr hdr; 83 struct libcfs_ioctl_hdr hdr;
80 unsigned int subs; 84 unsigned int subs;
81 unsigned int debug; 85 unsigned int debug;
82}; 86};
83 87
84#define LIBCFS_IOC_INIT(data) \ 88/* 'f' ioctls are defined in lustre_ioctl.h and lustre_user.h except for: */
85do { \ 89#define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long)
86 memset(&data, 0, sizeof(data)); \ 90#define IOCTL_LIBCFS_TYPE long
87 data.ioc_version = LIBCFS_IOCTL_VERSION; \
88 data.ioc_len = sizeof(data); \
89} while (0)
90
91struct libcfs_ioctl_handler {
92 struct list_head item;
93 int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
94};
95
96#define DECLARE_IOCTL_HANDLER(ident, func) \
97 struct libcfs_ioctl_handler ident = { \
98 /* .item = */ LIST_HEAD_INIT(ident.item), \
99 /* .handle_ioctl = */ func \
100 }
101 91
102/* FIXME check conflict with lustre_lib.h */ 92#define IOC_LIBCFS_TYPE ('e')
103#define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long) 93#define IOC_LIBCFS_MIN_NR 30
104
105#define IOC_LIBCFS_TYPE 'e'
106#define IOC_LIBCFS_MIN_NR 30
107/* libcfs ioctls */ 94/* libcfs ioctls */
108#define IOC_LIBCFS_PANIC _IOWR('e', 30, long) 95/* IOC_LIBCFS_PANIC obsolete in 2.8.0, was _IOWR('e', 30, IOCTL_LIBCFS_TYPE) */
109#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long) 96#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, IOCTL_LIBCFS_TYPE)
110#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long) 97#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, IOCTL_LIBCFS_TYPE)
111#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long) 98/* IOC_LIBCFS_MEMHOG obsolete in 2.8.0, was _IOWR('e', 36, IOCTL_LIBCFS_TYPE) */
112/* lnet ioctls */ 99/* lnet ioctls */
113#define IOC_LIBCFS_GET_NI _IOWR('e', 50, long) 100#define IOC_LIBCFS_GET_NI _IOWR('e', 50, IOCTL_LIBCFS_TYPE)
114#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long) 101#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, IOCTL_LIBCFS_TYPE)
115#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, long) 102#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, IOCTL_LIBCFS_TYPE)
116#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, long) 103#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, IOCTL_LIBCFS_TYPE)
117/* #define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, long) */ 104/* IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, IOCTL_LIBCFS_TYPE) */
118#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, long) 105#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, IOCTL_LIBCFS_TYPE)
119#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, long) 106#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, IOCTL_LIBCFS_TYPE)
120#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, long) 107#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, IOCTL_LIBCFS_TYPE)
121#define IOC_LIBCFS_PING _IOWR('e', 61, long) 108#define IOC_LIBCFS_PING _IOWR('e', 61, IOCTL_LIBCFS_TYPE)
122/* #define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, long) */ 109/* IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, IOCTL_LIBCFS_TYPE) */
123#define IOC_LIBCFS_LNETST _IOWR('e', 63, long) 110#define IOC_LIBCFS_LNETST _IOWR('e', 63, IOCTL_LIBCFS_TYPE)
124#define IOC_LIBCFS_LNET_FAULT _IOWR('e', 64, long) 111#define IOC_LIBCFS_LNET_FAULT _IOWR('e', 64, IOCTL_LIBCFS_TYPE)
125/* lnd ioctls */ 112/* lnd ioctls */
126#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, long) 113#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, IOCTL_LIBCFS_TYPE)
127#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, long) 114#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, IOCTL_LIBCFS_TYPE)
128#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, long) 115#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, IOCTL_LIBCFS_TYPE)
129#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, long) 116#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, IOCTL_LIBCFS_TYPE)
130#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, long) 117#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, IOCTL_LIBCFS_TYPE)
131#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, long) 118#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, IOCTL_LIBCFS_TYPE)
132#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, long) 119#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, IOCTL_LIBCFS_TYPE)
133/* ioctl 77 is free for use */ 120/* ioctl 77 is free for use */
134#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, long) 121#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, IOCTL_LIBCFS_TYPE)
135#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, long) 122#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, IOCTL_LIBCFS_TYPE)
136#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, long) 123#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, IOCTL_LIBCFS_TYPE)
137 124
138/* 125/*
139 * DLC Specific IOCTL numbers. 126 * DLC Specific IOCTL numbers.
@@ -155,76 +142,4 @@ struct libcfs_ioctl_handler {
155#define IOC_LIBCFS_GET_LNET_STATS _IOWR(IOC_LIBCFS_TYPE, 91, IOCTL_CONFIG_SIZE) 142#define IOC_LIBCFS_GET_LNET_STATS _IOWR(IOC_LIBCFS_TYPE, 91, IOCTL_CONFIG_SIZE)
156#define IOC_LIBCFS_MAX_NR 91 143#define IOC_LIBCFS_MAX_NR 91
157 144
158static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
159{
160 int len = sizeof(*data);
161
162 len += cfs_size_round(data->ioc_inllen1);
163 len += cfs_size_round(data->ioc_inllen2);
164 return len;
165}
166
167static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
168{
169 if (data->ioc_hdr.ioc_len > (1 << 30)) {
170 CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n");
171 return 1;
172 }
173 if (data->ioc_inllen1 > (1<<30)) {
174 CERROR("LIBCFS ioctl: ioc_inllen1 larger than 1<<30\n");
175 return 1;
176 }
177 if (data->ioc_inllen2 > (1<<30)) {
178 CERROR("LIBCFS ioctl: ioc_inllen2 larger than 1<<30\n");
179 return 1;
180 }
181 if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
182 CERROR("LIBCFS ioctl: inlbuf1 pointer but 0 length\n");
183 return 1;
184 }
185 if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
186 CERROR("LIBCFS ioctl: inlbuf2 pointer but 0 length\n");
187 return 1;
188 }
189 if (data->ioc_pbuf1 && !data->ioc_plen1) {
190 CERROR("LIBCFS ioctl: pbuf1 pointer but 0 length\n");
191 return 1;
192 }
193 if (data->ioc_pbuf2 && !data->ioc_plen2) {
194 CERROR("LIBCFS ioctl: pbuf2 pointer but 0 length\n");
195 return 1;
196 }
197 if (data->ioc_plen1 && !data->ioc_pbuf1) {
198 CERROR("LIBCFS ioctl: plen1 nonzero but no pbuf1 pointer\n");
199 return 1;
200 }
201 if (data->ioc_plen2 && !data->ioc_pbuf2) {
202 CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
203 return 1;
204 }
205 if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
206 CERROR("LIBCFS ioctl: packlen != ioc_len\n");
207 return 1;
208 }
209 if (data->ioc_inllen1 &&
210 data->ioc_bulk[data->ioc_inllen1 - 1] != '\0') {
211 CERROR("LIBCFS ioctl: inlbuf1 not 0 terminated\n");
212 return 1;
213 }
214 if (data->ioc_inllen2 &&
215 data->ioc_bulk[cfs_size_round(data->ioc_inllen1) +
216 data->ioc_inllen2 - 1] != '\0') {
217 CERROR("LIBCFS ioctl: inlbuf2 not 0 terminated\n");
218 return 1;
219 }
220 return 0;
221}
222
223int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
224int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
225int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg,
226 __u32 *buf_len);
227int libcfs_ioctl_popdata(void __user *arg, void *buf, int size);
228int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
229
230#endif /* __LIBCFS_IOCTL_H__ */ 145#endif /* __LIBCFS_IOCTL_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
index 082fe6de90e4..ac4e8cfe6c8c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
@@ -40,21 +40,32 @@
40#ifndef __LIBCFS_PRIM_H__ 40#ifndef __LIBCFS_PRIM_H__
41#define __LIBCFS_PRIM_H__ 41#define __LIBCFS_PRIM_H__
42 42
43void add_wait_queue_exclusive_head(wait_queue_head_t *, wait_queue_t *);
44
45/* 43/*
46 * Memory 44 * Memory
47 */ 45 */
48#ifndef memory_pressure_get 46#if BITS_PER_LONG == 32
49#define memory_pressure_get() (0) 47/* limit to lowmem on 32-bit systems */
50#endif 48#define NUM_CACHEPAGES \
51#ifndef memory_pressure_set 49 min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
52#define memory_pressure_set() do {} while (0) 50#else
53#endif 51#define NUM_CACHEPAGES totalram_pages
54#ifndef memory_pressure_clr
55#define memory_pressure_clr() do {} while (0)
56#endif 52#endif
57 53
54static inline unsigned int memory_pressure_get(void)
55{
56 return current->flags & PF_MEMALLOC;
57}
58
59static inline void memory_pressure_set(void)
60{
61 current->flags |= PF_MEMALLOC;
62}
63
64static inline void memory_pressure_clr(void)
65{
66 current->flags &= ~PF_MEMALLOC;
67}
68
58static inline int cfs_memory_pressure_get_and_set(void) 69static inline int cfs_memory_pressure_get_and_set(void)
59{ 70{
60 int old = memory_pressure_get(); 71 int old = memory_pressure_get();
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index 13335437c69c..2fd2a9690a34 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -182,25 +182,6 @@ int libcfs_debug_clear_buffer(void);
182int libcfs_debug_mark_buffer(const char *text); 182int libcfs_debug_mark_buffer(const char *text);
183 183
184/* 184/*
185 * allocate per-cpu-partition data, returned value is an array of pointers,
186 * variable can be indexed by CPU ID.
187 * cptable != NULL: size of array is number of CPU partitions
188 * cptable == NULL: size of array is number of HW cores
189 */
190void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
191/*
192 * destroy per-cpu-partition variable
193 */
194void cfs_percpt_free(void *vars);
195int cfs_percpt_number(void *vars);
196void *cfs_percpt_current(void *vars);
197void *cfs_percpt_index(void *vars, int idx);
198
199#define cfs_percpt_for_each(var, i, vars) \
200 for (i = 0; i < cfs_percpt_number(vars) && \
201 ((var) = (vars)[i]) != NULL; i++)
202
203/*
204 * allocate a variable array, returned value is an array of pointers. 185 * allocate a variable array, returned value is an array of pointers.
205 * Caller can specify length of array by count. 186 * Caller can specify length of array by count.
206 */ 187 */
@@ -302,62 +283,6 @@ do { \
302#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof(*(ptr))) 283#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
303#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr))) 284#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))
304 285
305/*
306 * percpu partition lock
307 *
308 * There are some use-cases like this in Lustre:
309 * . each CPU partition has it's own private data which is frequently changed,
310 * and mostly by the local CPU partition.
311 * . all CPU partitions share some global data, these data are rarely changed.
312 *
313 * LNet is typical example.
314 * CPU partition lock is designed for this kind of use-cases:
315 * . each CPU partition has it's own private lock
316 * . change on private data just needs to take the private lock
317 * . read on shared data just needs to take _any_ of private locks
318 * . change on shared data needs to take _all_ private locks,
319 * which is slow and should be really rare.
320 */
321
322enum {
323 CFS_PERCPT_LOCK_EX = -1, /* negative */
324};
325
326struct cfs_percpt_lock {
327 /* cpu-partition-table for this lock */
328 struct cfs_cpt_table *pcl_cptab;
329 /* exclusively locked */
330 unsigned int pcl_locked;
331 /* private lock table */
332 spinlock_t **pcl_locks;
333};
334
335/* return number of private locks */
336static inline int
337cfs_percpt_lock_num(struct cfs_percpt_lock *pcl)
338{
339 return cfs_cpt_number(pcl->pcl_cptab);
340}
341
342/*
343 * create a cpu-partition lock based on CPU partition table \a cptab,
344 * each private lock has extra \a psize bytes padding data
345 */
346struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
347/* destroy a cpu-partition lock */
348void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
349
350/* lock private lock \a index of \a pcl */
351void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
352/* unlock private lock \a index of \a pcl */
353void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
354/* create percpt (atomic) refcount based on @cptab */
355atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
356/* destroy percpt refcount */
357void cfs_percpt_atomic_free(atomic_t **refs);
358/* return sum of all percpu refs */
359int cfs_percpt_atomic_summary(atomic_t **refs);
360
361/** Compile-time assertion. 286/** Compile-time assertion.
362 287
363 * Check an invariant described by a constant expression at compile time by 288 * Check an invariant described by a constant expression at compile time by
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
index 5cc64f327a87..f9b20c5accbf 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
@@ -73,7 +73,7 @@ int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt,
73struct cfs_workitem; 73struct cfs_workitem;
74 74
75typedef int (*cfs_wi_action_t) (struct cfs_workitem *); 75typedef int (*cfs_wi_action_t) (struct cfs_workitem *);
76typedef struct cfs_workitem { 76struct cfs_workitem {
77 /** chain on runq or rerunq */ 77 /** chain on runq or rerunq */
78 struct list_head wi_list; 78 struct list_head wi_list;
79 /** working function */ 79 /** working function */
@@ -84,10 +84,10 @@ typedef struct cfs_workitem {
84 unsigned short wi_running:1; 84 unsigned short wi_running:1;
85 /** scheduled */ 85 /** scheduled */
86 unsigned short wi_scheduled:1; 86 unsigned short wi_scheduled:1;
87} cfs_workitem_t; 87};
88 88
89static inline void 89static inline void
90cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action) 90cfs_wi_init(struct cfs_workitem *wi, void *data, cfs_wi_action_t action)
91{ 91{
92 INIT_LIST_HEAD(&wi->wi_list); 92 INIT_LIST_HEAD(&wi->wi_list);
93 93
@@ -97,9 +97,9 @@ cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action)
97 wi->wi_action = action; 97 wi->wi_action = action;
98} 98}
99 99
100void cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi); 100void cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
101int cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi); 101int cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
102void cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi); 102void cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
103 103
104int cfs_wi_startup(void); 104int cfs_wi_startup(void);
105void cfs_wi_shutdown(void); 105void cfs_wi_shutdown(void);
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index d94b2661658a..a268ef7aa19d 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -60,6 +60,7 @@
60#include <linux/moduleparam.h> 60#include <linux/moduleparam.h>
61#include <linux/mutex.h> 61#include <linux/mutex.h>
62#include <linux/notifier.h> 62#include <linux/notifier.h>
63#include <linux/pagemap.h>
63#include <linux/random.h> 64#include <linux/random.h>
64#include <linux/rbtree.h> 65#include <linux/rbtree.h>
65#include <linux/rwsem.h> 66#include <linux/rwsem.h>
@@ -83,7 +84,6 @@
83#include <stdarg.h> 84#include <stdarg.h>
84#include "linux-cpu.h" 85#include "linux-cpu.h"
85#include "linux-time.h" 86#include "linux-time.h"
86#include "linux-mem.h"
87 87
88#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5) 88#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
89 89
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index c04979ae0a38..f63cb47bc309 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -23,7 +23,7 @@
23 * This file is part of Lustre, http://www.lustre.org/ 23 * This file is part of Lustre, http://www.lustre.org/
24 * Lustre is a trademark of Sun Microsystems, Inc. 24 * Lustre is a trademark of Sun Microsystems, Inc.
25 * 25 *
26 * libcfs/include/libcfs/linux/linux-mem.h 26 * libcfs/include/libcfs/linux/linux-cpu.h
27 * 27 *
28 * Basic library routines. 28 * Basic library routines.
29 * 29 *
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
deleted file mode 100644
index 837eb22749c3..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26/*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32/*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * libcfs/include/libcfs/linux/linux-mem.h
37 *
38 * Basic library routines.
39 */
40
41#ifndef __LIBCFS_LINUX_CFS_MEM_H__
42#define __LIBCFS_LINUX_CFS_MEM_H__
43
44#ifndef __LIBCFS_LIBCFS_H__
45#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
46#endif
47
48#include <linux/mm.h>
49#include <linux/vmalloc.h>
50#include <linux/pagemap.h>
51#include <linux/slab.h>
52#include <linux/memcontrol.h>
53#include <linux/mm_inline.h>
54
55#ifndef HAVE_LIBCFS_CPT
56/* Need this for cfs_cpt_table */
57#include "../libcfs_cpu.h"
58#endif
59
60#define CFS_PAGE_MASK (~((__u64)PAGE_SIZE-1))
61#define page_index(p) ((p)->index)
62
63#define memory_pressure_get() (current->flags & PF_MEMALLOC)
64#define memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
65#define memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)
66
67#if BITS_PER_LONG == 32
68/* limit to lowmem on 32-bit systems */
69#define NUM_CACHEPAGES \
70 min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
71#else
72#define NUM_CACHEPAGES totalram_pages
73#endif
74
75#define DECL_MMSPACE mm_segment_t __oldfs
76#define MMSPACE_OPEN \
77 do { __oldfs = get_fs(); set_fs(get_ds()); } while (0)
78#define MMSPACE_CLOSE set_fs(__oldfs)
79
80#endif /* __LINUX_CFS_MEM_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
index ed8764b11c80..7656b09b8752 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -70,12 +70,12 @@ static inline unsigned long cfs_time_current(void)
70 70
71static inline long cfs_time_seconds(int seconds) 71static inline long cfs_time_seconds(int seconds)
72{ 72{
73 return ((long)seconds) * HZ; 73 return ((long)seconds) * msecs_to_jiffies(MSEC_PER_SEC);
74} 74}
75 75
76static inline long cfs_duration_sec(long d) 76static inline long cfs_duration_sec(long d)
77{ 77{
78 return d / HZ; 78 return d / msecs_to_jiffies(MSEC_PER_SEC);
79} 79}
80 80
81#define cfs_time_current_64 get_jiffies_64 81#define cfs_time_current_64 get_jiffies_64
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
index 84a19e96ea04..6ce9accb91ad 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
@@ -37,10 +37,37 @@
37#define LNET_MAX_SHOW_NUM_CPT 128 37#define LNET_MAX_SHOW_NUM_CPT 128
38#define LNET_UNDEFINED_HOPS ((__u32) -1) 38#define LNET_UNDEFINED_HOPS ((__u32) -1)
39 39
40struct lnet_ioctl_config_lnd_cmn_tunables {
41 __u32 lct_version;
42 __u32 lct_peer_timeout;
43 __u32 lct_peer_tx_credits;
44 __u32 lct_peer_rtr_credits;
45 __u32 lct_max_tx_credits;
46};
47
48struct lnet_ioctl_config_o2iblnd_tunables {
49 __u32 lnd_version;
50 __u32 lnd_peercredits_hiw;
51 __u32 lnd_map_on_demand;
52 __u32 lnd_concurrent_sends;
53 __u32 lnd_fmr_pool_size;
54 __u32 lnd_fmr_flush_trigger;
55 __u32 lnd_fmr_cache;
56 __u32 pad;
57};
58
59struct lnet_ioctl_config_lnd_tunables {
60 struct lnet_ioctl_config_lnd_cmn_tunables lt_cmn;
61 union {
62 struct lnet_ioctl_config_o2iblnd_tunables lt_o2ib;
63 } lt_tun_u;
64};
65
40struct lnet_ioctl_net_config { 66struct lnet_ioctl_net_config {
41 char ni_interfaces[LNET_MAX_INTERFACES][LNET_MAX_STR_LEN]; 67 char ni_interfaces[LNET_MAX_INTERFACES][LNET_MAX_STR_LEN];
42 __u32 ni_status; 68 __u32 ni_status;
43 __u32 ni_cpts[LNET_MAX_SHOW_NUM_CPT]; 69 __u32 ni_cpts[LNET_MAX_SHOW_NUM_CPT];
70 char cfg_bulk[0];
44}; 71};
45 72
46#define LNET_TINY_BUF_IDX 0 73#define LNET_TINY_BUF_IDX 0
@@ -81,7 +108,7 @@ struct lnet_ioctl_config_data {
81 __s32 net_peer_rtr_credits; 108 __s32 net_peer_rtr_credits;
82 __s32 net_max_tx_credits; 109 __s32 net_max_tx_credits;
83 __u32 net_cksum_algo; 110 __u32 net_cksum_algo;
84 __u32 net_pad; 111 __u32 net_interface_count;
85 } cfg_net; 112 } cfg_net;
86 struct { 113 struct {
87 __u32 buf_enable; 114 __u32 buf_enable;
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index dfc0208dc3a7..513a8225f888 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -463,10 +463,6 @@ int lnet_del_route(__u32 net, lnet_nid_t gw_nid);
463void lnet_destroy_routes(void); 463void lnet_destroy_routes(void);
464int lnet_get_route(int idx, __u32 *net, __u32 *hops, 464int lnet_get_route(int idx, __u32 *net, __u32 *hops,
465 lnet_nid_t *gateway, __u32 *alive, __u32 *priority); 465 lnet_nid_t *gateway, __u32 *alive, __u32 *priority);
466int lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid,
467 int *peer_timeout, int *peer_tx_credits,
468 int *peer_rtr_cr, int *max_tx_credits,
469 struct lnet_ioctl_net_config *net_config);
470int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg); 466int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg);
471 467
472void lnet_router_debugfs_init(void); 468void lnet_router_debugfs_init(void);
@@ -478,9 +474,8 @@ int lnet_rtrpools_enable(void);
478void lnet_rtrpools_disable(void); 474void lnet_rtrpools_disable(void);
479void lnet_rtrpools_free(int keep_pools); 475void lnet_rtrpools_free(int keep_pools);
480lnet_remotenet_t *lnet_find_net_locked(__u32 net); 476lnet_remotenet_t *lnet_find_net_locked(__u32 net);
481int lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets, 477int lnet_dyn_add_ni(lnet_pid_t requested_pid,
482 __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr, 478 struct lnet_ioctl_config_data *conf);
483 __s32 credits);
484int lnet_dyn_del_ni(__u32 net); 479int lnet_dyn_del_ni(__u32 net);
485int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason); 480int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason);
486 481
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index 29c72f8c2f99..24c4a08e6dc6 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -273,6 +273,8 @@ typedef struct lnet_ni {
273 int **ni_refs; /* percpt reference count */ 273 int **ni_refs; /* percpt reference count */
274 time64_t ni_last_alive;/* when I was last alive */ 274 time64_t ni_last_alive;/* when I was last alive */
275 lnet_ni_status_t *ni_status; /* my health status */ 275 lnet_ni_status_t *ni_status; /* my health status */
276 /* per NI LND tunables */
277 struct lnet_ioctl_config_lnd_tunables *ni_lnd_tunables;
276 /* equivalent interfaces to use */ 278 /* equivalent interfaces to use */
277 char *ni_interfaces[LNET_MAX_INTERFACES]; 279 char *ni_interfaces[LNET_MAX_INTERFACES];
278} lnet_ni_t; 280} lnet_ni_t;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 0d32e6541a3f..6c59f2ff2220 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -335,8 +335,8 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
335 peer->ibp_nid = nid; 335 peer->ibp_nid = nid;
336 peer->ibp_error = 0; 336 peer->ibp_error = 0;
337 peer->ibp_last_alive = 0; 337 peer->ibp_last_alive = 0;
338 peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS; 338 peer->ibp_max_frags = kiblnd_cfg_rdma_frags(peer->ibp_ni);
339 peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits; 339 peer->ibp_queue_depth = ni->ni_peertxcredits;
340 atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */ 340 atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
341 341
342 INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */ 342 INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
@@ -1283,65 +1283,86 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
1283 } 1283 }
1284} 1284}
1285 1285
1286struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd, 1286struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
1287 int negotiated_nfrags) 1287 int negotiated_nfrags)
1288{ 1288{
1289 __u16 nfrags = (negotiated_nfrags != -1) ? 1289 kib_net_t *net = ni->ni_data;
1290 negotiated_nfrags : *kiblnd_tunables.kib_map_on_demand; 1290 kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
1291 struct lnet_ioctl_config_o2iblnd_tunables *tunables;
1292 __u16 nfrags;
1293 int mod;
1294
1295 tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
1296 mod = tunables->lnd_map_on_demand;
1297 nfrags = (negotiated_nfrags != -1) ? negotiated_nfrags : mod;
1291 1298
1292 LASSERT(hdev->ibh_mrs); 1299 LASSERT(hdev->ibh_mrs);
1293 1300
1294 if (*kiblnd_tunables.kib_map_on_demand > 0 && 1301 if (mod > 0 && nfrags <= rd->rd_nfrags)
1295 nfrags <= rd->rd_nfrags)
1296 return NULL; 1302 return NULL;
1297 1303
1298 return hdev->ibh_mrs; 1304 return hdev->ibh_mrs;
1299} 1305}
1300 1306
1301static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool) 1307static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
1302{ 1308{
1303 LASSERT(!pool->fpo_map_count); 1309 LASSERT(!fpo->fpo_map_count);
1304 1310
1305 if (pool->fpo_fmr_pool) 1311 if (fpo->fpo_is_fmr) {
1306 ib_destroy_fmr_pool(pool->fpo_fmr_pool); 1312 if (fpo->fmr.fpo_fmr_pool)
1313 ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
1314 } else {
1315 struct kib_fast_reg_descriptor *frd, *tmp;
1316 int i = 0;
1317
1318 list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
1319 frd_list) {
1320 list_del(&frd->frd_list);
1321 ib_dereg_mr(frd->frd_mr);
1322 LIBCFS_FREE(frd, sizeof(*frd));
1323 i++;
1324 }
1325 if (i < fpo->fast_reg.fpo_pool_size)
1326 CERROR("FastReg pool still has %d regions registered\n",
1327 fpo->fast_reg.fpo_pool_size - i);
1328 }
1307 1329
1308 if (pool->fpo_hdev) 1330 if (fpo->fpo_hdev)
1309 kiblnd_hdev_decref(pool->fpo_hdev); 1331 kiblnd_hdev_decref(fpo->fpo_hdev);
1310 1332
1311 LIBCFS_FREE(pool, sizeof(*pool)); 1333 LIBCFS_FREE(fpo, sizeof(*fpo));
1312} 1334}
1313 1335
1314static void kiblnd_destroy_fmr_pool_list(struct list_head *head) 1336static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
1315{ 1337{
1316 kib_fmr_pool_t *pool; 1338 kib_fmr_pool_t *fpo, *tmp;
1317 1339
1318 while (!list_empty(head)) { 1340 list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
1319 pool = list_entry(head->next, kib_fmr_pool_t, fpo_list); 1341 list_del(&fpo->fpo_list);
1320 list_del(&pool->fpo_list); 1342 kiblnd_destroy_fmr_pool(fpo);
1321 kiblnd_destroy_fmr_pool(pool);
1322 } 1343 }
1323} 1344}
1324 1345
1325static int kiblnd_fmr_pool_size(int ncpts) 1346static int
1347kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
1348 int ncpts)
1326{ 1349{
1327 int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts; 1350 int size = tunables->lnd_fmr_pool_size / ncpts;
1328 1351
1329 return max(IBLND_FMR_POOL, size); 1352 return max(IBLND_FMR_POOL, size);
1330} 1353}
1331 1354
1332static int kiblnd_fmr_flush_trigger(int ncpts) 1355static int
1356kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
1357 int ncpts)
1333{ 1358{
1334 int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts; 1359 int size = tunables->lnd_fmr_flush_trigger / ncpts;
1335 1360
1336 return max(IBLND_FMR_POOL_FLUSH, size); 1361 return max(IBLND_FMR_POOL_FLUSH, size);
1337} 1362}
1338 1363
1339static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, 1364static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
1340 kib_fmr_pool_t **pp_fpo)
1341{ 1365{
1342 /* FMR pool for RDMA */
1343 kib_dev_t *dev = fps->fps_net->ibn_dev;
1344 kib_fmr_pool_t *fpo;
1345 struct ib_fmr_pool_param param = { 1366 struct ib_fmr_pool_param param = {
1346 .max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE, 1367 .max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
1347 .page_shift = PAGE_SHIFT, 1368 .page_shift = PAGE_SHIFT,
@@ -1351,7 +1372,78 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
1351 .dirty_watermark = fps->fps_flush_trigger, 1372 .dirty_watermark = fps->fps_flush_trigger,
1352 .flush_function = NULL, 1373 .flush_function = NULL,
1353 .flush_arg = NULL, 1374 .flush_arg = NULL,
1354 .cache = !!*kiblnd_tunables.kib_fmr_cache}; 1375 .cache = !!fps->fps_cache };
1376 int rc = 0;
1377
1378 fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd,
1379 &param);
1380 if (IS_ERR(fpo->fmr.fpo_fmr_pool)) {
1381 rc = PTR_ERR(fpo->fmr.fpo_fmr_pool);
1382 if (rc != -ENOSYS)
1383 CERROR("Failed to create FMR pool: %d\n", rc);
1384 else
1385 CERROR("FMRs are not supported\n");
1386 }
1387
1388 return rc;
1389}
1390
1391static int kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
1392{
1393 struct kib_fast_reg_descriptor *frd, *tmp;
1394 int i, rc;
1395
1396 INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
1397 fpo->fast_reg.fpo_pool_size = 0;
1398 for (i = 0; i < fps->fps_pool_size; i++) {
1399 LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
1400 sizeof(*frd));
1401 if (!frd) {
1402 CERROR("Failed to allocate a new fast_reg descriptor\n");
1403 rc = -ENOMEM;
1404 goto out;
1405 }
1406
1407 frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
1408 IB_MR_TYPE_MEM_REG,
1409 LNET_MAX_PAYLOAD / PAGE_SIZE);
1410 if (IS_ERR(frd->frd_mr)) {
1411 rc = PTR_ERR(frd->frd_mr);
1412 CERROR("Failed to allocate ib_alloc_mr: %d\n", rc);
1413 frd->frd_mr = NULL;
1414 goto out_middle;
1415 }
1416
1417 frd->frd_valid = true;
1418
1419 list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
1420 fpo->fast_reg.fpo_pool_size++;
1421 }
1422
1423 return 0;
1424
1425out_middle:
1426 if (frd->frd_mr)
1427 ib_dereg_mr(frd->frd_mr);
1428 LIBCFS_FREE(frd, sizeof(*frd));
1429
1430out:
1431 list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
1432 frd_list) {
1433 list_del(&frd->frd_list);
1434 ib_dereg_mr(frd->frd_mr);
1435 LIBCFS_FREE(frd, sizeof(*frd));
1436 }
1437
1438 return rc;
1439}
1440
1441static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
1442 kib_fmr_pool_t **pp_fpo)
1443{
1444 kib_dev_t *dev = fps->fps_net->ibn_dev;
1445 struct ib_device_attr *dev_attr;
1446 kib_fmr_pool_t *fpo;
1355 int rc; 1447 int rc;
1356 1448
1357 LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo)); 1449 LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
@@ -1359,22 +1451,41 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
1359 return -ENOMEM; 1451 return -ENOMEM;
1360 1452
1361 fpo->fpo_hdev = kiblnd_current_hdev(dev); 1453 fpo->fpo_hdev = kiblnd_current_hdev(dev);
1362 1454 dev_attr = &fpo->fpo_hdev->ibh_ibdev->attrs;
1363 fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, &param); 1455
1364 if (IS_ERR(fpo->fpo_fmr_pool)) { 1456 /* Check for FMR or FastReg support */
1365 rc = PTR_ERR(fpo->fpo_fmr_pool); 1457 fpo->fpo_is_fmr = 0;
1366 CERROR("Failed to create FMR pool: %d\n", rc); 1458 if (fpo->fpo_hdev->ibh_ibdev->alloc_fmr &&
1367 1459 fpo->fpo_hdev->ibh_ibdev->dealloc_fmr &&
1368 kiblnd_hdev_decref(fpo->fpo_hdev); 1460 fpo->fpo_hdev->ibh_ibdev->map_phys_fmr &&
1369 LIBCFS_FREE(fpo, sizeof(*fpo)); 1461 fpo->fpo_hdev->ibh_ibdev->unmap_fmr) {
1370 return rc; 1462 LCONSOLE_INFO("Using FMR for registration\n");
1463 fpo->fpo_is_fmr = 1;
1464 } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
1465 LCONSOLE_INFO("Using FastReg for registration\n");
1466 } else {
1467 rc = -ENOSYS;
1468 LCONSOLE_ERROR_MSG(rc, "IB device does not support FMRs nor FastRegs, can't register memory\n");
1469 goto out_fpo;
1371 } 1470 }
1372 1471
1472 if (fpo->fpo_is_fmr)
1473 rc = kiblnd_alloc_fmr_pool(fps, fpo);
1474 else
1475 rc = kiblnd_alloc_freg_pool(fps, fpo);
1476 if (rc)
1477 goto out_fpo;
1478
1373 fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); 1479 fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1374 fpo->fpo_owner = fps; 1480 fpo->fpo_owner = fps;
1375 *pp_fpo = fpo; 1481 *pp_fpo = fpo;
1376 1482
1377 return 0; 1483 return 0;
1484
1485out_fpo:
1486 kiblnd_hdev_decref(fpo->fpo_hdev);
1487 LIBCFS_FREE(fpo, sizeof(*fpo));
1488 return rc;
1378} 1489}
1379 1490
1380static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, 1491static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
@@ -1407,9 +1518,10 @@ static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
1407 } 1518 }
1408} 1519}
1409 1520
1410static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, 1521static int
1411 kib_net_t *net, int pool_size, 1522kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
1412 int flush_trigger) 1523 kib_net_t *net,
1524 struct lnet_ioctl_config_o2iblnd_tunables *tunables)
1413{ 1525{
1414 kib_fmr_pool_t *fpo; 1526 kib_fmr_pool_t *fpo;
1415 int rc; 1527 int rc;
@@ -1418,8 +1530,11 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
1418 1530
1419 fps->fps_net = net; 1531 fps->fps_net = net;
1420 fps->fps_cpt = cpt; 1532 fps->fps_cpt = cpt;
1421 fps->fps_pool_size = pool_size; 1533
1422 fps->fps_flush_trigger = flush_trigger; 1534 fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts);
1535 fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts);
1536 fps->fps_cache = tunables->lnd_fmr_cache;
1537
1423 spin_lock_init(&fps->fps_lock); 1538 spin_lock_init(&fps->fps_lock);
1424 INIT_LIST_HEAD(&fps->fps_pool_list); 1539 INIT_LIST_HEAD(&fps->fps_pool_list);
1425 INIT_LIST_HEAD(&fps->fps_failed_pool_list); 1540 INIT_LIST_HEAD(&fps->fps_failed_pool_list);
@@ -1440,25 +1555,64 @@ static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
1440 return cfs_time_aftereq(now, fpo->fpo_deadline); 1555 return cfs_time_aftereq(now, fpo->fpo_deadline);
1441} 1556}
1442 1557
1558static int
1559kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
1560{
1561 __u64 *pages = tx->tx_pages;
1562 kib_hca_dev_t *hdev;
1563 int npages;
1564 int size;
1565 int i;
1566
1567 hdev = tx->tx_pool->tpo_hdev;
1568
1569 for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
1570 for (size = 0; size < rd->rd_frags[i].rf_nob;
1571 size += hdev->ibh_page_size) {
1572 pages[npages++] = (rd->rd_frags[i].rf_addr &
1573 hdev->ibh_page_mask) + size;
1574 }
1575 }
1576
1577 return npages;
1578}
1579
1443void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) 1580void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
1444{ 1581{
1445 LIST_HEAD(zombies); 1582 LIST_HEAD(zombies);
1446 kib_fmr_pool_t *fpo = fmr->fmr_pool; 1583 kib_fmr_pool_t *fpo = fmr->fmr_pool;
1447 kib_fmr_poolset_t *fps = fpo->fpo_owner; 1584 kib_fmr_poolset_t *fps;
1448 unsigned long now = cfs_time_current(); 1585 unsigned long now = cfs_time_current();
1449 kib_fmr_pool_t *tmp; 1586 kib_fmr_pool_t *tmp;
1450 int rc; 1587 int rc;
1451 1588
1452 rc = ib_fmr_pool_unmap(fmr->fmr_pfmr); 1589 if (!fpo)
1453 LASSERT(!rc); 1590 return;
1454 1591
1455 if (status) { 1592 fps = fpo->fpo_owner;
1456 rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool); 1593 if (fpo->fpo_is_fmr) {
1457 LASSERT(!rc); 1594 if (fmr->fmr_pfmr) {
1458 } 1595 rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
1596 LASSERT(!rc);
1597 fmr->fmr_pfmr = NULL;
1598 }
1599
1600 if (status) {
1601 rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
1602 LASSERT(!rc);
1603 }
1604 } else {
1605 struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;
1459 1606
1607 if (frd) {
1608 frd->frd_valid = false;
1609 spin_lock(&fps->fps_lock);
1610 list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
1611 spin_unlock(&fps->fps_lock);
1612 fmr->fmr_frd = NULL;
1613 }
1614 }
1460 fmr->fmr_pool = NULL; 1615 fmr->fmr_pool = NULL;
1461 fmr->fmr_pfmr = NULL;
1462 1616
1463 spin_lock(&fps->fps_lock); 1617 spin_lock(&fps->fps_lock);
1464 fpo->fpo_map_count--; /* decref the pool */ 1618 fpo->fpo_map_count--; /* decref the pool */
@@ -1479,11 +1633,15 @@ void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
1479 kiblnd_destroy_fmr_pool_list(&zombies); 1633 kiblnd_destroy_fmr_pool_list(&zombies);
1480} 1634}
1481 1635
1482int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, 1636int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
1483 __u64 iov, kib_fmr_t *fmr) 1637 kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
1638 kib_fmr_t *fmr)
1484{ 1639{
1485 struct ib_pool_fmr *pfmr; 1640 __u64 *pages = tx->tx_pages;
1641 bool is_rx = (rd != tx->tx_rd);
1642 bool tx_pages_mapped = 0;
1486 kib_fmr_pool_t *fpo; 1643 kib_fmr_pool_t *fpo;
1644 int npages = 0;
1487 __u64 version; 1645 __u64 version;
1488 int rc; 1646 int rc;
1489 1647
@@ -1493,21 +1651,95 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
1493 list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) { 1651 list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
1494 fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); 1652 fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1495 fpo->fpo_map_count++; 1653 fpo->fpo_map_count++;
1496 spin_unlock(&fps->fps_lock);
1497 1654
1498 pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool, 1655 if (fpo->fpo_is_fmr) {
1499 pages, npages, iov); 1656 struct ib_pool_fmr *pfmr;
1500 if (likely(!IS_ERR(pfmr))) { 1657
1501 fmr->fmr_pool = fpo; 1658 spin_unlock(&fps->fps_lock);
1502 fmr->fmr_pfmr = pfmr; 1659
1503 return 0; 1660 if (!tx_pages_mapped) {
1661 npages = kiblnd_map_tx_pages(tx, rd);
1662 tx_pages_mapped = 1;
1663 }
1664
1665 pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
1666 pages, npages, iov);
1667 if (likely(!IS_ERR(pfmr))) {
1668 fmr->fmr_key = is_rx ? pfmr->fmr->rkey :
1669 pfmr->fmr->lkey;
1670 fmr->fmr_frd = NULL;
1671 fmr->fmr_pfmr = pfmr;
1672 fmr->fmr_pool = fpo;
1673 return 0;
1674 }
1675 rc = PTR_ERR(pfmr);
1676 } else {
1677 if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
1678 struct kib_fast_reg_descriptor *frd;
1679 struct ib_reg_wr *wr;
1680 struct ib_mr *mr;
1681 int n;
1682
1683 frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
1684 struct kib_fast_reg_descriptor,
1685 frd_list);
1686 list_del(&frd->frd_list);
1687 spin_unlock(&fps->fps_lock);
1688
1689 mr = frd->frd_mr;
1690
1691 if (!frd->frd_valid) {
1692 __u32 key = is_rx ? mr->rkey : mr->lkey;
1693 struct ib_send_wr *inv_wr;
1694
1695 inv_wr = &frd->frd_inv_wr;
1696 memset(inv_wr, 0, sizeof(*inv_wr));
1697 inv_wr->opcode = IB_WR_LOCAL_INV;
1698 inv_wr->wr_id = IBLND_WID_MR;
1699 inv_wr->ex.invalidate_rkey = key;
1700
1701 /* Bump the key */
1702 key = ib_inc_rkey(key);
1703 ib_update_fast_reg_key(mr, key);
1704 }
1705
1706 n = ib_map_mr_sg(mr, tx->tx_frags,
1707 tx->tx_nfrags, NULL, PAGE_SIZE);
1708 if (unlikely(n != tx->tx_nfrags)) {
1709 CERROR("Failed to map mr %d/%d elements\n",
1710 n, tx->tx_nfrags);
1711 return n < 0 ? n : -EINVAL;
1712 }
1713
1714 mr->iova = iov;
1715
1716 /* Prepare FastReg WR */
1717 wr = &frd->frd_fastreg_wr;
1718 memset(wr, 0, sizeof(*wr));
1719 wr->wr.opcode = IB_WR_REG_MR;
1720 wr->wr.wr_id = IBLND_WID_MR;
1721 wr->wr.num_sge = 0;
1722 wr->wr.send_flags = 0;
1723 wr->mr = mr;
1724 wr->key = is_rx ? mr->rkey : mr->lkey;
1725 wr->access = (IB_ACCESS_LOCAL_WRITE |
1726 IB_ACCESS_REMOTE_WRITE);
1727
1728 fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
1729 fmr->fmr_frd = frd;
1730 fmr->fmr_pfmr = NULL;
1731 fmr->fmr_pool = fpo;
1732 return 0;
1733 }
1734 spin_unlock(&fps->fps_lock);
1735 rc = -EBUSY;
1504 } 1736 }
1505 1737
1506 spin_lock(&fps->fps_lock); 1738 spin_lock(&fps->fps_lock);
1507 fpo->fpo_map_count--; 1739 fpo->fpo_map_count--;
1508 if (PTR_ERR(pfmr) != -EAGAIN) { 1740 if (rc != -EAGAIN) {
1509 spin_unlock(&fps->fps_lock); 1741 spin_unlock(&fps->fps_lock);
1510 return PTR_ERR(pfmr); 1742 return rc;
1511 } 1743 }
1512 1744
1513 /* EAGAIN and ... */ 1745 /* EAGAIN and ... */
@@ -1932,25 +2164,28 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
1932 } 2164 }
1933} 2165}
1934 2166
1935static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) 2167static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
2168 int ncpts)
1936{ 2169{
2170 struct lnet_ioctl_config_o2iblnd_tunables *tunables;
1937 unsigned long flags; 2171 unsigned long flags;
1938 int cpt; 2172 int cpt;
1939 int rc = 0; 2173 int rc;
1940 int i; 2174 int i;
1941 2175
2176 tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
2177
1942 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); 2178 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1943 if (!*kiblnd_tunables.kib_map_on_demand) { 2179 if (!tunables->lnd_map_on_demand) {
1944 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); 2180 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1945 goto create_tx_pool; 2181 goto create_tx_pool;
1946 } 2182 }
1947 2183
1948 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); 2184 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1949 2185
1950 if (*kiblnd_tunables.kib_fmr_pool_size < 2186 if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) {
1951 *kiblnd_tunables.kib_ntx / 4) {
1952 CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n", 2187 CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
1953 *kiblnd_tunables.kib_fmr_pool_size, 2188 tunables->lnd_fmr_pool_size,
1954 *kiblnd_tunables.kib_ntx / 4); 2189 *kiblnd_tunables.kib_ntx / 4);
1955 rc = -EINVAL; 2190 rc = -EINVAL;
1956 goto failed; 2191 goto failed;
@@ -1965,8 +2200,11 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
1965 /* 2200 /*
1966 * premapping can fail if ibd_nmr > 1, so we always create 2201 * premapping can fail if ibd_nmr > 1, so we always create
1967 * FMR pool and map-on-demand if premapping failed 2202 * FMR pool and map-on-demand if premapping failed
2203 *
2204 * cfs_precpt_alloc is creating an array of struct kib_fmr_poolset
2205 * The number of struct kib_fmr_poolsets create is equal to the
2206 * number of CPTs that exist, i.e net->ibn_fmr_ps[cpt].
1968 */ 2207 */
1969
1970 net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(), 2208 net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
1971 sizeof(kib_fmr_poolset_t)); 2209 sizeof(kib_fmr_poolset_t));
1972 if (!net->ibn_fmr_ps) { 2210 if (!net->ibn_fmr_ps) {
@@ -1977,9 +2215,8 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
1977 2215
1978 for (i = 0; i < ncpts; i++) { 2216 for (i = 0; i < ncpts; i++) {
1979 cpt = !cpts ? i : cpts[i]; 2217 cpt = !cpts ? i : cpts[i];
1980 rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net, 2218 rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts,
1981 kiblnd_fmr_pool_size(ncpts), 2219 net, tunables);
1982 kiblnd_fmr_flush_trigger(ncpts));
1983 if (rc) { 2220 if (rc) {
1984 CERROR("Can't initialize FMR pool for CPT %d: %d\n", 2221 CERROR("Can't initialize FMR pool for CPT %d: %d\n",
1985 cpt, rc); 2222 cpt, rc);
@@ -1991,6 +2228,11 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
1991 LASSERT(i == ncpts); 2228 LASSERT(i == ncpts);
1992 2229
1993 create_tx_pool: 2230 create_tx_pool:
2231 /*
2232 * cfs_precpt_alloc is creating an array of struct kib_tx_poolset
2233 * The number of struct kib_tx_poolsets create is equal to the
2234 * number of CPTs that exist, i.e net->ibn_tx_ps[cpt].
2235 */
1994 net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(), 2236 net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
1995 sizeof(kib_tx_poolset_t)); 2237 sizeof(kib_tx_poolset_t));
1996 if (!net->ibn_tx_ps) { 2238 if (!net->ibn_tx_ps) {
@@ -2694,10 +2936,9 @@ static int kiblnd_startup(lnet_ni_t *ni)
2694 net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC + 2936 net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC +
2695 tv.tv_nsec / NSEC_PER_USEC; 2937 tv.tv_nsec / NSEC_PER_USEC;
2696 2938
2697 ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout; 2939 rc = kiblnd_tunables_setup(ni);
2698 ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits; 2940 if (rc)
2699 ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits; 2941 goto net_failed;
2700 ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
2701 2942
2702 if (ni->ni_interfaces[0]) { 2943 if (ni->ni_interfaces[0]) {
2703 /* Use the IPoIB interface specified in 'networks=' */ 2944 /* Use the IPoIB interface specified in 'networks=' */
@@ -2736,7 +2977,7 @@ static int kiblnd_startup(lnet_ni_t *ni)
2736 if (rc) 2977 if (rc)
2737 goto failed; 2978 goto failed;
2738 2979
2739 rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts); 2980 rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
2740 if (rc) { 2981 if (rc) {
2741 CERROR("Failed to initialize NI pools: %d\n", rc); 2982 CERROR("Failed to initialize NI pools: %d\n", rc);
2742 goto failed; 2983 goto failed;
@@ -2779,8 +3020,6 @@ static void __exit ko2iblnd_exit(void)
2779 3020
2780static int __init ko2iblnd_init(void) 3021static int __init ko2iblnd_init(void)
2781{ 3022{
2782 int rc;
2783
2784 CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE); 3023 CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
2785 CLASSERT(offsetof(kib_msg_t, 3024 CLASSERT(offsetof(kib_msg_t,
2786 ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) 3025 ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
@@ -2789,9 +3028,7 @@ static int __init ko2iblnd_init(void)
2789 ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) 3028 ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
2790 <= IBLND_MSG_SIZE); 3029 <= IBLND_MSG_SIZE);
2791 3030
2792 rc = kiblnd_tunables_init(); 3031 kiblnd_tunables_init();
2793 if (rc)
2794 return rc;
2795 3032
2796 lnet_register_lnd(&the_o2iblnd); 3033 lnet_register_lnd(&the_o2iblnd);
2797 3034
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index bfcbdd167da7..b22984fd9ad3 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -87,22 +87,10 @@ typedef struct {
87 int *kib_timeout; /* comms timeout (seconds) */ 87 int *kib_timeout; /* comms timeout (seconds) */
88 int *kib_keepalive; /* keepalive timeout (seconds) */ 88 int *kib_keepalive; /* keepalive timeout (seconds) */
89 int *kib_ntx; /* # tx descs */ 89 int *kib_ntx; /* # tx descs */
90 int *kib_credits; /* # concurrent sends */
91 int *kib_peertxcredits; /* # concurrent sends to 1 peer */
92 int *kib_peerrtrcredits; /* # per-peer router buffer credits */
93 int *kib_peercredits_hiw; /* # when eagerly to return credits */
94 int *kib_peertimeout; /* seconds to consider peer dead */
95 char **kib_default_ipif; /* default IPoIB interface */ 90 char **kib_default_ipif; /* default IPoIB interface */
96 int *kib_retry_count; 91 int *kib_retry_count;
97 int *kib_rnr_retry_count; 92 int *kib_rnr_retry_count;
98 int *kib_concurrent_sends; /* send work queue sizing */
99 int *kib_ib_mtu; /* IB MTU */ 93 int *kib_ib_mtu; /* IB MTU */
100 int *kib_map_on_demand; /* map-on-demand if RD has more */
101 /* fragments than this value, 0 */
102 /* disable map-on-demand */
103 int *kib_fmr_pool_size; /* # FMRs in pool */
104 int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
105 int *kib_fmr_cache; /* enable FMR pool cache? */
106 int *kib_require_priv_port; /* accept only privileged ports */ 94 int *kib_require_priv_port; /* accept only privileged ports */
107 int *kib_use_priv_port; /* use privileged port for active connect */ 95 int *kib_use_priv_port; /* use privileged port for active connect */
108 int *kib_nscheds; /* # threads on each CPT */ 96 int *kib_nscheds; /* # threads on each CPT */
@@ -116,43 +104,21 @@ extern kib_tunables_t kiblnd_tunables;
116#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */ 104#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
117#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* Max # of peer credits */ 105#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* Max # of peer credits */
118 106
119#define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \ 107/* when eagerly to return credits */
120 IBLND_MSG_QUEUE_SIZE_V1 : \ 108#define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? \
121 *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */ 109 IBLND_CREDIT_HIGHWATER_V1 : \
122#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \ 110 t->lnd_peercredits_hiw)
123 IBLND_CREDIT_HIGHWATER_V1 : \
124 *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
125 111
126#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(&init_net, \ 112#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(&init_net, \
127 cb, dev, \ 113 cb, dev, \
128 ps, qpt) 114 ps, qpt)
129 115
130static inline int
131kiblnd_concurrent_sends_v1(void)
132{
133 if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
134 return IBLND_MSG_QUEUE_SIZE_V1 * 2;
135
136 if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
137 return IBLND_MSG_QUEUE_SIZE_V1 / 2;
138
139 return *kiblnd_tunables.kib_concurrent_sends;
140}
141
142#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ? \
143 kiblnd_concurrent_sends_v1() : \
144 *kiblnd_tunables.kib_concurrent_sends)
145/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */ 116/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
146#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1) 117#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
147#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0) 118#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
148 119
149#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */ 120#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */
150#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */ 121#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
151#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand ? \
152 *kiblnd_tunables.kib_map_on_demand : \
153 IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
154#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \
155 IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
156 122
157/************************/ 123/************************/
158/* derived constants... */ 124/* derived constants... */
@@ -171,7 +137,8 @@ kiblnd_concurrent_sends_v1(void)
171/* WRs and CQEs (per connection) */ 137/* WRs and CQEs (per connection) */
172#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c) 138#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c)
173#define IBLND_SEND_WRS(c) \ 139#define IBLND_SEND_WRS(c) \
174 ((c->ibc_max_frags + 1) * IBLND_CONCURRENT_SENDS(c->ibc_version)) 140 ((c->ibc_max_frags + 1) * kiblnd_concurrent_sends(c->ibc_version, \
141 c->ibc_peer->ibp_ni))
175#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c)) 142#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c))
176 143
177struct kib_hca_dev; 144struct kib_hca_dev;
@@ -286,24 +253,44 @@ typedef struct {
286 int fps_cpt; /* CPT id */ 253 int fps_cpt; /* CPT id */
287 int fps_pool_size; 254 int fps_pool_size;
288 int fps_flush_trigger; 255 int fps_flush_trigger;
256 int fps_cache;
289 int fps_increasing; /* is allocating new pool */ 257 int fps_increasing; /* is allocating new pool */
290 unsigned long fps_next_retry; /* time stamp for retry if*/ 258 unsigned long fps_next_retry; /* time stamp for retry if*/
291 /* failed to allocate */ 259 /* failed to allocate */
292} kib_fmr_poolset_t; 260} kib_fmr_poolset_t;
293 261
262struct kib_fast_reg_descriptor { /* For fast registration */
263 struct list_head frd_list;
264 struct ib_send_wr frd_inv_wr;
265 struct ib_reg_wr frd_fastreg_wr;
266 struct ib_mr *frd_mr;
267 bool frd_valid;
268};
269
294typedef struct { 270typedef struct {
295 struct list_head fpo_list; /* chain on pool list */ 271 struct list_head fpo_list; /* chain on pool list */
296 struct kib_hca_dev *fpo_hdev; /* device for this pool */ 272 struct kib_hca_dev *fpo_hdev; /* device for this pool */
297 kib_fmr_poolset_t *fpo_owner; /* owner of this pool */ 273 kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
298 struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */ 274 union {
275 struct {
276 struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
277 } fmr;
278 struct { /* For fast registration */
279 struct list_head fpo_pool_list;
280 int fpo_pool_size;
281 } fast_reg;
282 };
299 unsigned long fpo_deadline; /* deadline of this pool */ 283 unsigned long fpo_deadline; /* deadline of this pool */
300 int fpo_failed; /* fmr pool is failed */ 284 int fpo_failed; /* fmr pool is failed */
301 int fpo_map_count; /* # of mapped FMR */ 285 int fpo_map_count; /* # of mapped FMR */
286 int fpo_is_fmr;
302} kib_fmr_pool_t; 287} kib_fmr_pool_t;
303 288
304typedef struct { 289typedef struct {
305 struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */ 290 kib_fmr_pool_t *fmr_pool; /* pool of FMR */
306 kib_fmr_pool_t *fmr_pool; /* pool of FMR */ 291 struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
292 struct kib_fast_reg_descriptor *fmr_frd;
293 u32 fmr_key;
307} kib_fmr_t; 294} kib_fmr_t;
308 295
309typedef struct kib_net { 296typedef struct kib_net {
@@ -615,6 +602,48 @@ extern kib_data_t kiblnd_data;
615 602
616void kiblnd_hdev_destroy(kib_hca_dev_t *hdev); 603void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
617 604
605int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
606
607/* max # of fragments configured by user */
608static inline int
609kiblnd_cfg_rdma_frags(struct lnet_ni *ni)
610{
611 struct lnet_ioctl_config_o2iblnd_tunables *tunables;
612 int mod;
613
614 tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
615 mod = tunables->lnd_map_on_demand;
616 return mod ? mod : IBLND_MAX_RDMA_FRAGS;
617}
618
619static inline int
620kiblnd_rdma_frags(int version, struct lnet_ni *ni)
621{
622 return version == IBLND_MSG_VERSION_1 ?
623 IBLND_MAX_RDMA_FRAGS :
624 kiblnd_cfg_rdma_frags(ni);
625}
626
627static inline int
628kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
629{
630 struct lnet_ioctl_config_o2iblnd_tunables *tunables;
631 int concurrent_sends;
632
633 tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
634 concurrent_sends = tunables->lnd_concurrent_sends;
635
636 if (version == IBLND_MSG_VERSION_1) {
637 if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
638 return IBLND_MSG_QUEUE_SIZE_V1 * 2;
639
640 if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
641 return IBLND_MSG_QUEUE_SIZE_V1 / 2;
642 }
643
644 return concurrent_sends;
645}
646
618static inline void 647static inline void
619kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev) 648kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
620{ 649{
@@ -737,10 +766,14 @@ kiblnd_send_keepalive(kib_conn_t *conn)
737static inline int 766static inline int
738kiblnd_need_noop(kib_conn_t *conn) 767kiblnd_need_noop(kib_conn_t *conn)
739{ 768{
769 struct lnet_ioctl_config_o2iblnd_tunables *tunables;
770 lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
771
740 LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); 772 LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
773 tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
741 774
742 if (conn->ibc_outstanding_credits < 775 if (conn->ibc_outstanding_credits <
743 IBLND_CREDITS_HIGHWATER(conn->ibc_version) && 776 IBLND_CREDITS_HIGHWATER(tunables, conn->ibc_version) &&
744 !kiblnd_send_keepalive(conn)) 777 !kiblnd_send_keepalive(conn))
745 return 0; /* No need to send NOOP */ 778 return 0; /* No need to send NOOP */
746 779
@@ -799,7 +832,8 @@ kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
799#define IBLND_WID_TX 1 832#define IBLND_WID_TX 1
800#define IBLND_WID_RX 2 833#define IBLND_WID_RX 2
801#define IBLND_WID_RDMA 3 834#define IBLND_WID_RDMA 3
802#define IBLND_WID_MASK 3UL 835#define IBLND_WID_MR 4
836#define IBLND_WID_MASK 7UL
803 837
804static inline __u64 838static inline __u64
805kiblnd_ptr2wreqid(void *ptr, int type) 839kiblnd_ptr2wreqid(void *ptr, int type)
@@ -947,20 +981,20 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
947#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data) 981#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
948#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len) 982#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
949 983
950struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, 984struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
951 kib_rdma_desc_t *rd,
952 int negotiated_nfrags); 985 int negotiated_nfrags);
953void kiblnd_map_rx_descs(kib_conn_t *conn); 986void kiblnd_map_rx_descs(kib_conn_t *conn);
954void kiblnd_unmap_rx_descs(kib_conn_t *conn); 987void kiblnd_unmap_rx_descs(kib_conn_t *conn);
955void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node); 988void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
956struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps); 989struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
957 990
958int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, 991int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
959 int npages, __u64 iov, kib_fmr_t *fmr); 992 kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
993 kib_fmr_t *fmr);
960void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status); 994void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
961 995
962int kiblnd_tunables_init(void); 996int kiblnd_tunables_setup(struct lnet_ni *ni);
963void kiblnd_tunables_fini(void); 997void kiblnd_tunables_init(void);
964 998
965int kiblnd_connd(void *arg); 999int kiblnd_connd(void *arg);
966int kiblnd_scheduler(void *arg); 1000int kiblnd_scheduler(void *arg);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 2323e8d3a318..bbfee53cfcf5 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -561,36 +561,23 @@ kiblnd_kvaddr_to_page(unsigned long vaddr)
561} 561}
562 562
563static int 563static int
564kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) 564kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
565{ 565{
566 kib_hca_dev_t *hdev; 566 kib_hca_dev_t *hdev;
567 __u64 *pages = tx->tx_pages;
568 kib_fmr_poolset_t *fps; 567 kib_fmr_poolset_t *fps;
569 int npages;
570 int size;
571 int cpt; 568 int cpt;
572 int rc; 569 int rc;
573 int i;
574 570
575 LASSERT(tx->tx_pool); 571 LASSERT(tx->tx_pool);
576 LASSERT(tx->tx_pool->tpo_pool.po_owner); 572 LASSERT(tx->tx_pool->tpo_pool.po_owner);
577 573
578 hdev = tx->tx_pool->tpo_hdev; 574 hdev = tx->tx_pool->tpo_hdev;
579
580 for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
581 for (size = 0; size < rd->rd_frags[i].rf_nob;
582 size += hdev->ibh_page_size) {
583 pages[npages++] = (rd->rd_frags[i].rf_addr &
584 hdev->ibh_page_mask) + size;
585 }
586 }
587
588 cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt; 575 cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
589 576
590 fps = net->ibn_fmr_ps[cpt]; 577 fps = net->ibn_fmr_ps[cpt];
591 rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr); 578 rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->fmr);
592 if (rc) { 579 if (rc) {
593 CERROR("Can't map %d pages: %d\n", npages, rc); 580 CERROR("Can't map %u bytes: %d\n", nob, rc);
594 return rc; 581 return rc;
595 } 582 }
596 583
@@ -598,8 +585,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
598 * If rd is not tx_rd, it's going to get sent to a peer, who will need 585 * If rd is not tx_rd, it's going to get sent to a peer, who will need
599 * the rkey 586 * the rkey
600 */ 587 */
601 rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey : 588 rd->rd_key = tx->fmr.fmr_key;
602 tx->fmr.fmr_pfmr->fmr->lkey;
603 rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask; 589 rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
604 rd->rd_frags[0].rf_nob = nob; 590 rd->rd_frags[0].rf_nob = nob;
605 rd->rd_nfrags = 1; 591 rd->rd_nfrags = 1;
@@ -613,10 +599,8 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
613 599
614 LASSERT(net); 600 LASSERT(net);
615 601
616 if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) { 602 if (net->ibn_fmr_ps)
617 kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status); 603 kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
618 tx->fmr.fmr_pfmr = NULL;
619 }
620 604
621 if (tx->tx_nfrags) { 605 if (tx->tx_nfrags) {
622 kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev, 606 kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
@@ -628,8 +612,8 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
628static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, 612static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
629 int nfrags) 613 int nfrags)
630{ 614{
631 kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
632 kib_net_t *net = ni->ni_data; 615 kib_net_t *net = ni->ni_data;
616 kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
633 struct ib_mr *mr = NULL; 617 struct ib_mr *mr = NULL;
634 __u32 nob; 618 __u32 nob;
635 int i; 619 int i;
@@ -652,7 +636,7 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
652 nob += rd->rd_frags[i].rf_nob; 636 nob += rd->rd_frags[i].rf_nob;
653 } 637 }
654 638
655 mr = kiblnd_find_rd_dma_mr(hdev, rd, tx->tx_conn ? 639 mr = kiblnd_find_rd_dma_mr(ni, rd, tx->tx_conn ?
656 tx->tx_conn->ibc_max_frags : -1); 640 tx->tx_conn->ibc_max_frags : -1);
657 if (mr) { 641 if (mr) {
658 /* found pre-mapping MR */ 642 /* found pre-mapping MR */
@@ -704,7 +688,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
704 fragnob = min(fragnob, (int)PAGE_SIZE - page_offset); 688 fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
705 689
706 sg_set_page(sg, page, fragnob, page_offset); 690 sg_set_page(sg, page, fragnob, page_offset);
707 sg++; 691 sg = sg_next(sg);
708 692
709 if (offset + fragnob < iov->iov_len) { 693 if (offset + fragnob < iov->iov_len) {
710 offset += fragnob; 694 offset += fragnob;
@@ -748,7 +732,7 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
748 732
749 sg_set_page(sg, kiov->kiov_page, fragnob, 733 sg_set_page(sg, kiov->kiov_page, fragnob,
750 kiov->kiov_offset + offset); 734 kiov->kiov_offset + offset);
751 sg++; 735 sg = sg_next(sg);
752 736
753 offset = 0; 737 offset = 0;
754 kiov++; 738 kiov++;
@@ -765,6 +749,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
765{ 749{
766 kib_msg_t *msg = tx->tx_msg; 750 kib_msg_t *msg = tx->tx_msg;
767 kib_peer_t *peer = conn->ibc_peer; 751 kib_peer_t *peer = conn->ibc_peer;
752 struct lnet_ni *ni = peer->ibp_ni;
768 int ver = conn->ibc_version; 753 int ver = conn->ibc_version;
769 int rc; 754 int rc;
770 int done; 755 int done;
@@ -780,7 +765,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
780 LASSERT(conn->ibc_credits >= 0); 765 LASSERT(conn->ibc_credits >= 0);
781 LASSERT(conn->ibc_credits <= conn->ibc_queue_depth); 766 LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);
782 767
783 if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) { 768 if (conn->ibc_nsends_posted == kiblnd_concurrent_sends(ver, ni)) {
784 /* tx completions outstanding... */ 769 /* tx completions outstanding... */
785 CDEBUG(D_NET, "%s: posted enough\n", 770 CDEBUG(D_NET, "%s: posted enough\n",
786 libcfs_nid2str(peer->ibp_nid)); 771 libcfs_nid2str(peer->ibp_nid));
@@ -851,14 +836,26 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
851 /* close_conn will launch failover */ 836 /* close_conn will launch failover */
852 rc = -ENETDOWN; 837 rc = -ENETDOWN;
853 } else { 838 } else {
854 struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq - 1].wr; 839 struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd;
840 struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
841 struct ib_send_wr *wrq = &tx->tx_wrq[0].wr;
842
843 if (frd) {
844 if (!frd->frd_valid) {
845 wrq = &frd->frd_inv_wr;
846 wrq->next = &frd->frd_fastreg_wr.wr;
847 } else {
848 wrq = &frd->frd_fastreg_wr.wr;
849 }
850 frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
851 }
855 852
856 LASSERTF(wrq->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX), 853 LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
857 "bad wr_id %llx, opc %d, flags %d, peer: %s\n", 854 "bad wr_id %llx, opc %d, flags %d, peer: %s\n",
858 wrq->wr_id, wrq->opcode, wrq->send_flags, 855 bad->wr_id, bad->opcode, bad->send_flags,
859 libcfs_nid2str(conn->ibc_peer->ibp_nid)); 856 libcfs_nid2str(conn->ibc_peer->ibp_nid));
860 wrq = NULL; 857 bad = NULL;
861 rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &wrq); 858 rc = ib_post_send(conn->ibc_cmid->qp, wrq, &bad);
862 } 859 }
863 860
864 conn->ibc_last_send = jiffies; 861 conn->ibc_last_send = jiffies;
@@ -919,7 +916,7 @@ kiblnd_check_sends(kib_conn_t *conn)
919 916
920 spin_lock(&conn->ibc_lock); 917 spin_lock(&conn->ibc_lock);
921 918
922 LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver)); 919 LASSERT(conn->ibc_nsends_posted <= kiblnd_concurrent_sends(ver, ni));
923 LASSERT(!IBLND_OOB_CAPABLE(ver) || 920 LASSERT(!IBLND_OOB_CAPABLE(ver) ||
924 conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver)); 921 conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
925 LASSERT(conn->ibc_reserved_credits >= 0); 922 LASSERT(conn->ibc_reserved_credits >= 0);
@@ -1066,7 +1063,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
1066 kib_msg_t *ibmsg = tx->tx_msg; 1063 kib_msg_t *ibmsg = tx->tx_msg;
1067 kib_rdma_desc_t *srcrd = tx->tx_rd; 1064 kib_rdma_desc_t *srcrd = tx->tx_rd;
1068 struct ib_sge *sge = &tx->tx_sge[0]; 1065 struct ib_sge *sge = &tx->tx_sge[0];
1069 struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next; 1066 struct ib_rdma_wr *wrq, *next;
1070 int rc = resid; 1067 int rc = resid;
1071 int srcidx = 0; 1068 int srcidx = 0;
1072 int dstidx = 0; 1069 int dstidx = 0;
@@ -2333,11 +2330,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
2333 } 2330 }
2334 2331
2335 if (reqmsg->ibm_u.connparams.ibcp_queue_depth > 2332 if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
2336 IBLND_MSG_QUEUE_SIZE(version)) { 2333 kiblnd_msg_queue_size(version, ni)) {
2337 CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n", 2334 CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n",
2338 libcfs_nid2str(nid), 2335 libcfs_nid2str(nid),
2339 reqmsg->ibm_u.connparams.ibcp_queue_depth, 2336 reqmsg->ibm_u.connparams.ibcp_queue_depth,
2340 IBLND_MSG_QUEUE_SIZE(version)); 2337 kiblnd_msg_queue_size(version, ni));
2341 2338
2342 if (version == IBLND_MSG_VERSION) 2339 if (version == IBLND_MSG_VERSION)
2343 rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE; 2340 rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
@@ -2346,24 +2343,24 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
2346 } 2343 }
2347 2344
2348 if (reqmsg->ibm_u.connparams.ibcp_max_frags > 2345 if (reqmsg->ibm_u.connparams.ibcp_max_frags >
2349 IBLND_RDMA_FRAGS(version)) { 2346 kiblnd_rdma_frags(version, ni)) {
2350 CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n", 2347 CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n",
2351 libcfs_nid2str(nid), version, 2348 libcfs_nid2str(nid), version,
2352 reqmsg->ibm_u.connparams.ibcp_max_frags, 2349 reqmsg->ibm_u.connparams.ibcp_max_frags,
2353 IBLND_RDMA_FRAGS(version)); 2350 kiblnd_rdma_frags(version, ni));
2354 2351
2355 if (version >= IBLND_MSG_VERSION) 2352 if (version >= IBLND_MSG_VERSION)
2356 rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; 2353 rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2357 2354
2358 goto failed; 2355 goto failed;
2359 } else if (reqmsg->ibm_u.connparams.ibcp_max_frags < 2356 } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
2360 IBLND_RDMA_FRAGS(version) && !net->ibn_fmr_ps) { 2357 kiblnd_rdma_frags(version, ni) && !net->ibn_fmr_ps) {
2361 CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n", 2358 CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n",
2362 libcfs_nid2str(nid), version, 2359 libcfs_nid2str(nid), version,
2363 reqmsg->ibm_u.connparams.ibcp_max_frags, 2360 reqmsg->ibm_u.connparams.ibcp_max_frags,
2364 IBLND_RDMA_FRAGS(version)); 2361 kiblnd_rdma_frags(version, ni));
2365 2362
2366 if (version >= IBLND_MSG_VERSION) 2363 if (version == IBLND_MSG_VERSION)
2367 rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; 2364 rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2368 2365
2369 goto failed; 2366 goto failed;
@@ -2528,8 +2525,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
2528 lnet_ni_decref(ni); 2525 lnet_ni_decref(ni);
2529 2526
2530 rej.ibr_version = version; 2527 rej.ibr_version = version;
2531 rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version); 2528 rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
2532 rej.ibr_cp.ibcp_max_frags = IBLND_RDMA_FRAGS(version); 2529 rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
2533 kiblnd_reject(cmid, &rej); 2530 kiblnd_reject(cmid, &rej);
2534 2531
2535 return -ECONNREFUSED; 2532 return -ECONNREFUSED;
@@ -2580,12 +2577,15 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version,
2580 reason = "Unknown"; 2577 reason = "Unknown";
2581 break; 2578 break;
2582 2579
2583 case IBLND_REJECT_RDMA_FRAGS: 2580 case IBLND_REJECT_RDMA_FRAGS: {
2581 struct lnet_ioctl_config_lnd_tunables *tunables;
2582
2584 if (!cp) { 2583 if (!cp) {
2585 reason = "can't negotiate max frags"; 2584 reason = "can't negotiate max frags";
2586 goto out; 2585 goto out;
2587 } 2586 }
2588 if (!*kiblnd_tunables.kib_map_on_demand) { 2587 tunables = peer->ibp_ni->ni_lnd_tunables;
2588 if (!tunables->lt_tun_u.lt_o2ib.lnd_map_on_demand) {
2589 reason = "map_on_demand must be enabled"; 2589 reason = "map_on_demand must be enabled";
2590 goto out; 2590 goto out;
2591 } 2591 }
@@ -2597,7 +2597,7 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version,
2597 peer->ibp_max_frags = frag_num; 2597 peer->ibp_max_frags = frag_num;
2598 reason = "rdma fragments"; 2598 reason = "rdma fragments";
2599 break; 2599 break;
2600 2600 }
2601 case IBLND_REJECT_MSG_QUEUE_SIZE: 2601 case IBLND_REJECT_MSG_QUEUE_SIZE:
2602 if (!cp) { 2602 if (!cp) {
2603 reason = "can't negotiate queue depth"; 2603 reason = "can't negotiate queue depth";
@@ -3430,6 +3430,12 @@ kiblnd_complete(struct ib_wc *wc)
3430 default: 3430 default:
3431 LBUG(); 3431 LBUG();
3432 3432
3433 case IBLND_WID_MR:
3434 if (wc->status != IB_WC_SUCCESS &&
3435 wc->status != IB_WC_WR_FLUSH_ERR)
3436 CNETERR("FastReg failed: %d\n", wc->status);
3437 break;
3438
3433 case IBLND_WID_RDMA: 3439 case IBLND_WID_RDMA:
3434 /* 3440 /*
3435 * We only get RDMA completion notification if it fails. All 3441 * We only get RDMA completion notification if it fails. All
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index b4607dad3712..f8fdd4ae3dbf 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -152,74 +152,135 @@ kib_tunables_t kiblnd_tunables = {
152 .kib_timeout = &timeout, 152 .kib_timeout = &timeout,
153 .kib_keepalive = &keepalive, 153 .kib_keepalive = &keepalive,
154 .kib_ntx = &ntx, 154 .kib_ntx = &ntx,
155 .kib_credits = &credits,
156 .kib_peertxcredits = &peer_credits,
157 .kib_peercredits_hiw = &peer_credits_hiw,
158 .kib_peerrtrcredits = &peer_buffer_credits,
159 .kib_peertimeout = &peer_timeout,
160 .kib_default_ipif = &ipif_name, 155 .kib_default_ipif = &ipif_name,
161 .kib_retry_count = &retry_count, 156 .kib_retry_count = &retry_count,
162 .kib_rnr_retry_count = &rnr_retry_count, 157 .kib_rnr_retry_count = &rnr_retry_count,
163 .kib_concurrent_sends = &concurrent_sends,
164 .kib_ib_mtu = &ib_mtu, 158 .kib_ib_mtu = &ib_mtu,
165 .kib_map_on_demand = &map_on_demand,
166 .kib_fmr_pool_size = &fmr_pool_size,
167 .kib_fmr_flush_trigger = &fmr_flush_trigger,
168 .kib_fmr_cache = &fmr_cache,
169 .kib_require_priv_port = &require_privileged_port, 159 .kib_require_priv_port = &require_privileged_port,
170 .kib_use_priv_port = &use_privileged_port, 160 .kib_use_priv_port = &use_privileged_port,
171 .kib_nscheds = &nscheds 161 .kib_nscheds = &nscheds
172}; 162};
173 163
174int 164static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;
175kiblnd_tunables_init(void) 165
166/* # messages/RDMAs in-flight */
167int kiblnd_msg_queue_size(int version, lnet_ni_t *ni)
176{ 168{
169 if (version == IBLND_MSG_VERSION_1)
170 return IBLND_MSG_QUEUE_SIZE_V1;
171 else if (ni)
172 return ni->ni_peertxcredits;
173 else
174 return peer_credits;
175}
176
177int kiblnd_tunables_setup(struct lnet_ni *ni)
178{
179 struct lnet_ioctl_config_o2iblnd_tunables *tunables;
180
181 /*
182 * if there was no tunables specified, setup the tunables to be
183 * defaulted
184 */
185 if (!ni->ni_lnd_tunables) {
186 LIBCFS_ALLOC(ni->ni_lnd_tunables,
187 sizeof(*ni->ni_lnd_tunables));
188 if (!ni->ni_lnd_tunables)
189 return -ENOMEM;
190
191 memcpy(&ni->ni_lnd_tunables->lt_tun_u.lt_o2ib,
192 &default_tunables, sizeof(*tunables));
193 }
194 tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
195
196 /* Current API version */
197 tunables->lnd_version = 0;
198
177 if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) { 199 if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
178 CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n", 200 CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
179 *kiblnd_tunables.kib_ib_mtu); 201 *kiblnd_tunables.kib_ib_mtu);
180 return -EINVAL; 202 return -EINVAL;
181 } 203 }
182 204
183 if (*kiblnd_tunables.kib_peertxcredits < IBLND_CREDITS_DEFAULT) 205 if (!ni->ni_peertimeout)
184 *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_DEFAULT; 206 ni->ni_peertimeout = peer_timeout;
207
208 if (!ni->ni_maxtxcredits)
209 ni->ni_maxtxcredits = credits;
210
211 if (!ni->ni_peertxcredits)
212 ni->ni_peertxcredits = peer_credits;
185 213
186 if (*kiblnd_tunables.kib_peertxcredits > IBLND_CREDITS_MAX) 214 if (!ni->ni_peerrtrcredits)
187 *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX; 215 ni->ni_peerrtrcredits = peer_buffer_credits;
188 216
189 if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits) 217 if (ni->ni_peertxcredits < IBLND_CREDITS_DEFAULT)
190 *kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits; 218 ni->ni_peertxcredits = IBLND_CREDITS_DEFAULT;
191 219
192 if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2) 220 if (ni->ni_peertxcredits > IBLND_CREDITS_MAX)
193 *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2; 221 ni->ni_peertxcredits = IBLND_CREDITS_MAX;
194 222
195 if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits) 223 if (ni->ni_peertxcredits > credits)
196 *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1; 224 ni->ni_peertxcredits = credits;
197 225
198 if (*kiblnd_tunables.kib_map_on_demand < 0 || 226 if (!tunables->lnd_peercredits_hiw)
199 *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS) 227 tunables->lnd_peercredits_hiw = peer_credits_hiw;
200 *kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */
201 228
202 if (*kiblnd_tunables.kib_map_on_demand == 1) 229 if (tunables->lnd_peercredits_hiw < ni->ni_peertxcredits / 2)
203 *kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */ 230 tunables->lnd_peercredits_hiw = ni->ni_peertxcredits / 2;
204 231
205 if (!*kiblnd_tunables.kib_concurrent_sends) { 232 if (tunables->lnd_peercredits_hiw >= ni->ni_peertxcredits)
206 if (*kiblnd_tunables.kib_map_on_demand > 0 && 233 tunables->lnd_peercredits_hiw = ni->ni_peertxcredits - 1;
207 *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) 234
208 *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2; 235 if (tunables->lnd_map_on_demand < 0 ||
209 else 236 tunables->lnd_map_on_demand > IBLND_MAX_RDMA_FRAGS) {
210 *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits); 237 /* disable map-on-demand */
238 tunables->lnd_map_on_demand = 0;
239 }
240
241 if (tunables->lnd_map_on_demand == 1) {
242 /* don't make sense to create map if only one fragment */
243 tunables->lnd_map_on_demand = 2;
244 }
245
246 if (!tunables->lnd_concurrent_sends) {
247 if (tunables->lnd_map_on_demand > 0 &&
248 tunables->lnd_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) {
249 tunables->lnd_concurrent_sends =
250 ni->ni_peertxcredits * 2;
251 } else {
252 tunables->lnd_concurrent_sends = ni->ni_peertxcredits;
253 }
211 } 254 }
212 255
213 if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2) 256 if (tunables->lnd_concurrent_sends > ni->ni_peertxcredits * 2)
214 *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2; 257 tunables->lnd_concurrent_sends = ni->ni_peertxcredits * 2;
215 258
216 if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2) 259 if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits / 2)
217 *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2; 260 tunables->lnd_concurrent_sends = ni->ni_peertxcredits / 2;
218 261
219 if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) { 262 if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits) {
220 CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n", 263 CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
221 *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits); 264 tunables->lnd_concurrent_sends, ni->ni_peertxcredits);
222 } 265 }
223 266
267 if (!tunables->lnd_fmr_pool_size)
268 tunables->lnd_fmr_pool_size = fmr_pool_size;
269 if (!tunables->lnd_fmr_flush_trigger)
270 tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
271 if (!tunables->lnd_fmr_cache)
272 tunables->lnd_fmr_cache = fmr_cache;
273
224 return 0; 274 return 0;
225} 275}
276
277void kiblnd_tunables_init(void)
278{
279 default_tunables.lnd_version = 0;
280 default_tunables.lnd_peercredits_hiw = peer_credits_hiw,
281 default_tunables.lnd_map_on_demand = map_on_demand;
282 default_tunables.lnd_concurrent_sends = concurrent_sends;
283 default_tunables.lnd_fmr_pool_size = fmr_pool_size;
284 default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
285 default_tunables.lnd_fmr_cache = fmr_cache;
286}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index cca7b2f7f1a7..406c0e7a57b9 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -2582,7 +2582,6 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
2582 } 2582 }
2583 2583
2584 read_unlock(&ksocknal_data.ksnd_global_lock); 2584 read_unlock(&ksocknal_data.ksnd_global_lock);
2585 return;
2586} 2585}
2587 2586
2588void 2587void
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index d4ce06d0aeeb..964b4e338fe0 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -675,7 +675,6 @@ ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
675 sock->sk->sk_user_data = conn; 675 sock->sk->sk_user_data = conn;
676 sock->sk->sk_data_ready = ksocknal_data_ready; 676 sock->sk->sk_data_ready = ksocknal_data_ready;
677 sock->sk->sk_write_space = ksocknal_write_space; 677 sock->sk->sk_write_space = ksocknal_write_space;
678 return;
679} 678}
680 679
681void 680void
@@ -695,8 +694,6 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
695 * sk_user_data is NULL. 694 * sk_user_data is NULL.
696 */ 695 */
697 sock->sk->sk_user_data = NULL; 696 sock->sk->sk_user_data = NULL;
698
699 return ;
700} 697}
701 698
702int 699int
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index c3d628bac5b8..8c260c3d5da4 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -232,130 +232,24 @@ int libcfs_panic_in_progress;
232static const char * 232static const char *
233libcfs_debug_subsys2str(int subsys) 233libcfs_debug_subsys2str(int subsys)
234{ 234{
235 switch (1 << subsys) { 235 static const char *libcfs_debug_subsystems[] = LIBCFS_DEBUG_SUBSYS_NAMES;
236 default: 236
237 if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems))
237 return NULL; 238 return NULL;
238 case S_UNDEFINED: 239
239 return "undefined"; 240 return libcfs_debug_subsystems[subsys];
240 case S_MDC:
241 return "mdc";
242 case S_MDS:
243 return "mds";
244 case S_OSC:
245 return "osc";
246 case S_OST:
247 return "ost";
248 case S_CLASS:
249 return "class";
250 case S_LOG:
251 return "log";
252 case S_LLITE:
253 return "llite";
254 case S_RPC:
255 return "rpc";
256 case S_LNET:
257 return "lnet";
258 case S_LND:
259 return "lnd";
260 case S_PINGER:
261 return "pinger";
262 case S_FILTER:
263 return "filter";
264 case S_ECHO:
265 return "echo";
266 case S_LDLM:
267 return "ldlm";
268 case S_LOV:
269 return "lov";
270 case S_LQUOTA:
271 return "lquota";
272 case S_OSD:
273 return "osd";
274 case S_LFSCK:
275 return "lfsck";
276 case S_LMV:
277 return "lmv";
278 case S_SEC:
279 return "sec";
280 case S_GSS:
281 return "gss";
282 case S_MGC:
283 return "mgc";
284 case S_MGS:
285 return "mgs";
286 case S_FID:
287 return "fid";
288 case S_FLD:
289 return "fld";
290 }
291} 241}
292 242
293/* libcfs_debug_token2mask() expects the returned string in lower-case */ 243/* libcfs_debug_token2mask() expects the returned string in lower-case */
294static const char * 244static const char *
295libcfs_debug_dbg2str(int debug) 245libcfs_debug_dbg2str(int debug)
296{ 246{
297 switch (1 << debug) { 247 static const char *libcfs_debug_masks[] = LIBCFS_DEBUG_MASKS_NAMES;
298 default: 248
249 if (debug >= ARRAY_SIZE(libcfs_debug_masks))
299 return NULL; 250 return NULL;
300 case D_TRACE: 251
301 return "trace"; 252 return libcfs_debug_masks[debug];
302 case D_INODE:
303 return "inode";
304 case D_SUPER:
305 return "super";
306 case D_EXT2:
307 return "ext2";
308 case D_MALLOC:
309 return "malloc";
310 case D_CACHE:
311 return "cache";
312 case D_INFO:
313 return "info";
314 case D_IOCTL:
315 return "ioctl";
316 case D_NETERROR:
317 return "neterror";
318 case D_NET:
319 return "net";
320 case D_WARNING:
321 return "warning";
322 case D_BUFFS:
323 return "buffs";
324 case D_OTHER:
325 return "other";
326 case D_DENTRY:
327 return "dentry";
328 case D_NETTRACE:
329 return "nettrace";
330 case D_PAGE:
331 return "page";
332 case D_DLMTRACE:
333 return "dlmtrace";
334 case D_ERROR:
335 return "error";
336 case D_EMERG:
337 return "emerg";
338 case D_HA:
339 return "ha";
340 case D_RPCTRACE:
341 return "rpctrace";
342 case D_VFSTRACE:
343 return "vfstrace";
344 case D_READA:
345 return "reada";
346 case D_MMAP:
347 return "mmap";
348 case D_CONFIG:
349 return "config";
350 case D_CONSOLE:
351 return "console";
352 case D_QUOTA:
353 return "quota";
354 case D_SEC:
355 return "sec";
356 case D_LFSCK:
357 return "lfsck";
358 }
359} 253}
360 254
361int 255int
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index dadaf7685cbd..086e690bd6f2 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -41,6 +41,9 @@ EXPORT_SYMBOL(cfs_fail_loc);
41unsigned int cfs_fail_val; 41unsigned int cfs_fail_val;
42EXPORT_SYMBOL(cfs_fail_val); 42EXPORT_SYMBOL(cfs_fail_val);
43 43
44int cfs_fail_err;
45EXPORT_SYMBOL(cfs_fail_err);
46
44DECLARE_WAIT_QUEUE_HEAD(cfs_race_waitq); 47DECLARE_WAIT_QUEUE_HEAD(cfs_race_waitq);
45EXPORT_SYMBOL(cfs_race_waitq); 48EXPORT_SYMBOL(cfs_race_waitq);
46 49
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index f60feb3a3dc7..cc45ed82b2be 100644
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -942,10 +942,10 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
942 * @flags - CFS_HASH_REHASH enable synamic hash resizing 942 * @flags - CFS_HASH_REHASH enable synamic hash resizing
943 * - CFS_HASH_SORT enable chained hash sort 943 * - CFS_HASH_SORT enable chained hash sort
944 */ 944 */
945static int cfs_hash_rehash_worker(cfs_workitem_t *wi); 945static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
946 946
947#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 947#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
948static int cfs_hash_dep_print(cfs_workitem_t *wi) 948static int cfs_hash_dep_print(struct cfs_workitem *wi)
949{ 949{
950 struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi); 950 struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
951 int dep; 951 int dep;
@@ -1847,7 +1847,7 @@ cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1847} 1847}
1848 1848
1849static int 1849static int
1850cfs_hash_rehash_worker(cfs_workitem_t *wi) 1850cfs_hash_rehash_worker(struct cfs_workitem *wi)
1851{ 1851{
1852 struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi); 1852 struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
1853 struct cfs_hash_bucket **bkts; 1853 struct cfs_hash_bucket **bkts;
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index 2de9eeae0232..83543f928279 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -49,7 +49,8 @@ EXPORT_SYMBOL(cfs_percpt_lock_free);
49 * reason we always allocate cacheline-aligned memory block. 49 * reason we always allocate cacheline-aligned memory block.
50 */ 50 */
51struct cfs_percpt_lock * 51struct cfs_percpt_lock *
52cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab) 52cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
53 struct lock_class_key *keys)
53{ 54{
54 struct cfs_percpt_lock *pcl; 55 struct cfs_percpt_lock *pcl;
55 spinlock_t *lock; 56 spinlock_t *lock;
@@ -67,12 +68,18 @@ cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
67 return NULL; 68 return NULL;
68 } 69 }
69 70
70 cfs_percpt_for_each(lock, i, pcl->pcl_locks) 71 if (!keys)
72 CWARN("Cannot setup class key for percpt lock, you may see recursive locking warnings which are actually fake.\n");
73
74 cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
71 spin_lock_init(lock); 75 spin_lock_init(lock);
76 if (keys != NULL)
77 lockdep_set_class(lock, &keys[i]);
78 }
72 79
73 return pcl; 80 return pcl;
74} 81}
75EXPORT_SYMBOL(cfs_percpt_lock_alloc); 82EXPORT_SYMBOL(cfs_percpt_lock_create);
76 83
77/** 84/**
78 * lock a CPU partition 85 * lock a CPU partition
@@ -142,44 +149,3 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
142 } 149 }
143} 150}
144EXPORT_SYMBOL(cfs_percpt_unlock); 151EXPORT_SYMBOL(cfs_percpt_unlock);
145
146/** free cpu-partition refcount */
147void
148cfs_percpt_atomic_free(atomic_t **refs)
149{
150 cfs_percpt_free(refs);
151}
152EXPORT_SYMBOL(cfs_percpt_atomic_free);
153
154/** allocate cpu-partition refcount with initial value @init_val */
155atomic_t **
156cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
157{
158 atomic_t **refs;
159 atomic_t *ref;
160 int i;
161
162 refs = cfs_percpt_alloc(cptab, sizeof(*ref));
163 if (!refs)
164 return NULL;
165
166 cfs_percpt_for_each(ref, i, refs)
167 atomic_set(ref, init_val);
168 return refs;
169}
170EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
171
172/** return sum of cpu-partition refs */
173int
174cfs_percpt_atomic_summary(atomic_t **refs)
175{
176 atomic_t *ref;
177 int i;
178 int val = 0;
179
180 cfs_percpt_for_each(ref, i, refs)
181 val += atomic_read(ref);
182
183 return val;
184}
185EXPORT_SYMBOL(cfs_percpt_atomic_summary);
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
index c5a6951516ed..d0e81bb41cdc 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
@@ -115,34 +115,6 @@ cfs_percpt_number(void *vars)
115EXPORT_SYMBOL(cfs_percpt_number); 115EXPORT_SYMBOL(cfs_percpt_number);
116 116
117/* 117/*
118 * return memory block shadowed from current CPU
119 */
120void *
121cfs_percpt_current(void *vars)
122{
123 struct cfs_var_array *arr;
124 int cpt;
125
126 arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
127 cpt = cfs_cpt_current(arr->va_cptab, 0);
128 if (cpt < 0)
129 return NULL;
130
131 return arr->va_ptrs[cpt];
132}
133
134void *
135cfs_percpt_index(void *vars, int idx)
136{
137 struct cfs_var_array *arr;
138
139 arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
140
141 LASSERT(idx >= 0 && idx < arr->va_count);
142 return arr->va_ptrs[idx];
143}
144
145/*
146 * free variable array, see more detail in cfs_array_alloc 118 * free variable array, see more detail in cfs_array_alloc
147 */ 119 */
148void 120void
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index 389fb9eeea75..b52518c54efe 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -755,8 +755,13 @@ cfs_cpt_table_create(int ncpt)
755 struct cfs_cpu_partition *part; 755 struct cfs_cpu_partition *part;
756 int n; 756 int n;
757 757
758 if (cpt >= ncpt) 758 /*
759 goto failed; 759 * Each emulated NUMA node has all allowed CPUs in
760 * the mask.
761 * End loop when all partitions have assigned CPUs.
762 */
763 if (cpt == ncpt)
764 break;
760 765
761 part = &cptab->ctb_parts[cpt]; 766 part = &cptab->ctb_parts[cpt];
762 767
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 8c9377ed850c..84f9b7b47581 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -30,13 +30,34 @@
30#include <crypto/hash.h> 30#include <crypto/hash.h>
31#include <linux/scatterlist.h> 31#include <linux/scatterlist.h>
32#include "../../../include/linux/libcfs/libcfs.h" 32#include "../../../include/linux/libcfs/libcfs.h"
33#include "../../../include/linux/libcfs/libcfs_crypto.h"
33#include "linux-crypto.h" 34#include "linux-crypto.h"
35
34/** 36/**
35 * Array of hash algorithm speed in MByte per second 37 * Array of hash algorithm speed in MByte per second
36 */ 38 */
37static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX]; 39static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX];
38 40
39static int cfs_crypto_hash_alloc(unsigned char alg_id, 41/**
42 * Initialize the state descriptor for the specified hash algorithm.
43 *
44 * An internal routine to allocate the hash-specific state in \a hdesc for
45 * use with cfs_crypto_hash_digest() to compute the hash of a single message,
46 * though possibly in multiple chunks. The descriptor internal state should
47 * be freed with cfs_crypto_hash_final().
48 *
49 * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
50 * \param[out] type pointer to the hash description in hash_types[]
51 * array
52 * \param[in,out] hdesc hash state descriptor to be initialized
53 * \param[in] key initial hash value/state, NULL to use default
54 * value
55 * \param[in] key_len length of \a key
56 *
57 * \retval 0 on success
58 * \retval negative errno on failure
59 */
60static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
40 const struct cfs_crypto_hash_type **type, 61 const struct cfs_crypto_hash_type **type,
41 struct ahash_request **req, 62 struct ahash_request **req,
42 unsigned char *key, 63 unsigned char *key,
@@ -45,11 +66,11 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
45 struct crypto_ahash *tfm; 66 struct crypto_ahash *tfm;
46 int err = 0; 67 int err = 0;
47 68
48 *type = cfs_crypto_hash_type(alg_id); 69 *type = cfs_crypto_hash_type(hash_alg);
49 70
50 if (!*type) { 71 if (!*type) {
51 CWARN("Unsupported hash algorithm id = %d, max id is %d\n", 72 CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
52 alg_id, CFS_HASH_ALG_MAX); 73 hash_alg, CFS_HASH_ALG_MAX);
53 return -EINVAL; 74 return -EINVAL;
54 } 75 }
55 tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC); 76 tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);
@@ -70,12 +91,6 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
70 91
71 ahash_request_set_callback(*req, 0, NULL, NULL); 92 ahash_request_set_callback(*req, 0, NULL, NULL);
72 93
73 /** Shash have different logic for initialization then digest
74 * shash: crypto_hash_setkey, crypto_hash_init
75 * digest: crypto_digest_init, crypto_digest_setkey
76 * Skip this function for digest, because we use shash logic at
77 * cfs_crypto_hash_alloc.
78 */
79 if (key) 94 if (key)
80 err = crypto_ahash_setkey(tfm, key, key_len); 95 err = crypto_ahash_setkey(tfm, key, key_len);
81 else if ((*type)->cht_key != 0) 96 else if ((*type)->cht_key != 0)
@@ -90,7 +105,7 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
90 105
91 CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n", 106 CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
92 crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm), 107 crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
93 cfs_crypto_hash_speeds[alg_id]); 108 cfs_crypto_hash_speeds[hash_alg]);
94 109
95 err = crypto_ahash_init(*req); 110 err = crypto_ahash_init(*req);
96 if (err) { 111 if (err) {
@@ -100,7 +115,33 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
100 return err; 115 return err;
101} 116}
102 117
103int cfs_crypto_hash_digest(unsigned char alg_id, 118/**
119 * Calculate hash digest for the passed buffer.
120 *
121 * This should be used when computing the hash on a single contiguous buffer.
122 * It combines the hash initialization, computation, and cleanup.
123 *
124 * \param[in] hash_alg id of hash algorithm (CFS_HASH_ALG_*)
125 * \param[in] buf data buffer on which to compute hash
126 * \param[in] buf_len length of \a buf in bytes
127 * \param[in] key initial value/state for algorithm,
128 * if \a key = NULL use default initial value
129 * \param[in] key_len length of \a key in bytes
130 * \param[out] hash pointer to computed hash value,
131 * if \a hash = NULL then \a hash_len is to digest
132 * size in bytes, retval -ENOSPC
133 * \param[in,out] hash_len size of \a hash buffer
134 *
135 * \retval -EINVAL \a buf, \a buf_len, \a hash_len,
136 * \a hash_alg invalid
137 * \retval -ENOENT \a hash_alg is unsupported
138 * \retval -ENOSPC \a hash is NULL, or \a hash_len less than
139 * digest size
140 * \retval 0 for success
141 * \retval negative errno for other errors from lower
142 * layers.
143 */
144int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
104 const void *buf, unsigned int buf_len, 145 const void *buf, unsigned int buf_len,
105 unsigned char *key, unsigned int key_len, 146 unsigned char *key, unsigned int key_len,
106 unsigned char *hash, unsigned int *hash_len) 147 unsigned char *hash, unsigned int *hash_len)
@@ -113,7 +154,7 @@ int cfs_crypto_hash_digest(unsigned char alg_id,
113 if (!buf || buf_len == 0 || !hash_len) 154 if (!buf || buf_len == 0 || !hash_len)
114 return -EINVAL; 155 return -EINVAL;
115 156
116 err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len); 157 err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
117 if (err != 0) 158 if (err != 0)
118 return err; 159 return err;
119 160
@@ -134,15 +175,32 @@ int cfs_crypto_hash_digest(unsigned char alg_id,
134} 175}
135EXPORT_SYMBOL(cfs_crypto_hash_digest); 176EXPORT_SYMBOL(cfs_crypto_hash_digest);
136 177
178/**
179 * Allocate and initialize desriptor for hash algorithm.
180 *
181 * This should be used to initialize a hash descriptor for multiple calls
182 * to a single hash function when computing the hash across multiple
183 * separate buffers or pages using cfs_crypto_hash_update{,_page}().
184 *
185 * The hash descriptor should be freed with cfs_crypto_hash_final().
186 *
187 * \param[in] hash_alg algorithm id (CFS_HASH_ALG_*)
188 * \param[in] key initial value/state for algorithm, if \a key = NULL
189 * use default initial value
190 * \param[in] key_len length of \a key in bytes
191 *
192 * \retval pointer to descriptor of hash instance
193 * \retval ERR_PTR(errno) in case of error
194 */
137struct cfs_crypto_hash_desc * 195struct cfs_crypto_hash_desc *
138 cfs_crypto_hash_init(unsigned char alg_id, 196cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
139 unsigned char *key, unsigned int key_len) 197 unsigned char *key, unsigned int key_len)
140{ 198{
141 struct ahash_request *req; 199 struct ahash_request *req;
142 int err; 200 int err;
143 const struct cfs_crypto_hash_type *type; 201 const struct cfs_crypto_hash_type *type;
144 202
145 err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len); 203 err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
146 204
147 if (err) 205 if (err)
148 return ERR_PTR(err); 206 return ERR_PTR(err);
@@ -150,6 +208,17 @@ struct cfs_crypto_hash_desc *
150} 208}
151EXPORT_SYMBOL(cfs_crypto_hash_init); 209EXPORT_SYMBOL(cfs_crypto_hash_init);
152 210
211/**
212 * Update hash digest computed on data within the given \a page
213 *
214 * \param[in] hdesc hash state descriptor
215 * \param[in] page data page on which to compute the hash
216 * \param[in] offset offset within \a page at which to start hash
217 * \param[in] len length of data on which to compute hash
218 *
219 * \retval 0 for success
220 * \retval negative errno on failure
221 */
153int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc, 222int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
154 struct page *page, unsigned int offset, 223 struct page *page, unsigned int offset,
155 unsigned int len) 224 unsigned int len)
@@ -158,13 +227,23 @@ int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
158 struct scatterlist sl; 227 struct scatterlist sl;
159 228
160 sg_init_table(&sl, 1); 229 sg_init_table(&sl, 1);
161 sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK); 230 sg_set_page(&sl, page, len, offset & ~PAGE_MASK);
162 231
163 ahash_request_set_crypt(req, &sl, NULL, sl.length); 232 ahash_request_set_crypt(req, &sl, NULL, sl.length);
164 return crypto_ahash_update(req); 233 return crypto_ahash_update(req);
165} 234}
166EXPORT_SYMBOL(cfs_crypto_hash_update_page); 235EXPORT_SYMBOL(cfs_crypto_hash_update_page);
167 236
237/**
238 * Update hash digest computed on the specified data
239 *
240 * \param[in] hdesc hash state descriptor
241 * \param[in] buf data buffer on which to compute the hash
242 * \param[in] buf_len length of \buf on which to compute hash
243 *
244 * \retval 0 for success
245 * \retval negative errno on failure
246 */
168int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc, 247int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
169 const void *buf, unsigned int buf_len) 248 const void *buf, unsigned int buf_len)
170{ 249{
@@ -178,7 +257,18 @@ int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
178} 257}
179EXPORT_SYMBOL(cfs_crypto_hash_update); 258EXPORT_SYMBOL(cfs_crypto_hash_update);
180 259
181/* If hash_len pointer is NULL - destroy descriptor. */ 260/**
261 * Finish hash calculation, copy hash digest to buffer, clean up hash descriptor
262 *
263 * \param[in] hdesc hash descriptor
264 * \param[out] hash pointer to hash buffer to store hash digest
265 * \param[in,out] hash_len pointer to hash buffer size, if \a hdesc = NULL
266 * only free \a hdesc instead of computing the hash
267 *
268 * \retval 0 for success
269 * \retval -EOVERFLOW if hash_len is too small for the hash digest
270 * \retval negative errno for other errors from lower layers
271 */
182int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc, 272int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
183 unsigned char *hash, unsigned int *hash_len) 273 unsigned char *hash, unsigned int *hash_len)
184{ 274{
@@ -186,99 +276,153 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
186 struct ahash_request *req = (void *)hdesc; 276 struct ahash_request *req = (void *)hdesc;
187 int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); 277 int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
188 278
189 if (!hash_len) { 279 if (!hash || !hash_len) {
190 crypto_free_ahash(crypto_ahash_reqtfm(req)); 280 err = 0;
191 ahash_request_free(req); 281 goto free_ahash;
192 return 0;
193 } 282 }
194 if (!hash || *hash_len < size) { 283 if (*hash_len < size) {
195 *hash_len = size; 284 err = -EOVERFLOW;
196 return -ENOSPC; 285 goto free_ahash;
197 } 286 }
287
198 ahash_request_set_crypt(req, NULL, hash, 0); 288 ahash_request_set_crypt(req, NULL, hash, 0);
199 err = crypto_ahash_final(req); 289 err = crypto_ahash_final(req);
200 290 if (!err)
201 if (err < 0) { 291 *hash_len = size;
202 /* May be caller can fix error */ 292free_ahash:
203 return err;
204 }
205 crypto_free_ahash(crypto_ahash_reqtfm(req)); 293 crypto_free_ahash(crypto_ahash_reqtfm(req));
206 ahash_request_free(req); 294 ahash_request_free(req);
207 return err; 295 return err;
208} 296}
209EXPORT_SYMBOL(cfs_crypto_hash_final); 297EXPORT_SYMBOL(cfs_crypto_hash_final);
210 298
211static void cfs_crypto_performance_test(unsigned char alg_id, 299/**
212 const unsigned char *buf, 300 * Compute the speed of specified hash function
213 unsigned int buf_len) 301 *
302 * Run a speed test on the given hash algorithm on buffer of the given size.
303 * The speed is stored internally in the cfs_crypto_hash_speeds[] array, and
304 * is available through the cfs_crypto_hash_speed() function.
305 *
306 * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
307 * \param[in] buf data buffer on which to compute the hash
308 * \param[in] buf_len length of \buf on which to compute hash
309 */
310static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg)
214{ 311{
312 int buf_len = max(PAGE_SIZE, 1048576UL);
313 void *buf;
215 unsigned long start, end; 314 unsigned long start, end;
216 int bcount, err = 0; 315 int bcount, err = 0;
217 int sec = 1; /* do test only 1 sec */ 316 struct page *page;
218 unsigned char hash[64]; 317 unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
219 unsigned int hash_len = 64; 318 unsigned int hash_len = sizeof(hash);
220 319
221 for (start = jiffies, end = start + sec * HZ, bcount = 0; 320 page = alloc_page(GFP_KERNEL);
222 time_before(jiffies, end); bcount++) { 321 if (!page) {
223 err = cfs_crypto_hash_digest(alg_id, buf, buf_len, NULL, 0, 322 err = -ENOMEM;
224 hash, &hash_len); 323 goto out_err;
324 }
325
326 buf = kmap(page);
327 memset(buf, 0xAD, PAGE_SIZE);
328 kunmap(page);
329
330 for (start = jiffies, end = start + msecs_to_jiffies(MSEC_PER_SEC),
331 bcount = 0; time_before(jiffies, end); bcount++) {
332 struct cfs_crypto_hash_desc *hdesc;
333 int i;
334
335 hdesc = cfs_crypto_hash_init(hash_alg, NULL, 0);
336 if (IS_ERR(hdesc)) {
337 err = PTR_ERR(hdesc);
338 break;
339 }
340
341 for (i = 0; i < buf_len / PAGE_SIZE; i++) {
342 err = cfs_crypto_hash_update_page(hdesc, page, 0,
343 PAGE_SIZE);
344 if (err)
345 break;
346 }
347
348 err = cfs_crypto_hash_final(hdesc, hash, &hash_len);
225 if (err) 349 if (err)
226 break; 350 break;
227 } 351 }
228 end = jiffies; 352 end = jiffies;
229 353 __free_page(page);
354out_err:
230 if (err) { 355 if (err) {
231 cfs_crypto_hash_speeds[alg_id] = -1; 356 cfs_crypto_hash_speeds[hash_alg] = err;
232 CDEBUG(D_INFO, "Crypto hash algorithm %s, err = %d\n", 357 CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n",
233 cfs_crypto_hash_name(alg_id), err); 358 cfs_crypto_hash_name(hash_alg), err);
234 } else { 359 } else {
235 unsigned long tmp; 360 unsigned long tmp;
236 361
237 tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) * 362 tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
238 1000) / (1024 * 1024); 363 1000) / (1024 * 1024);
239 cfs_crypto_hash_speeds[alg_id] = (int)tmp; 364 cfs_crypto_hash_speeds[hash_alg] = (int)tmp;
365 CDEBUG(D_CONFIG, "Crypto hash algorithm %s speed = %d MB/s\n",
366 cfs_crypto_hash_name(hash_alg),
367 cfs_crypto_hash_speeds[hash_alg]);
240 } 368 }
241 CDEBUG(D_INFO, "Crypto hash algorithm %s speed = %d MB/s\n",
242 cfs_crypto_hash_name(alg_id), cfs_crypto_hash_speeds[alg_id]);
243} 369}
244 370
245int cfs_crypto_hash_speed(unsigned char hash_alg) 371/**
372 * hash speed in Mbytes per second for valid hash algorithm
373 *
374 * Return the performance of the specified \a hash_alg that was previously
375 * computed using cfs_crypto_performance_test().
376 *
377 * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
378 *
379 * \retval positive speed of the hash function in MB/s
380 * \retval -ENOENT if \a hash_alg is unsupported
381 * \retval negative errno if \a hash_alg speed is unavailable
382 */
383int cfs_crypto_hash_speed(enum cfs_crypto_hash_alg hash_alg)
246{ 384{
247 if (hash_alg < CFS_HASH_ALG_MAX) 385 if (hash_alg < CFS_HASH_ALG_MAX)
248 return cfs_crypto_hash_speeds[hash_alg]; 386 return cfs_crypto_hash_speeds[hash_alg];
249 return -1; 387 return -ENOENT;
250} 388}
251EXPORT_SYMBOL(cfs_crypto_hash_speed); 389EXPORT_SYMBOL(cfs_crypto_hash_speed);
252 390
253/** 391/**
254 * Do performance test for all hash algorithms. 392 * Run the performance test for all hash algorithms.
393 *
394 * Run the cfs_crypto_performance_test() benchmark for all of the available
395 * hash functions using a 1MB buffer size. This is a reasonable buffer size
396 * for Lustre RPCs, even if the actual RPC size is larger or smaller.
397 *
398 * Since the setup cost and computation speed of various hash algorithms is
399 * a function of the buffer size (and possibly internal contention of offload
400 * engines), this speed only represents an estimate of the actual speed under
401 * actual usage, but is reasonable for comparing available algorithms.
402 *
403 * The actual speeds are available via cfs_crypto_hash_speed() for later
404 * comparison.
405 *
406 * \retval 0 on success
407 * \retval -ENOMEM if no memory is available for test buffer
255 */ 408 */
256static int cfs_crypto_test_hashes(void) 409static int cfs_crypto_test_hashes(void)
257{ 410{
258 unsigned char i; 411 enum cfs_crypto_hash_alg hash_alg;
259 unsigned char *data;
260 unsigned int j;
261 /* Data block size for testing hash. Maximum
262 * kmalloc size for 2.6.18 kernel is 128K
263 */
264 unsigned int data_len = 1 * 128 * 1024;
265
266 data = kmalloc(data_len, 0);
267 if (!data)
268 return -ENOMEM;
269 412
270 for (j = 0; j < data_len; j++) 413 for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
271 data[j] = j & 0xff; 414 cfs_crypto_performance_test(hash_alg);
272 415
273 for (i = 0; i < CFS_HASH_ALG_MAX; i++)
274 cfs_crypto_performance_test(i, data, data_len);
275
276 kfree(data);
277 return 0; 416 return 0;
278} 417}
279 418
280static int adler32; 419static int adler32;
281 420
421/**
422 * Register available hash functions
423 *
424 * \retval 0
425 */
282int cfs_crypto_register(void) 426int cfs_crypto_register(void)
283{ 427{
284 request_module("crc32c"); 428 request_module("crc32c");
@@ -290,6 +434,9 @@ int cfs_crypto_register(void)
290 return 0; 434 return 0;
291} 435}
292 436
437/**
438 * Unregister previously registered hash functions
439 */
293void cfs_crypto_unregister(void) 440void cfs_crypto_unregister(void)
294{ 441{
295 if (adler32 == 0) 442 if (adler32 == 0)
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index ebc60ac9bb7a..d89f71ee45b2 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -40,10 +40,75 @@
40 40
41#define LNET_MINOR 240 41#define LNET_MINOR 240
42 42
43static inline size_t libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
44{
45 size_t len = sizeof(*data);
46
47 len += cfs_size_round(data->ioc_inllen1);
48 len += cfs_size_round(data->ioc_inllen2);
49 return len;
50}
51
52static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
53{
54 if (data->ioc_hdr.ioc_len > BIT(30)) {
55 CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n");
56 return true;
57 }
58 if (data->ioc_inllen1 > BIT(30)) {
59 CERROR("LIBCFS ioctl: ioc_inllen1 larger than 1<<30\n");
60 return true;
61 }
62 if (data->ioc_inllen2 > BIT(30)) {
63 CERROR("LIBCFS ioctl: ioc_inllen2 larger than 1<<30\n");
64 return true;
65 }
66 if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
67 CERROR("LIBCFS ioctl: inlbuf1 pointer but 0 length\n");
68 return true;
69 }
70 if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
71 CERROR("LIBCFS ioctl: inlbuf2 pointer but 0 length\n");
72 return true;
73 }
74 if (data->ioc_pbuf1 && !data->ioc_plen1) {
75 CERROR("LIBCFS ioctl: pbuf1 pointer but 0 length\n");
76 return true;
77 }
78 if (data->ioc_pbuf2 && !data->ioc_plen2) {
79 CERROR("LIBCFS ioctl: pbuf2 pointer but 0 length\n");
80 return true;
81 }
82 if (data->ioc_plen1 && !data->ioc_pbuf1) {
83 CERROR("LIBCFS ioctl: plen1 nonzero but no pbuf1 pointer\n");
84 return true;
85 }
86 if (data->ioc_plen2 && !data->ioc_pbuf2) {
87 CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
88 return true;
89 }
90 if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
91 CERROR("LIBCFS ioctl: packlen != ioc_len\n");
92 return true;
93 }
94 if (data->ioc_inllen1 &&
95 data->ioc_bulk[data->ioc_inllen1 - 1] != '\0') {
96 CERROR("LIBCFS ioctl: inlbuf1 not 0 terminated\n");
97 return true;
98 }
99 if (data->ioc_inllen2 &&
100 data->ioc_bulk[cfs_size_round(data->ioc_inllen1) +
101 data->ioc_inllen2 - 1] != '\0') {
102 CERROR("LIBCFS ioctl: inlbuf2 not 0 terminated\n");
103 return true;
104 }
105 return false;
106}
107
43int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data) 108int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data)
44{ 109{
45 if (libcfs_ioctl_is_invalid(data)) { 110 if (libcfs_ioctl_is_invalid(data)) {
46 CERROR("LNET: ioctl not correctly formatted\n"); 111 CERROR("libcfs ioctl: parameter not correctly formatted\n");
47 return -EINVAL; 112 return -EINVAL;
48 } 113 }
49 114
@@ -57,68 +122,47 @@ int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data)
57 return 0; 122 return 0;
58} 123}
59 124
60int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg, 125int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
61 __u32 *len) 126 const struct libcfs_ioctl_hdr __user *uhdr)
62{ 127{
63 struct libcfs_ioctl_hdr hdr; 128 struct libcfs_ioctl_hdr hdr;
129 int err = 0;
64 130
65 if (copy_from_user(&hdr, arg, sizeof(hdr))) 131 if (copy_from_user(&hdr, uhdr, sizeof(hdr)))
66 return -EFAULT; 132 return -EFAULT;
67 133
68 if (hdr.ioc_version != LIBCFS_IOCTL_VERSION && 134 if (hdr.ioc_version != LIBCFS_IOCTL_VERSION &&
69 hdr.ioc_version != LIBCFS_IOCTL_VERSION2) { 135 hdr.ioc_version != LIBCFS_IOCTL_VERSION2) {
70 CERROR("LNET: version mismatch expected %#x, got %#x\n", 136 CERROR("libcfs ioctl: version mismatch expected %#x, got %#x\n",
71 LIBCFS_IOCTL_VERSION, hdr.ioc_version); 137 LIBCFS_IOCTL_VERSION, hdr.ioc_version);
72 return -EINVAL; 138 return -EINVAL;
73 } 139 }
74 140
75 *len = hdr.ioc_len; 141 if (hdr.ioc_len < sizeof(struct libcfs_ioctl_data)) {
76 142 CERROR("libcfs ioctl: user buffer too small for ioctl\n");
77 return 0; 143 return -EINVAL;
78} 144 }
79
80int libcfs_ioctl_popdata(void __user *arg, void *data, int size)
81{
82 if (copy_to_user(arg, data, size))
83 return -EFAULT;
84 return 0;
85}
86
87static int
88libcfs_psdev_open(struct inode *inode, struct file *file)
89{
90 int rc = 0;
91 145
92 if (!inode) 146 if (hdr.ioc_len > LIBCFS_IOC_DATA_MAX) {
147 CERROR("libcfs ioctl: user buffer is too large %d/%d\n",
148 hdr.ioc_len, LIBCFS_IOC_DATA_MAX);
93 return -EINVAL; 149 return -EINVAL;
94 if (libcfs_psdev_ops.p_open) 150 }
95 rc = libcfs_psdev_ops.p_open(0, NULL);
96 else
97 return -EPERM;
98 return rc;
99}
100 151
101/* called when closing /dev/device */ 152 LIBCFS_ALLOC(*hdr_pp, hdr.ioc_len);
102static int 153 if (!*hdr_pp)
103libcfs_psdev_release(struct inode *inode, struct file *file) 154 return -ENOMEM;
104{
105 int rc = 0;
106 155
107 if (!inode) 156 if (copy_from_user(*hdr_pp, uhdr, hdr.ioc_len)) {
108 return -EINVAL; 157 LIBCFS_FREE(*hdr_pp, hdr.ioc_len);
109 if (libcfs_psdev_ops.p_close) 158 err = -EFAULT;
110 rc = libcfs_psdev_ops.p_close(0, NULL); 159 }
111 else 160 return err;
112 rc = -EPERM;
113 return rc;
114} 161}
115 162
116static long libcfs_ioctl(struct file *file, 163static long
117 unsigned int cmd, unsigned long arg) 164libcfs_psdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
118{ 165{
119 struct cfs_psdev_file pfile;
120 int rc = 0;
121
122 if (!capable(CAP_SYS_ADMIN)) 166 if (!capable(CAP_SYS_ADMIN))
123 return -EACCES; 167 return -EACCES;
124 168
@@ -130,26 +174,12 @@ static long libcfs_ioctl(struct file *file,
130 return -EINVAL; 174 return -EINVAL;
131 } 175 }
132 176
133 /* Handle platform-dependent IOC requests */ 177 return libcfs_ioctl(cmd, (void __user *)arg);
134 switch (cmd) {
135 case IOC_LIBCFS_PANIC:
136 if (!capable(CFS_CAP_SYS_BOOT))
137 return -EPERM;
138 panic("debugctl-invoked panic");
139 return 0;
140 }
141
142 if (libcfs_psdev_ops.p_ioctl)
143 rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void __user *)arg);
144 else
145 rc = -EPERM;
146 return rc;
147} 178}
148 179
149static const struct file_operations libcfs_fops = { 180static const struct file_operations libcfs_fops = {
150 .unlocked_ioctl = libcfs_ioctl, 181 .owner = THIS_MODULE,
151 .open = libcfs_psdev_open, 182 .unlocked_ioctl = libcfs_psdev_ioctl,
152 .release = libcfs_psdev_release,
153}; 183};
154 184
155struct miscdevice libcfs_dev = { 185struct miscdevice libcfs_dev = {
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
index 89084460231a..bbe19a684c81 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
@@ -46,30 +46,6 @@
46#include <linux/kgdb.h> 46#include <linux/kgdb.h>
47#endif 47#endif
48 48
49/**
50 * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
51 * waiting threads, which is not always desirable because all threads will
52 * be waken up again and again, even user only needs a few of them to be
53 * active most time. This is not good for performance because cache can
54 * be polluted by different threads.
55 *
56 * LIFO list can resolve this problem because we always wakeup the most
57 * recent active thread by default.
58 *
59 * NB: please don't call non-exclusive & exclusive wait on the same
60 * waitq if add_wait_queue_exclusive_head is used.
61 */
62void
63add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
64{
65 unsigned long flags;
66
67 spin_lock_irqsave(&waitq->lock, flags);
68 __add_wait_queue_exclusive(waitq, link);
69 spin_unlock_irqrestore(&waitq->lock, flags);
70}
71EXPORT_SYMBOL(add_wait_queue_exclusive_head);
72
73sigset_t 49sigset_t
74cfs_block_allsigs(void) 50cfs_block_allsigs(void)
75{ 51{
@@ -128,13 +104,6 @@ cfs_restore_sigs(sigset_t old)
128} 104}
129EXPORT_SYMBOL(cfs_restore_sigs); 105EXPORT_SYMBOL(cfs_restore_sigs);
130 106
131int
132cfs_signal_pending(void)
133{
134 return signal_pending(current);
135}
136EXPORT_SYMBOL(cfs_signal_pending);
137
138void 107void
139cfs_clear_sigpending(void) 108cfs_clear_sigpending(void)
140{ 109{
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index cdc640bfdba8..f2d041118cf7 100644
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -54,9 +54,6 @@
54 54
55# define DEBUG_SUBSYSTEM S_LNET 55# define DEBUG_SUBSYSTEM S_LNET
56 56
57#define LNET_MAX_IOCTL_BUF_LEN (sizeof(struct lnet_ioctl_net_config) + \
58 sizeof(struct lnet_ioctl_config_data))
59
60#include "../../include/linux/libcfs/libcfs.h" 57#include "../../include/linux/libcfs/libcfs.h"
61#include <asm/div64.h> 58#include <asm/div64.h>
62 59
@@ -68,20 +65,6 @@
68 65
69static struct dentry *lnet_debugfs_root; 66static struct dentry *lnet_debugfs_root;
70 67
71/* called when opening /dev/device */
72static int libcfs_psdev_open(unsigned long flags, void *args)
73{
74 try_module_get(THIS_MODULE);
75 return 0;
76}
77
78/* called when closing /dev/device */
79static int libcfs_psdev_release(unsigned long flags, void *args)
80{
81 module_put(THIS_MODULE);
82 return 0;
83}
84
85static DECLARE_RWSEM(ioctl_list_sem); 68static DECLARE_RWSEM(ioctl_list_sem);
86static LIST_HEAD(ioctl_list); 69static LIST_HEAD(ioctl_list);
87 70
@@ -115,39 +98,47 @@ int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand)
115} 98}
116EXPORT_SYMBOL(libcfs_deregister_ioctl); 99EXPORT_SYMBOL(libcfs_deregister_ioctl);
117 100
118static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd, 101int libcfs_ioctl(unsigned long cmd, void __user *uparam)
119 void __user *arg, struct libcfs_ioctl_hdr *hdr)
120{ 102{
121 struct libcfs_ioctl_data *data = NULL; 103 struct libcfs_ioctl_data *data = NULL;
122 int err = -EINVAL; 104 struct libcfs_ioctl_hdr *hdr;
105 int err;
106
107 /* 'cmd' and permissions get checked in our arch-specific caller */
108 err = libcfs_ioctl_getdata(&hdr, uparam);
109 if (err) {
110 CDEBUG_LIMIT(D_ERROR,
111 "libcfs ioctl: data header error %d\n", err);
112 return err;
113 }
123 114
124 /*
125 * The libcfs_ioctl_data_adjust() function performs adjustment
126 * operations on the libcfs_ioctl_data structure to make
127 * it usable by the code. This doesn't need to be called
128 * for new data structures added.
129 */
130 if (hdr->ioc_version == LIBCFS_IOCTL_VERSION) { 115 if (hdr->ioc_version == LIBCFS_IOCTL_VERSION) {
116 /*
117 * The libcfs_ioctl_data_adjust() function performs adjustment
118 * operations on the libcfs_ioctl_data structure to make
119 * it usable by the code. This doesn't need to be called
120 * for new data structures added.
121 */
131 data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr); 122 data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);
132 err = libcfs_ioctl_data_adjust(data); 123 err = libcfs_ioctl_data_adjust(data);
133 if (err) 124 if (err)
134 return err; 125 goto out;
135 } 126 }
136 127
128 CDEBUG(D_IOCTL, "libcfs ioctl cmd %lu\n", cmd);
137 switch (cmd) { 129 switch (cmd) {
138 case IOC_LIBCFS_CLEAR_DEBUG: 130 case IOC_LIBCFS_CLEAR_DEBUG:
139 libcfs_debug_clear_buffer(); 131 libcfs_debug_clear_buffer();
140 return 0; 132 break;
141 /* 133
142 * case IOC_LIBCFS_PANIC:
143 * Handled in arch/cfs_module.c
144 */
145 case IOC_LIBCFS_MARK_DEBUG: 134 case IOC_LIBCFS_MARK_DEBUG:
146 if (!data->ioc_inlbuf1 || 135 if (!data || !data->ioc_inlbuf1 ||
147 data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0') 136 data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0') {
148 return -EINVAL; 137 err = -EINVAL;
138 goto out;
139 }
149 libcfs_debug_mark_buffer(data->ioc_inlbuf1); 140 libcfs_debug_mark_buffer(data->ioc_inlbuf1);
150 return 0; 141 break;
151 142
152 default: { 143 default: {
153 struct libcfs_ioctl_handler *hand; 144 struct libcfs_ioctl_handler *hand;
@@ -156,67 +147,23 @@ static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd,
156 down_read(&ioctl_list_sem); 147 down_read(&ioctl_list_sem);
157 list_for_each_entry(hand, &ioctl_list, item) { 148 list_for_each_entry(hand, &ioctl_list, item) {
158 err = hand->handle_ioctl(cmd, hdr); 149 err = hand->handle_ioctl(cmd, hdr);
159 if (err != -EINVAL) { 150 if (err == -EINVAL)
160 if (err == 0) 151 continue;
161 err = libcfs_ioctl_popdata(arg, 152
162 hdr, hdr->ioc_len); 153 if (!err) {
163 break; 154 if (copy_to_user(uparam, hdr, hdr->ioc_len))
155 err = -EFAULT;
164 } 156 }
157 break;
165 } 158 }
166 up_read(&ioctl_list_sem); 159 up_read(&ioctl_list_sem);
167 break; 160 break; }
168 }
169 }
170
171 return err;
172}
173
174static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd,
175 void __user *arg)
176{
177 struct libcfs_ioctl_hdr *hdr;
178 int err = 0;
179 __u32 buf_len;
180
181 err = libcfs_ioctl_getdata_len(arg, &buf_len);
182 if (err)
183 return err;
184
185 /*
186 * do a check here to restrict the size of the memory
187 * to allocate to guard against DoS attacks.
188 */
189 if (buf_len > LNET_MAX_IOCTL_BUF_LEN) {
190 CERROR("LNET: user buffer exceeds kernel buffer\n");
191 return -EINVAL;
192 }
193
194 LIBCFS_ALLOC_GFP(hdr, buf_len, GFP_KERNEL);
195 if (!hdr)
196 return -ENOMEM;
197
198 /* 'cmd' and permissions get checked in our arch-specific caller */
199 if (copy_from_user(hdr, arg, buf_len)) {
200 CERROR("LNET ioctl: data error\n");
201 err = -EFAULT;
202 goto out;
203 } 161 }
204
205 err = libcfs_ioctl_handle(pfile, cmd, arg, hdr);
206
207out: 162out:
208 LIBCFS_FREE(hdr, buf_len); 163 LIBCFS_FREE(hdr, hdr->ioc_len);
209 return err; 164 return err;
210} 165}
211 166
212struct cfs_psdev_ops libcfs_psdev_ops = {
213 libcfs_psdev_open,
214 libcfs_psdev_release,
215 NULL,
216 NULL,
217 libcfs_ioctl
218};
219
220int lprocfs_call_handler(void *data, int write, loff_t *ppos, 167int lprocfs_call_handler(void *data, int write, loff_t *ppos,
221 void __user *buffer, size_t *lenp, 168 void __user *buffer, size_t *lenp,
222 int (*handler)(void *data, int write, loff_t pos, 169 int (*handler)(void *data, int write, loff_t pos,
@@ -478,6 +425,13 @@ static struct ctl_table lnet_table[] = {
478 .proc_handler = &proc_dointvec 425 .proc_handler = &proc_dointvec
479 }, 426 },
480 { 427 {
428 .procname = "fail_err",
429 .data = &cfs_fail_err,
430 .maxlen = sizeof(cfs_fail_err),
431 .mode = 0644,
432 .proc_handler = &proc_dointvec,
433 },
434 {
481 } 435 }
482}; 436};
483 437
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 244eb89eef68..7739b9469c5a 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -707,10 +707,9 @@ int cfs_tracefile_dump_all_pages(char *filename)
707 struct cfs_trace_page *tage; 707 struct cfs_trace_page *tage;
708 struct cfs_trace_page *tmp; 708 struct cfs_trace_page *tmp;
709 char *buf; 709 char *buf;
710 mm_segment_t __oldfs;
710 int rc; 711 int rc;
711 712
712 DECL_MMSPACE;
713
714 cfs_tracefile_write_lock(); 713 cfs_tracefile_write_lock();
715 714
716 filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 715 filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE,
@@ -729,11 +728,12 @@ int cfs_tracefile_dump_all_pages(char *filename)
729 rc = 0; 728 rc = 0;
730 goto close; 729 goto close;
731 } 730 }
731 __oldfs = get_fs();
732 set_fs(get_ds());
732 733
733 /* ok, for now, just write the pages. in the future we'll be building 734 /* ok, for now, just write the pages. in the future we'll be building
734 * iobufs with the pages and calling generic_direct_IO 735 * iobufs with the pages and calling generic_direct_IO
735 */ 736 */
736 MMSPACE_OPEN;
737 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { 737 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
738 __LASSERT_TAGE_INVARIANT(tage); 738 __LASSERT_TAGE_INVARIANT(tage);
739 739
@@ -752,7 +752,7 @@ int cfs_tracefile_dump_all_pages(char *filename)
752 list_del(&tage->linkage); 752 list_del(&tage->linkage);
753 cfs_tage_free(tage); 753 cfs_tage_free(tage);
754 } 754 }
755 MMSPACE_CLOSE; 755 set_fs(__oldfs);
756 rc = vfs_fsync(filp, 1); 756 rc = vfs_fsync(filp, 1);
757 if (rc) 757 if (rc)
758 pr_err("sync returns %d\n", rc); 758 pr_err("sync returns %d\n", rc);
@@ -986,13 +986,12 @@ static int tracefiled(void *arg)
986 struct tracefiled_ctl *tctl = arg; 986 struct tracefiled_ctl *tctl = arg;
987 struct cfs_trace_page *tage; 987 struct cfs_trace_page *tage;
988 struct cfs_trace_page *tmp; 988 struct cfs_trace_page *tmp;
989 mm_segment_t __oldfs;
989 struct file *filp; 990 struct file *filp;
990 char *buf; 991 char *buf;
991 int last_loop = 0; 992 int last_loop = 0;
992 int rc; 993 int rc;
993 994
994 DECL_MMSPACE;
995
996 /* we're started late enough that we pick up init's fs context */ 995 /* we're started late enough that we pick up init's fs context */
997 /* this is so broken in uml? what on earth is going on? */ 996 /* this is so broken in uml? what on earth is going on? */
998 997
@@ -1025,8 +1024,8 @@ static int tracefiled(void *arg)
1025 __LASSERT(list_empty(&pc.pc_pages)); 1024 __LASSERT(list_empty(&pc.pc_pages));
1026 goto end_loop; 1025 goto end_loop;
1027 } 1026 }
1028 1027 __oldfs = get_fs();
1029 MMSPACE_OPEN; 1028 set_fs(get_ds());
1030 1029
1031 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { 1030 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
1032 static loff_t f_pos; 1031 static loff_t f_pos;
@@ -1051,7 +1050,7 @@ static int tracefiled(void *arg)
1051 break; 1050 break;
1052 } 1051 }
1053 } 1052 }
1054 MMSPACE_CLOSE; 1053 set_fs(__oldfs);
1055 1054
1056 filp_close(filp, NULL); 1055 filp_close(filp, NULL);
1057 put_pages_on_daemon_list(&pc); 1056 put_pages_on_daemon_list(&pc);
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
index c72fe00dce8d..92236ae59e49 100644
--- a/drivers/staging/lustre/lnet/libcfs/workitem.c
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -111,7 +111,7 @@ cfs_wi_sched_cansleep(struct cfs_wi_sched *sched)
111 * 1. when it returns no one shall try to schedule the workitem. 111 * 1. when it returns no one shall try to schedule the workitem.
112 */ 112 */
113void 113void
114cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi) 114cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
115{ 115{
116 LASSERT(!in_interrupt()); /* because we use plain spinlock */ 116 LASSERT(!in_interrupt()); /* because we use plain spinlock */
117 LASSERT(!sched->ws_stopping); 117 LASSERT(!sched->ws_stopping);
@@ -138,7 +138,7 @@ EXPORT_SYMBOL(cfs_wi_exit);
138 * cancel schedule request of workitem \a wi 138 * cancel schedule request of workitem \a wi
139 */ 139 */
140int 140int
141cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) 141cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
142{ 142{
143 int rc; 143 int rc;
144 144
@@ -179,7 +179,7 @@ EXPORT_SYMBOL(cfs_wi_deschedule);
179 * be added, and even dynamic creation of serialised queues might be supported. 179 * be added, and even dynamic creation of serialised queues might be supported.
180 */ 180 */
181void 181void
182cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) 182cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
183{ 183{
184 LASSERT(!in_interrupt()); /* because we use plain spinlock */ 184 LASSERT(!in_interrupt()); /* because we use plain spinlock */
185 LASSERT(!sched->ws_stopping); 185 LASSERT(!sched->ws_stopping);
@@ -229,12 +229,12 @@ static int cfs_wi_scheduler(void *arg)
229 while (!sched->ws_stopping) { 229 while (!sched->ws_stopping) {
230 int nloops = 0; 230 int nloops = 0;
231 int rc; 231 int rc;
232 cfs_workitem_t *wi; 232 struct cfs_workitem *wi;
233 233
234 while (!list_empty(&sched->ws_runq) && 234 while (!list_empty(&sched->ws_runq) &&
235 nloops < CFS_WI_RESCHED) { 235 nloops < CFS_WI_RESCHED) {
236 wi = list_entry(sched->ws_runq.next, cfs_workitem_t, 236 wi = list_entry(sched->ws_runq.next,
237 wi_list); 237 struct cfs_workitem, wi_list);
238 LASSERT(wi->wi_scheduled && !wi->wi_running); 238 LASSERT(wi->wi_scheduled && !wi->wi_running);
239 239
240 list_del_init(&wi->wi_list); 240 list_del_init(&wi->wi_list);
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 8764755544c9..fe0dbe7468e7 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -1215,9 +1215,9 @@ lnet_shutdown_lndni(struct lnet_ni *ni)
1215} 1215}
1216 1216
1217static int 1217static int
1218lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout, 1218lnet_startup_lndni(struct lnet_ni *ni, struct lnet_ioctl_config_data *conf)
1219 __s32 peer_cr, __s32 peer_buf_cr, __s32 credits)
1220{ 1219{
1220 struct lnet_ioctl_config_lnd_tunables *lnd_tunables = NULL;
1221 int rc = -EINVAL; 1221 int rc = -EINVAL;
1222 int lnd_type; 1222 int lnd_type;
1223 lnd_t *lnd; 1223 lnd_t *lnd;
@@ -1275,6 +1275,21 @@ lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
1275 1275
1276 ni->ni_lnd = lnd; 1276 ni->ni_lnd = lnd;
1277 1277
1278 if (conf && conf->cfg_hdr.ioc_len > sizeof(*conf))
1279 lnd_tunables = (struct lnet_ioctl_config_lnd_tunables *)conf->cfg_bulk;
1280
1281 if (lnd_tunables) {
1282 LIBCFS_ALLOC(ni->ni_lnd_tunables,
1283 sizeof(*ni->ni_lnd_tunables));
1284 if (!ni->ni_lnd_tunables) {
1285 mutex_unlock(&the_lnet.ln_lnd_mutex);
1286 rc = -ENOMEM;
1287 goto failed0;
1288 }
1289 memcpy(ni->ni_lnd_tunables, lnd_tunables,
1290 sizeof(*ni->ni_lnd_tunables));
1291 }
1292
1278 rc = lnd->lnd_startup(ni); 1293 rc = lnd->lnd_startup(ni);
1279 1294
1280 mutex_unlock(&the_lnet.ln_lnd_mutex); 1295 mutex_unlock(&the_lnet.ln_lnd_mutex);
@@ -1292,20 +1307,28 @@ lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
1292 * If given some LND tunable parameters, parse those now to 1307 * If given some LND tunable parameters, parse those now to
1293 * override the values in the NI structure. 1308 * override the values in the NI structure.
1294 */ 1309 */
1295 if (peer_buf_cr >= 0) 1310 if (conf && conf->cfg_config_u.cfg_net.net_peer_rtr_credits >= 0) {
1296 ni->ni_peerrtrcredits = peer_buf_cr; 1311 ni->ni_peerrtrcredits =
1297 if (peer_timeout >= 0) 1312 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
1298 ni->ni_peertimeout = peer_timeout; 1313 }
1314 if (conf && conf->cfg_config_u.cfg_net.net_peer_timeout >= 0) {
1315 ni->ni_peertimeout =
1316 conf->cfg_config_u.cfg_net.net_peer_timeout;
1317 }
1299 /* 1318 /*
1300 * TODO 1319 * TODO
1301 * Note: For now, don't allow the user to change 1320 * Note: For now, don't allow the user to change
1302 * peertxcredits as this number is used in the 1321 * peertxcredits as this number is used in the
1303 * IB LND to control queue depth. 1322 * IB LND to control queue depth.
1304 * if (peer_cr != -1) 1323 *
1305 * ni->ni_peertxcredits = peer_cr; 1324 * if (conf && conf->cfg_config_u.cfg_net.net_peer_tx_credits != -1)
1325 * ni->ni_peertxcredits =
1326 * conf->cfg_config_u.cfg_net.net_peer_tx_credits;
1306 */ 1327 */
1307 if (credits >= 0) 1328 if (conf && conf->cfg_config_u.cfg_net.net_max_tx_credits >= 0) {
1308 ni->ni_maxtxcredits = credits; 1329 ni->ni_maxtxcredits =
1330 conf->cfg_config_u.cfg_net.net_max_tx_credits;
1331 }
1309 1332
1310 LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query); 1333 LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query);
1311 1334
@@ -1367,7 +1390,7 @@ lnet_startup_lndnis(struct list_head *nilist)
1367 while (!list_empty(nilist)) { 1390 while (!list_empty(nilist)) {
1368 ni = list_entry(nilist->next, lnet_ni_t, ni_list); 1391 ni = list_entry(nilist->next, lnet_ni_t, ni_list);
1369 list_del(&ni->ni_list); 1392 list_del(&ni->ni_list);
1370 rc = lnet_startup_lndni(ni, -1, -1, -1, -1); 1393 rc = lnet_startup_lndni(ni, NULL);
1371 1394
1372 if (rc < 0) 1395 if (rc < 0)
1373 goto failed; 1396 goto failed;
@@ -1641,25 +1664,20 @@ EXPORT_SYMBOL(LNetNIFini);
1641 * parameters 1664 * parameters
1642 * 1665 *
1643 * \param[in] ni network interface structure 1666 * \param[in] ni network interface structure
1644 * \param[out] cpt_count the number of cpts the ni is on 1667 * \param[out] config NI configuration
1645 * \param[out] nid Network Interface ID
1646 * \param[out] peer_timeout NI peer timeout
1647 * \param[out] peer_tx_crdits NI peer transmit credits
1648 * \param[out] peer_rtr_credits NI peer router credits
1649 * \param[out] max_tx_credits NI max transmit credit
1650 * \param[out] net_config Network configuration
1651 */ 1668 */
1652static void 1669static void
1653lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid, 1670lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config)
1654 int *peer_timeout, int *peer_tx_credits,
1655 int *peer_rtr_credits, int *max_tx_credits,
1656 struct lnet_ioctl_net_config *net_config)
1657{ 1671{
1672 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
1673 struct lnet_ioctl_net_config *net_config;
1674 size_t min_size, tunable_size = 0;
1658 int i; 1675 int i;
1659 1676
1660 if (!ni) 1677 if (!ni || !config)
1661 return; 1678 return;
1662 1679
1680 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
1663 if (!net_config) 1681 if (!net_config)
1664 return; 1682 return;
1665 1683
@@ -1675,11 +1693,11 @@ lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
1675 sizeof(net_config->ni_interfaces[i])); 1693 sizeof(net_config->ni_interfaces[i]));
1676 } 1694 }
1677 1695
1678 *nid = ni->ni_nid; 1696 config->cfg_nid = ni->ni_nid;
1679 *peer_timeout = ni->ni_peertimeout; 1697 config->cfg_config_u.cfg_net.net_peer_timeout = ni->ni_peertimeout;
1680 *peer_tx_credits = ni->ni_peertxcredits; 1698 config->cfg_config_u.cfg_net.net_max_tx_credits = ni->ni_maxtxcredits;
1681 *peer_rtr_credits = ni->ni_peerrtrcredits; 1699 config->cfg_config_u.cfg_net.net_peer_tx_credits = ni->ni_peertxcredits;
1682 *max_tx_credits = ni->ni_maxtxcredits; 1700 config->cfg_config_u.cfg_net.net_peer_rtr_credits = ni->ni_peerrtrcredits;
1683 1701
1684 net_config->ni_status = ni->ni_status->ns_status; 1702 net_config->ni_status = ni->ni_status->ns_status;
1685 1703
@@ -1689,18 +1707,40 @@ lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
1689 for (i = 0; i < num_cpts; i++) 1707 for (i = 0; i < num_cpts; i++)
1690 net_config->ni_cpts[i] = ni->ni_cpts[i]; 1708 net_config->ni_cpts[i] = ni->ni_cpts[i];
1691 1709
1692 *cpt_count = num_cpts; 1710 config->cfg_ncpts = num_cpts;
1711 }
1712
1713 /*
1714 * See if user land tools sent in a newer and larger version
1715 * of struct lnet_tunables than what the kernel uses.
1716 */
1717 min_size = sizeof(*config) + sizeof(*net_config);
1718
1719 if (config->cfg_hdr.ioc_len > min_size)
1720 tunable_size = config->cfg_hdr.ioc_len - min_size;
1721
1722 /* Don't copy to much data to user space */
1723 min_size = min(tunable_size, sizeof(*ni->ni_lnd_tunables));
1724 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
1725
1726 if (ni->ni_lnd_tunables && lnd_cfg && min_size) {
1727 memcpy(lnd_cfg, ni->ni_lnd_tunables, min_size);
1728 config->cfg_config_u.cfg_net.net_interface_count = 1;
1729
1730 /* Tell user land that kernel side has less data */
1731 if (tunable_size > sizeof(*ni->ni_lnd_tunables)) {
1732 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
1733 config->cfg_hdr.ioc_len -= min_size;
1734 }
1693 } 1735 }
1694} 1736}
1695 1737
1696int 1738static int
1697lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout, 1739lnet_get_net_config(struct lnet_ioctl_config_data *config)
1698 int *peer_tx_credits, int *peer_rtr_credits,
1699 int *max_tx_credits,
1700 struct lnet_ioctl_net_config *net_config)
1701{ 1740{
1702 struct lnet_ni *ni; 1741 struct lnet_ni *ni;
1703 struct list_head *tmp; 1742 struct list_head *tmp;
1743 int idx = config->cfg_count;
1704 int cpt, i = 0; 1744 int cpt, i = 0;
1705 int rc = -ENOENT; 1745 int rc = -ENOENT;
1706 1746
@@ -1712,9 +1752,7 @@ lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
1712 1752
1713 ni = list_entry(tmp, lnet_ni_t, ni_list); 1753 ni = list_entry(tmp, lnet_ni_t, ni_list);
1714 lnet_ni_lock(ni); 1754 lnet_ni_lock(ni);
1715 lnet_fill_ni_info(ni, cpt_count, nid, peer_timeout, 1755 lnet_fill_ni_info(ni, config);
1716 peer_tx_credits, peer_rtr_credits,
1717 max_tx_credits, net_config);
1718 lnet_ni_unlock(ni); 1756 lnet_ni_unlock(ni);
1719 rc = 0; 1757 rc = 0;
1720 break; 1758 break;
@@ -1725,10 +1763,9 @@ lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
1725} 1763}
1726 1764
1727int 1765int
1728lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets, 1766lnet_dyn_add_ni(lnet_pid_t requested_pid, struct lnet_ioctl_config_data *conf)
1729 __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
1730 __s32 credits)
1731{ 1767{
1768 char *nets = conf->cfg_config_u.cfg_net.net_intf;
1732 lnet_ping_info_t *pinfo; 1769 lnet_ping_info_t *pinfo;
1733 lnet_handle_md_t md_handle; 1770 lnet_handle_md_t md_handle;
1734 struct lnet_ni *ni; 1771 struct lnet_ni *ni;
@@ -1773,8 +1810,7 @@ lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
1773 1810
1774 list_del_init(&ni->ni_list); 1811 list_del_init(&ni->ni_list);
1775 1812
1776 rc = lnet_startup_lndni(ni, peer_timeout, peer_cr, 1813 rc = lnet_startup_lndni(ni, conf);
1777 peer_buf_cr, credits);
1778 if (rc) 1814 if (rc)
1779 goto failed1; 1815 goto failed1;
1780 1816
@@ -1864,6 +1900,10 @@ LNetCtl(unsigned int cmd, void *arg)
1864 int rc; 1900 int rc;
1865 unsigned long secs_passed; 1901 unsigned long secs_passed;
1866 1902
1903 BUILD_BUG_ON(LIBCFS_IOC_DATA_MAX <
1904 sizeof(struct lnet_ioctl_net_config) +
1905 sizeof(struct lnet_ioctl_config_data));
1906
1867 switch (cmd) { 1907 switch (cmd) {
1868 case IOC_LIBCFS_GET_NI: 1908 case IOC_LIBCFS_GET_NI:
1869 rc = LNetGetId(data->ioc_count, &id); 1909 rc = LNetGetId(data->ioc_count, &id);
@@ -1918,27 +1958,14 @@ LNetCtl(unsigned int cmd, void *arg)
1918 &config->cfg_config_u.cfg_route.rtr_priority); 1958 &config->cfg_config_u.cfg_route.rtr_priority);
1919 1959
1920 case IOC_LIBCFS_GET_NET: { 1960 case IOC_LIBCFS_GET_NET: {
1921 struct lnet_ioctl_net_config *net_config; 1961 size_t total = sizeof(*config) +
1922 size_t total = sizeof(*config) + sizeof(*net_config); 1962 sizeof(struct lnet_ioctl_net_config);
1923
1924 config = arg; 1963 config = arg;
1925 1964
1926 if (config->cfg_hdr.ioc_len < total) 1965 if (config->cfg_hdr.ioc_len < total)
1927 return -EINVAL; 1966 return -EINVAL;
1928 1967
1929 net_config = (struct lnet_ioctl_net_config *) 1968 return lnet_get_net_config(config);
1930 config->cfg_bulk;
1931 if (!net_config)
1932 return -EINVAL;
1933
1934 return lnet_get_net_config(config->cfg_count,
1935 &config->cfg_ncpts,
1936 &config->cfg_nid,
1937 &config->cfg_config_u.cfg_net.net_peer_timeout,
1938 &config->cfg_config_u.cfg_net.net_peer_tx_credits,
1939 &config->cfg_config_u.cfg_net.net_peer_rtr_credits,
1940 &config->cfg_config_u.cfg_net.net_max_tx_credits,
1941 net_config);
1942 } 1969 }
1943 1970
1944 case IOC_LIBCFS_GET_LNET_STATS: { 1971 case IOC_LIBCFS_GET_LNET_STATS: {
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 449069c9e649..480cc9c6caab 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -107,6 +107,9 @@ lnet_ni_free(struct lnet_ni *ni)
107 if (ni->ni_cpts) 107 if (ni->ni_cpts)
108 cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts); 108 cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts);
109 109
110 if (ni->ni_lnd_tunables)
111 LIBCFS_FREE(ni->ni_lnd_tunables, sizeof(*ni->ni_lnd_tunables));
112
110 for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++) { 113 for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++) {
111 LIBCFS_FREE(ni->ni_interfaces[i], 114 LIBCFS_FREE(ni->ni_interfaces[i],
112 strlen(ni->ni_interfaces[i]) + 1); 115 strlen(ni->ni_interfaces[i]) + 1);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index f19aa9320e34..c5d5bedb3128 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -407,7 +407,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
407 LASSERT(niov > 0); 407 LASSERT(niov > 0);
408 LASSERT(nkiov > 0); 408 LASSERT(nkiov > 0);
409 this_nob = min(iov->iov_len - iovoffset, 409 this_nob = min(iov->iov_len - iovoffset,
410 (__kernel_size_t) kiov->kiov_len - kiovoffset); 410 (__kernel_size_t)kiov->kiov_len - kiovoffset);
411 this_nob = min(this_nob, nob); 411 this_nob = min(this_nob, nob);
412 412
413 if (!addr) 413 if (!addr)
@@ -477,7 +477,7 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
477 do { 477 do {
478 LASSERT(nkiov > 0); 478 LASSERT(nkiov > 0);
479 LASSERT(niov > 0); 479 LASSERT(niov > 0);
480 this_nob = min((__kernel_size_t) kiov->kiov_len - kiovoffset, 480 this_nob = min((__kernel_size_t)kiov->kiov_len - kiovoffset,
481 iov->iov_len - iovoffset); 481 iov->iov_len - iovoffset);
482 this_nob = min(this_nob, nob); 482 this_nob = min(this_nob, nob);
483 483
@@ -996,7 +996,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
996 LASSERT(msg2->msg_txpeer->lp_ni == ni); 996 LASSERT(msg2->msg_txpeer->lp_ni == ni);
997 LASSERT(msg2->msg_tx_delayed); 997 LASSERT(msg2->msg_tx_delayed);
998 998
999 (void) lnet_post_send_locked(msg2, 1); 999 (void)lnet_post_send_locked(msg2, 1);
1000 } 1000 }
1001 } 1001 }
1002 1002
@@ -1019,7 +1019,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
1019 LASSERT(msg2->msg_txpeer == txpeer); 1019 LASSERT(msg2->msg_txpeer == txpeer);
1020 LASSERT(msg2->msg_tx_delayed); 1020 LASSERT(msg2->msg_tx_delayed);
1021 1021
1022 (void) lnet_post_send_locked(msg2, 1); 1022 (void)lnet_post_send_locked(msg2, 1);
1023 } 1023 }
1024 } 1024 }
1025 1025
@@ -1142,7 +1142,7 @@ routing_off:
1142 lnet_msg_t, msg_list); 1142 lnet_msg_t, msg_list);
1143 list_del(&msg2->msg_list); 1143 list_del(&msg2->msg_list);
1144 1144
1145 (void) lnet_post_routed_recv_locked(msg2, 1); 1145 (void)lnet_post_routed_recv_locked(msg2, 1);
1146 } 1146 }
1147 } 1147 }
1148 if (rxpeer) { 1148 if (rxpeer) {
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index 93037c1168ca..246b5c141d01 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -108,12 +108,7 @@ lnet_dyn_configure(struct libcfs_ioctl_hdr *hdr)
108 rc = -EINVAL; 108 rc = -EINVAL;
109 goto out_unlock; 109 goto out_unlock;
110 } 110 }
111 rc = lnet_dyn_add_ni(LNET_PID_LUSTRE, 111 rc = lnet_dyn_add_ni(LNET_PID_LUSTRE, conf);
112 conf->cfg_config_u.cfg_net.net_intf,
113 conf->cfg_config_u.cfg_net.net_peer_timeout,
114 conf->cfg_config_u.cfg_net.net_peer_tx_credits,
115 conf->cfg_config_u.cfg_net.net_peer_rtr_credits,
116 conf->cfg_config_u.cfg_net.net_max_tx_credits);
117out_unlock: 112out_unlock:
118 mutex_unlock(&lnet_config_mutex); 113 mutex_unlock(&lnet_config_mutex);
119 114
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index dcb6e506f592..a63d86c4c10d 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -49,10 +49,10 @@ module_param(brw_inject_errors, int, 0644);
49MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default"); 49MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
50 50
51static void 51static void
52brw_client_fini(sfw_test_instance_t *tsi) 52brw_client_fini(struct sfw_test_instance *tsi)
53{ 53{
54 srpc_bulk_t *bulk; 54 struct srpc_bulk *bulk;
55 sfw_test_unit_t *tsu; 55 struct sfw_test_unit *tsu;
56 56
57 LASSERT(tsi->tsi_is_client); 57 LASSERT(tsi->tsi_is_client);
58 58
@@ -67,21 +67,21 @@ brw_client_fini(sfw_test_instance_t *tsi)
67} 67}
68 68
69static int 69static int
70brw_client_init(sfw_test_instance_t *tsi) 70brw_client_init(struct sfw_test_instance *tsi)
71{ 71{
72 sfw_session_t *sn = tsi->tsi_batch->bat_session; 72 struct sfw_session *sn = tsi->tsi_batch->bat_session;
73 int flags; 73 int flags;
74 int npg; 74 int npg;
75 int len; 75 int len;
76 int opc; 76 int opc;
77 srpc_bulk_t *bulk; 77 struct srpc_bulk *bulk;
78 sfw_test_unit_t *tsu; 78 struct sfw_test_unit *tsu;
79 79
80 LASSERT(sn); 80 LASSERT(sn);
81 LASSERT(tsi->tsi_is_client); 81 LASSERT(tsi->tsi_is_client);
82 82
83 if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { 83 if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
84 test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0; 84 struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0;
85 85
86 opc = breq->blk_opc; 86 opc = breq->blk_opc;
87 flags = breq->blk_flags; 87 flags = breq->blk_flags;
@@ -91,9 +91,8 @@ brw_client_init(sfw_test_instance_t *tsi)
91 * but we have to keep it for compatibility 91 * but we have to keep it for compatibility
92 */ 92 */
93 len = npg * PAGE_SIZE; 93 len = npg * PAGE_SIZE;
94
95 } else { 94 } else {
96 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; 95 struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
97 96
98 /* 97 /*
99 * I should never get this step if it's unknown feature 98 * I should never get this step if it's unknown feature
@@ -225,7 +224,7 @@ bad_data:
225} 224}
226 225
227static void 226static void
228brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) 227brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
229{ 228{
230 int i; 229 int i;
231 struct page *pg; 230 struct page *pg;
@@ -237,7 +236,7 @@ brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
237} 236}
238 237
239static int 238static int
240brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) 239brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
241{ 240{
242 int i; 241 int i;
243 struct page *pg; 242 struct page *pg;
@@ -255,14 +254,14 @@ brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
255} 254}
256 255
257static int 256static int
258brw_client_prep_rpc(sfw_test_unit_t *tsu, 257brw_client_prep_rpc(struct sfw_test_unit *tsu,
259 lnet_process_id_t dest, srpc_client_rpc_t **rpcpp) 258 lnet_process_id_t dest, struct srpc_client_rpc **rpcpp)
260{ 259{
261 srpc_bulk_t *bulk = tsu->tsu_private; 260 struct srpc_bulk *bulk = tsu->tsu_private;
262 sfw_test_instance_t *tsi = tsu->tsu_instance; 261 struct sfw_test_instance *tsi = tsu->tsu_instance;
263 sfw_session_t *sn = tsi->tsi_batch->bat_session; 262 struct sfw_session *sn = tsi->tsi_batch->bat_session;
264 srpc_client_rpc_t *rpc; 263 struct srpc_client_rpc *rpc;
265 srpc_brw_reqst_t *req; 264 struct srpc_brw_reqst *req;
266 int flags; 265 int flags;
267 int npg; 266 int npg;
268 int len; 267 int len;
@@ -273,15 +272,14 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
273 LASSERT(bulk); 272 LASSERT(bulk);
274 273
275 if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { 274 if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
276 test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0; 275 struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0;
277 276
278 opc = breq->blk_opc; 277 opc = breq->blk_opc;
279 flags = breq->blk_flags; 278 flags = breq->blk_flags;
280 npg = breq->blk_npg; 279 npg = breq->blk_npg;
281 len = npg * PAGE_SIZE; 280 len = npg * PAGE_SIZE;
282
283 } else { 281 } else {
284 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; 282 struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
285 283
286 /* 284 /*
287 * I should never get this step if it's unknown feature 285 * I should never get this step if it's unknown feature
@@ -299,7 +297,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
299 if (rc) 297 if (rc)
300 return rc; 298 return rc;
301 299
302 memcpy(&rpc->crpc_bulk, bulk, offsetof(srpc_bulk_t, bk_iovs[npg])); 300 memcpy(&rpc->crpc_bulk, bulk, offsetof(struct srpc_bulk, bk_iovs[npg]));
303 if (opc == LST_BRW_WRITE) 301 if (opc == LST_BRW_WRITE)
304 brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_MAGIC); 302 brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_MAGIC);
305 else 303 else
@@ -315,21 +313,21 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
315} 313}
316 314
317static void 315static void
318brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) 316brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
319{ 317{
320 __u64 magic = BRW_MAGIC; 318 __u64 magic = BRW_MAGIC;
321 sfw_test_instance_t *tsi = tsu->tsu_instance; 319 struct sfw_test_instance *tsi = tsu->tsu_instance;
322 sfw_session_t *sn = tsi->tsi_batch->bat_session; 320 struct sfw_session *sn = tsi->tsi_batch->bat_session;
323 srpc_msg_t *msg = &rpc->crpc_replymsg; 321 struct srpc_msg *msg = &rpc->crpc_replymsg;
324 srpc_brw_reply_t *reply = &msg->msg_body.brw_reply; 322 struct srpc_brw_reply *reply = &msg->msg_body.brw_reply;
325 srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst; 323 struct srpc_brw_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
326 324
327 LASSERT(sn); 325 LASSERT(sn);
328 326
329 if (rpc->crpc_status) { 327 if (rpc->crpc_status) {
330 CERROR("BRW RPC to %s failed with %d\n", 328 CERROR("BRW RPC to %s failed with %d\n",
331 libcfs_id2str(rpc->crpc_dest), rpc->crpc_status); 329 libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
332 if (!tsi->tsi_stopping) /* rpc could have been aborted */ 330 if (!tsi->tsi_stopping) /* rpc could have been aborted */
333 atomic_inc(&sn->sn_brw_errors); 331 atomic_inc(&sn->sn_brw_errors);
334 return; 332 return;
335 } 333 }
@@ -363,7 +361,7 @@ brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
363static void 361static void
364brw_server_rpc_done(struct srpc_server_rpc *rpc) 362brw_server_rpc_done(struct srpc_server_rpc *rpc)
365{ 363{
366 srpc_bulk_t *blk = rpc->srpc_bulk; 364 struct srpc_bulk *blk = rpc->srpc_bulk;
367 365
368 if (!blk) 366 if (!blk)
369 return; 367 return;
@@ -384,9 +382,9 @@ static int
384brw_bulk_ready(struct srpc_server_rpc *rpc, int status) 382brw_bulk_ready(struct srpc_server_rpc *rpc, int status)
385{ 383{
386 __u64 magic = BRW_MAGIC; 384 __u64 magic = BRW_MAGIC;
387 srpc_brw_reply_t *reply = &rpc->srpc_replymsg.msg_body.brw_reply; 385 struct srpc_brw_reply *reply = &rpc->srpc_replymsg.msg_body.brw_reply;
388 srpc_brw_reqst_t *reqst; 386 struct srpc_brw_reqst *reqst;
389 srpc_msg_t *reqstmsg; 387 struct srpc_msg *reqstmsg;
390 388
391 LASSERT(rpc->srpc_bulk); 389 LASSERT(rpc->srpc_bulk);
392 LASSERT(rpc->srpc_reqstbuf); 390 LASSERT(rpc->srpc_reqstbuf);
@@ -420,10 +418,10 @@ static int
420brw_server_handle(struct srpc_server_rpc *rpc) 418brw_server_handle(struct srpc_server_rpc *rpc)
421{ 419{
422 struct srpc_service *sv = rpc->srpc_scd->scd_svc; 420 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
423 srpc_msg_t *replymsg = &rpc->srpc_replymsg; 421 struct srpc_msg *replymsg = &rpc->srpc_replymsg;
424 srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; 422 struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
425 srpc_brw_reply_t *reply = &replymsg->msg_body.brw_reply; 423 struct srpc_brw_reply *reply = &replymsg->msg_body.brw_reply;
426 srpc_brw_reqst_t *reqst = &reqstmsg->msg_body.brw_reqst; 424 struct srpc_brw_reqst *reqst = &reqstmsg->msg_body.brw_reqst;
427 int npg; 425 int npg;
428 int rc; 426 int rc;
429 427
@@ -459,7 +457,7 @@ brw_server_handle(struct srpc_server_rpc *rpc)
459 457
460 if (!(reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN)) { 458 if (!(reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
461 /* compat with old version */ 459 /* compat with old version */
462 if (reqst->brw_len & ~CFS_PAGE_MASK) { 460 if (reqst->brw_len & ~PAGE_MASK) {
463 reply->brw_status = EINVAL; 461 reply->brw_status = EINVAL;
464 return 0; 462 return 0;
465 } 463 }
@@ -490,7 +488,8 @@ brw_server_handle(struct srpc_server_rpc *rpc)
490 return 0; 488 return 0;
491} 489}
492 490
493sfw_test_client_ops_t brw_test_client; 491struct sfw_test_client_ops brw_test_client;
492
494void brw_init_test_client(void) 493void brw_init_test_client(void)
495{ 494{
496 brw_test_client.tso_init = brw_client_init; 495 brw_test_client.tso_init = brw_client_init;
@@ -499,7 +498,8 @@ void brw_init_test_client(void)
499 brw_test_client.tso_done_rpc = brw_client_done_rpc; 498 brw_test_client.tso_done_rpc = brw_client_done_rpc;
500}; 499};
501 500
502srpc_service_t brw_test_service; 501struct srpc_service brw_test_service;
502
503void brw_init_test_service(void) 503void brw_init_test_service(void)
504{ 504{
505 brw_test_service.sv_id = SRPC_SERVICE_BRW; 505 brw_test_service.sv_id = SRPC_SERVICE_BRW;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 79ee6c0bf7c1..408c614b6ca3 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -51,9 +51,9 @@ lst_session_new_ioctl(lstio_session_new_args_t *args)
51 char *name; 51 char *name;
52 int rc; 52 int rc;
53 53
54 if (!args->lstio_ses_idp || /* address for output sid */ 54 if (!args->lstio_ses_idp || /* address for output sid */
55 !args->lstio_ses_key || /* no key is specified */ 55 !args->lstio_ses_key || /* no key is specified */
56 !args->lstio_ses_namep || /* session name */ 56 !args->lstio_ses_namep || /* session name */
57 args->lstio_ses_nmlen <= 0 || 57 args->lstio_ses_nmlen <= 0 ||
58 args->lstio_ses_nmlen > LST_NAME_SIZE) 58 args->lstio_ses_nmlen > LST_NAME_SIZE)
59 return -EINVAL; 59 return -EINVAL;
@@ -95,11 +95,11 @@ lst_session_info_ioctl(lstio_session_info_args_t *args)
95{ 95{
96 /* no checking of key */ 96 /* no checking of key */
97 97
98 if (!args->lstio_ses_idp || /* address for output sid */ 98 if (!args->lstio_ses_idp || /* address for output sid */
99 !args->lstio_ses_keyp || /* address for output key */ 99 !args->lstio_ses_keyp || /* address for output key */
100 !args->lstio_ses_featp || /* address for output features */ 100 !args->lstio_ses_featp || /* address for output features */
101 !args->lstio_ses_ndinfo || /* address for output ndinfo */ 101 !args->lstio_ses_ndinfo || /* address for output ndinfo */
102 !args->lstio_ses_namep || /* address for output name */ 102 !args->lstio_ses_namep || /* address for output name */
103 args->lstio_ses_nmlen <= 0 || 103 args->lstio_ses_nmlen <= 0 ||
104 args->lstio_ses_nmlen > LST_NAME_SIZE) 104 args->lstio_ses_nmlen > LST_NAME_SIZE)
105 return -EINVAL; 105 return -EINVAL;
@@ -125,7 +125,7 @@ lst_debug_ioctl(lstio_debug_args_t *args)
125 if (!args->lstio_dbg_resultp) 125 if (!args->lstio_dbg_resultp)
126 return -EINVAL; 126 return -EINVAL;
127 127
128 if (args->lstio_dbg_namep && /* name of batch/group */ 128 if (args->lstio_dbg_namep && /* name of batch/group */
129 (args->lstio_dbg_nmlen <= 0 || 129 (args->lstio_dbg_nmlen <= 0 ||
130 args->lstio_dbg_nmlen > LST_NAME_SIZE)) 130 args->lstio_dbg_nmlen > LST_NAME_SIZE))
131 return -EINVAL; 131 return -EINVAL;
@@ -326,7 +326,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
326 if (args->lstio_grp_key != console_session.ses_key) 326 if (args->lstio_grp_key != console_session.ses_key)
327 return -EACCES; 327 return -EACCES;
328 328
329 if (!args->lstio_grp_idsp || /* array of ids */ 329 if (!args->lstio_grp_idsp || /* array of ids */
330 args->lstio_grp_count <= 0 || 330 args->lstio_grp_count <= 0 ||
331 !args->lstio_grp_resultp || 331 !args->lstio_grp_resultp ||
332 !args->lstio_grp_featp || 332 !args->lstio_grp_featp ||
@@ -394,13 +394,13 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
394 args->lstio_grp_nmlen > LST_NAME_SIZE) 394 args->lstio_grp_nmlen > LST_NAME_SIZE)
395 return -EINVAL; 395 return -EINVAL;
396 396
397 if (!args->lstio_grp_entp && /* output: group entry */ 397 if (!args->lstio_grp_entp && /* output: group entry */
398 !args->lstio_grp_dentsp) /* output: node entry */ 398 !args->lstio_grp_dentsp) /* output: node entry */
399 return -EINVAL; 399 return -EINVAL;
400 400
401 if (args->lstio_grp_dentsp) { /* have node entry */ 401 if (args->lstio_grp_dentsp) { /* have node entry */
402 if (!args->lstio_grp_idxp || /* node index */ 402 if (!args->lstio_grp_idxp || /* node index */
403 !args->lstio_grp_ndentp) /* # of node entry */ 403 !args->lstio_grp_ndentp) /* # of node entry */
404 return -EINVAL; 404 return -EINVAL;
405 405
406 if (copy_from_user(&ndent, args->lstio_grp_ndentp, 406 if (copy_from_user(&ndent, args->lstio_grp_ndentp,
@@ -612,18 +612,18 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
612 if (args->lstio_bat_key != console_session.ses_key) 612 if (args->lstio_bat_key != console_session.ses_key)
613 return -EACCES; 613 return -EACCES;
614 614
615 if (!args->lstio_bat_namep || /* batch name */ 615 if (!args->lstio_bat_namep || /* batch name */
616 args->lstio_bat_nmlen <= 0 || 616 args->lstio_bat_nmlen <= 0 ||
617 args->lstio_bat_nmlen > LST_NAME_SIZE) 617 args->lstio_bat_nmlen > LST_NAME_SIZE)
618 return -EINVAL; 618 return -EINVAL;
619 619
620 if (!args->lstio_bat_entp && /* output: batch entry */ 620 if (!args->lstio_bat_entp && /* output: batch entry */
621 !args->lstio_bat_dentsp) /* output: node entry */ 621 !args->lstio_bat_dentsp) /* output: node entry */
622 return -EINVAL; 622 return -EINVAL;
623 623
624 if (args->lstio_bat_dentsp) { /* have node entry */ 624 if (args->lstio_bat_dentsp) { /* have node entry */
625 if (!args->lstio_bat_idxp || /* node index */ 625 if (!args->lstio_bat_idxp || /* node index */
626 !args->lstio_bat_ndentp) /* # of node entry */ 626 !args->lstio_bat_ndentp) /* # of node entry */
627 return -EINVAL; 627 return -EINVAL;
628 628
629 if (copy_from_user(&index, args->lstio_bat_idxp, 629 if (copy_from_user(&index, args->lstio_bat_idxp,
@@ -722,18 +722,18 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
722 722
723 if (!args->lstio_tes_resultp || 723 if (!args->lstio_tes_resultp ||
724 !args->lstio_tes_retp || 724 !args->lstio_tes_retp ||
725 !args->lstio_tes_bat_name || /* no specified batch */ 725 !args->lstio_tes_bat_name || /* no specified batch */
726 args->lstio_tes_bat_nmlen <= 0 || 726 args->lstio_tes_bat_nmlen <= 0 ||
727 args->lstio_tes_bat_nmlen > LST_NAME_SIZE || 727 args->lstio_tes_bat_nmlen > LST_NAME_SIZE ||
728 !args->lstio_tes_sgrp_name || /* no source group */ 728 !args->lstio_tes_sgrp_name || /* no source group */
729 args->lstio_tes_sgrp_nmlen <= 0 || 729 args->lstio_tes_sgrp_nmlen <= 0 ||
730 args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE || 730 args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE ||
731 !args->lstio_tes_dgrp_name || /* no target group */ 731 !args->lstio_tes_dgrp_name || /* no target group */
732 args->lstio_tes_dgrp_nmlen <= 0 || 732 args->lstio_tes_dgrp_nmlen <= 0 ||
733 args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE) 733 args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE)
734 return -EINVAL; 734 return -EINVAL;
735 735
736 if (!args->lstio_tes_loop || /* negative is infinite */ 736 if (!args->lstio_tes_loop || /* negative is infinite */
737 args->lstio_tes_concur <= 0 || 737 args->lstio_tes_concur <= 0 ||
738 args->lstio_tes_dist <= 0 || 738 args->lstio_tes_dist <= 0 ||
739 args->lstio_tes_span <= 0) 739 args->lstio_tes_span <= 0)
@@ -743,7 +743,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
743 if (args->lstio_tes_param && 743 if (args->lstio_tes_param &&
744 (args->lstio_tes_param_len <= 0 || 744 (args->lstio_tes_param_len <= 0 ||
745 args->lstio_tes_param_len > 745 args->lstio_tes_param_len >
746 PAGE_SIZE - sizeof(lstcon_test_t))) 746 PAGE_SIZE - sizeof(struct lstcon_test)))
747 return -EINVAL; 747 return -EINVAL;
748 748
749 LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1); 749 LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 35a227d0c657..6f687581117d 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -46,13 +46,13 @@
46#include "conrpc.h" 46#include "conrpc.h"
47#include "console.h" 47#include "console.h"
48 48
49void lstcon_rpc_stat_reply(lstcon_rpc_trans_t *, srpc_msg_t *, 49void lstcon_rpc_stat_reply(struct lstcon_rpc_trans *, struct srpc_msg *,
50 lstcon_node_t *, lstcon_trans_stat_t *); 50 struct lstcon_node *, lstcon_trans_stat_t *);
51 51
52static void 52static void
53lstcon_rpc_done(srpc_client_rpc_t *rpc) 53lstcon_rpc_done(struct srpc_client_rpc *rpc)
54{ 54{
55 lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv; 55 struct lstcon_rpc *crpc = (struct lstcon_rpc *)rpc->crpc_priv;
56 56
57 LASSERT(crpc && rpc == crpc->crp_rpc); 57 LASSERT(crpc && rpc == crpc->crp_rpc);
58 LASSERT(crpc->crp_posted && !crpc->crp_finished); 58 LASSERT(crpc->crp_posted && !crpc->crp_finished);
@@ -90,8 +90,8 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
90} 90}
91 91
92static int 92static int
93lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats, 93lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats,
94 int bulk_npg, int bulk_len, int embedded, lstcon_rpc_t *crpc) 94 int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc)
95{ 95{
96 crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service, 96 crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
97 feats, bulk_npg, bulk_len, 97 feats, bulk_npg, bulk_len,
@@ -115,16 +115,16 @@ lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
115} 115}
116 116
117static int 117static int
118lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats, 118lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned feats,
119 int bulk_npg, int bulk_len, lstcon_rpc_t **crpcpp) 119 int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp)
120{ 120{
121 lstcon_rpc_t *crpc = NULL; 121 struct lstcon_rpc *crpc = NULL;
122 int rc; 122 int rc;
123 123
124 spin_lock(&console_session.ses_rpc_lock); 124 spin_lock(&console_session.ses_rpc_lock);
125 125
126 crpc = list_first_entry_or_null(&console_session.ses_rpc_freelist, 126 crpc = list_first_entry_or_null(&console_session.ses_rpc_freelist,
127 lstcon_rpc_t, crp_link); 127 struct lstcon_rpc, crp_link);
128 if (crpc) 128 if (crpc)
129 list_del_init(&crpc->crp_link); 129 list_del_init(&crpc->crp_link);
130 130
@@ -148,9 +148,9 @@ lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
148} 148}
149 149
150void 150void
151lstcon_rpc_put(lstcon_rpc_t *crpc) 151lstcon_rpc_put(struct lstcon_rpc *crpc)
152{ 152{
153 srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk; 153 struct srpc_bulk *bulk = &crpc->crp_rpc->crpc_bulk;
154 int i; 154 int i;
155 155
156 LASSERT(list_empty(&crpc->crp_link)); 156 LASSERT(list_empty(&crpc->crp_link));
@@ -183,9 +183,9 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
183} 183}
184 184
185static void 185static void
186lstcon_rpc_post(lstcon_rpc_t *crpc) 186lstcon_rpc_post(struct lstcon_rpc *crpc)
187{ 187{
188 lstcon_rpc_trans_t *trans = crpc->crp_trans; 188 struct lstcon_rpc_trans *trans = crpc->crp_trans;
189 189
190 LASSERT(trans); 190 LASSERT(trans);
191 191
@@ -236,9 +236,9 @@ lstcon_rpc_trans_name(int transop)
236 236
237int 237int
238lstcon_rpc_trans_prep(struct list_head *translist, int transop, 238lstcon_rpc_trans_prep(struct list_head *translist, int transop,
239 lstcon_rpc_trans_t **transpp) 239 struct lstcon_rpc_trans **transpp)
240{ 240{
241 lstcon_rpc_trans_t *trans; 241 struct lstcon_rpc_trans *trans;
242 242
243 if (translist) { 243 if (translist) {
244 list_for_each_entry(trans, translist, tas_link) { 244 list_for_each_entry(trans, translist, tas_link) {
@@ -278,26 +278,26 @@ lstcon_rpc_trans_prep(struct list_head *translist, int transop,
278} 278}
279 279
280void 280void
281lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *crpc) 281lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *crpc)
282{ 282{
283 list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list); 283 list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
284 crpc->crp_trans = trans; 284 crpc->crp_trans = trans;
285} 285}
286 286
287void 287void
288lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error) 288lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error)
289{ 289{
290 srpc_client_rpc_t *rpc; 290 struct srpc_client_rpc *rpc;
291 lstcon_rpc_t *crpc; 291 struct lstcon_rpc *crpc;
292 lstcon_node_t *nd; 292 struct lstcon_node *nd;
293 293
294 list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) { 294 list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
295 rpc = crpc->crp_rpc; 295 rpc = crpc->crp_rpc;
296 296
297 spin_lock(&rpc->crpc_lock); 297 spin_lock(&rpc->crpc_lock);
298 298
299 if (!crpc->crp_posted || /* not posted */ 299 if (!crpc->crp_posted || /* not posted */
300 crpc->crp_stamp) { /* rpc done or aborted already */ 300 crpc->crp_stamp) { /* rpc done or aborted already */
301 if (!crpc->crp_stamp) { 301 if (!crpc->crp_stamp) {
302 crpc->crp_stamp = cfs_time_current(); 302 crpc->crp_stamp = cfs_time_current();
303 crpc->crp_status = -EINTR; 303 crpc->crp_status = -EINTR;
@@ -326,7 +326,7 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
326} 326}
327 327
328static int 328static int
329lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans) 329lstcon_rpc_trans_check(struct lstcon_rpc_trans *trans)
330{ 330{
331 if (console_session.ses_shutdown && 331 if (console_session.ses_shutdown &&
332 !list_empty(&trans->tas_olink)) /* Not an end session RPC */ 332 !list_empty(&trans->tas_olink)) /* Not an end session RPC */
@@ -336,9 +336,9 @@ lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
336} 336}
337 337
338int 338int
339lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout) 339lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout)
340{ 340{
341 lstcon_rpc_t *crpc; 341 struct lstcon_rpc *crpc;
342 int rc; 342 int rc;
343 343
344 if (list_empty(&trans->tas_rpcs_list)) 344 if (list_empty(&trans->tas_rpcs_list))
@@ -386,11 +386,11 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
386} 386}
387 387
388static int 388static int
389lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp) 389lstcon_rpc_get_reply(struct lstcon_rpc *crpc, struct srpc_msg **msgpp)
390{ 390{
391 lstcon_node_t *nd = crpc->crp_node; 391 struct lstcon_node *nd = crpc->crp_node;
392 srpc_client_rpc_t *rpc = crpc->crp_rpc; 392 struct srpc_client_rpc *rpc = crpc->crp_rpc;
393 srpc_generic_reply_t *rep; 393 struct srpc_generic_reply *rep;
394 394
395 LASSERT(nd && rpc); 395 LASSERT(nd && rpc);
396 LASSERT(crpc->crp_stamp); 396 LASSERT(crpc->crp_stamp);
@@ -423,10 +423,10 @@ lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
423} 423}
424 424
425void 425void
426lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat) 426lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans, lstcon_trans_stat_t *stat)
427{ 427{
428 lstcon_rpc_t *crpc; 428 struct lstcon_rpc *crpc;
429 srpc_msg_t *rep; 429 struct srpc_msg *rep;
430 int error; 430 int error;
431 431
432 LASSERT(stat); 432 LASSERT(stat);
@@ -466,17 +466,17 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
466} 466}
467 467
468int 468int
469lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, 469lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
470 struct list_head __user *head_up, 470 struct list_head __user *head_up,
471 lstcon_rpc_readent_func_t readent) 471 lstcon_rpc_readent_func_t readent)
472{ 472{
473 struct list_head tmp; 473 struct list_head tmp;
474 struct list_head __user *next; 474 struct list_head __user *next;
475 lstcon_rpc_ent_t *ent; 475 lstcon_rpc_ent_t *ent;
476 srpc_generic_reply_t *rep; 476 struct srpc_generic_reply *rep;
477 lstcon_rpc_t *crpc; 477 struct lstcon_rpc *crpc;
478 srpc_msg_t *msg; 478 struct srpc_msg *msg;
479 lstcon_node_t *nd; 479 struct lstcon_node *nd;
480 long dur; 480 long dur;
481 struct timeval tv; 481 struct timeval tv;
482 int error; 482 int error;
@@ -520,7 +520,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
520 continue; 520 continue;
521 521
522 /* RPC is done */ 522 /* RPC is done */
523 rep = (srpc_generic_reply_t *)&msg->msg_body.reply; 523 rep = (struct srpc_generic_reply *)&msg->msg_body.reply;
524 524
525 if (copy_to_user(&ent->rpe_sid, &rep->sid, sizeof(lst_sid_t)) || 525 if (copy_to_user(&ent->rpe_sid, &rep->sid, sizeof(lst_sid_t)) ||
526 copy_to_user(&ent->rpe_fwk_errno, &rep->status, 526 copy_to_user(&ent->rpe_fwk_errno, &rep->status,
@@ -531,7 +531,6 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
531 continue; 531 continue;
532 532
533 error = readent(trans->tas_opc, msg, ent); 533 error = readent(trans->tas_opc, msg, ent);
534
535 if (error) 534 if (error)
536 return error; 535 return error;
537 } 536 }
@@ -540,11 +539,11 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
540} 539}
541 540
542void 541void
543lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans) 542lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans)
544{ 543{
545 srpc_client_rpc_t *rpc; 544 struct srpc_client_rpc *rpc;
546 lstcon_rpc_t *crpc; 545 struct lstcon_rpc *crpc;
547 lstcon_rpc_t *tmp; 546 struct lstcon_rpc *tmp;
548 int count = 0; 547 int count = 0;
549 548
550 list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) { 549 list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) {
@@ -563,10 +562,10 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
563 } 562 }
564 563
565 /* 564 /*
566 * rpcs can be still not callbacked (even LNetMDUnlink is called) 565 * rpcs can be still not callbacked (even LNetMDUnlink is
567 * because huge timeout for inaccessible network, don't make 566 * called) because huge timeout for inaccessible network,
568 * user wait for them, just abandon them, they will be recycled 567 * don't make user wait for them, just abandon them, they
569 * in callback 568 * will be recycled in callback
570 */ 569 */
571 LASSERT(crpc->crp_status); 570 LASSERT(crpc->crp_status);
572 571
@@ -593,11 +592,11 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
593} 592}
594 593
595int 594int
596lstcon_sesrpc_prep(lstcon_node_t *nd, int transop, 595lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
597 unsigned feats, lstcon_rpc_t **crpc) 596 unsigned feats, struct lstcon_rpc **crpc)
598{ 597{
599 srpc_mksn_reqst_t *msrq; 598 struct srpc_mksn_reqst *msrq;
600 srpc_rmsn_reqst_t *rsrq; 599 struct srpc_rmsn_reqst *rsrq;
601 int rc; 600 int rc;
602 601
603 switch (transop) { 602 switch (transop) {
@@ -632,9 +631,9 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
632} 631}
633 632
634int 633int
635lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc) 634lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
636{ 635{
637 srpc_debug_reqst_t *drq; 636 struct srpc_debug_reqst *drq;
638 int rc; 637 int rc;
639 638
640 rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc); 639 rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc);
@@ -650,11 +649,11 @@ lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
650} 649}
651 650
652int 651int
653lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, 652lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
654 lstcon_tsb_hdr_t *tsb, lstcon_rpc_t **crpc) 653 struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc)
655{ 654{
656 lstcon_batch_t *batch; 655 struct lstcon_batch *batch;
657 srpc_batch_reqst_t *brq; 656 struct srpc_batch_reqst *brq;
658 int rc; 657 int rc;
659 658
660 rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc); 659 rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc);
@@ -676,16 +675,16 @@ lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
676 675
677 LASSERT(!tsb->tsb_index); 676 LASSERT(!tsb->tsb_index);
678 677
679 batch = (lstcon_batch_t *)tsb; 678 batch = (struct lstcon_batch *)tsb;
680 brq->bar_arg = batch->bat_arg; 679 brq->bar_arg = batch->bat_arg;
681 680
682 return 0; 681 return 0;
683} 682}
684 683
685int 684int
686lstcon_statrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc) 685lstcon_statrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
687{ 686{
688 srpc_stat_reqst_t *srq; 687 struct srpc_stat_reqst *srq;
689 int rc; 688 int rc;
690 689
691 rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc); 690 rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc);
@@ -716,12 +715,12 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
716} 715}
717 716
718static int 717static int
719lstcon_dstnodes_prep(lstcon_group_t *grp, int idx, 718lstcon_dstnodes_prep(struct lstcon_group *grp, int idx,
720 int dist, int span, int nkiov, lnet_kiov_t *kiov) 719 int dist, int span, int nkiov, lnet_kiov_t *kiov)
721{ 720{
722 lnet_process_id_packed_t *pid; 721 lnet_process_id_packed_t *pid;
723 lstcon_ndlink_t *ndl; 722 struct lstcon_ndlink *ndl;
724 lstcon_node_t *nd; 723 struct lstcon_node *nd;
725 int start; 724 int start;
726 int end; 725 int end;
727 int i = 0; 726 int i = 0;
@@ -770,9 +769,9 @@ lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
770} 769}
771 770
772static int 771static int
773lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req) 772lstcon_pingrpc_prep(lst_test_ping_param_t *param, struct srpc_test_reqst *req)
774{ 773{
775 test_ping_req_t *prq = &req->tsr_u.ping; 774 struct test_ping_req *prq = &req->tsr_u.ping;
776 775
777 prq->png_size = param->png_size; 776 prq->png_size = param->png_size;
778 prq->png_flags = param->png_flags; 777 prq->png_flags = param->png_flags;
@@ -781,9 +780,9 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
781} 780}
782 781
783static int 782static int
784lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req) 783lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
785{ 784{
786 test_bulk_req_t *brq = &req->tsr_u.bulk_v0; 785 struct test_bulk_req *brq = &req->tsr_u.bulk_v0;
787 786
788 brq->blk_opc = param->blk_opc; 787 brq->blk_opc = param->blk_opc;
789 brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) / 788 brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
@@ -794,9 +793,9 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
794} 793}
795 794
796static int 795static int
797lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req) 796lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
798{ 797{
799 test_bulk_req_v1_t *brq = &req->tsr_u.bulk_v1; 798 struct test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1;
800 799
801 brq->blk_opc = param->blk_opc; 800 brq->blk_opc = param->blk_opc;
802 brq->blk_flags = param->blk_flags; 801 brq->blk_flags = param->blk_flags;
@@ -807,13 +806,13 @@ lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
807} 806}
808 807
809int 808int
810lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, 809lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
811 lstcon_test_t *test, lstcon_rpc_t **crpc) 810 struct lstcon_test *test, struct lstcon_rpc **crpc)
812{ 811{
813 lstcon_group_t *sgrp = test->tes_src_grp; 812 struct lstcon_group *sgrp = test->tes_src_grp;
814 lstcon_group_t *dgrp = test->tes_dst_grp; 813 struct lstcon_group *dgrp = test->tes_dst_grp;
815 srpc_test_reqst_t *trq; 814 struct srpc_test_reqst *trq;
816 srpc_bulk_t *bulk; 815 struct srpc_bulk *bulk;
817 int i; 816 int i;
818 int npg = 0; 817 int npg = 0;
819 int nob = 0; 818 int nob = 0;
@@ -841,7 +840,6 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
841 840
842 trq->tsr_ndest = 0; 841 trq->tsr_ndest = 0;
843 trq->tsr_loop = nmax * test->tes_dist * test->tes_concur; 842 trq->tsr_loop = nmax * test->tes_dist * test->tes_concur;
844
845 } else { 843 } else {
846 bulk = &(*crpc)->crp_rpc->crpc_bulk; 844 bulk = &(*crpc)->crp_rpc->crpc_bulk;
847 845
@@ -917,10 +915,10 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
917} 915}
918 916
919static int 917static int
920lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans, 918lstcon_sesnew_stat_reply(struct lstcon_rpc_trans *trans,
921 lstcon_node_t *nd, srpc_msg_t *reply) 919 struct lstcon_node *nd, struct srpc_msg *reply)
922{ 920{
923 srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply; 921 struct srpc_mksn_reply *mksn_rep = &reply->msg_body.mksn_reply;
924 int status = mksn_rep->mksn_status; 922 int status = mksn_rep->mksn_status;
925 923
926 if (!status && 924 if (!status &&
@@ -940,7 +938,7 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
940 938
941 if (!trans->tas_feats_updated) { 939 if (!trans->tas_feats_updated) {
942 spin_lock(&console_session.ses_rpc_lock); 940 spin_lock(&console_session.ses_rpc_lock);
943 if (!trans->tas_feats_updated) { /* recheck with lock */ 941 if (!trans->tas_feats_updated) { /* recheck with lock */
944 trans->tas_feats_updated = 1; 942 trans->tas_feats_updated = 1;
945 trans->tas_features = reply->msg_ses_feats; 943 trans->tas_features = reply->msg_ses_feats;
946 } 944 }
@@ -964,14 +962,14 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
964} 962}
965 963
966void 964void
967lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg, 965lstcon_rpc_stat_reply(struct lstcon_rpc_trans *trans, struct srpc_msg *msg,
968 lstcon_node_t *nd, lstcon_trans_stat_t *stat) 966 struct lstcon_node *nd, lstcon_trans_stat_t *stat)
969{ 967{
970 srpc_rmsn_reply_t *rmsn_rep; 968 struct srpc_rmsn_reply *rmsn_rep;
971 srpc_debug_reply_t *dbg_rep; 969 struct srpc_debug_reply *dbg_rep;
972 srpc_batch_reply_t *bat_rep; 970 struct srpc_batch_reply *bat_rep;
973 srpc_test_reply_t *test_rep; 971 struct srpc_test_reply *test_rep;
974 srpc_stat_reply_t *stat_rep; 972 struct srpc_stat_reply *stat_rep;
975 int rc = 0; 973 int rc = 0;
976 974
977 switch (trans->tas_opc) { 975 switch (trans->tas_opc) {
@@ -1085,12 +1083,12 @@ int
1085lstcon_rpc_trans_ndlist(struct list_head *ndlist, 1083lstcon_rpc_trans_ndlist(struct list_head *ndlist,
1086 struct list_head *translist, int transop, 1084 struct list_head *translist, int transop,
1087 void *arg, lstcon_rpc_cond_func_t condition, 1085 void *arg, lstcon_rpc_cond_func_t condition,
1088 lstcon_rpc_trans_t **transpp) 1086 struct lstcon_rpc_trans **transpp)
1089{ 1087{
1090 lstcon_rpc_trans_t *trans; 1088 struct lstcon_rpc_trans *trans;
1091 lstcon_ndlink_t *ndl; 1089 struct lstcon_ndlink *ndl;
1092 lstcon_node_t *nd; 1090 struct lstcon_node *nd;
1093 lstcon_rpc_t *rpc; 1091 struct lstcon_rpc *rpc;
1094 unsigned feats; 1092 unsigned feats;
1095 int rc; 1093 int rc;
1096 1094
@@ -1130,14 +1128,16 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
1130 case LST_TRANS_TSBCLIADD: 1128 case LST_TRANS_TSBCLIADD:
1131 case LST_TRANS_TSBSRVADD: 1129 case LST_TRANS_TSBSRVADD:
1132 rc = lstcon_testrpc_prep(nd, transop, feats, 1130 rc = lstcon_testrpc_prep(nd, transop, feats,
1133 (lstcon_test_t *)arg, &rpc); 1131 (struct lstcon_test *)arg,
1132 &rpc);
1134 break; 1133 break;
1135 case LST_TRANS_TSBRUN: 1134 case LST_TRANS_TSBRUN:
1136 case LST_TRANS_TSBSTOP: 1135 case LST_TRANS_TSBSTOP:
1137 case LST_TRANS_TSBCLIQRY: 1136 case LST_TRANS_TSBCLIQRY:
1138 case LST_TRANS_TSBSRVQRY: 1137 case LST_TRANS_TSBSRVQRY:
1139 rc = lstcon_batrpc_prep(nd, transop, feats, 1138 rc = lstcon_batrpc_prep(nd, transop, feats,
1140 (lstcon_tsb_hdr_t *)arg, &rpc); 1139 (struct lstcon_tsb_hdr *)arg,
1140 &rpc);
1141 break; 1141 break;
1142 case LST_TRANS_STATQRY: 1142 case LST_TRANS_STATQRY:
1143 rc = lstcon_statrpc_prep(nd, feats, &rpc); 1143 rc = lstcon_statrpc_prep(nd, feats, &rpc);
@@ -1170,17 +1170,18 @@ static void
1170lstcon_rpc_pinger(void *arg) 1170lstcon_rpc_pinger(void *arg)
1171{ 1171{
1172 struct stt_timer *ptimer = (struct stt_timer *)arg; 1172 struct stt_timer *ptimer = (struct stt_timer *)arg;
1173 lstcon_rpc_trans_t *trans; 1173 struct lstcon_rpc_trans *trans;
1174 lstcon_rpc_t *crpc; 1174 struct lstcon_rpc *crpc;
1175 srpc_msg_t *rep; 1175 struct srpc_msg *rep;
1176 srpc_debug_reqst_t *drq; 1176 struct srpc_debug_reqst *drq;
1177 lstcon_ndlink_t *ndl; 1177 struct lstcon_ndlink *ndl;
1178 lstcon_node_t *nd; 1178 struct lstcon_node *nd;
1179 int intv; 1179 int intv;
1180 int count = 0; 1180 int count = 0;
1181 int rc; 1181 int rc;
1182 1182
1183 /* RPC pinger is a special case of transaction, 1183 /*
1184 * RPC pinger is a special case of transaction,
1184 * it's called by timer at 8 seconds interval. 1185 * it's called by timer at 8 seconds interval.
1185 */ 1186 */
1186 mutex_lock(&console_session.ses_mutex); 1187 mutex_lock(&console_session.ses_mutex);
@@ -1326,9 +1327,9 @@ lstcon_rpc_pinger_stop(void)
1326void 1327void
1327lstcon_rpc_cleanup_wait(void) 1328lstcon_rpc_cleanup_wait(void)
1328{ 1329{
1329 lstcon_rpc_trans_t *trans; 1330 struct lstcon_rpc_trans *trans;
1330 lstcon_rpc_t *crpc; 1331 struct lstcon_rpc *crpc;
1331 lstcon_rpc_t *temp; 1332 struct lstcon_rpc *temp;
1332 struct list_head *pacer; 1333 struct list_head *pacer;
1333 struct list_head zlist; 1334 struct list_head zlist;
1334 1335
@@ -1338,7 +1339,7 @@ lstcon_rpc_cleanup_wait(void)
1338 1339
1339 while (!list_empty(&console_session.ses_trans_list)) { 1340 while (!list_empty(&console_session.ses_trans_list)) {
1340 list_for_each(pacer, &console_session.ses_trans_list) { 1341 list_for_each(pacer, &console_session.ses_trans_list) {
1341 trans = list_entry(pacer, lstcon_rpc_trans_t, 1342 trans = list_entry(pacer, struct lstcon_rpc_trans,
1342 tas_link); 1343 tas_link);
1343 1344
1344 CDEBUG(D_NET, "Session closed, wakeup transaction %s\n", 1345 CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
@@ -1370,7 +1371,7 @@ lstcon_rpc_cleanup_wait(void)
1370 1371
1371 list_for_each_entry_safe(crpc, temp, &zlist, crp_link) { 1372 list_for_each_entry_safe(crpc, temp, &zlist, crp_link) {
1372 list_del(&crpc->crp_link); 1373 list_del(&crpc->crp_link);
1373 LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t)); 1374 LIBCFS_FREE(crpc, sizeof(struct lstcon_rpc));
1374 } 1375 }
1375} 1376}
1376 1377
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 3e7839dad5bb..90c3385a355c 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -63,9 +63,9 @@ struct lstcon_tsb_hdr;
63struct lstcon_test; 63struct lstcon_test;
64struct lstcon_node; 64struct lstcon_node;
65 65
66typedef struct lstcon_rpc { 66struct lstcon_rpc {
67 struct list_head crp_link; /* chain on rpc transaction */ 67 struct list_head crp_link; /* chain on rpc transaction */
68 srpc_client_rpc_t *crp_rpc; /* client rpc */ 68 struct srpc_client_rpc *crp_rpc; /* client rpc */
69 struct lstcon_node *crp_node; /* destination node */ 69 struct lstcon_node *crp_node; /* destination node */
70 struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */ 70 struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */
71 71
@@ -76,9 +76,9 @@ typedef struct lstcon_rpc {
76 unsigned int crp_embedded:1; 76 unsigned int crp_embedded:1;
77 int crp_status; /* console rpc errors */ 77 int crp_status; /* console rpc errors */
78 unsigned long crp_stamp; /* replied time stamp */ 78 unsigned long crp_stamp; /* replied time stamp */
79} lstcon_rpc_t; 79};
80 80
81typedef struct lstcon_rpc_trans { 81struct lstcon_rpc_trans {
82 struct list_head tas_olink; /* link chain on owner list */ 82 struct list_head tas_olink; /* link chain on owner list */
83 struct list_head tas_link; /* link chain on global list */ 83 struct list_head tas_link; /* link chain on global list */
84 int tas_opc; /* operation code of transaction */ 84 int tas_opc; /* operation code of transaction */
@@ -87,7 +87,7 @@ typedef struct lstcon_rpc_trans {
87 wait_queue_head_t tas_waitq; /* wait queue head */ 87 wait_queue_head_t tas_waitq; /* wait queue head */
88 atomic_t tas_remaining; /* # of un-scheduled rpcs */ 88 atomic_t tas_remaining; /* # of un-scheduled rpcs */
89 struct list_head tas_rpcs_list; /* queued requests */ 89 struct list_head tas_rpcs_list; /* queued requests */
90} lstcon_rpc_trans_t; 90};
91 91
92#define LST_TRANS_PRIVATE 0x1000 92#define LST_TRANS_PRIVATE 0x1000
93 93
@@ -106,35 +106,35 @@ typedef struct lstcon_rpc_trans {
106#define LST_TRANS_STATQRY 0x21 106#define LST_TRANS_STATQRY 0x21
107 107
108typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *); 108typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
109typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *, 109typedef int (*lstcon_rpc_readent_func_t)(int, struct srpc_msg *,
110 lstcon_rpc_ent_t __user *); 110 lstcon_rpc_ent_t __user *);
111 111
112int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop, 112int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
113 unsigned version, lstcon_rpc_t **crpc); 113 unsigned version, struct lstcon_rpc **crpc);
114int lstcon_dbgrpc_prep(struct lstcon_node *nd, 114int lstcon_dbgrpc_prep(struct lstcon_node *nd,
115 unsigned version, lstcon_rpc_t **crpc); 115 unsigned version, struct lstcon_rpc **crpc);
116int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version, 116int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
117 struct lstcon_tsb_hdr *tsb, lstcon_rpc_t **crpc); 117 struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc);
118int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version, 118int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
119 struct lstcon_test *test, lstcon_rpc_t **crpc); 119 struct lstcon_test *test, struct lstcon_rpc **crpc);
120int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version, 120int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version,
121 lstcon_rpc_t **crpc); 121 struct lstcon_rpc **crpc);
122void lstcon_rpc_put(lstcon_rpc_t *crpc); 122void lstcon_rpc_put(struct lstcon_rpc *crpc);
123int lstcon_rpc_trans_prep(struct list_head *translist, 123int lstcon_rpc_trans_prep(struct list_head *translist,
124 int transop, lstcon_rpc_trans_t **transpp); 124 int transop, struct lstcon_rpc_trans **transpp);
125int lstcon_rpc_trans_ndlist(struct list_head *ndlist, 125int lstcon_rpc_trans_ndlist(struct list_head *ndlist,
126 struct list_head *translist, int transop, 126 struct list_head *translist, int transop,
127 void *arg, lstcon_rpc_cond_func_t condition, 127 void *arg, lstcon_rpc_cond_func_t condition,
128 lstcon_rpc_trans_t **transpp); 128 struct lstcon_rpc_trans **transpp);
129void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, 129void lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans,
130 lstcon_trans_stat_t *stat); 130 lstcon_trans_stat_t *stat);
131int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, 131int lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
132 struct list_head __user *head_up, 132 struct list_head __user *head_up,
133 lstcon_rpc_readent_func_t readent); 133 lstcon_rpc_readent_func_t readent);
134void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error); 134void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error);
135void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans); 135void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans);
136void lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *req); 136void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *req);
137int lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout); 137int lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout);
138int lstcon_rpc_pinger_start(void); 138int lstcon_rpc_pinger_start(void);
139void lstcon_rpc_pinger_stop(void); 139void lstcon_rpc_pinger_stop(void);
140void lstcon_rpc_cleanup_wait(void); 140void lstcon_rpc_cleanup_wait(void);
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 1a923ea3a755..a03e52d29d3f 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -61,7 +61,7 @@ do { \
61struct lstcon_session console_session; 61struct lstcon_session console_session;
62 62
63static void 63static void
64lstcon_node_get(lstcon_node_t *nd) 64lstcon_node_get(struct lstcon_node *nd)
65{ 65{
66 LASSERT(nd->nd_ref >= 1); 66 LASSERT(nd->nd_ref >= 1);
67 67
@@ -69,9 +69,9 @@ lstcon_node_get(lstcon_node_t *nd)
69} 69}
70 70
71static int 71static int
72lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create) 72lstcon_node_find(lnet_process_id_t id, struct lstcon_node **ndpp, int create)
73{ 73{
74 lstcon_ndlink_t *ndl; 74 struct lstcon_ndlink *ndl;
75 unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE; 75 unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE;
76 76
77 LASSERT(id.nid != LNET_NID_ANY); 77 LASSERT(id.nid != LNET_NID_ANY);
@@ -90,11 +90,11 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
90 if (!create) 90 if (!create)
91 return -ENOENT; 91 return -ENOENT;
92 92
93 LIBCFS_ALLOC(*ndpp, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t)); 93 LIBCFS_ALLOC(*ndpp, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
94 if (!*ndpp) 94 if (!*ndpp)
95 return -ENOMEM; 95 return -ENOMEM;
96 96
97 ndl = (lstcon_ndlink_t *)(*ndpp + 1); 97 ndl = (struct lstcon_ndlink *)(*ndpp + 1);
98 98
99 ndl->ndl_node = *ndpp; 99 ndl->ndl_node = *ndpp;
100 100
@@ -103,7 +103,7 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
103 ndl->ndl_node->nd_stamp = cfs_time_current(); 103 ndl->ndl_node->nd_stamp = cfs_time_current();
104 ndl->ndl_node->nd_state = LST_NODE_UNKNOWN; 104 ndl->ndl_node->nd_state = LST_NODE_UNKNOWN;
105 ndl->ndl_node->nd_timeout = 0; 105 ndl->ndl_node->nd_timeout = 0;
106 memset(&ndl->ndl_node->nd_ping, 0, sizeof(lstcon_rpc_t)); 106 memset(&ndl->ndl_node->nd_ping, 0, sizeof(struct lstcon_rpc));
107 107
108 /* 108 /*
109 * queued in global hash & list, no refcount is taken by 109 * queued in global hash & list, no refcount is taken by
@@ -117,16 +117,16 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
117} 117}
118 118
119static void 119static void
120lstcon_node_put(lstcon_node_t *nd) 120lstcon_node_put(struct lstcon_node *nd)
121{ 121{
122 lstcon_ndlink_t *ndl; 122 struct lstcon_ndlink *ndl;
123 123
124 LASSERT(nd->nd_ref > 0); 124 LASSERT(nd->nd_ref > 0);
125 125
126 if (--nd->nd_ref > 0) 126 if (--nd->nd_ref > 0)
127 return; 127 return;
128 128
129 ndl = (lstcon_ndlink_t *)(nd + 1); 129 ndl = (struct lstcon_ndlink *)(nd + 1);
130 130
131 LASSERT(!list_empty(&ndl->ndl_link)); 131 LASSERT(!list_empty(&ndl->ndl_link));
132 LASSERT(!list_empty(&ndl->ndl_hlink)); 132 LASSERT(!list_empty(&ndl->ndl_hlink));
@@ -135,16 +135,16 @@ lstcon_node_put(lstcon_node_t *nd)
135 list_del(&ndl->ndl_link); 135 list_del(&ndl->ndl_link);
136 list_del(&ndl->ndl_hlink); 136 list_del(&ndl->ndl_hlink);
137 137
138 LIBCFS_FREE(nd, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t)); 138 LIBCFS_FREE(nd, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
139} 139}
140 140
141static int 141static int
142lstcon_ndlink_find(struct list_head *hash, 142lstcon_ndlink_find(struct list_head *hash,
143 lnet_process_id_t id, lstcon_ndlink_t **ndlpp, int create) 143 lnet_process_id_t id, struct lstcon_ndlink **ndlpp, int create)
144{ 144{
145 unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE; 145 unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
146 lstcon_ndlink_t *ndl; 146 struct lstcon_ndlink *ndl;
147 lstcon_node_t *nd; 147 struct lstcon_node *nd;
148 int rc; 148 int rc;
149 149
150 if (id.nid == LNET_NID_ANY) 150 if (id.nid == LNET_NID_ANY)
@@ -168,7 +168,7 @@ lstcon_ndlink_find(struct list_head *hash,
168 if (rc) 168 if (rc)
169 return rc; 169 return rc;
170 170
171 LIBCFS_ALLOC(ndl, sizeof(lstcon_ndlink_t)); 171 LIBCFS_ALLOC(ndl, sizeof(struct lstcon_ndlink));
172 if (!ndl) { 172 if (!ndl) {
173 lstcon_node_put(nd); 173 lstcon_node_put(nd);
174 return -ENOMEM; 174 return -ENOMEM;
@@ -184,7 +184,7 @@ lstcon_ndlink_find(struct list_head *hash,
184} 184}
185 185
186static void 186static void
187lstcon_ndlink_release(lstcon_ndlink_t *ndl) 187lstcon_ndlink_release(struct lstcon_ndlink *ndl)
188{ 188{
189 LASSERT(list_empty(&ndl->ndl_link)); 189 LASSERT(list_empty(&ndl->ndl_link));
190 LASSERT(!list_empty(&ndl->ndl_hlink)); 190 LASSERT(!list_empty(&ndl->ndl_hlink));
@@ -196,12 +196,12 @@ lstcon_ndlink_release(lstcon_ndlink_t *ndl)
196} 196}
197 197
198static int 198static int
199lstcon_group_alloc(char *name, lstcon_group_t **grpp) 199lstcon_group_alloc(char *name, struct lstcon_group **grpp)
200{ 200{
201 lstcon_group_t *grp; 201 struct lstcon_group *grp;
202 int i; 202 int i;
203 203
204 LIBCFS_ALLOC(grp, offsetof(lstcon_group_t, 204 LIBCFS_ALLOC(grp, offsetof(struct lstcon_group,
205 grp_ndl_hash[LST_NODE_HASHSIZE])); 205 grp_ndl_hash[LST_NODE_HASHSIZE]));
206 if (!grp) 206 if (!grp)
207 return -ENOMEM; 207 return -ENOMEM;
@@ -209,7 +209,7 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp)
209 grp->grp_ref = 1; 209 grp->grp_ref = 1;
210 if (name) { 210 if (name) {
211 if (strlen(name) > sizeof(grp->grp_name) - 1) { 211 if (strlen(name) > sizeof(grp->grp_name) - 1) {
212 LIBCFS_FREE(grp, offsetof(lstcon_group_t, 212 LIBCFS_FREE(grp, offsetof(struct lstcon_group,
213 grp_ndl_hash[LST_NODE_HASHSIZE])); 213 grp_ndl_hash[LST_NODE_HASHSIZE]));
214 return -E2BIG; 214 return -E2BIG;
215 } 215 }
@@ -229,18 +229,18 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp)
229} 229}
230 230
231static void 231static void
232lstcon_group_addref(lstcon_group_t *grp) 232lstcon_group_addref(struct lstcon_group *grp)
233{ 233{
234 grp->grp_ref++; 234 grp->grp_ref++;
235} 235}
236 236
237static void lstcon_group_ndlink_release(lstcon_group_t *, lstcon_ndlink_t *); 237static void lstcon_group_ndlink_release(struct lstcon_group *, struct lstcon_ndlink *);
238 238
239static void 239static void
240lstcon_group_drain(lstcon_group_t *grp, int keep) 240lstcon_group_drain(struct lstcon_group *grp, int keep)
241{ 241{
242 lstcon_ndlink_t *ndl; 242 struct lstcon_ndlink *ndl;
243 lstcon_ndlink_t *tmp; 243 struct lstcon_ndlink *tmp;
244 244
245 list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) { 245 list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
246 if (!(ndl->ndl_node->nd_state & keep)) 246 if (!(ndl->ndl_node->nd_state & keep))
@@ -249,7 +249,7 @@ lstcon_group_drain(lstcon_group_t *grp, int keep)
249} 249}
250 250
251static void 251static void
252lstcon_group_decref(lstcon_group_t *grp) 252lstcon_group_decref(struct lstcon_group *grp)
253{ 253{
254 int i; 254 int i;
255 255
@@ -264,20 +264,20 @@ lstcon_group_decref(lstcon_group_t *grp)
264 for (i = 0; i < LST_NODE_HASHSIZE; i++) 264 for (i = 0; i < LST_NODE_HASHSIZE; i++)
265 LASSERT(list_empty(&grp->grp_ndl_hash[i])); 265 LASSERT(list_empty(&grp->grp_ndl_hash[i]));
266 266
267 LIBCFS_FREE(grp, offsetof(lstcon_group_t, 267 LIBCFS_FREE(grp, offsetof(struct lstcon_group,
268 grp_ndl_hash[LST_NODE_HASHSIZE])); 268 grp_ndl_hash[LST_NODE_HASHSIZE]));
269} 269}
270 270
271static int 271static int
272lstcon_group_find(const char *name, lstcon_group_t **grpp) 272lstcon_group_find(const char *name, struct lstcon_group **grpp)
273{ 273{
274 lstcon_group_t *grp; 274 struct lstcon_group *grp;
275 275
276 list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) { 276 list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
277 if (strncmp(grp->grp_name, name, LST_NAME_SIZE)) 277 if (strncmp(grp->grp_name, name, LST_NAME_SIZE))
278 continue; 278 continue;
279 279
280 lstcon_group_addref(grp); /* +1 ref for caller */ 280 lstcon_group_addref(grp); /* +1 ref for caller */
281 *grpp = grp; 281 *grpp = grp;
282 return 0; 282 return 0;
283 } 283 }
@@ -286,8 +286,8 @@ lstcon_group_find(const char *name, lstcon_group_t **grpp)
286} 286}
287 287
288static int 288static int
289lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id, 289lstcon_group_ndlink_find(struct lstcon_group *grp, lnet_process_id_t id,
290 lstcon_ndlink_t **ndlpp, int create) 290 struct lstcon_ndlink **ndlpp, int create)
291{ 291{
292 int rc; 292 int rc;
293 293
@@ -305,7 +305,7 @@ lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
305} 305}
306 306
307static void 307static void
308lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl) 308lstcon_group_ndlink_release(struct lstcon_group *grp, struct lstcon_ndlink *ndl)
309{ 309{
310 list_del_init(&ndl->ndl_link); 310 list_del_init(&ndl->ndl_link);
311 lstcon_ndlink_release(ndl); 311 lstcon_ndlink_release(ndl);
@@ -313,8 +313,8 @@ lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
313} 313}
314 314
315static void 315static void
316lstcon_group_ndlink_move(lstcon_group_t *old, 316lstcon_group_ndlink_move(struct lstcon_group *old,
317 lstcon_group_t *new, lstcon_ndlink_t *ndl) 317 struct lstcon_group *new, struct lstcon_ndlink *ndl)
318{ 318{
319 unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) % 319 unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
320 LST_NODE_HASHSIZE; 320 LST_NODE_HASHSIZE;
@@ -329,21 +329,21 @@ lstcon_group_ndlink_move(lstcon_group_t *old,
329} 329}
330 330
331static void 331static void
332lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new) 332lstcon_group_move(struct lstcon_group *old, struct lstcon_group *new)
333{ 333{
334 lstcon_ndlink_t *ndl; 334 struct lstcon_ndlink *ndl;
335 335
336 while (!list_empty(&old->grp_ndl_list)) { 336 while (!list_empty(&old->grp_ndl_list)) {
337 ndl = list_entry(old->grp_ndl_list.next, 337 ndl = list_entry(old->grp_ndl_list.next,
338 lstcon_ndlink_t, ndl_link); 338 struct lstcon_ndlink, ndl_link);
339 lstcon_group_ndlink_move(old, new, ndl); 339 lstcon_group_ndlink_move(old, new, ndl);
340 } 340 }
341} 341}
342 342
343static int 343static int
344lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg) 344lstcon_sesrpc_condition(int transop, struct lstcon_node *nd, void *arg)
345{ 345{
346 lstcon_group_t *grp = (lstcon_group_t *)arg; 346 struct lstcon_group *grp = (struct lstcon_group *)arg;
347 347
348 switch (transop) { 348 switch (transop) {
349 case LST_TRANS_SESNEW: 349 case LST_TRANS_SESNEW:
@@ -370,10 +370,10 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
370} 370}
371 371
372static int 372static int
373lstcon_sesrpc_readent(int transop, srpc_msg_t *msg, 373lstcon_sesrpc_readent(int transop, struct srpc_msg *msg,
374 lstcon_rpc_ent_t __user *ent_up) 374 lstcon_rpc_ent_t __user *ent_up)
375{ 375{
376 srpc_debug_reply_t *rep; 376 struct srpc_debug_reply *rep;
377 377
378 switch (transop) { 378 switch (transop) {
379 case LST_TRANS_SESNEW: 379 case LST_TRANS_SESNEW:
@@ -399,13 +399,13 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
399} 399}
400 400
401static int 401static int
402lstcon_group_nodes_add(lstcon_group_t *grp, 402lstcon_group_nodes_add(struct lstcon_group *grp,
403 int count, lnet_process_id_t __user *ids_up, 403 int count, lnet_process_id_t __user *ids_up,
404 unsigned *featp, struct list_head __user *result_up) 404 unsigned *featp, struct list_head __user *result_up)
405{ 405{
406 lstcon_rpc_trans_t *trans; 406 struct lstcon_rpc_trans *trans;
407 lstcon_ndlink_t *ndl; 407 struct lstcon_ndlink *ndl;
408 lstcon_group_t *tmp; 408 struct lstcon_group *tmp;
409 lnet_process_id_t id; 409 lnet_process_id_t id;
410 int i; 410 int i;
411 int rc; 411 int rc;
@@ -466,13 +466,13 @@ lstcon_group_nodes_add(lstcon_group_t *grp,
466} 466}
467 467
468static int 468static int
469lstcon_group_nodes_remove(lstcon_group_t *grp, 469lstcon_group_nodes_remove(struct lstcon_group *grp,
470 int count, lnet_process_id_t __user *ids_up, 470 int count, lnet_process_id_t __user *ids_up,
471 struct list_head __user *result_up) 471 struct list_head __user *result_up)
472{ 472{
473 lstcon_rpc_trans_t *trans; 473 struct lstcon_rpc_trans *trans;
474 lstcon_ndlink_t *ndl; 474 struct lstcon_ndlink *ndl;
475 lstcon_group_t *tmp; 475 struct lstcon_group *tmp;
476 lnet_process_id_t id; 476 lnet_process_id_t id;
477 int rc; 477 int rc;
478 int i; 478 int i;
@@ -523,7 +523,7 @@ error:
523int 523int
524lstcon_group_add(char *name) 524lstcon_group_add(char *name)
525{ 525{
526 lstcon_group_t *grp; 526 struct lstcon_group *grp;
527 int rc; 527 int rc;
528 528
529 rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST; 529 rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST;
@@ -548,7 +548,7 @@ int
548lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up, 548lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
549 unsigned *featp, struct list_head __user *result_up) 549 unsigned *featp, struct list_head __user *result_up)
550{ 550{
551 lstcon_group_t *grp; 551 struct lstcon_group *grp;
552 int rc; 552 int rc;
553 553
554 LASSERT(count > 0); 554 LASSERT(count > 0);
@@ -578,8 +578,8 @@ lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
578int 578int
579lstcon_group_del(char *name) 579lstcon_group_del(char *name)
580{ 580{
581 lstcon_rpc_trans_t *trans; 581 struct lstcon_rpc_trans *trans;
582 lstcon_group_t *grp; 582 struct lstcon_group *grp;
583 int rc; 583 int rc;
584 584
585 rc = lstcon_group_find(name, &grp); 585 rc = lstcon_group_find(name, &grp);
@@ -621,7 +621,7 @@ lstcon_group_del(char *name)
621int 621int
622lstcon_group_clean(char *name, int args) 622lstcon_group_clean(char *name, int args)
623{ 623{
624 lstcon_group_t *grp = NULL; 624 struct lstcon_group *grp = NULL;
625 int rc; 625 int rc;
626 626
627 rc = lstcon_group_find(name, &grp); 627 rc = lstcon_group_find(name, &grp);
@@ -654,7 +654,7 @@ int
654lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up, 654lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
655 struct list_head __user *result_up) 655 struct list_head __user *result_up)
656{ 656{
657 lstcon_group_t *grp = NULL; 657 struct lstcon_group *grp = NULL;
658 int rc; 658 int rc;
659 659
660 rc = lstcon_group_find(name, &grp); 660 rc = lstcon_group_find(name, &grp);
@@ -683,8 +683,8 @@ lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
683int 683int
684lstcon_group_refresh(char *name, struct list_head __user *result_up) 684lstcon_group_refresh(char *name, struct list_head __user *result_up)
685{ 685{
686 lstcon_rpc_trans_t *trans; 686 struct lstcon_rpc_trans *trans;
687 lstcon_group_t *grp; 687 struct lstcon_group *grp;
688 int rc; 688 int rc;
689 689
690 rc = lstcon_group_find(name, &grp); 690 rc = lstcon_group_find(name, &grp);
@@ -725,7 +725,7 @@ lstcon_group_refresh(char *name, struct list_head __user *result_up)
725int 725int
726lstcon_group_list(int index, int len, char __user *name_up) 726lstcon_group_list(int index, int len, char __user *name_up)
727{ 727{
728 lstcon_group_t *grp; 728 struct lstcon_group *grp;
729 729
730 LASSERT(index >= 0); 730 LASSERT(index >= 0);
731 LASSERT(name_up); 731 LASSERT(name_up);
@@ -733,7 +733,7 @@ lstcon_group_list(int index, int len, char __user *name_up)
733 list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) { 733 list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
734 if (!index--) { 734 if (!index--) {
735 return copy_to_user(name_up, grp->grp_name, len) ? 735 return copy_to_user(name_up, grp->grp_name, len) ?
736 -EFAULT : 0; 736 -EFAULT : 0;
737 } 737 }
738 } 738 }
739 739
@@ -744,8 +744,8 @@ static int
744lstcon_nodes_getent(struct list_head *head, int *index_p, 744lstcon_nodes_getent(struct list_head *head, int *index_p,
745 int *count_p, lstcon_node_ent_t __user *dents_up) 745 int *count_p, lstcon_node_ent_t __user *dents_up)
746{ 746{
747 lstcon_ndlink_t *ndl; 747 struct lstcon_ndlink *ndl;
748 lstcon_node_t *nd; 748 struct lstcon_node *nd;
749 int count = 0; 749 int count = 0;
750 int index = 0; 750 int index = 0;
751 751
@@ -786,8 +786,8 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
786 lstcon_node_ent_t __user *dents_up) 786 lstcon_node_ent_t __user *dents_up)
787{ 787{
788 lstcon_ndlist_ent_t *gentp; 788 lstcon_ndlist_ent_t *gentp;
789 lstcon_group_t *grp; 789 struct lstcon_group *grp;
790 lstcon_ndlink_t *ndl; 790 struct lstcon_ndlink *ndl;
791 int rc; 791 int rc;
792 792
793 rc = lstcon_group_find(name, &grp); 793 rc = lstcon_group_find(name, &grp);
@@ -828,9 +828,9 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
828} 828}
829 829
830static int 830static int
831lstcon_batch_find(const char *name, lstcon_batch_t **batpp) 831lstcon_batch_find(const char *name, struct lstcon_batch **batpp)
832{ 832{
833 lstcon_batch_t *bat; 833 struct lstcon_batch *bat;
834 834
835 list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) { 835 list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
836 if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) { 836 if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) {
@@ -845,7 +845,7 @@ lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
845int 845int
846lstcon_batch_add(char *name) 846lstcon_batch_add(char *name)
847{ 847{
848 lstcon_batch_t *bat; 848 struct lstcon_batch *bat;
849 int i; 849 int i;
850 int rc; 850 int rc;
851 851
@@ -855,7 +855,7 @@ lstcon_batch_add(char *name)
855 return rc; 855 return rc;
856 } 856 }
857 857
858 LIBCFS_ALLOC(bat, sizeof(lstcon_batch_t)); 858 LIBCFS_ALLOC(bat, sizeof(struct lstcon_batch));
859 if (!bat) { 859 if (!bat) {
860 CERROR("Can't allocate descriptor for batch %s\n", name); 860 CERROR("Can't allocate descriptor for batch %s\n", name);
861 return -ENOMEM; 861 return -ENOMEM;
@@ -865,7 +865,7 @@ lstcon_batch_add(char *name)
865 sizeof(struct list_head) * LST_NODE_HASHSIZE); 865 sizeof(struct list_head) * LST_NODE_HASHSIZE);
866 if (!bat->bat_cli_hash) { 866 if (!bat->bat_cli_hash) {
867 CERROR("Can't allocate hash for batch %s\n", name); 867 CERROR("Can't allocate hash for batch %s\n", name);
868 LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); 868 LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
869 869
870 return -ENOMEM; 870 return -ENOMEM;
871 } 871 }
@@ -875,7 +875,7 @@ lstcon_batch_add(char *name)
875 if (!bat->bat_srv_hash) { 875 if (!bat->bat_srv_hash) {
876 CERROR("Can't allocate hash for batch %s\n", name); 876 CERROR("Can't allocate hash for batch %s\n", name);
877 LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE); 877 LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
878 LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); 878 LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
879 879
880 return -ENOMEM; 880 return -ENOMEM;
881 } 881 }
@@ -883,7 +883,7 @@ lstcon_batch_add(char *name)
883 if (strlen(name) > sizeof(bat->bat_name) - 1) { 883 if (strlen(name) > sizeof(bat->bat_name) - 1) {
884 LIBCFS_FREE(bat->bat_srv_hash, LST_NODE_HASHSIZE); 884 LIBCFS_FREE(bat->bat_srv_hash, LST_NODE_HASHSIZE);
885 LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE); 885 LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
886 LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); 886 LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
887 return -E2BIG; 887 return -E2BIG;
888 } 888 }
889 strncpy(bat->bat_name, name, sizeof(bat->bat_name)); 889 strncpy(bat->bat_name, name, sizeof(bat->bat_name));
@@ -911,7 +911,7 @@ lstcon_batch_add(char *name)
911int 911int
912lstcon_batch_list(int index, int len, char __user *name_up) 912lstcon_batch_list(int index, int len, char __user *name_up)
913{ 913{
914 lstcon_batch_t *bat; 914 struct lstcon_batch *bat;
915 915
916 LASSERT(name_up); 916 LASSERT(name_up);
917 LASSERT(index >= 0); 917 LASSERT(index >= 0);
@@ -934,9 +934,9 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
934 lstcon_test_batch_ent_t *entp; 934 lstcon_test_batch_ent_t *entp;
935 struct list_head *clilst; 935 struct list_head *clilst;
936 struct list_head *srvlst; 936 struct list_head *srvlst;
937 lstcon_test_t *test = NULL; 937 struct lstcon_test *test = NULL;
938 lstcon_batch_t *bat; 938 struct lstcon_batch *bat;
939 lstcon_ndlink_t *ndl; 939 struct lstcon_ndlink *ndl;
940 int rc; 940 int rc;
941 941
942 rc = lstcon_batch_find(name, &bat); 942 rc = lstcon_batch_find(name, &bat);
@@ -977,7 +977,6 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
977 if (!test) { 977 if (!test) {
978 entp->u.tbe_batch.bae_ntest = bat->bat_ntest; 978 entp->u.tbe_batch.bae_ntest = bat->bat_ntest;
979 entp->u.tbe_batch.bae_state = bat->bat_state; 979 entp->u.tbe_batch.bae_state = bat->bat_state;
980
981 } else { 980 } else {
982 entp->u.tbe_test.tse_type = test->tes_type; 981 entp->u.tbe_test.tse_type = test->tes_type;
983 entp->u.tbe_test.tse_loop = test->tes_loop; 982 entp->u.tbe_test.tse_loop = test->tes_loop;
@@ -999,7 +998,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
999} 998}
1000 999
1001static int 1000static int
1002lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg) 1001lstcon_batrpc_condition(int transop, struct lstcon_node *nd, void *arg)
1003{ 1002{
1004 switch (transop) { 1003 switch (transop) {
1005 case LST_TRANS_TSBRUN: 1004 case LST_TRANS_TSBRUN:
@@ -1021,10 +1020,10 @@ lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
1021} 1020}
1022 1021
1023static int 1022static int
1024lstcon_batch_op(lstcon_batch_t *bat, int transop, 1023lstcon_batch_op(struct lstcon_batch *bat, int transop,
1025 struct list_head __user *result_up) 1024 struct list_head __user *result_up)
1026{ 1025{
1027 lstcon_rpc_trans_t *trans; 1026 struct lstcon_rpc_trans *trans;
1028 int rc; 1027 int rc;
1029 1028
1030 rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list, 1029 rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list,
@@ -1047,7 +1046,7 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop,
1047int 1046int
1048lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up) 1047lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
1049{ 1048{
1050 lstcon_batch_t *bat; 1049 struct lstcon_batch *bat;
1051 int rc; 1050 int rc;
1052 1051
1053 if (lstcon_batch_find(name, &bat)) { 1052 if (lstcon_batch_find(name, &bat)) {
@@ -1069,7 +1068,7 @@ lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
1069int 1068int
1070lstcon_batch_stop(char *name, int force, struct list_head __user *result_up) 1069lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
1071{ 1070{
1072 lstcon_batch_t *bat; 1071 struct lstcon_batch *bat;
1073 int rc; 1072 int rc;
1074 1073
1075 if (lstcon_batch_find(name, &bat)) { 1074 if (lstcon_batch_find(name, &bat)) {
@@ -1089,17 +1088,17 @@ lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
1089} 1088}
1090 1089
1091static void 1090static void
1092lstcon_batch_destroy(lstcon_batch_t *bat) 1091lstcon_batch_destroy(struct lstcon_batch *bat)
1093{ 1092{
1094 lstcon_ndlink_t *ndl; 1093 struct lstcon_ndlink *ndl;
1095 lstcon_test_t *test; 1094 struct lstcon_test *test;
1096 int i; 1095 int i;
1097 1096
1098 list_del(&bat->bat_link); 1097 list_del(&bat->bat_link);
1099 1098
1100 while (!list_empty(&bat->bat_test_list)) { 1099 while (!list_empty(&bat->bat_test_list)) {
1101 test = list_entry(bat->bat_test_list.next, 1100 test = list_entry(bat->bat_test_list.next,
1102 lstcon_test_t, tes_link); 1101 struct lstcon_test, tes_link);
1103 LASSERT(list_empty(&test->tes_trans_list)); 1102 LASSERT(list_empty(&test->tes_trans_list));
1104 1103
1105 list_del(&test->tes_link); 1104 list_del(&test->tes_link);
@@ -1107,7 +1106,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
1107 lstcon_group_decref(test->tes_src_grp); 1106 lstcon_group_decref(test->tes_src_grp);
1108 lstcon_group_decref(test->tes_dst_grp); 1107 lstcon_group_decref(test->tes_dst_grp);
1109 1108
1110 LIBCFS_FREE(test, offsetof(lstcon_test_t, 1109 LIBCFS_FREE(test, offsetof(struct lstcon_test,
1111 tes_param[test->tes_paramlen])); 1110 tes_param[test->tes_paramlen]));
1112 } 1111 }
1113 1112
@@ -1115,7 +1114,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
1115 1114
1116 while (!list_empty(&bat->bat_cli_list)) { 1115 while (!list_empty(&bat->bat_cli_list)) {
1117 ndl = list_entry(bat->bat_cli_list.next, 1116 ndl = list_entry(bat->bat_cli_list.next,
1118 lstcon_ndlink_t, ndl_link); 1117 struct lstcon_ndlink, ndl_link);
1119 list_del_init(&ndl->ndl_link); 1118 list_del_init(&ndl->ndl_link);
1120 1119
1121 lstcon_ndlink_release(ndl); 1120 lstcon_ndlink_release(ndl);
@@ -1123,7 +1122,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
1123 1122
1124 while (!list_empty(&bat->bat_srv_list)) { 1123 while (!list_empty(&bat->bat_srv_list)) {
1125 ndl = list_entry(bat->bat_srv_list.next, 1124 ndl = list_entry(bat->bat_srv_list.next,
1126 lstcon_ndlink_t, ndl_link); 1125 struct lstcon_ndlink, ndl_link);
1127 list_del_init(&ndl->ndl_link); 1126 list_del_init(&ndl->ndl_link);
1128 1127
1129 lstcon_ndlink_release(ndl); 1128 lstcon_ndlink_release(ndl);
@@ -1138,19 +1137,19 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
1138 sizeof(struct list_head) * LST_NODE_HASHSIZE); 1137 sizeof(struct list_head) * LST_NODE_HASHSIZE);
1139 LIBCFS_FREE(bat->bat_srv_hash, 1138 LIBCFS_FREE(bat->bat_srv_hash,
1140 sizeof(struct list_head) * LST_NODE_HASHSIZE); 1139 sizeof(struct list_head) * LST_NODE_HASHSIZE);
1141 LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); 1140 LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
1142} 1141}
1143 1142
1144static int 1143static int
1145lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg) 1144lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg)
1146{ 1145{
1147 lstcon_test_t *test; 1146 struct lstcon_test *test;
1148 lstcon_batch_t *batch; 1147 struct lstcon_batch *batch;
1149 lstcon_ndlink_t *ndl; 1148 struct lstcon_ndlink *ndl;
1150 struct list_head *hash; 1149 struct list_head *hash;
1151 struct list_head *head; 1150 struct list_head *head;
1152 1151
1153 test = (lstcon_test_t *)arg; 1152 test = (struct lstcon_test *)arg;
1154 LASSERT(test); 1153 LASSERT(test);
1155 1154
1156 batch = test->tes_batch; 1155 batch = test->tes_batch;
@@ -1186,10 +1185,10 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
1186} 1185}
1187 1186
1188static int 1187static int
1189lstcon_test_nodes_add(lstcon_test_t *test, struct list_head __user *result_up) 1188lstcon_test_nodes_add(struct lstcon_test *test, struct list_head __user *result_up)
1190{ 1189{
1191 lstcon_rpc_trans_t *trans; 1190 struct lstcon_rpc_trans *trans;
1192 lstcon_group_t *grp; 1191 struct lstcon_group *grp;
1193 int transop; 1192 int transop;
1194 int rc; 1193 int rc;
1195 1194
@@ -1237,7 +1236,7 @@ again:
1237} 1236}
1238 1237
1239static int 1238static int
1240lstcon_verify_batch(const char *name, lstcon_batch_t **batch) 1239lstcon_verify_batch(const char *name, struct lstcon_batch **batch)
1241{ 1240{
1242 int rc; 1241 int rc;
1243 1242
@@ -1256,10 +1255,10 @@ lstcon_verify_batch(const char *name, lstcon_batch_t **batch)
1256} 1255}
1257 1256
1258static int 1257static int
1259lstcon_verify_group(const char *name, lstcon_group_t **grp) 1258lstcon_verify_group(const char *name, struct lstcon_group **grp)
1260{ 1259{
1261 int rc; 1260 int rc;
1262 lstcon_ndlink_t *ndl; 1261 struct lstcon_ndlink *ndl;
1263 1262
1264 rc = lstcon_group_find(name, grp); 1263 rc = lstcon_group_find(name, grp);
1265 if (rc) { 1264 if (rc) {
@@ -1284,11 +1283,11 @@ lstcon_test_add(char *batch_name, int type, int loop,
1284 void *param, int paramlen, int *retp, 1283 void *param, int paramlen, int *retp,
1285 struct list_head __user *result_up) 1284 struct list_head __user *result_up)
1286{ 1285{
1287 lstcon_test_t *test = NULL; 1286 struct lstcon_test *test = NULL;
1288 int rc; 1287 int rc;
1289 lstcon_group_t *src_grp = NULL; 1288 struct lstcon_group *src_grp = NULL;
1290 lstcon_group_t *dst_grp = NULL; 1289 struct lstcon_group *dst_grp = NULL;
1291 lstcon_batch_t *batch = NULL; 1290 struct lstcon_batch *batch = NULL;
1292 1291
1293 /* 1292 /*
1294 * verify that a batch of the given name exists, and the groups 1293 * verify that a batch of the given name exists, and the groups
@@ -1310,7 +1309,7 @@ lstcon_test_add(char *batch_name, int type, int loop,
1310 if (dst_grp->grp_userland) 1309 if (dst_grp->grp_userland)
1311 *retp = 1; 1310 *retp = 1;
1312 1311
1313 LIBCFS_ALLOC(test, offsetof(lstcon_test_t, tes_param[paramlen])); 1312 LIBCFS_ALLOC(test, offsetof(struct lstcon_test, tes_param[paramlen]));
1314 if (!test) { 1313 if (!test) {
1315 CERROR("Can't allocate test descriptor\n"); 1314 CERROR("Can't allocate test descriptor\n");
1316 rc = -ENOMEM; 1315 rc = -ENOMEM;
@@ -1357,7 +1356,7 @@ lstcon_test_add(char *batch_name, int type, int loop,
1357 return rc; 1356 return rc;
1358out: 1357out:
1359 if (test) 1358 if (test)
1360 LIBCFS_FREE(test, offsetof(lstcon_test_t, tes_param[paramlen])); 1359 LIBCFS_FREE(test, offsetof(struct lstcon_test, tes_param[paramlen]));
1361 1360
1362 if (dst_grp) 1361 if (dst_grp)
1363 lstcon_group_decref(dst_grp); 1362 lstcon_group_decref(dst_grp);
@@ -1369,9 +1368,9 @@ out:
1369} 1368}
1370 1369
1371static int 1370static int
1372lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp) 1371lstcon_test_find(struct lstcon_batch *batch, int idx, struct lstcon_test **testpp)
1373{ 1372{
1374 lstcon_test_t *test; 1373 struct lstcon_test *test;
1375 1374
1376 list_for_each_entry(test, &batch->bat_test_list, tes_link) { 1375 list_for_each_entry(test, &batch->bat_test_list, tes_link) {
1377 if (idx == test->tes_hdr.tsb_index) { 1376 if (idx == test->tes_hdr.tsb_index) {
@@ -1384,10 +1383,10 @@ lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
1384} 1383}
1385 1384
1386static int 1385static int
1387lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg, 1386lstcon_tsbrpc_readent(int transop, struct srpc_msg *msg,
1388 lstcon_rpc_ent_t __user *ent_up) 1387 lstcon_rpc_ent_t __user *ent_up)
1389{ 1388{
1390 srpc_batch_reply_t *rep = &msg->msg_body.bat_reply; 1389 struct srpc_batch_reply *rep = &msg->msg_body.bat_reply;
1391 1390
1392 LASSERT(transop == LST_TRANS_TSBCLIQRY || 1391 LASSERT(transop == LST_TRANS_TSBCLIQRY ||
1393 transop == LST_TRANS_TSBSRVQRY); 1392 transop == LST_TRANS_TSBSRVQRY);
@@ -1404,12 +1403,12 @@ int
1404lstcon_test_batch_query(char *name, int testidx, int client, 1403lstcon_test_batch_query(char *name, int testidx, int client,
1405 int timeout, struct list_head __user *result_up) 1404 int timeout, struct list_head __user *result_up)
1406{ 1405{
1407 lstcon_rpc_trans_t *trans; 1406 struct lstcon_rpc_trans *trans;
1408 struct list_head *translist; 1407 struct list_head *translist;
1409 struct list_head *ndlist; 1408 struct list_head *ndlist;
1410 lstcon_tsb_hdr_t *hdr; 1409 struct lstcon_tsb_hdr *hdr;
1411 lstcon_batch_t *batch; 1410 struct lstcon_batch *batch;
1412 lstcon_test_t *test = NULL; 1411 struct lstcon_test *test = NULL;
1413 int transop; 1412 int transop;
1414 int rc; 1413 int rc;
1415 1414
@@ -1423,7 +1422,6 @@ lstcon_test_batch_query(char *name, int testidx, int client,
1423 translist = &batch->bat_trans_list; 1422 translist = &batch->bat_trans_list;
1424 ndlist = &batch->bat_cli_list; 1423 ndlist = &batch->bat_cli_list;
1425 hdr = &batch->bat_hdr; 1424 hdr = &batch->bat_hdr;
1426
1427 } else { 1425 } else {
1428 /* query specified test only */ 1426 /* query specified test only */
1429 rc = lstcon_test_find(batch, testidx, &test); 1427 rc = lstcon_test_find(batch, testidx, &test);
@@ -1448,7 +1446,8 @@ lstcon_test_batch_query(char *name, int testidx, int client,
1448 1446
1449 lstcon_rpc_trans_postwait(trans, timeout); 1447 lstcon_rpc_trans_postwait(trans, timeout);
1450 1448
1451 if (!testidx && /* query a batch, not a test */ 1449 /* query a batch, not a test */
1450 if (!testidx &&
1452 !lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) && 1451 !lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) &&
1453 !lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0)) { 1452 !lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0)) {
1454 /* all RPCs finished, and no active test */ 1453 /* all RPCs finished, and no active test */
@@ -1463,10 +1462,10 @@ lstcon_test_batch_query(char *name, int testidx, int client,
1463} 1462}
1464 1463
1465static int 1464static int
1466lstcon_statrpc_readent(int transop, srpc_msg_t *msg, 1465lstcon_statrpc_readent(int transop, struct srpc_msg *msg,
1467 lstcon_rpc_ent_t __user *ent_up) 1466 lstcon_rpc_ent_t __user *ent_up)
1468{ 1467{
1469 srpc_stat_reply_t *rep = &msg->msg_body.stat_reply; 1468 struct srpc_stat_reply *rep = &msg->msg_body.stat_reply;
1470 sfw_counters_t __user *sfwk_stat; 1469 sfw_counters_t __user *sfwk_stat;
1471 srpc_counters_t __user *srpc_stat; 1470 srpc_counters_t __user *srpc_stat;
1472 lnet_counters_t __user *lnet_stat; 1471 lnet_counters_t __user *lnet_stat;
@@ -1491,7 +1490,7 @@ lstcon_ndlist_stat(struct list_head *ndlist,
1491 int timeout, struct list_head __user *result_up) 1490 int timeout, struct list_head __user *result_up)
1492{ 1491{
1493 struct list_head head; 1492 struct list_head head;
1494 lstcon_rpc_trans_t *trans; 1493 struct lstcon_rpc_trans *trans;
1495 int rc; 1494 int rc;
1496 1495
1497 INIT_LIST_HEAD(&head); 1496 INIT_LIST_HEAD(&head);
@@ -1516,7 +1515,7 @@ int
1516lstcon_group_stat(char *grp_name, int timeout, 1515lstcon_group_stat(char *grp_name, int timeout,
1517 struct list_head __user *result_up) 1516 struct list_head __user *result_up)
1518{ 1517{
1519 lstcon_group_t *grp; 1518 struct lstcon_group *grp;
1520 int rc; 1519 int rc;
1521 1520
1522 rc = lstcon_group_find(grp_name, &grp); 1521 rc = lstcon_group_find(grp_name, &grp);
@@ -1536,8 +1535,8 @@ int
1536lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up, 1535lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up,
1537 int timeout, struct list_head __user *result_up) 1536 int timeout, struct list_head __user *result_up)
1538{ 1537{
1539 lstcon_ndlink_t *ndl; 1538 struct lstcon_ndlink *ndl;
1540 lstcon_group_t *tmp; 1539 struct lstcon_group *tmp;
1541 lnet_process_id_t id; 1540 lnet_process_id_t id;
1542 int i; 1541 int i;
1543 int rc; 1542 int rc;
@@ -1581,7 +1580,7 @@ lstcon_debug_ndlist(struct list_head *ndlist,
1581 struct list_head *translist, 1580 struct list_head *translist,
1582 int timeout, struct list_head __user *result_up) 1581 int timeout, struct list_head __user *result_up)
1583{ 1582{
1584 lstcon_rpc_trans_t *trans; 1583 struct lstcon_rpc_trans *trans;
1585 int rc; 1584 int rc;
1586 1585
1587 rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY, 1586 rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY,
@@ -1611,7 +1610,7 @@ int
1611lstcon_batch_debug(int timeout, char *name, 1610lstcon_batch_debug(int timeout, char *name,
1612 int client, struct list_head __user *result_up) 1611 int client, struct list_head __user *result_up)
1613{ 1612{
1614 lstcon_batch_t *bat; 1613 struct lstcon_batch *bat;
1615 int rc; 1614 int rc;
1616 1615
1617 rc = lstcon_batch_find(name, &bat); 1616 rc = lstcon_batch_find(name, &bat);
@@ -1629,7 +1628,7 @@ int
1629lstcon_group_debug(int timeout, char *name, 1628lstcon_group_debug(int timeout, char *name,
1630 struct list_head __user *result_up) 1629 struct list_head __user *result_up)
1631{ 1630{
1632 lstcon_group_t *grp; 1631 struct lstcon_group *grp;
1633 int rc; 1632 int rc;
1634 1633
1635 rc = lstcon_group_find(name, &grp); 1634 rc = lstcon_group_find(name, &grp);
@@ -1649,8 +1648,8 @@ lstcon_nodes_debug(int timeout,
1649 struct list_head __user *result_up) 1648 struct list_head __user *result_up)
1650{ 1649{
1651 lnet_process_id_t id; 1650 lnet_process_id_t id;
1652 lstcon_ndlink_t *ndl; 1651 struct lstcon_ndlink *ndl;
1653 lstcon_group_t *grp; 1652 struct lstcon_group *grp;
1654 int i; 1653 int i;
1655 int rc; 1654 int rc;
1656 1655
@@ -1749,7 +1748,7 @@ lstcon_session_new(char *name, int key, unsigned feats,
1749 1748
1750 if (strlen(name) > sizeof(console_session.ses_name) - 1) 1749 if (strlen(name) > sizeof(console_session.ses_name) - 1)
1751 return -E2BIG; 1750 return -E2BIG;
1752 strncpy(console_session.ses_name, name, 1751 strlcpy(console_session.ses_name, name,
1753 sizeof(console_session.ses_name)); 1752 sizeof(console_session.ses_name));
1754 1753
1755 rc = lstcon_batch_add(LST_DEFAULT_BATCH); 1754 rc = lstcon_batch_add(LST_DEFAULT_BATCH);
@@ -1758,7 +1757,7 @@ lstcon_session_new(char *name, int key, unsigned feats,
1758 1757
1759 rc = lstcon_rpc_pinger_start(); 1758 rc = lstcon_rpc_pinger_start();
1760 if (rc) { 1759 if (rc) {
1761 lstcon_batch_t *bat = NULL; 1760 struct lstcon_batch *bat = NULL;
1762 1761
1763 lstcon_batch_find(LST_DEFAULT_BATCH, &bat); 1762 lstcon_batch_find(LST_DEFAULT_BATCH, &bat);
1764 lstcon_batch_destroy(bat); 1763 lstcon_batch_destroy(bat);
@@ -1782,7 +1781,7 @@ lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up,
1782 char __user *name_up, int len) 1781 char __user *name_up, int len)
1783{ 1782{
1784 lstcon_ndlist_ent_t *entp; 1783 lstcon_ndlist_ent_t *entp;
1785 lstcon_ndlink_t *ndl; 1784 struct lstcon_ndlink *ndl;
1786 int rc = 0; 1785 int rc = 0;
1787 1786
1788 if (console_session.ses_state != LST_SESSION_ACTIVE) 1787 if (console_session.ses_state != LST_SESSION_ACTIVE)
@@ -1813,9 +1812,9 @@ lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up,
1813int 1812int
1814lstcon_session_end(void) 1813lstcon_session_end(void)
1815{ 1814{
1816 lstcon_rpc_trans_t *trans; 1815 struct lstcon_rpc_trans *trans;
1817 lstcon_group_t *grp; 1816 struct lstcon_group *grp;
1818 lstcon_batch_t *bat; 1817 struct lstcon_batch *bat;
1819 int rc = 0; 1818 int rc = 0;
1820 1819
1821 LASSERT(console_session.ses_state == LST_SESSION_ACTIVE); 1820 LASSERT(console_session.ses_state == LST_SESSION_ACTIVE);
@@ -1849,7 +1848,7 @@ lstcon_session_end(void)
1849 /* destroy all batches */ 1848 /* destroy all batches */
1850 while (!list_empty(&console_session.ses_bat_list)) { 1849 while (!list_empty(&console_session.ses_bat_list)) {
1851 bat = list_entry(console_session.ses_bat_list.next, 1850 bat = list_entry(console_session.ses_bat_list.next,
1852 lstcon_batch_t, bat_link); 1851 struct lstcon_batch, bat_link);
1853 1852
1854 lstcon_batch_destroy(bat); 1853 lstcon_batch_destroy(bat);
1855 } 1854 }
@@ -1857,7 +1856,7 @@ lstcon_session_end(void)
1857 /* destroy all groups */ 1856 /* destroy all groups */
1858 while (!list_empty(&console_session.ses_grp_list)) { 1857 while (!list_empty(&console_session.ses_grp_list)) {
1859 grp = list_entry(console_session.ses_grp_list.next, 1858 grp = list_entry(console_session.ses_grp_list.next,
1860 lstcon_group_t, grp_link); 1859 struct lstcon_group, grp_link);
1861 LASSERT(grp->grp_ref == 1); 1860 LASSERT(grp->grp_ref == 1);
1862 1861
1863 lstcon_group_decref(grp); 1862 lstcon_group_decref(grp);
@@ -1906,12 +1905,12 @@ lstcon_session_feats_check(unsigned feats)
1906static int 1905static int
1907lstcon_acceptor_handle(struct srpc_server_rpc *rpc) 1906lstcon_acceptor_handle(struct srpc_server_rpc *rpc)
1908{ 1907{
1909 srpc_msg_t *rep = &rpc->srpc_replymsg; 1908 struct srpc_msg *rep = &rpc->srpc_replymsg;
1910 srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg; 1909 struct srpc_msg *req = &rpc->srpc_reqstbuf->buf_msg;
1911 srpc_join_reqst_t *jreq = &req->msg_body.join_reqst; 1910 struct srpc_join_reqst *jreq = &req->msg_body.join_reqst;
1912 srpc_join_reply_t *jrep = &rep->msg_body.join_reply; 1911 struct srpc_join_reply *jrep = &rep->msg_body.join_reply;
1913 lstcon_group_t *grp = NULL; 1912 struct lstcon_group *grp = NULL;
1914 lstcon_ndlink_t *ndl; 1913 struct lstcon_ndlink *ndl;
1915 int rc = 0; 1914 int rc = 0;
1916 1915
1917 sfw_unpack_message(req); 1916 sfw_unpack_message(req);
@@ -1987,7 +1986,8 @@ out:
1987 return rc; 1986 return rc;
1988} 1987}
1989 1988
1990static srpc_service_t lstcon_acceptor_service; 1989static struct srpc_service lstcon_acceptor_service;
1990
1991static void lstcon_init_acceptor_service(void) 1991static void lstcon_init_acceptor_service(void)
1992{ 1992{
1993 /* initialize selftest console acceptor service table */ 1993 /* initialize selftest console acceptor service table */
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 554f582441f1..becd22e41da9 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -50,22 +50,25 @@
50#include "selftest.h" 50#include "selftest.h"
51#include "conrpc.h" 51#include "conrpc.h"
52 52
53typedef struct lstcon_node { 53/* node descriptor */
54struct lstcon_node {
54 lnet_process_id_t nd_id; /* id of the node */ 55 lnet_process_id_t nd_id; /* id of the node */
55 int nd_ref; /* reference count */ 56 int nd_ref; /* reference count */
56 int nd_state; /* state of the node */ 57 int nd_state; /* state of the node */
57 int nd_timeout; /* session timeout */ 58 int nd_timeout; /* session timeout */
58 unsigned long nd_stamp; /* timestamp of last replied RPC */ 59 unsigned long nd_stamp; /* timestamp of last replied RPC */
59 struct lstcon_rpc nd_ping; /* ping rpc */ 60 struct lstcon_rpc nd_ping; /* ping rpc */
60} lstcon_node_t; /* node descriptor */ 61};
61 62
62typedef struct { 63/* node link descriptor */
64struct lstcon_ndlink {
63 struct list_head ndl_link; /* chain on list */ 65 struct list_head ndl_link; /* chain on list */
64 struct list_head ndl_hlink; /* chain on hash */ 66 struct list_head ndl_hlink; /* chain on hash */
65 lstcon_node_t *ndl_node; /* pointer to node */ 67 struct lstcon_node *ndl_node; /* pointer to node */
66} lstcon_ndlink_t; /* node link descriptor */ 68};
67 69
68typedef struct { 70/* (alias of nodes) group descriptor */
71struct lstcon_group {
69 struct list_head grp_link; /* chain on global group list 72 struct list_head grp_link; /* chain on global group list
70 */ 73 */
71 int grp_ref; /* reference count */ 74 int grp_ref; /* reference count */
@@ -76,18 +79,19 @@ typedef struct {
76 struct list_head grp_trans_list; /* transaction list */ 79 struct list_head grp_trans_list; /* transaction list */
77 struct list_head grp_ndl_list; /* nodes list */ 80 struct list_head grp_ndl_list; /* nodes list */
78 struct list_head grp_ndl_hash[0]; /* hash table for nodes */ 81 struct list_head grp_ndl_hash[0]; /* hash table for nodes */
79} lstcon_group_t; /* (alias of nodes) group descriptor */ 82};
80 83
81#define LST_BATCH_IDLE 0xB0 /* idle batch */ 84#define LST_BATCH_IDLE 0xB0 /* idle batch */
82#define LST_BATCH_RUNNING 0xB1 /* running batch */ 85#define LST_BATCH_RUNNING 0xB1 /* running batch */
83 86
84typedef struct lstcon_tsb_hdr { 87struct lstcon_tsb_hdr {
85 lst_bid_t tsb_id; /* batch ID */ 88 lst_bid_t tsb_id; /* batch ID */
86 int tsb_index; /* test index */ 89 int tsb_index; /* test index */
87} lstcon_tsb_hdr_t; 90};
88 91
89typedef struct { 92/* (tests ) batch descriptor */
90 lstcon_tsb_hdr_t bat_hdr; /* test_batch header */ 93struct lstcon_batch {
94 struct lstcon_tsb_hdr bat_hdr; /* test_batch header */
91 struct list_head bat_link; /* chain on session's batches list */ 95 struct list_head bat_link; /* chain on session's batches list */
92 int bat_ntest; /* # of test */ 96 int bat_ntest; /* # of test */
93 int bat_state; /* state of the batch */ 97 int bat_state; /* state of the batch */
@@ -95,20 +99,21 @@ typedef struct {
95 * for run, force for stop */ 99 * for run, force for stop */
96 char bat_name[LST_NAME_SIZE];/* name of batch */ 100 char bat_name[LST_NAME_SIZE];/* name of batch */
97 101
98 struct list_head bat_test_list; /* list head of tests (lstcon_test_t) 102 struct list_head bat_test_list; /* list head of tests (struct lstcon_test)
99 */ 103 */
100 struct list_head bat_trans_list; /* list head of transaction */ 104 struct list_head bat_trans_list; /* list head of transaction */
101 struct list_head bat_cli_list; /* list head of client nodes 105 struct list_head bat_cli_list; /* list head of client nodes
102 * (lstcon_node_t) */ 106 * (struct lstcon_node) */
103 struct list_head *bat_cli_hash; /* hash table of client nodes */ 107 struct list_head *bat_cli_hash; /* hash table of client nodes */
104 struct list_head bat_srv_list; /* list head of server nodes */ 108 struct list_head bat_srv_list; /* list head of server nodes */
105 struct list_head *bat_srv_hash; /* hash table of server nodes */ 109 struct list_head *bat_srv_hash; /* hash table of server nodes */
106} lstcon_batch_t; /* (tests ) batch descriptor */ 110};
107 111
108typedef struct lstcon_test { 112/* a single test descriptor */
109 lstcon_tsb_hdr_t tes_hdr; /* test batch header */ 113struct lstcon_test {
114 struct lstcon_tsb_hdr tes_hdr; /* test batch header */
110 struct list_head tes_link; /* chain on batch's tests list */ 115 struct list_head tes_link; /* chain on batch's tests list */
111 lstcon_batch_t *tes_batch; /* pointer to batch */ 116 struct lstcon_batch *tes_batch; /* pointer to batch */
112 117
113 int tes_type; /* type of the test, i.e: bulk, ping */ 118 int tes_type; /* type of the test, i.e: bulk, ping */
114 int tes_stop_onerr; /* stop on error */ 119 int tes_stop_onerr; /* stop on error */
@@ -120,12 +125,12 @@ typedef struct lstcon_test {
120 int tes_cliidx; /* client index, used for RPC creating */ 125 int tes_cliidx; /* client index, used for RPC creating */
121 126
122 struct list_head tes_trans_list; /* transaction list */ 127 struct list_head tes_trans_list; /* transaction list */
123 lstcon_group_t *tes_src_grp; /* group run the test */ 128 struct lstcon_group *tes_src_grp; /* group run the test */
124 lstcon_group_t *tes_dst_grp; /* target group */ 129 struct lstcon_group *tes_dst_grp; /* target group */
125 130
126 int tes_paramlen; /* test parameter length */ 131 int tes_paramlen; /* test parameter length */
127 char tes_param[0]; /* test parameter */ 132 char tes_param[0]; /* test parameter */
128} lstcon_test_t; /* a single test descriptor */ 133};
129 134
130#define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */ 135#define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */
131#define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */ 136#define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */
@@ -152,7 +157,7 @@ struct lstcon_session {
152 unsigned ses_expired:1; /* console is timedout */ 157 unsigned ses_expired:1; /* console is timedout */
153 __u64 ses_id_cookie; /* batch id cookie */ 158 __u64 ses_id_cookie; /* batch id cookie */
154 char ses_name[LST_NAME_SIZE];/* session name */ 159 char ses_name[LST_NAME_SIZE];/* session name */
155 lstcon_rpc_trans_t *ses_ping; /* session pinger */ 160 struct lstcon_rpc_trans *ses_ping; /* session pinger */
156 struct stt_timer ses_ping_timer; /* timer for pinger */ 161 struct stt_timer ses_ping_timer; /* timer for pinger */
157 lstcon_trans_stat_t ses_trans_stat; /* transaction stats */ 162 lstcon_trans_stat_t ses_trans_stat; /* transaction stats */
158 163
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index e2c532399366..30e4f71f14c2 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -109,19 +109,19 @@ static struct smoketest_framework {
109 struct list_head fw_tests; /* registered test cases */ 109 struct list_head fw_tests; /* registered test cases */
110 atomic_t fw_nzombies; /* # zombie sessions */ 110 atomic_t fw_nzombies; /* # zombie sessions */
111 spinlock_t fw_lock; /* serialise */ 111 spinlock_t fw_lock; /* serialise */
112 sfw_session_t *fw_session; /* _the_ session */ 112 struct sfw_session *fw_session; /* _the_ session */
113 int fw_shuttingdown; /* shutdown in progress */ 113 int fw_shuttingdown; /* shutdown in progress */
114 struct srpc_server_rpc *fw_active_srpc;/* running RPC */ 114 struct srpc_server_rpc *fw_active_srpc;/* running RPC */
115} sfw_data; 115} sfw_data;
116 116
117/* forward ref's */ 117/* forward ref's */
118int sfw_stop_batch(sfw_batch_t *tsb, int force); 118int sfw_stop_batch(struct sfw_batch *tsb, int force);
119void sfw_destroy_session(sfw_session_t *sn); 119void sfw_destroy_session(struct sfw_session *sn);
120 120
121static inline sfw_test_case_t * 121static inline struct sfw_test_case *
122sfw_find_test_case(int id) 122sfw_find_test_case(int id)
123{ 123{
124 sfw_test_case_t *tsc; 124 struct sfw_test_case *tsc;
125 125
126 LASSERT(id <= SRPC_SERVICE_MAX_ID); 126 LASSERT(id <= SRPC_SERVICE_MAX_ID);
127 LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID); 127 LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID);
@@ -135,9 +135,9 @@ sfw_find_test_case(int id)
135} 135}
136 136
137static int 137static int
138sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops) 138sfw_register_test(struct srpc_service *service, struct sfw_test_client_ops *cliops)
139{ 139{
140 sfw_test_case_t *tsc; 140 struct sfw_test_case *tsc;
141 141
142 if (sfw_find_test_case(service->sv_id)) { 142 if (sfw_find_test_case(service->sv_id)) {
143 CERROR("Failed to register test %s (%d)\n", 143 CERROR("Failed to register test %s (%d)\n",
@@ -145,7 +145,7 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
145 return -EEXIST; 145 return -EEXIST;
146 } 146 }
147 147
148 LIBCFS_ALLOC(tsc, sizeof(sfw_test_case_t)); 148 LIBCFS_ALLOC(tsc, sizeof(struct sfw_test_case));
149 if (!tsc) 149 if (!tsc)
150 return -ENOMEM; 150 return -ENOMEM;
151 151
@@ -159,7 +159,7 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
159static void 159static void
160sfw_add_session_timer(void) 160sfw_add_session_timer(void)
161{ 161{
162 sfw_session_t *sn = sfw_data.fw_session; 162 struct sfw_session *sn = sfw_data.fw_session;
163 struct stt_timer *timer = &sn->sn_timer; 163 struct stt_timer *timer = &sn->sn_timer;
164 164
165 LASSERT(!sfw_data.fw_shuttingdown); 165 LASSERT(!sfw_data.fw_shuttingdown);
@@ -177,7 +177,7 @@ sfw_add_session_timer(void)
177static int 177static int
178sfw_del_session_timer(void) 178sfw_del_session_timer(void)
179{ 179{
180 sfw_session_t *sn = sfw_data.fw_session; 180 struct sfw_session *sn = sfw_data.fw_session;
181 181
182 if (!sn || !sn->sn_timer_active) 182 if (!sn || !sn->sn_timer_active)
183 return 0; 183 return 0;
@@ -196,10 +196,10 @@ static void
196sfw_deactivate_session(void) 196sfw_deactivate_session(void)
197__must_hold(&sfw_data.fw_lock) 197__must_hold(&sfw_data.fw_lock)
198{ 198{
199 sfw_session_t *sn = sfw_data.fw_session; 199 struct sfw_session *sn = sfw_data.fw_session;
200 int nactive = 0; 200 int nactive = 0;
201 sfw_batch_t *tsb; 201 struct sfw_batch *tsb;
202 sfw_test_case_t *tsc; 202 struct sfw_test_case *tsc;
203 203
204 if (!sn) 204 if (!sn)
205 return; 205 return;
@@ -226,7 +226,7 @@ __must_hold(&sfw_data.fw_lock)
226 } 226 }
227 227
228 if (nactive) 228 if (nactive)
229 return; /* wait for active batches to stop */ 229 return; /* wait for active batches to stop */
230 230
231 list_del_init(&sn->sn_list); 231 list_del_init(&sn->sn_list);
232 spin_unlock(&sfw_data.fw_lock); 232 spin_unlock(&sfw_data.fw_lock);
@@ -239,7 +239,7 @@ __must_hold(&sfw_data.fw_lock)
239static void 239static void
240sfw_session_expired(void *data) 240sfw_session_expired(void *data)
241{ 241{
242 sfw_session_t *sn = data; 242 struct sfw_session *sn = data;
243 243
244 spin_lock(&sfw_data.fw_lock); 244 spin_lock(&sfw_data.fw_lock);
245 245
@@ -257,12 +257,12 @@ sfw_session_expired(void *data)
257} 257}
258 258
259static inline void 259static inline void
260sfw_init_session(sfw_session_t *sn, lst_sid_t sid, 260sfw_init_session(struct sfw_session *sn, lst_sid_t sid,
261 unsigned features, const char *name) 261 unsigned features, const char *name)
262{ 262{
263 struct stt_timer *timer = &sn->sn_timer; 263 struct stt_timer *timer = &sn->sn_timer;
264 264
265 memset(sn, 0, sizeof(sfw_session_t)); 265 memset(sn, 0, sizeof(struct sfw_session));
266 INIT_LIST_HEAD(&sn->sn_list); 266 INIT_LIST_HEAD(&sn->sn_list);
267 INIT_LIST_HEAD(&sn->sn_batches); 267 INIT_LIST_HEAD(&sn->sn_batches);
268 atomic_set(&sn->sn_refcount, 1); /* +1 for caller */ 268 atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
@@ -298,7 +298,7 @@ sfw_server_rpc_done(struct srpc_server_rpc *rpc)
298} 298}
299 299
300static void 300static void
301sfw_client_rpc_fini(srpc_client_rpc_t *rpc) 301sfw_client_rpc_fini(struct srpc_client_rpc *rpc)
302{ 302{
303 LASSERT(!rpc->crpc_bulk.bk_niov); 303 LASSERT(!rpc->crpc_bulk.bk_niov);
304 LASSERT(list_empty(&rpc->crpc_list)); 304 LASSERT(list_empty(&rpc->crpc_list));
@@ -318,11 +318,11 @@ sfw_client_rpc_fini(srpc_client_rpc_t *rpc)
318 spin_unlock(&sfw_data.fw_lock); 318 spin_unlock(&sfw_data.fw_lock);
319} 319}
320 320
321static sfw_batch_t * 321static struct sfw_batch *
322sfw_find_batch(lst_bid_t bid) 322sfw_find_batch(lst_bid_t bid)
323{ 323{
324 sfw_session_t *sn = sfw_data.fw_session; 324 struct sfw_session *sn = sfw_data.fw_session;
325 sfw_batch_t *bat; 325 struct sfw_batch *bat;
326 326
327 LASSERT(sn); 327 LASSERT(sn);
328 328
@@ -334,11 +334,11 @@ sfw_find_batch(lst_bid_t bid)
334 return NULL; 334 return NULL;
335} 335}
336 336
337static sfw_batch_t * 337static struct sfw_batch *
338sfw_bid2batch(lst_bid_t bid) 338sfw_bid2batch(lst_bid_t bid)
339{ 339{
340 sfw_session_t *sn = sfw_data.fw_session; 340 struct sfw_session *sn = sfw_data.fw_session;
341 sfw_batch_t *bat; 341 struct sfw_batch *bat;
342 342
343 LASSERT(sn); 343 LASSERT(sn);
344 344
@@ -346,7 +346,7 @@ sfw_bid2batch(lst_bid_t bid)
346 if (bat) 346 if (bat)
347 return bat; 347 return bat;
348 348
349 LIBCFS_ALLOC(bat, sizeof(sfw_batch_t)); 349 LIBCFS_ALLOC(bat, sizeof(struct sfw_batch));
350 if (!bat) 350 if (!bat)
351 return NULL; 351 return NULL;
352 352
@@ -361,11 +361,11 @@ sfw_bid2batch(lst_bid_t bid)
361} 361}
362 362
363static int 363static int
364sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply) 364sfw_get_stats(struct srpc_stat_reqst *request, struct srpc_stat_reply *reply)
365{ 365{
366 sfw_session_t *sn = sfw_data.fw_session; 366 struct sfw_session *sn = sfw_data.fw_session;
367 sfw_counters_t *cnt = &reply->str_fw; 367 sfw_counters_t *cnt = &reply->str_fw;
368 sfw_batch_t *bat; 368 struct sfw_batch *bat;
369 369
370 reply->str_sid = !sn ? LST_INVALID_SID : sn->sn_id; 370 reply->str_sid = !sn ? LST_INVALID_SID : sn->sn_id;
371 371
@@ -402,10 +402,10 @@ sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
402} 402}
403 403
404int 404int
405sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) 405sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply)
406{ 406{
407 sfw_session_t *sn = sfw_data.fw_session; 407 struct sfw_session *sn = sfw_data.fw_session;
408 srpc_msg_t *msg = container_of(request, srpc_msg_t, 408 struct srpc_msg *msg = container_of(request, struct srpc_msg,
409 msg_body.mksn_reqst); 409 msg_body.mksn_reqst);
410 int cplen = 0; 410 int cplen = 0;
411 411
@@ -438,7 +438,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
438 /* 438 /*
439 * reject the request if it requires unknown features 439 * reject the request if it requires unknown features
440 * NB: old version will always accept all features because it's not 440 * NB: old version will always accept all features because it's not
441 * aware of srpc_msg_t::msg_ses_feats, it's a defect but it's also 441 * aware of srpc_msg::msg_ses_feats, it's a defect but it's also
442 * harmless because it will return zero feature to console, and it's 442 * harmless because it will return zero feature to console, and it's
443 * console's responsibility to make sure all nodes in a session have 443 * console's responsibility to make sure all nodes in a session have
444 * same feature mask. 444 * same feature mask.
@@ -449,7 +449,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
449 } 449 }
450 450
451 /* brand new or create by force */ 451 /* brand new or create by force */
452 LIBCFS_ALLOC(sn, sizeof(sfw_session_t)); 452 LIBCFS_ALLOC(sn, sizeof(struct sfw_session));
453 if (!sn) { 453 if (!sn) {
454 CERROR("dropping RPC mksn under memory pressure\n"); 454 CERROR("dropping RPC mksn under memory pressure\n");
455 return -ENOMEM; 455 return -ENOMEM;
@@ -473,9 +473,9 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
473} 473}
474 474
475static int 475static int
476sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply) 476sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *reply)
477{ 477{
478 sfw_session_t *sn = sfw_data.fw_session; 478 struct sfw_session *sn = sfw_data.fw_session;
479 479
480 reply->rmsn_sid = !sn ? LST_INVALID_SID : sn->sn_id; 480 reply->rmsn_sid = !sn ? LST_INVALID_SID : sn->sn_id;
481 481
@@ -505,9 +505,9 @@ sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
505} 505}
506 506
507static int 507static int
508sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply) 508sfw_debug_session(struct srpc_debug_reqst *request, struct srpc_debug_reply *reply)
509{ 509{
510 sfw_session_t *sn = sfw_data.fw_session; 510 struct sfw_session *sn = sfw_data.fw_session;
511 511
512 if (!sn) { 512 if (!sn) {
513 reply->dbg_status = ESRCH; 513 reply->dbg_status = ESRCH;
@@ -526,10 +526,10 @@ sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
526} 526}
527 527
528static void 528static void
529sfw_test_rpc_fini(srpc_client_rpc_t *rpc) 529sfw_test_rpc_fini(struct srpc_client_rpc *rpc)
530{ 530{
531 sfw_test_unit_t *tsu = rpc->crpc_priv; 531 struct sfw_test_unit *tsu = rpc->crpc_priv;
532 sfw_test_instance_t *tsi = tsu->tsu_instance; 532 struct sfw_test_instance *tsi = tsu->tsu_instance;
533 533
534 /* Called with hold of tsi->tsi_lock */ 534 /* Called with hold of tsi->tsi_lock */
535 LASSERT(list_empty(&rpc->crpc_list)); 535 LASSERT(list_empty(&rpc->crpc_list));
@@ -537,7 +537,7 @@ sfw_test_rpc_fini(srpc_client_rpc_t *rpc)
537} 537}
538 538
539static inline int 539static inline int
540sfw_test_buffers(sfw_test_instance_t *tsi) 540sfw_test_buffers(struct sfw_test_instance *tsi)
541{ 541{
542 struct sfw_test_case *tsc; 542 struct sfw_test_case *tsc;
543 struct srpc_service *svc; 543 struct srpc_service *svc;
@@ -614,10 +614,10 @@ sfw_unload_test(struct sfw_test_instance *tsi)
614} 614}
615 615
616static void 616static void
617sfw_destroy_test_instance(sfw_test_instance_t *tsi) 617sfw_destroy_test_instance(struct sfw_test_instance *tsi)
618{ 618{
619 srpc_client_rpc_t *rpc; 619 struct srpc_client_rpc *rpc;
620 sfw_test_unit_t *tsu; 620 struct sfw_test_unit *tsu;
621 621
622 if (!tsi->tsi_is_client) 622 if (!tsi->tsi_is_client)
623 goto clean; 623 goto clean;
@@ -630,14 +630,14 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi)
630 630
631 while (!list_empty(&tsi->tsi_units)) { 631 while (!list_empty(&tsi->tsi_units)) {
632 tsu = list_entry(tsi->tsi_units.next, 632 tsu = list_entry(tsi->tsi_units.next,
633 sfw_test_unit_t, tsu_list); 633 struct sfw_test_unit, tsu_list);
634 list_del(&tsu->tsu_list); 634 list_del(&tsu->tsu_list);
635 LIBCFS_FREE(tsu, sizeof(*tsu)); 635 LIBCFS_FREE(tsu, sizeof(*tsu));
636 } 636 }
637 637
638 while (!list_empty(&tsi->tsi_free_rpcs)) { 638 while (!list_empty(&tsi->tsi_free_rpcs)) {
639 rpc = list_entry(tsi->tsi_free_rpcs.next, 639 rpc = list_entry(tsi->tsi_free_rpcs.next,
640 srpc_client_rpc_t, crpc_list); 640 struct srpc_client_rpc, crpc_list);
641 list_del(&rpc->crpc_list); 641 list_del(&rpc->crpc_list);
642 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); 642 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
643 } 643 }
@@ -648,34 +648,34 @@ clean:
648} 648}
649 649
650static void 650static void
651sfw_destroy_batch(sfw_batch_t *tsb) 651sfw_destroy_batch(struct sfw_batch *tsb)
652{ 652{
653 sfw_test_instance_t *tsi; 653 struct sfw_test_instance *tsi;
654 654
655 LASSERT(!sfw_batch_active(tsb)); 655 LASSERT(!sfw_batch_active(tsb));
656 LASSERT(list_empty(&tsb->bat_list)); 656 LASSERT(list_empty(&tsb->bat_list));
657 657
658 while (!list_empty(&tsb->bat_tests)) { 658 while (!list_empty(&tsb->bat_tests)) {
659 tsi = list_entry(tsb->bat_tests.next, 659 tsi = list_entry(tsb->bat_tests.next,
660 sfw_test_instance_t, tsi_list); 660 struct sfw_test_instance, tsi_list);
661 list_del_init(&tsi->tsi_list); 661 list_del_init(&tsi->tsi_list);
662 sfw_destroy_test_instance(tsi); 662 sfw_destroy_test_instance(tsi);
663 } 663 }
664 664
665 LIBCFS_FREE(tsb, sizeof(sfw_batch_t)); 665 LIBCFS_FREE(tsb, sizeof(struct sfw_batch));
666} 666}
667 667
668void 668void
669sfw_destroy_session(sfw_session_t *sn) 669sfw_destroy_session(struct sfw_session *sn)
670{ 670{
671 sfw_batch_t *batch; 671 struct sfw_batch *batch;
672 672
673 LASSERT(list_empty(&sn->sn_list)); 673 LASSERT(list_empty(&sn->sn_list));
674 LASSERT(sn != sfw_data.fw_session); 674 LASSERT(sn != sfw_data.fw_session);
675 675
676 while (!list_empty(&sn->sn_batches)) { 676 while (!list_empty(&sn->sn_batches)) {
677 batch = list_entry(sn->sn_batches.next, 677 batch = list_entry(sn->sn_batches.next,
678 sfw_batch_t, bat_list); 678 struct sfw_batch, bat_list);
679 list_del_init(&batch->bat_list); 679 list_del_init(&batch->bat_list);
680 sfw_destroy_batch(batch); 680 sfw_destroy_batch(batch);
681 } 681 }
@@ -685,28 +685,28 @@ sfw_destroy_session(sfw_session_t *sn)
685} 685}
686 686
687static void 687static void
688sfw_unpack_addtest_req(srpc_msg_t *msg) 688sfw_unpack_addtest_req(struct srpc_msg *msg)
689{ 689{
690 srpc_test_reqst_t *req = &msg->msg_body.tes_reqst; 690 struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
691 691
692 LASSERT(msg->msg_type == SRPC_MSG_TEST_REQST); 692 LASSERT(msg->msg_type == SRPC_MSG_TEST_REQST);
693 LASSERT(req->tsr_is_client); 693 LASSERT(req->tsr_is_client);
694 694
695 if (msg->msg_magic == SRPC_MSG_MAGIC) 695 if (msg->msg_magic == SRPC_MSG_MAGIC)
696 return; /* no flipping needed */ 696 return; /* no flipping needed */
697 697
698 LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC)); 698 LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
699 699
700 if (req->tsr_service == SRPC_SERVICE_BRW) { 700 if (req->tsr_service == SRPC_SERVICE_BRW) {
701 if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) { 701 if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
702 test_bulk_req_t *bulk = &req->tsr_u.bulk_v0; 702 struct test_bulk_req *bulk = &req->tsr_u.bulk_v0;
703 703
704 __swab32s(&bulk->blk_opc); 704 __swab32s(&bulk->blk_opc);
705 __swab32s(&bulk->blk_npg); 705 __swab32s(&bulk->blk_npg);
706 __swab32s(&bulk->blk_flags); 706 __swab32s(&bulk->blk_flags);
707 707
708 } else { 708 } else {
709 test_bulk_req_v1_t *bulk = &req->tsr_u.bulk_v1; 709 struct test_bulk_req_v1 *bulk = &req->tsr_u.bulk_v1;
710 710
711 __swab16s(&bulk->blk_opc); 711 __swab16s(&bulk->blk_opc);
712 __swab16s(&bulk->blk_flags); 712 __swab16s(&bulk->blk_flags);
@@ -718,7 +718,7 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
718 } 718 }
719 719
720 if (req->tsr_service == SRPC_SERVICE_PING) { 720 if (req->tsr_service == SRPC_SERVICE_PING) {
721 test_ping_req_t *ping = &req->tsr_u.ping; 721 struct test_ping_req *ping = &req->tsr_u.ping;
722 722
723 __swab32s(&ping->png_size); 723 __swab32s(&ping->png_size);
724 __swab32s(&ping->png_flags); 724 __swab32s(&ping->png_flags);
@@ -729,14 +729,14 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
729} 729}
730 730
731static int 731static int
732sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc) 732sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc)
733{ 733{
734 srpc_msg_t *msg = &rpc->srpc_reqstbuf->buf_msg; 734 struct srpc_msg *msg = &rpc->srpc_reqstbuf->buf_msg;
735 srpc_test_reqst_t *req = &msg->msg_body.tes_reqst; 735 struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
736 srpc_bulk_t *bk = rpc->srpc_bulk; 736 struct srpc_bulk *bk = rpc->srpc_bulk;
737 int ndest = req->tsr_ndest; 737 int ndest = req->tsr_ndest;
738 sfw_test_unit_t *tsu; 738 struct sfw_test_unit *tsu;
739 sfw_test_instance_t *tsi; 739 struct sfw_test_instance *tsi;
740 int i; 740 int i;
741 int rc; 741 int rc;
742 742
@@ -789,13 +789,13 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc)
789 int j; 789 int j;
790 790
791 dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page); 791 dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
792 LASSERT(dests); /* my pages are within KVM always */ 792 LASSERT(dests); /* my pages are within KVM always */
793 id = dests[i % SFW_ID_PER_PAGE]; 793 id = dests[i % SFW_ID_PER_PAGE];
794 if (msg->msg_magic != SRPC_MSG_MAGIC) 794 if (msg->msg_magic != SRPC_MSG_MAGIC)
795 sfw_unpack_id(id); 795 sfw_unpack_id(id);
796 796
797 for (j = 0; j < tsi->tsi_concur; j++) { 797 for (j = 0; j < tsi->tsi_concur; j++) {
798 LIBCFS_ALLOC(tsu, sizeof(sfw_test_unit_t)); 798 LIBCFS_ALLOC(tsu, sizeof(struct sfw_test_unit));
799 if (!tsu) { 799 if (!tsu) {
800 rc = -ENOMEM; 800 rc = -ENOMEM;
801 CERROR("Can't allocate tsu for %d\n", 801 CERROR("Can't allocate tsu for %d\n",
@@ -824,11 +824,11 @@ error:
824} 824}
825 825
826static void 826static void
827sfw_test_unit_done(sfw_test_unit_t *tsu) 827sfw_test_unit_done(struct sfw_test_unit *tsu)
828{ 828{
829 sfw_test_instance_t *tsi = tsu->tsu_instance; 829 struct sfw_test_instance *tsi = tsu->tsu_instance;
830 sfw_batch_t *tsb = tsi->tsi_batch; 830 struct sfw_batch *tsb = tsi->tsi_batch;
831 sfw_session_t *sn = tsb->bat_session; 831 struct sfw_session *sn = tsb->bat_session;
832 832
833 LASSERT(sfw_test_active(tsi)); 833 LASSERT(sfw_test_active(tsi));
834 834
@@ -844,8 +844,8 @@ sfw_test_unit_done(sfw_test_unit_t *tsu)
844 844
845 spin_lock(&sfw_data.fw_lock); 845 spin_lock(&sfw_data.fw_lock);
846 846
847 if (!atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */ 847 if (!atomic_dec_and_test(&tsb->bat_nactive) || /* tsb still active */
848 sn == sfw_data.fw_session) { /* sn also active */ 848 sn == sfw_data.fw_session) { /* sn also active */
849 spin_unlock(&sfw_data.fw_lock); 849 spin_unlock(&sfw_data.fw_lock);
850 return; 850 return;
851 } 851 }
@@ -866,10 +866,10 @@ sfw_test_unit_done(sfw_test_unit_t *tsu)
866} 866}
867 867
868static void 868static void
869sfw_test_rpc_done(srpc_client_rpc_t *rpc) 869sfw_test_rpc_done(struct srpc_client_rpc *rpc)
870{ 870{
871 sfw_test_unit_t *tsu = rpc->crpc_priv; 871 struct sfw_test_unit *tsu = rpc->crpc_priv;
872 sfw_test_instance_t *tsi = tsu->tsu_instance; 872 struct sfw_test_instance *tsi = tsu->tsu_instance;
873 int done = 0; 873 int done = 0;
874 874
875 tsi->tsi_ops->tso_done_rpc(tsu, rpc); 875 tsi->tsi_ops->tso_done_rpc(tsu, rpc);
@@ -900,19 +900,19 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc)
900} 900}
901 901
902int 902int
903sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer, 903sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer,
904 unsigned features, int nblk, int blklen, 904 unsigned features, int nblk, int blklen,
905 srpc_client_rpc_t **rpcpp) 905 struct srpc_client_rpc **rpcpp)
906{ 906{
907 srpc_client_rpc_t *rpc = NULL; 907 struct srpc_client_rpc *rpc = NULL;
908 sfw_test_instance_t *tsi = tsu->tsu_instance; 908 struct sfw_test_instance *tsi = tsu->tsu_instance;
909 909
910 spin_lock(&tsi->tsi_lock); 910 spin_lock(&tsi->tsi_lock);
911 911
912 LASSERT(sfw_test_active(tsi)); 912 LASSERT(sfw_test_active(tsi));
913 /* pick request from buffer */ 913 /* pick request from buffer */
914 rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs, 914 rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs,
915 srpc_client_rpc_t, crpc_list); 915 struct srpc_client_rpc, crpc_list);
916 if (rpc) { 916 if (rpc) {
917 LASSERT(nblk == rpc->crpc_bulk.bk_niov); 917 LASSERT(nblk == rpc->crpc_bulk.bk_niov);
918 list_del_init(&rpc->crpc_list); 918 list_del_init(&rpc->crpc_list);
@@ -942,11 +942,11 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
942} 942}
943 943
944static int 944static int
945sfw_run_test(swi_workitem_t *wi) 945sfw_run_test(struct swi_workitem *wi)
946{ 946{
947 sfw_test_unit_t *tsu = wi->swi_workitem.wi_data; 947 struct sfw_test_unit *tsu = wi->swi_workitem.wi_data;
948 sfw_test_instance_t *tsi = tsu->tsu_instance; 948 struct sfw_test_instance *tsi = tsu->tsu_instance;
949 srpc_client_rpc_t *rpc = NULL; 949 struct srpc_client_rpc *rpc = NULL;
950 950
951 LASSERT(wi == &tsu->tsu_worker); 951 LASSERT(wi == &tsu->tsu_worker);
952 952
@@ -991,11 +991,11 @@ test_done:
991} 991}
992 992
993static int 993static int
994sfw_run_batch(sfw_batch_t *tsb) 994sfw_run_batch(struct sfw_batch *tsb)
995{ 995{
996 swi_workitem_t *wi; 996 struct swi_workitem *wi;
997 sfw_test_unit_t *tsu; 997 struct sfw_test_unit *tsu;
998 sfw_test_instance_t *tsi; 998 struct sfw_test_instance *tsi;
999 999
1000 if (sfw_batch_active(tsb)) { 1000 if (sfw_batch_active(tsb)) {
1001 CDEBUG(D_NET, "Batch already active: %llu (%d)\n", 1001 CDEBUG(D_NET, "Batch already active: %llu (%d)\n",
@@ -1026,10 +1026,10 @@ sfw_run_batch(sfw_batch_t *tsb)
1026} 1026}
1027 1027
1028int 1028int
1029sfw_stop_batch(sfw_batch_t *tsb, int force) 1029sfw_stop_batch(struct sfw_batch *tsb, int force)
1030{ 1030{
1031 sfw_test_instance_t *tsi; 1031 struct sfw_test_instance *tsi;
1032 srpc_client_rpc_t *rpc; 1032 struct srpc_client_rpc *rpc;
1033 1033
1034 if (!sfw_batch_active(tsb)) { 1034 if (!sfw_batch_active(tsb)) {
1035 CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id); 1035 CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id);
@@ -1068,9 +1068,9 @@ sfw_stop_batch(sfw_batch_t *tsb, int force)
1068} 1068}
1069 1069
1070static int 1070static int
1071sfw_query_batch(sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply) 1071sfw_query_batch(struct sfw_batch *tsb, int testidx, struct srpc_batch_reply *reply)
1072{ 1072{
1073 sfw_test_instance_t *tsi; 1073 struct sfw_test_instance *tsi;
1074 1074
1075 if (testidx < 0) 1075 if (testidx < 0)
1076 return -EINVAL; 1076 return -EINVAL;
@@ -1115,11 +1115,11 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
1115static int 1115static int
1116sfw_add_test(struct srpc_server_rpc *rpc) 1116sfw_add_test(struct srpc_server_rpc *rpc)
1117{ 1117{
1118 sfw_session_t *sn = sfw_data.fw_session; 1118 struct sfw_session *sn = sfw_data.fw_session;
1119 srpc_test_reply_t *reply = &rpc->srpc_replymsg.msg_body.tes_reply; 1119 struct srpc_test_reply *reply = &rpc->srpc_replymsg.msg_body.tes_reply;
1120 srpc_test_reqst_t *request; 1120 struct srpc_test_reqst *request;
1121 int rc; 1121 int rc;
1122 sfw_batch_t *bat; 1122 struct sfw_batch *bat;
1123 1123
1124 request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst; 1124 request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst;
1125 reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id; 1125 reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -1183,11 +1183,11 @@ sfw_add_test(struct srpc_server_rpc *rpc)
1183} 1183}
1184 1184
1185static int 1185static int
1186sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply) 1186sfw_control_batch(struct srpc_batch_reqst *request, struct srpc_batch_reply *reply)
1187{ 1187{
1188 sfw_session_t *sn = sfw_data.fw_session; 1188 struct sfw_session *sn = sfw_data.fw_session;
1189 int rc = 0; 1189 int rc = 0;
1190 sfw_batch_t *bat; 1190 struct sfw_batch *bat;
1191 1191
1192 reply->bar_sid = !sn ? LST_INVALID_SID : sn->sn_id; 1192 reply->bar_sid = !sn ? LST_INVALID_SID : sn->sn_id;
1193 1193
@@ -1227,8 +1227,8 @@ static int
1227sfw_handle_server_rpc(struct srpc_server_rpc *rpc) 1227sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
1228{ 1228{
1229 struct srpc_service *sv = rpc->srpc_scd->scd_svc; 1229 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
1230 srpc_msg_t *reply = &rpc->srpc_replymsg; 1230 struct srpc_msg *reply = &rpc->srpc_replymsg;
1231 srpc_msg_t *request = &rpc->srpc_reqstbuf->buf_msg; 1231 struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg;
1232 unsigned features = LST_FEATS_MASK; 1232 unsigned features = LST_FEATS_MASK;
1233 int rc = 0; 1233 int rc = 0;
1234 1234
@@ -1244,7 +1244,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
1244 1244
1245 /* Remove timer to avoid racing with it or expiring active session */ 1245 /* Remove timer to avoid racing with it or expiring active session */
1246 if (sfw_del_session_timer()) { 1246 if (sfw_del_session_timer()) {
1247 CERROR("Dropping RPC (%s) from %s: racing with expiry timer.", 1247 CERROR("dropping RPC %s from %s: racing with expiry timer\n",
1248 sv->sv_name, libcfs_id2str(rpc->srpc_peer)); 1248 sv->sv_name, libcfs_id2str(rpc->srpc_peer));
1249 spin_unlock(&sfw_data.fw_lock); 1249 spin_unlock(&sfw_data.fw_lock);
1250 return -EAGAIN; 1250 return -EAGAIN;
@@ -1261,7 +1261,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
1261 1261
1262 if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION && 1262 if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION &&
1263 sv->sv_id != SRPC_SERVICE_DEBUG) { 1263 sv->sv_id != SRPC_SERVICE_DEBUG) {
1264 sfw_session_t *sn = sfw_data.fw_session; 1264 struct sfw_session *sn = sfw_data.fw_session;
1265 1265
1266 if (sn && 1266 if (sn &&
1267 sn->sn_features != request->msg_ses_feats) { 1267 sn->sn_features != request->msg_ses_feats) {
@@ -1273,7 +1273,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
1273 } 1273 }
1274 1274
1275 } else if (request->msg_ses_feats & ~LST_FEATS_MASK) { 1275 } else if (request->msg_ses_feats & ~LST_FEATS_MASK) {
1276 /** 1276 /*
1277 * NB: at this point, old version will ignore features and 1277 * NB: at this point, old version will ignore features and
1278 * create new session anyway, so console should be able 1278 * create new session anyway, so console should be able
1279 * to handle this 1279 * to handle this
@@ -1377,12 +1377,12 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
1377 return rc; 1377 return rc;
1378} 1378}
1379 1379
1380srpc_client_rpc_t * 1380struct srpc_client_rpc *
1381sfw_create_rpc(lnet_process_id_t peer, int service, 1381sfw_create_rpc(lnet_process_id_t peer, int service,
1382 unsigned features, int nbulkiov, int bulklen, 1382 unsigned features, int nbulkiov, int bulklen,
1383 void (*done)(srpc_client_rpc_t *), void *priv) 1383 void (*done)(struct srpc_client_rpc *), void *priv)
1384{ 1384{
1385 srpc_client_rpc_t *rpc = NULL; 1385 struct srpc_client_rpc *rpc = NULL;
1386 1386
1387 spin_lock(&sfw_data.fw_lock); 1387 spin_lock(&sfw_data.fw_lock);
1388 1388
@@ -1391,7 +1391,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
1391 1391
1392 if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) { 1392 if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) {
1393 rpc = list_entry(sfw_data.fw_zombie_rpcs.next, 1393 rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
1394 srpc_client_rpc_t, crpc_list); 1394 struct srpc_client_rpc, crpc_list);
1395 list_del(&rpc->crpc_list); 1395 list_del(&rpc->crpc_list);
1396 1396
1397 srpc_init_client_rpc(rpc, peer, service, 0, 0, 1397 srpc_init_client_rpc(rpc, peer, service, 0, 0,
@@ -1415,7 +1415,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
1415} 1415}
1416 1416
1417void 1417void
1418sfw_unpack_message(srpc_msg_t *msg) 1418sfw_unpack_message(struct srpc_msg *msg)
1419{ 1419{
1420 if (msg->msg_magic == SRPC_MSG_MAGIC) 1420 if (msg->msg_magic == SRPC_MSG_MAGIC)
1421 return; /* no flipping needed */ 1421 return; /* no flipping needed */
@@ -1424,7 +1424,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1424 LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC)); 1424 LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
1425 1425
1426 if (msg->msg_type == SRPC_MSG_STAT_REQST) { 1426 if (msg->msg_type == SRPC_MSG_STAT_REQST) {
1427 srpc_stat_reqst_t *req = &msg->msg_body.stat_reqst; 1427 struct srpc_stat_reqst *req = &msg->msg_body.stat_reqst;
1428 1428
1429 __swab32s(&req->str_type); 1429 __swab32s(&req->str_type);
1430 __swab64s(&req->str_rpyid); 1430 __swab64s(&req->str_rpyid);
@@ -1433,7 +1433,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1433 } 1433 }
1434 1434
1435 if (msg->msg_type == SRPC_MSG_STAT_REPLY) { 1435 if (msg->msg_type == SRPC_MSG_STAT_REPLY) {
1436 srpc_stat_reply_t *rep = &msg->msg_body.stat_reply; 1436 struct srpc_stat_reply *rep = &msg->msg_body.stat_reply;
1437 1437
1438 __swab32s(&rep->str_status); 1438 __swab32s(&rep->str_status);
1439 sfw_unpack_sid(rep->str_sid); 1439 sfw_unpack_sid(rep->str_sid);
@@ -1444,7 +1444,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1444 } 1444 }
1445 1445
1446 if (msg->msg_type == SRPC_MSG_MKSN_REQST) { 1446 if (msg->msg_type == SRPC_MSG_MKSN_REQST) {
1447 srpc_mksn_reqst_t *req = &msg->msg_body.mksn_reqst; 1447 struct srpc_mksn_reqst *req = &msg->msg_body.mksn_reqst;
1448 1448
1449 __swab64s(&req->mksn_rpyid); 1449 __swab64s(&req->mksn_rpyid);
1450 __swab32s(&req->mksn_force); 1450 __swab32s(&req->mksn_force);
@@ -1453,7 +1453,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1453 } 1453 }
1454 1454
1455 if (msg->msg_type == SRPC_MSG_MKSN_REPLY) { 1455 if (msg->msg_type == SRPC_MSG_MKSN_REPLY) {
1456 srpc_mksn_reply_t *rep = &msg->msg_body.mksn_reply; 1456 struct srpc_mksn_reply *rep = &msg->msg_body.mksn_reply;
1457 1457
1458 __swab32s(&rep->mksn_status); 1458 __swab32s(&rep->mksn_status);
1459 __swab32s(&rep->mksn_timeout); 1459 __swab32s(&rep->mksn_timeout);
@@ -1462,7 +1462,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1462 } 1462 }
1463 1463
1464 if (msg->msg_type == SRPC_MSG_RMSN_REQST) { 1464 if (msg->msg_type == SRPC_MSG_RMSN_REQST) {
1465 srpc_rmsn_reqst_t *req = &msg->msg_body.rmsn_reqst; 1465 struct srpc_rmsn_reqst *req = &msg->msg_body.rmsn_reqst;
1466 1466
1467 __swab64s(&req->rmsn_rpyid); 1467 __swab64s(&req->rmsn_rpyid);
1468 sfw_unpack_sid(req->rmsn_sid); 1468 sfw_unpack_sid(req->rmsn_sid);
@@ -1470,7 +1470,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1470 } 1470 }
1471 1471
1472 if (msg->msg_type == SRPC_MSG_RMSN_REPLY) { 1472 if (msg->msg_type == SRPC_MSG_RMSN_REPLY) {
1473 srpc_rmsn_reply_t *rep = &msg->msg_body.rmsn_reply; 1473 struct srpc_rmsn_reply *rep = &msg->msg_body.rmsn_reply;
1474 1474
1475 __swab32s(&rep->rmsn_status); 1475 __swab32s(&rep->rmsn_status);
1476 sfw_unpack_sid(rep->rmsn_sid); 1476 sfw_unpack_sid(rep->rmsn_sid);
@@ -1478,7 +1478,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1478 } 1478 }
1479 1479
1480 if (msg->msg_type == SRPC_MSG_DEBUG_REQST) { 1480 if (msg->msg_type == SRPC_MSG_DEBUG_REQST) {
1481 srpc_debug_reqst_t *req = &msg->msg_body.dbg_reqst; 1481 struct srpc_debug_reqst *req = &msg->msg_body.dbg_reqst;
1482 1482
1483 __swab64s(&req->dbg_rpyid); 1483 __swab64s(&req->dbg_rpyid);
1484 __swab32s(&req->dbg_flags); 1484 __swab32s(&req->dbg_flags);
@@ -1487,7 +1487,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1487 } 1487 }
1488 1488
1489 if (msg->msg_type == SRPC_MSG_DEBUG_REPLY) { 1489 if (msg->msg_type == SRPC_MSG_DEBUG_REPLY) {
1490 srpc_debug_reply_t *rep = &msg->msg_body.dbg_reply; 1490 struct srpc_debug_reply *rep = &msg->msg_body.dbg_reply;
1491 1491
1492 __swab32s(&rep->dbg_nbatch); 1492 __swab32s(&rep->dbg_nbatch);
1493 __swab32s(&rep->dbg_timeout); 1493 __swab32s(&rep->dbg_timeout);
@@ -1496,7 +1496,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1496 } 1496 }
1497 1497
1498 if (msg->msg_type == SRPC_MSG_BATCH_REQST) { 1498 if (msg->msg_type == SRPC_MSG_BATCH_REQST) {
1499 srpc_batch_reqst_t *req = &msg->msg_body.bat_reqst; 1499 struct srpc_batch_reqst *req = &msg->msg_body.bat_reqst;
1500 1500
1501 __swab32s(&req->bar_opc); 1501 __swab32s(&req->bar_opc);
1502 __swab64s(&req->bar_rpyid); 1502 __swab64s(&req->bar_rpyid);
@@ -1508,7 +1508,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1508 } 1508 }
1509 1509
1510 if (msg->msg_type == SRPC_MSG_BATCH_REPLY) { 1510 if (msg->msg_type == SRPC_MSG_BATCH_REPLY) {
1511 srpc_batch_reply_t *rep = &msg->msg_body.bat_reply; 1511 struct srpc_batch_reply *rep = &msg->msg_body.bat_reply;
1512 1512
1513 __swab32s(&rep->bar_status); 1513 __swab32s(&rep->bar_status);
1514 sfw_unpack_sid(rep->bar_sid); 1514 sfw_unpack_sid(rep->bar_sid);
@@ -1516,7 +1516,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1516 } 1516 }
1517 1517
1518 if (msg->msg_type == SRPC_MSG_TEST_REQST) { 1518 if (msg->msg_type == SRPC_MSG_TEST_REQST) {
1519 srpc_test_reqst_t *req = &msg->msg_body.tes_reqst; 1519 struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
1520 1520
1521 __swab64s(&req->tsr_rpyid); 1521 __swab64s(&req->tsr_rpyid);
1522 __swab64s(&req->tsr_bulkid); 1522 __swab64s(&req->tsr_bulkid);
@@ -1530,7 +1530,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1530 } 1530 }
1531 1531
1532 if (msg->msg_type == SRPC_MSG_TEST_REPLY) { 1532 if (msg->msg_type == SRPC_MSG_TEST_REPLY) {
1533 srpc_test_reply_t *rep = &msg->msg_body.tes_reply; 1533 struct srpc_test_reply *rep = &msg->msg_body.tes_reply;
1534 1534
1535 __swab32s(&rep->tsr_status); 1535 __swab32s(&rep->tsr_status);
1536 sfw_unpack_sid(rep->tsr_sid); 1536 sfw_unpack_sid(rep->tsr_sid);
@@ -1538,7 +1538,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1538 } 1538 }
1539 1539
1540 if (msg->msg_type == SRPC_MSG_JOIN_REQST) { 1540 if (msg->msg_type == SRPC_MSG_JOIN_REQST) {
1541 srpc_join_reqst_t *req = &msg->msg_body.join_reqst; 1541 struct srpc_join_reqst *req = &msg->msg_body.join_reqst;
1542 1542
1543 __swab64s(&req->join_rpyid); 1543 __swab64s(&req->join_rpyid);
1544 sfw_unpack_sid(req->join_sid); 1544 sfw_unpack_sid(req->join_sid);
@@ -1546,7 +1546,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1546 } 1546 }
1547 1547
1548 if (msg->msg_type == SRPC_MSG_JOIN_REPLY) { 1548 if (msg->msg_type == SRPC_MSG_JOIN_REPLY) {
1549 srpc_join_reply_t *rep = &msg->msg_body.join_reply; 1549 struct srpc_join_reply *rep = &msg->msg_body.join_reply;
1550 1550
1551 __swab32s(&rep->join_status); 1551 __swab32s(&rep->join_status);
1552 __swab32s(&rep->join_timeout); 1552 __swab32s(&rep->join_timeout);
@@ -1558,7 +1558,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1558} 1558}
1559 1559
1560void 1560void
1561sfw_abort_rpc(srpc_client_rpc_t *rpc) 1561sfw_abort_rpc(struct srpc_client_rpc *rpc)
1562{ 1562{
1563 LASSERT(atomic_read(&rpc->crpc_refcount) > 0); 1563 LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
1564 LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID); 1564 LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
@@ -1569,7 +1569,7 @@ sfw_abort_rpc(srpc_client_rpc_t *rpc)
1569} 1569}
1570 1570
1571void 1571void
1572sfw_post_rpc(srpc_client_rpc_t *rpc) 1572sfw_post_rpc(struct srpc_client_rpc *rpc)
1573{ 1573{
1574 spin_lock(&rpc->crpc_lock); 1574 spin_lock(&rpc->crpc_lock);
1575 1575
@@ -1584,7 +1584,7 @@ sfw_post_rpc(srpc_client_rpc_t *rpc)
1584 spin_unlock(&rpc->crpc_lock); 1584 spin_unlock(&rpc->crpc_lock);
1585} 1585}
1586 1586
1587static srpc_service_t sfw_services[] = { 1587static struct srpc_service sfw_services[] = {
1588 { 1588 {
1589 /* sv_id */ SRPC_SERVICE_DEBUG, 1589 /* sv_id */ SRPC_SERVICE_DEBUG,
1590 /* sv_name */ "debug", 1590 /* sv_name */ "debug",
@@ -1628,8 +1628,8 @@ sfw_startup(void)
1628 int i; 1628 int i;
1629 int rc; 1629 int rc;
1630 int error; 1630 int error;
1631 srpc_service_t *sv; 1631 struct srpc_service *sv;
1632 sfw_test_case_t *tsc; 1632 struct sfw_test_case *tsc;
1633 1633
1634 if (session_timeout < 0) { 1634 if (session_timeout < 0) {
1635 CERROR("Session timeout must be non-negative: %d\n", 1635 CERROR("Session timeout must be non-negative: %d\n",
@@ -1721,8 +1721,8 @@ sfw_startup(void)
1721void 1721void
1722sfw_shutdown(void) 1722sfw_shutdown(void)
1723{ 1723{
1724 srpc_service_t *sv; 1724 struct srpc_service *sv;
1725 sfw_test_case_t *tsc; 1725 struct sfw_test_case *tsc;
1726 int i; 1726 int i;
1727 1727
1728 spin_lock(&sfw_data.fw_lock); 1728 spin_lock(&sfw_data.fw_lock);
@@ -1759,10 +1759,10 @@ sfw_shutdown(void)
1759 } 1759 }
1760 1760
1761 while (!list_empty(&sfw_data.fw_zombie_rpcs)) { 1761 while (!list_empty(&sfw_data.fw_zombie_rpcs)) {
1762 srpc_client_rpc_t *rpc; 1762 struct srpc_client_rpc *rpc;
1763 1763
1764 rpc = list_entry(sfw_data.fw_zombie_rpcs.next, 1764 rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
1765 srpc_client_rpc_t, crpc_list); 1765 struct srpc_client_rpc, crpc_list);
1766 list_del(&rpc->crpc_list); 1766 list_del(&rpc->crpc_list);
1767 1767
1768 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); 1768 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
@@ -1778,7 +1778,7 @@ sfw_shutdown(void)
1778 1778
1779 while (!list_empty(&sfw_data.fw_tests)) { 1779 while (!list_empty(&sfw_data.fw_tests)) {
1780 tsc = list_entry(sfw_data.fw_tests.next, 1780 tsc = list_entry(sfw_data.fw_tests.next,
1781 sfw_test_case_t, tsc_list); 1781 struct sfw_test_case, tsc_list);
1782 1782
1783 srpc_wait_service_shutdown(tsc->tsc_srv_service); 1783 srpc_wait_service_shutdown(tsc->tsc_srv_service);
1784 1784
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 81a45045e186..ad26fe9dd4af 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -56,9 +56,9 @@ struct lst_ping_data {
56static struct lst_ping_data lst_ping_data; 56static struct lst_ping_data lst_ping_data;
57 57
58static int 58static int
59ping_client_init(sfw_test_instance_t *tsi) 59ping_client_init(struct sfw_test_instance *tsi)
60{ 60{
61 sfw_session_t *sn = tsi->tsi_batch->bat_session; 61 struct sfw_session *sn = tsi->tsi_batch->bat_session;
62 62
63 LASSERT(tsi->tsi_is_client); 63 LASSERT(tsi->tsi_is_client);
64 LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK)); 64 LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK));
@@ -70,9 +70,9 @@ ping_client_init(sfw_test_instance_t *tsi)
70} 70}
71 71
72static void 72static void
73ping_client_fini(sfw_test_instance_t *tsi) 73ping_client_fini(struct sfw_test_instance *tsi)
74{ 74{
75 sfw_session_t *sn = tsi->tsi_batch->bat_session; 75 struct sfw_session *sn = tsi->tsi_batch->bat_session;
76 int errors; 76 int errors;
77 77
78 LASSERT(sn); 78 LASSERT(sn);
@@ -86,12 +86,12 @@ ping_client_fini(sfw_test_instance_t *tsi)
86} 86}
87 87
88static int 88static int
89ping_client_prep_rpc(sfw_test_unit_t *tsu, 89ping_client_prep_rpc(struct sfw_test_unit *tsu, lnet_process_id_t dest,
90 lnet_process_id_t dest, srpc_client_rpc_t **rpc) 90 struct srpc_client_rpc **rpc)
91{ 91{
92 srpc_ping_reqst_t *req; 92 struct srpc_ping_reqst *req;
93 sfw_test_instance_t *tsi = tsu->tsu_instance; 93 struct sfw_test_instance *tsi = tsu->tsu_instance;
94 sfw_session_t *sn = tsi->tsi_batch->bat_session; 94 struct sfw_session *sn = tsi->tsi_batch->bat_session;
95 struct timespec64 ts; 95 struct timespec64 ts;
96 int rc; 96 int rc;
97 97
@@ -118,18 +118,18 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu,
118} 118}
119 119
120static void 120static void
121ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) 121ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
122{ 122{
123 sfw_test_instance_t *tsi = tsu->tsu_instance; 123 struct sfw_test_instance *tsi = tsu->tsu_instance;
124 sfw_session_t *sn = tsi->tsi_batch->bat_session; 124 struct sfw_session *sn = tsi->tsi_batch->bat_session;
125 srpc_ping_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst; 125 struct srpc_ping_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst;
126 srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply; 126 struct srpc_ping_reply *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
127 struct timespec64 ts; 127 struct timespec64 ts;
128 128
129 LASSERT(sn); 129 LASSERT(sn);
130 130
131 if (rpc->crpc_status) { 131 if (rpc->crpc_status) {
132 if (!tsi->tsi_stopping) /* rpc could have been aborted */ 132 if (!tsi->tsi_stopping) /* rpc could have been aborted */
133 atomic_inc(&sn->sn_ping_errors); 133 atomic_inc(&sn->sn_ping_errors);
134 CERROR("Unable to ping %s (%d): %d\n", 134 CERROR("Unable to ping %s (%d): %d\n",
135 libcfs_id2str(rpc->crpc_dest), 135 libcfs_id2str(rpc->crpc_dest),
@@ -171,10 +171,10 @@ static int
171ping_server_handle(struct srpc_server_rpc *rpc) 171ping_server_handle(struct srpc_server_rpc *rpc)
172{ 172{
173 struct srpc_service *sv = rpc->srpc_scd->scd_svc; 173 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
174 srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; 174 struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
175 srpc_msg_t *replymsg = &rpc->srpc_replymsg; 175 struct srpc_msg *replymsg = &rpc->srpc_replymsg;
176 srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst; 176 struct srpc_ping_reqst *req = &reqstmsg->msg_body.ping_reqst;
177 srpc_ping_reply_t *rep = &rpc->srpc_replymsg.msg_body.ping_reply; 177 struct srpc_ping_reply *rep = &rpc->srpc_replymsg.msg_body.ping_reply;
178 178
179 LASSERT(sv->sv_id == SRPC_SERVICE_PING); 179 LASSERT(sv->sv_id == SRPC_SERVICE_PING);
180 180
@@ -210,7 +210,8 @@ ping_server_handle(struct srpc_server_rpc *rpc)
210 return 0; 210 return 0;
211} 211}
212 212
213sfw_test_client_ops_t ping_test_client; 213struct sfw_test_client_ops ping_test_client;
214
214void ping_init_test_client(void) 215void ping_init_test_client(void)
215{ 216{
216 ping_test_client.tso_init = ping_client_init; 217 ping_test_client.tso_init = ping_client_init;
@@ -219,7 +220,8 @@ void ping_init_test_client(void)
219 ping_test_client.tso_done_rpc = ping_client_done_rpc; 220 ping_test_client.tso_done_rpc = ping_client_done_rpc;
220} 221}
221 222
222srpc_service_t ping_test_service; 223struct srpc_service ping_test_service;
224
223void ping_init_test_service(void) 225void ping_init_test_service(void)
224{ 226{
225 ping_test_service.sv_id = SRPC_SERVICE_PING; 227 ping_test_service.sv_id = SRPC_SERVICE_PING;
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 7d7748d96332..3c45a7cfae18 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -46,19 +46,19 @@
46 46
47#include "selftest.h" 47#include "selftest.h"
48 48
49typedef enum { 49enum srpc_state {
50 SRPC_STATE_NONE, 50 SRPC_STATE_NONE,
51 SRPC_STATE_NI_INIT, 51 SRPC_STATE_NI_INIT,
52 SRPC_STATE_EQ_INIT, 52 SRPC_STATE_EQ_INIT,
53 SRPC_STATE_RUNNING, 53 SRPC_STATE_RUNNING,
54 SRPC_STATE_STOPPING, 54 SRPC_STATE_STOPPING,
55} srpc_state_t; 55};
56 56
57static struct smoketest_rpc { 57static struct smoketest_rpc {
58 spinlock_t rpc_glock; /* global lock */ 58 spinlock_t rpc_glock; /* global lock */
59 srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1]; 59 struct srpc_service *rpc_services[SRPC_SERVICE_MAX_ID + 1];
60 lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */ 60 lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
61 srpc_state_t rpc_state; 61 enum srpc_state rpc_state;
62 srpc_counters_t rpc_counters; 62 srpc_counters_t rpc_counters;
63 __u64 rpc_matchbits; /* matchbits counter */ 63 __u64 rpc_matchbits; /* matchbits counter */
64} srpc_data; 64} srpc_data;
@@ -71,7 +71,7 @@ srpc_serv_portal(int svc_id)
71} 71}
72 72
73/* forward ref's */ 73/* forward ref's */
74int srpc_handle_rpc(swi_workitem_t *wi); 74int srpc_handle_rpc(struct swi_workitem *wi);
75 75
76void srpc_get_counters(srpc_counters_t *cnt) 76void srpc_get_counters(srpc_counters_t *cnt)
77{ 77{
@@ -88,7 +88,7 @@ void srpc_set_counters(const srpc_counters_t *cnt)
88} 88}
89 89
90static int 90static int
91srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob) 91srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob)
92{ 92{
93 nob = min_t(int, nob, PAGE_SIZE); 93 nob = min_t(int, nob, PAGE_SIZE);
94 94
@@ -102,7 +102,7 @@ srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
102} 102}
103 103
104void 104void
105srpc_free_bulk(srpc_bulk_t *bk) 105srpc_free_bulk(struct srpc_bulk *bk)
106{ 106{
107 int i; 107 int i;
108 struct page *pg; 108 struct page *pg;
@@ -117,25 +117,25 @@ srpc_free_bulk(srpc_bulk_t *bk)
117 __free_page(pg); 117 __free_page(pg);
118 } 118 }
119 119
120 LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov])); 120 LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));
121} 121}
122 122
123srpc_bulk_t * 123struct srpc_bulk *
124srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) 124srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
125{ 125{
126 srpc_bulk_t *bk; 126 struct srpc_bulk *bk;
127 int i; 127 int i;
128 128
129 LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV); 129 LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
130 130
131 LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt, 131 LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
132 offsetof(srpc_bulk_t, bk_iovs[bulk_npg])); 132 offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
133 if (!bk) { 133 if (!bk) {
134 CERROR("Can't allocate descriptor for %d pages\n", bulk_npg); 134 CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
135 return NULL; 135 return NULL;
136 } 136 }
137 137
138 memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg])); 138 memset(bk, 0, offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
139 bk->bk_sink = sink; 139 bk->bk_sink = sink;
140 bk->bk_len = bulk_len; 140 bk->bk_len = bulk_len;
141 bk->bk_niov = bulk_npg; 141 bk->bk_niov = bulk_npg;
@@ -256,7 +256,7 @@ srpc_service_init(struct srpc_service *svc)
256 svc->sv_shuttingdown = 0; 256 svc->sv_shuttingdown = 0;
257 257
258 svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(), 258 svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
259 sizeof(struct srpc_service_cd)); 259 sizeof(*svc->sv_cpt_data));
260 if (!svc->sv_cpt_data) 260 if (!svc->sv_cpt_data)
261 return -ENOMEM; 261 return -ENOMEM;
262 262
@@ -338,7 +338,7 @@ srpc_add_service(struct srpc_service *sv)
338} 338}
339 339
340int 340int
341srpc_remove_service(srpc_service_t *sv) 341srpc_remove_service(struct srpc_service *sv)
342{ 342{
343 int id = sv->sv_id; 343 int id = sv->sv_id;
344 344
@@ -357,7 +357,7 @@ srpc_remove_service(srpc_service_t *sv)
357static int 357static int
358srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf, 358srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
359 int len, int options, lnet_process_id_t peer, 359 int len, int options, lnet_process_id_t peer,
360 lnet_handle_md_t *mdh, srpc_event_t *ev) 360 lnet_handle_md_t *mdh, struct srpc_event *ev)
361{ 361{
362 int rc; 362 int rc;
363 lnet_md_t md; 363 lnet_md_t md;
@@ -396,7 +396,7 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
396static int 396static int
397srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len, 397srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
398 int options, lnet_process_id_t peer, lnet_nid_t self, 398 int options, lnet_process_id_t peer, lnet_nid_t self,
399 lnet_handle_md_t *mdh, srpc_event_t *ev) 399 lnet_handle_md_t *mdh, struct srpc_event *ev)
400{ 400{
401 int rc; 401 int rc;
402 lnet_md_t md; 402 lnet_md_t md;
@@ -449,7 +449,7 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
449 449
450static int 450static int
451srpc_post_passive_rqtbuf(int service, int local, void *buf, int len, 451srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
452 lnet_handle_md_t *mdh, srpc_event_t *ev) 452 lnet_handle_md_t *mdh, struct srpc_event *ev)
453{ 453{
454 lnet_process_id_t any = { 0 }; 454 lnet_process_id_t any = { 0 };
455 455
@@ -697,7 +697,7 @@ srpc_finish_service(struct srpc_service *sv)
697 697
698/* called with sv->sv_lock held */ 698/* called with sv->sv_lock held */
699static void 699static void
700srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf) 700srpc_service_recycle_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
701__must_hold(&scd->scd_lock) 701__must_hold(&scd->scd_lock)
702{ 702{
703 if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) { 703 if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
@@ -755,11 +755,11 @@ srpc_abort_service(struct srpc_service *sv)
755} 755}
756 756
757void 757void
758srpc_shutdown_service(srpc_service_t *sv) 758srpc_shutdown_service(struct srpc_service *sv)
759{ 759{
760 struct srpc_service_cd *scd; 760 struct srpc_service_cd *scd;
761 struct srpc_server_rpc *rpc; 761 struct srpc_server_rpc *rpc;
762 srpc_buffer_t *buf; 762 struct srpc_buffer *buf;
763 int i; 763 int i;
764 764
765 CDEBUG(D_NET, "Shutting down service: id %d, name %s\n", 765 CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
@@ -792,9 +792,9 @@ srpc_shutdown_service(srpc_service_t *sv)
792} 792}
793 793
794static int 794static int
795srpc_send_request(srpc_client_rpc_t *rpc) 795srpc_send_request(struct srpc_client_rpc *rpc)
796{ 796{
797 srpc_event_t *ev = &rpc->crpc_reqstev; 797 struct srpc_event *ev = &rpc->crpc_reqstev;
798 int rc; 798 int rc;
799 799
800 ev->ev_fired = 0; 800 ev->ev_fired = 0;
@@ -803,7 +803,7 @@ srpc_send_request(srpc_client_rpc_t *rpc)
803 803
804 rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service), 804 rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
805 rpc->crpc_service, &rpc->crpc_reqstmsg, 805 rpc->crpc_service, &rpc->crpc_reqstmsg,
806 sizeof(srpc_msg_t), LNET_MD_OP_PUT, 806 sizeof(struct srpc_msg), LNET_MD_OP_PUT,
807 rpc->crpc_dest, LNET_NID_ANY, 807 rpc->crpc_dest, LNET_NID_ANY,
808 &rpc->crpc_reqstmdh, ev); 808 &rpc->crpc_reqstmdh, ev);
809 if (rc) { 809 if (rc) {
@@ -814,9 +814,9 @@ srpc_send_request(srpc_client_rpc_t *rpc)
814} 814}
815 815
816static int 816static int
817srpc_prepare_reply(srpc_client_rpc_t *rpc) 817srpc_prepare_reply(struct srpc_client_rpc *rpc)
818{ 818{
819 srpc_event_t *ev = &rpc->crpc_replyev; 819 struct srpc_event *ev = &rpc->crpc_replyev;
820 __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid; 820 __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
821 int rc; 821 int rc;
822 822
@@ -827,7 +827,8 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc)
827 *id = srpc_next_id(); 827 *id = srpc_next_id();
828 828
829 rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id, 829 rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
830 &rpc->crpc_replymsg, sizeof(srpc_msg_t), 830 &rpc->crpc_replymsg,
831 sizeof(struct srpc_msg),
831 LNET_MD_OP_PUT, rpc->crpc_dest, 832 LNET_MD_OP_PUT, rpc->crpc_dest,
832 &rpc->crpc_replymdh, ev); 833 &rpc->crpc_replymdh, ev);
833 if (rc) { 834 if (rc) {
@@ -838,10 +839,10 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc)
838} 839}
839 840
840static int 841static int
841srpc_prepare_bulk(srpc_client_rpc_t *rpc) 842srpc_prepare_bulk(struct srpc_client_rpc *rpc)
842{ 843{
843 srpc_bulk_t *bk = &rpc->crpc_bulk; 844 struct srpc_bulk *bk = &rpc->crpc_bulk;
844 srpc_event_t *ev = &rpc->crpc_bulkev; 845 struct srpc_event *ev = &rpc->crpc_bulkev;
845 __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid; 846 __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
846 int rc; 847 int rc;
847 int opt; 848 int opt;
@@ -873,8 +874,8 @@ srpc_prepare_bulk(srpc_client_rpc_t *rpc)
873static int 874static int
874srpc_do_bulk(struct srpc_server_rpc *rpc) 875srpc_do_bulk(struct srpc_server_rpc *rpc)
875{ 876{
876 srpc_event_t *ev = &rpc->srpc_ev; 877 struct srpc_event *ev = &rpc->srpc_ev;
877 srpc_bulk_t *bk = rpc->srpc_bulk; 878 struct srpc_bulk *bk = rpc->srpc_bulk;
878 __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid; 879 __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
879 int rc; 880 int rc;
880 int opt; 881 int opt;
@@ -903,7 +904,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
903{ 904{
904 struct srpc_service_cd *scd = rpc->srpc_scd; 905 struct srpc_service_cd *scd = rpc->srpc_scd;
905 struct srpc_service *sv = scd->scd_svc; 906 struct srpc_service *sv = scd->scd_svc;
906 srpc_buffer_t *buffer; 907 struct srpc_buffer *buffer;
907 908
908 LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE); 909 LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
909 910
@@ -948,7 +949,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
948 949
949 if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) { 950 if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
950 buffer = list_entry(scd->scd_buf_blocked.next, 951 buffer = list_entry(scd->scd_buf_blocked.next,
951 srpc_buffer_t, buf_list); 952 struct srpc_buffer, buf_list);
952 list_del(&buffer->buf_list); 953 list_del(&buffer->buf_list);
953 954
954 srpc_init_server_rpc(rpc, scd, buffer); 955 srpc_init_server_rpc(rpc, scd, buffer);
@@ -963,12 +964,12 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
963 964
964/* handles an incoming RPC */ 965/* handles an incoming RPC */
965int 966int
966srpc_handle_rpc(swi_workitem_t *wi) 967srpc_handle_rpc(struct swi_workitem *wi)
967{ 968{
968 struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data; 969 struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data;
969 struct srpc_service_cd *scd = rpc->srpc_scd; 970 struct srpc_service_cd *scd = rpc->srpc_scd;
970 struct srpc_service *sv = scd->scd_svc; 971 struct srpc_service *sv = scd->scd_svc;
971 srpc_event_t *ev = &rpc->srpc_ev; 972 struct srpc_event *ev = &rpc->srpc_ev;
972 int rc = 0; 973 int rc = 0;
973 974
974 LASSERT(wi == &rpc->srpc_wi); 975 LASSERT(wi == &rpc->srpc_wi);
@@ -995,8 +996,8 @@ srpc_handle_rpc(swi_workitem_t *wi)
995 default: 996 default:
996 LBUG(); 997 LBUG();
997 case SWI_STATE_NEWBORN: { 998 case SWI_STATE_NEWBORN: {
998 srpc_msg_t *msg; 999 struct srpc_msg *msg;
999 srpc_generic_reply_t *reply; 1000 struct srpc_generic_reply *reply;
1000 1001
1001 msg = &rpc->srpc_reqstbuf->buf_msg; 1002 msg = &rpc->srpc_reqstbuf->buf_msg;
1002 reply = &rpc->srpc_replymsg.msg_body.reply; 1003 reply = &rpc->srpc_replymsg.msg_body.reply;
@@ -1077,7 +1078,7 @@ srpc_handle_rpc(swi_workitem_t *wi)
1077static void 1078static void
1078srpc_client_rpc_expired(void *data) 1079srpc_client_rpc_expired(void *data)
1079{ 1080{
1080 srpc_client_rpc_t *rpc = data; 1081 struct srpc_client_rpc *rpc = data;
1081 1082
1082 CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n", 1083 CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
1083 rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), 1084 rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
@@ -1096,7 +1097,7 @@ srpc_client_rpc_expired(void *data)
1096} 1097}
1097 1098
1098static void 1099static void
1099srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc) 1100srpc_add_client_rpc_timer(struct srpc_client_rpc *rpc)
1100{ 1101{
1101 struct stt_timer *timer = &rpc->crpc_timer; 1102 struct stt_timer *timer = &rpc->crpc_timer;
1102 1103
@@ -1117,7 +1118,7 @@ srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
1117 * running on any CPU. 1118 * running on any CPU.
1118 */ 1119 */
1119static void 1120static void
1120srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc) 1121srpc_del_client_rpc_timer(struct srpc_client_rpc *rpc)
1121{ 1122{
1122 /* timer not planted or already exploded */ 1123 /* timer not planted or already exploded */
1123 if (!rpc->crpc_timeout) 1124 if (!rpc->crpc_timeout)
@@ -1138,9 +1139,9 @@ srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
1138} 1139}
1139 1140
1140static void 1141static void
1141srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status) 1142srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
1142{ 1143{
1143 swi_workitem_t *wi = &rpc->crpc_wi; 1144 struct swi_workitem *wi = &rpc->crpc_wi;
1144 1145
1145 LASSERT(status || wi->swi_state == SWI_STATE_DONE); 1146 LASSERT(status || wi->swi_state == SWI_STATE_DONE);
1146 1147
@@ -1175,11 +1176,11 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
1175 1176
1176/* sends an outgoing RPC */ 1177/* sends an outgoing RPC */
1177int 1178int
1178srpc_send_rpc(swi_workitem_t *wi) 1179srpc_send_rpc(struct swi_workitem *wi)
1179{ 1180{
1180 int rc = 0; 1181 int rc = 0;
1181 srpc_client_rpc_t *rpc; 1182 struct srpc_client_rpc *rpc;
1182 srpc_msg_t *reply; 1183 struct srpc_msg *reply;
1183 int do_bulk; 1184 int do_bulk;
1184 1185
1185 LASSERT(wi); 1186 LASSERT(wi);
@@ -1237,7 +1238,7 @@ srpc_send_rpc(swi_workitem_t *wi)
1237 wi->swi_state = SWI_STATE_REQUEST_SENT; 1238 wi->swi_state = SWI_STATE_REQUEST_SENT;
1238 /* perhaps more events, fall thru */ 1239 /* perhaps more events, fall thru */
1239 case SWI_STATE_REQUEST_SENT: { 1240 case SWI_STATE_REQUEST_SENT: {
1240 srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service); 1241 enum srpc_msg_type type = srpc_service2reply(rpc->crpc_service);
1241 1242
1242 if (!rpc->crpc_replyev.ev_fired) 1243 if (!rpc->crpc_replyev.ev_fired)
1243 break; 1244 break;
@@ -1308,15 +1309,15 @@ abort:
1308 return 0; 1309 return 0;
1309} 1310}
1310 1311
1311srpc_client_rpc_t * 1312struct srpc_client_rpc *
1312srpc_create_client_rpc(lnet_process_id_t peer, int service, 1313srpc_create_client_rpc(lnet_process_id_t peer, int service,
1313 int nbulkiov, int bulklen, 1314 int nbulkiov, int bulklen,
1314 void (*rpc_done)(srpc_client_rpc_t *), 1315 void (*rpc_done)(struct srpc_client_rpc *),
1315 void (*rpc_fini)(srpc_client_rpc_t *), void *priv) 1316 void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
1316{ 1317{
1317 srpc_client_rpc_t *rpc; 1318 struct srpc_client_rpc *rpc;
1318 1319
1319 LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t, 1320 LIBCFS_ALLOC(rpc, offsetof(struct srpc_client_rpc,
1320 crpc_bulk.bk_iovs[nbulkiov])); 1321 crpc_bulk.bk_iovs[nbulkiov]));
1321 if (!rpc) 1322 if (!rpc)
1322 return NULL; 1323 return NULL;
@@ -1328,12 +1329,12 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service,
1328 1329
1329/* called with rpc->crpc_lock held */ 1330/* called with rpc->crpc_lock held */
1330void 1331void
1331srpc_abort_rpc(srpc_client_rpc_t *rpc, int why) 1332srpc_abort_rpc(struct srpc_client_rpc *rpc, int why)
1332{ 1333{
1333 LASSERT(why); 1334 LASSERT(why);
1334 1335
1335 if (rpc->crpc_aborted || /* already aborted */ 1336 if (rpc->crpc_aborted || /* already aborted */
1336 rpc->crpc_closed) /* callback imminent */ 1337 rpc->crpc_closed) /* callback imminent */
1337 return; 1338 return;
1338 1339
1339 CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n", 1340 CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n",
@@ -1347,7 +1348,7 @@ srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
1347 1348
1348/* called with rpc->crpc_lock held */ 1349/* called with rpc->crpc_lock held */
1349void 1350void
1350srpc_post_rpc(srpc_client_rpc_t *rpc) 1351srpc_post_rpc(struct srpc_client_rpc *rpc)
1351{ 1352{
1352 LASSERT(!rpc->crpc_aborted); 1353 LASSERT(!rpc->crpc_aborted);
1353 LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING); 1354 LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
@@ -1363,7 +1364,7 @@ srpc_post_rpc(srpc_client_rpc_t *rpc)
1363int 1364int
1364srpc_send_reply(struct srpc_server_rpc *rpc) 1365srpc_send_reply(struct srpc_server_rpc *rpc)
1365{ 1366{
1366 srpc_event_t *ev = &rpc->srpc_ev; 1367 struct srpc_event *ev = &rpc->srpc_ev;
1367 struct srpc_msg *msg = &rpc->srpc_replymsg; 1368 struct srpc_msg *msg = &rpc->srpc_replymsg;
1368 struct srpc_buffer *buffer = rpc->srpc_reqstbuf; 1369 struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
1369 struct srpc_service_cd *scd = rpc->srpc_scd; 1370 struct srpc_service_cd *scd = rpc->srpc_scd;
@@ -1401,7 +1402,7 @@ srpc_send_reply(struct srpc_server_rpc *rpc)
1401 rpc->srpc_peer, rpc->srpc_self, 1402 rpc->srpc_peer, rpc->srpc_self,
1402 &rpc->srpc_replymdh, ev); 1403 &rpc->srpc_replymdh, ev);
1403 if (rc) 1404 if (rc)
1404 ev->ev_fired = 1; /* no more event expected */ 1405 ev->ev_fired = 1; /* no more event expected */
1405 return rc; 1406 return rc;
1406} 1407}
1407 1408
@@ -1410,13 +1411,13 @@ static void
1410srpc_lnet_ev_handler(lnet_event_t *ev) 1411srpc_lnet_ev_handler(lnet_event_t *ev)
1411{ 1412{
1412 struct srpc_service_cd *scd; 1413 struct srpc_service_cd *scd;
1413 srpc_event_t *rpcev = ev->md.user_ptr; 1414 struct srpc_event *rpcev = ev->md.user_ptr;
1414 srpc_client_rpc_t *crpc; 1415 struct srpc_client_rpc *crpc;
1415 struct srpc_server_rpc *srpc; 1416 struct srpc_server_rpc *srpc;
1416 srpc_buffer_t *buffer; 1417 struct srpc_buffer *buffer;
1417 srpc_service_t *sv; 1418 struct srpc_service *sv;
1418 srpc_msg_t *msg; 1419 struct srpc_msg *msg;
1419 srpc_msg_type_t type; 1420 enum srpc_msg_type type;
1420 1421
1421 LASSERT(!in_interrupt()); 1422 LASSERT(!in_interrupt());
1422 1423
@@ -1486,7 +1487,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
1486 LASSERT(ev->type != LNET_EVENT_UNLINK || 1487 LASSERT(ev->type != LNET_EVENT_UNLINK ||
1487 sv->sv_shuttingdown); 1488 sv->sv_shuttingdown);
1488 1489
1489 buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg); 1490 buffer = container_of(ev->md.start, struct srpc_buffer, buf_msg);
1490 buffer->buf_peer = ev->initiator; 1491 buffer->buf_peer = ev->initiator;
1491 buffer->buf_self = ev->target.nid; 1492 buffer->buf_self = ev->target.nid;
1492 1493
@@ -1509,7 +1510,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
1509 scd->scd_buf_err = 0; 1510 scd->scd_buf_err = 0;
1510 } 1511 }
1511 1512
1512 if (!scd->scd_buf_err && /* adding buffer is enabled */ 1513 if (!scd->scd_buf_err && /* adding buffer is enabled */
1513 !scd->scd_buf_adjust && 1514 !scd->scd_buf_adjust &&
1514 scd->scd_buf_nposted < scd->scd_buf_low) { 1515 scd->scd_buf_nposted < scd->scd_buf_low) {
1515 scd->scd_buf_adjust = max(scd->scd_buf_total / 2, 1516 scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
@@ -1663,7 +1664,7 @@ srpc_shutdown(void)
1663 spin_lock(&srpc_data.rpc_glock); 1664 spin_lock(&srpc_data.rpc_glock);
1664 1665
1665 for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) { 1666 for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
1666 srpc_service_t *sv = srpc_data.rpc_services[i]; 1667 struct srpc_service *sv = srpc_data.rpc_services[i];
1667 1668
1668 LASSERTF(!sv, "service not empty: id %d, name %s\n", 1669 LASSERTF(!sv, "service not empty: id %d, name %s\n",
1669 i, sv->sv_name); 1670 i, sv->sv_name);
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index a79c315f2ceb..c9b904cade16 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -44,7 +44,7 @@
44 * 44 *
45 * XXX: *REPLY == *REQST + 1 45 * XXX: *REPLY == *REQST + 1
46 */ 46 */
47typedef enum { 47enum srpc_msg_type {
48 SRPC_MSG_MKSN_REQST = 0, 48 SRPC_MSG_MKSN_REQST = 0,
49 SRPC_MSG_MKSN_REPLY = 1, 49 SRPC_MSG_MKSN_REPLY = 1,
50 SRPC_MSG_RMSN_REQST = 2, 50 SRPC_MSG_RMSN_REQST = 2,
@@ -63,7 +63,7 @@ typedef enum {
63 SRPC_MSG_PING_REPLY = 15, 63 SRPC_MSG_PING_REPLY = 15,
64 SRPC_MSG_JOIN_REQST = 16, 64 SRPC_MSG_JOIN_REQST = 16,
65 SRPC_MSG_JOIN_REPLY = 17, 65 SRPC_MSG_JOIN_REPLY = 17,
66} srpc_msg_type_t; 66};
67 67
68/* CAVEAT EMPTOR: 68/* CAVEAT EMPTOR:
69 * All srpc_*_reqst_t's 1st field must be matchbits of reply buffer, 69 * All srpc_*_reqst_t's 1st field must be matchbits of reply buffer,
@@ -72,122 +72,122 @@ typedef enum {
72 * All srpc_*_reply_t's 1st field must be a __u32 status, and 2nd field 72 * All srpc_*_reply_t's 1st field must be a __u32 status, and 2nd field
73 * session id if needed. 73 * session id if needed.
74 */ 74 */
75typedef struct { 75struct srpc_generic_reqst {
76 __u64 rpyid; /* reply buffer matchbits */ 76 __u64 rpyid; /* reply buffer matchbits */
77 __u64 bulkid; /* bulk buffer matchbits */ 77 __u64 bulkid; /* bulk buffer matchbits */
78} WIRE_ATTR srpc_generic_reqst_t; 78} WIRE_ATTR;
79 79
80typedef struct { 80struct srpc_generic_reply {
81 __u32 status; 81 __u32 status;
82 lst_sid_t sid; 82 lst_sid_t sid;
83} WIRE_ATTR srpc_generic_reply_t; 83} WIRE_ATTR;
84 84
85/* FRAMEWORK RPCs */ 85/* FRAMEWORK RPCs */
86typedef struct { 86struct srpc_mksn_reqst {
87 __u64 mksn_rpyid; /* reply buffer matchbits */ 87 __u64 mksn_rpyid; /* reply buffer matchbits */
88 lst_sid_t mksn_sid; /* session id */ 88 lst_sid_t mksn_sid; /* session id */
89 __u32 mksn_force; /* use brute force */ 89 __u32 mksn_force; /* use brute force */
90 char mksn_name[LST_NAME_SIZE]; 90 char mksn_name[LST_NAME_SIZE];
91} WIRE_ATTR srpc_mksn_reqst_t; /* make session request */ 91} WIRE_ATTR; /* make session request */
92 92
93typedef struct { 93struct srpc_mksn_reply {
94 __u32 mksn_status; /* session status */ 94 __u32 mksn_status; /* session status */
95 lst_sid_t mksn_sid; /* session id */ 95 lst_sid_t mksn_sid; /* session id */
96 __u32 mksn_timeout; /* session timeout */ 96 __u32 mksn_timeout; /* session timeout */
97 char mksn_name[LST_NAME_SIZE]; 97 char mksn_name[LST_NAME_SIZE];
98} WIRE_ATTR srpc_mksn_reply_t; /* make session reply */ 98} WIRE_ATTR; /* make session reply */
99 99
100typedef struct { 100struct srpc_rmsn_reqst {
101 __u64 rmsn_rpyid; /* reply buffer matchbits */ 101 __u64 rmsn_rpyid; /* reply buffer matchbits */
102 lst_sid_t rmsn_sid; /* session id */ 102 lst_sid_t rmsn_sid; /* session id */
103} WIRE_ATTR srpc_rmsn_reqst_t; /* remove session request */ 103} WIRE_ATTR; /* remove session request */
104 104
105typedef struct { 105struct srpc_rmsn_reply {
106 __u32 rmsn_status; 106 __u32 rmsn_status;
107 lst_sid_t rmsn_sid; /* session id */ 107 lst_sid_t rmsn_sid; /* session id */
108} WIRE_ATTR srpc_rmsn_reply_t; /* remove session reply */ 108} WIRE_ATTR; /* remove session reply */
109 109
110typedef struct { 110struct srpc_join_reqst {
111 __u64 join_rpyid; /* reply buffer matchbits */ 111 __u64 join_rpyid; /* reply buffer matchbits */
112 lst_sid_t join_sid; /* session id to join */ 112 lst_sid_t join_sid; /* session id to join */
113 char join_group[LST_NAME_SIZE]; /* group name */ 113 char join_group[LST_NAME_SIZE]; /* group name */
114} WIRE_ATTR srpc_join_reqst_t; 114} WIRE_ATTR;
115 115
116typedef struct { 116struct srpc_join_reply {
117 __u32 join_status; /* returned status */ 117 __u32 join_status; /* returned status */
118 lst_sid_t join_sid; /* session id */ 118 lst_sid_t join_sid; /* session id */
119 __u32 join_timeout; /* # seconds' inactivity to 119 __u32 join_timeout; /* # seconds' inactivity to
120 * expire */ 120 * expire */
121 char join_session[LST_NAME_SIZE]; /* session name */ 121 char join_session[LST_NAME_SIZE]; /* session name */
122} WIRE_ATTR srpc_join_reply_t; 122} WIRE_ATTR;
123 123
124typedef struct { 124struct srpc_debug_reqst {
125 __u64 dbg_rpyid; /* reply buffer matchbits */ 125 __u64 dbg_rpyid; /* reply buffer matchbits */
126 lst_sid_t dbg_sid; /* session id */ 126 lst_sid_t dbg_sid; /* session id */
127 __u32 dbg_flags; /* bitmap of debug */ 127 __u32 dbg_flags; /* bitmap of debug */
128} WIRE_ATTR srpc_debug_reqst_t; 128} WIRE_ATTR;
129 129
130typedef struct { 130struct srpc_debug_reply {
131 __u32 dbg_status; /* returned code */ 131 __u32 dbg_status; /* returned code */
132 lst_sid_t dbg_sid; /* session id */ 132 lst_sid_t dbg_sid; /* session id */
133 __u32 dbg_timeout; /* session timeout */ 133 __u32 dbg_timeout; /* session timeout */
134 __u32 dbg_nbatch; /* # of batches in the node */ 134 __u32 dbg_nbatch; /* # of batches in the node */
135 char dbg_name[LST_NAME_SIZE]; /* session name */ 135 char dbg_name[LST_NAME_SIZE]; /* session name */
136} WIRE_ATTR srpc_debug_reply_t; 136} WIRE_ATTR;
137 137
138#define SRPC_BATCH_OPC_RUN 1 138#define SRPC_BATCH_OPC_RUN 1
139#define SRPC_BATCH_OPC_STOP 2 139#define SRPC_BATCH_OPC_STOP 2
140#define SRPC_BATCH_OPC_QUERY 3 140#define SRPC_BATCH_OPC_QUERY 3
141 141
142typedef struct { 142struct srpc_batch_reqst {
143 __u64 bar_rpyid; /* reply buffer matchbits */ 143 __u64 bar_rpyid; /* reply buffer matchbits */
144 lst_sid_t bar_sid; /* session id */ 144 lst_sid_t bar_sid; /* session id */
145 lst_bid_t bar_bid; /* batch id */ 145 lst_bid_t bar_bid; /* batch id */
146 __u32 bar_opc; /* create/start/stop batch */ 146 __u32 bar_opc; /* create/start/stop batch */
147 __u32 bar_testidx; /* index of test */ 147 __u32 bar_testidx; /* index of test */
148 __u32 bar_arg; /* parameters */ 148 __u32 bar_arg; /* parameters */
149} WIRE_ATTR srpc_batch_reqst_t; 149} WIRE_ATTR;
150 150
151typedef struct { 151struct srpc_batch_reply {
152 __u32 bar_status; /* status of request */ 152 __u32 bar_status; /* status of request */
153 lst_sid_t bar_sid; /* session id */ 153 lst_sid_t bar_sid; /* session id */
154 __u32 bar_active; /* # of active tests in batch/test */ 154 __u32 bar_active; /* # of active tests in batch/test */
155 __u32 bar_time; /* remained time */ 155 __u32 bar_time; /* remained time */
156} WIRE_ATTR srpc_batch_reply_t; 156} WIRE_ATTR;
157 157
158typedef struct { 158struct srpc_stat_reqst {
159 __u64 str_rpyid; /* reply buffer matchbits */ 159 __u64 str_rpyid; /* reply buffer matchbits */
160 lst_sid_t str_sid; /* session id */ 160 lst_sid_t str_sid; /* session id */
161 __u32 str_type; /* type of stat */ 161 __u32 str_type; /* type of stat */
162} WIRE_ATTR srpc_stat_reqst_t; 162} WIRE_ATTR;
163 163
164typedef struct { 164struct srpc_stat_reply {
165 __u32 str_status; 165 __u32 str_status;
166 lst_sid_t str_sid; 166 lst_sid_t str_sid;
167 sfw_counters_t str_fw; 167 sfw_counters_t str_fw;
168 srpc_counters_t str_rpc; 168 srpc_counters_t str_rpc;
169 lnet_counters_t str_lnet; 169 lnet_counters_t str_lnet;
170} WIRE_ATTR srpc_stat_reply_t; 170} WIRE_ATTR;
171 171
172typedef struct { 172struct test_bulk_req {
173 __u32 blk_opc; /* bulk operation code */ 173 __u32 blk_opc; /* bulk operation code */
174 __u32 blk_npg; /* # of pages */ 174 __u32 blk_npg; /* # of pages */
175 __u32 blk_flags; /* reserved flags */ 175 __u32 blk_flags; /* reserved flags */
176} WIRE_ATTR test_bulk_req_t; 176} WIRE_ATTR;
177 177
178typedef struct { 178struct test_bulk_req_v1 {
179 __u16 blk_opc; /* bulk operation code */ 179 __u16 blk_opc; /* bulk operation code */
180 __u16 blk_flags; /* data check flags */ 180 __u16 blk_flags; /* data check flags */
181 __u32 blk_len; /* data length */ 181 __u32 blk_len; /* data length */
182 __u32 blk_offset; /* reserved: offset */ 182 __u32 blk_offset; /* reserved: offset */
183} WIRE_ATTR test_bulk_req_v1_t; 183} WIRE_ATTR;
184 184
185typedef struct { 185struct test_ping_req {
186 __u32 png_size; /* size of ping message */ 186 __u32 png_size; /* size of ping message */
187 __u32 png_flags; /* reserved flags */ 187 __u32 png_flags; /* reserved flags */
188} WIRE_ATTR test_ping_req_t; 188} WIRE_ATTR;
189 189
190typedef struct { 190struct srpc_test_reqst {
191 __u64 tsr_rpyid; /* reply buffer matchbits */ 191 __u64 tsr_rpyid; /* reply buffer matchbits */
192 __u64 tsr_bulkid; /* bulk buffer matchbits */ 192 __u64 tsr_bulkid; /* bulk buffer matchbits */
193 lst_sid_t tsr_sid; /* session id */ 193 lst_sid_t tsr_sid; /* session id */
@@ -201,82 +201,82 @@ typedef struct {
201 __u32 tsr_ndest; /* # of dest nodes */ 201 __u32 tsr_ndest; /* # of dest nodes */
202 202
203 union { 203 union {
204 test_ping_req_t ping; 204 struct test_ping_req ping;
205 test_bulk_req_t bulk_v0; 205 struct test_bulk_req bulk_v0;
206 test_bulk_req_v1_t bulk_v1; 206 struct test_bulk_req_v1 bulk_v1;
207 } tsr_u; 207 } tsr_u;
208} WIRE_ATTR srpc_test_reqst_t; 208} WIRE_ATTR;
209 209
210typedef struct { 210struct srpc_test_reply {
211 __u32 tsr_status; /* returned code */ 211 __u32 tsr_status; /* returned code */
212 lst_sid_t tsr_sid; 212 lst_sid_t tsr_sid;
213} WIRE_ATTR srpc_test_reply_t; 213} WIRE_ATTR;
214 214
215/* TEST RPCs */ 215/* TEST RPCs */
216typedef struct { 216struct srpc_ping_reqst {
217 __u64 pnr_rpyid; 217 __u64 pnr_rpyid;
218 __u32 pnr_magic; 218 __u32 pnr_magic;
219 __u32 pnr_seq; 219 __u32 pnr_seq;
220 __u64 pnr_time_sec; 220 __u64 pnr_time_sec;
221 __u64 pnr_time_usec; 221 __u64 pnr_time_usec;
222} WIRE_ATTR srpc_ping_reqst_t; 222} WIRE_ATTR;
223 223
224typedef struct { 224struct srpc_ping_reply {
225 __u32 pnr_status; 225 __u32 pnr_status;
226 __u32 pnr_magic; 226 __u32 pnr_magic;
227 __u32 pnr_seq; 227 __u32 pnr_seq;
228} WIRE_ATTR srpc_ping_reply_t; 228} WIRE_ATTR;
229 229
230typedef struct { 230struct srpc_brw_reqst {
231 __u64 brw_rpyid; /* reply buffer matchbits */ 231 __u64 brw_rpyid; /* reply buffer matchbits */
232 __u64 brw_bulkid; /* bulk buffer matchbits */ 232 __u64 brw_bulkid; /* bulk buffer matchbits */
233 __u32 brw_rw; /* read or write */ 233 __u32 brw_rw; /* read or write */
234 __u32 brw_len; /* bulk data len */ 234 __u32 brw_len; /* bulk data len */
235 __u32 brw_flags; /* bulk data patterns */ 235 __u32 brw_flags; /* bulk data patterns */
236} WIRE_ATTR srpc_brw_reqst_t; /* bulk r/w request */ 236} WIRE_ATTR; /* bulk r/w request */
237 237
238typedef struct { 238struct srpc_brw_reply {
239 __u32 brw_status; 239 __u32 brw_status;
240} WIRE_ATTR srpc_brw_reply_t; /* bulk r/w reply */ 240} WIRE_ATTR; /* bulk r/w reply */
241 241
242#define SRPC_MSG_MAGIC 0xeeb0f00d 242#define SRPC_MSG_MAGIC 0xeeb0f00d
243#define SRPC_MSG_VERSION 1 243#define SRPC_MSG_VERSION 1
244 244
245typedef struct srpc_msg { 245struct srpc_msg {
246 __u32 msg_magic; /* magic number */ 246 __u32 msg_magic; /* magic number */
247 __u32 msg_version; /* message version number */ 247 __u32 msg_version; /* message version number */
248 __u32 msg_type; /* type of message body: srpc_msg_type_t */ 248 __u32 msg_type; /* type of message body: srpc_msg_type */
249 __u32 msg_reserved0; 249 __u32 msg_reserved0;
250 __u32 msg_reserved1; 250 __u32 msg_reserved1;
251 __u32 msg_ses_feats; /* test session features */ 251 __u32 msg_ses_feats; /* test session features */
252 union { 252 union {
253 srpc_generic_reqst_t reqst; 253 struct srpc_generic_reqst reqst;
254 srpc_generic_reply_t reply; 254 struct srpc_generic_reply reply;
255 255
256 srpc_mksn_reqst_t mksn_reqst; 256 struct srpc_mksn_reqst mksn_reqst;
257 srpc_mksn_reply_t mksn_reply; 257 struct srpc_mksn_reply mksn_reply;
258 srpc_rmsn_reqst_t rmsn_reqst; 258 struct srpc_rmsn_reqst rmsn_reqst;
259 srpc_rmsn_reply_t rmsn_reply; 259 struct srpc_rmsn_reply rmsn_reply;
260 srpc_debug_reqst_t dbg_reqst; 260 struct srpc_debug_reqst dbg_reqst;
261 srpc_debug_reply_t dbg_reply; 261 struct srpc_debug_reply dbg_reply;
262 srpc_batch_reqst_t bat_reqst; 262 struct srpc_batch_reqst bat_reqst;
263 srpc_batch_reply_t bat_reply; 263 struct srpc_batch_reply bat_reply;
264 srpc_stat_reqst_t stat_reqst; 264 struct srpc_stat_reqst stat_reqst;
265 srpc_stat_reply_t stat_reply; 265 struct srpc_stat_reply stat_reply;
266 srpc_test_reqst_t tes_reqst; 266 struct srpc_test_reqst tes_reqst;
267 srpc_test_reply_t tes_reply; 267 struct srpc_test_reply tes_reply;
268 srpc_join_reqst_t join_reqst; 268 struct srpc_join_reqst join_reqst;
269 srpc_join_reply_t join_reply; 269 struct srpc_join_reply join_reply;
270 270
271 srpc_ping_reqst_t ping_reqst; 271 struct srpc_ping_reqst ping_reqst;
272 srpc_ping_reply_t ping_reply; 272 struct srpc_ping_reply ping_reply;
273 srpc_brw_reqst_t brw_reqst; 273 struct srpc_brw_reqst brw_reqst;
274 srpc_brw_reply_t brw_reply; 274 struct srpc_brw_reply brw_reply;
275 } msg_body; 275 } msg_body;
276} WIRE_ATTR srpc_msg_t; 276} WIRE_ATTR;
277 277
278static inline void 278static inline void
279srpc_unpack_msg_hdr(srpc_msg_t *msg) 279srpc_unpack_msg_hdr(struct srpc_msg *msg)
280{ 280{
281 if (msg->msg_magic == SRPC_MSG_MAGIC) 281 if (msg->msg_magic == SRPC_MSG_MAGIC)
282 return; /* no flipping needed */ 282 return; /* no flipping needed */
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index e689ca1846e1..4eac1c9e639f 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -93,7 +93,7 @@ struct sfw_test_instance;
93/* all reply/bulk RDMAs go to this portal */ 93/* all reply/bulk RDMAs go to this portal */
94#define SRPC_RDMA_PORTAL 52 94#define SRPC_RDMA_PORTAL 52
95 95
96static inline srpc_msg_type_t 96static inline enum srpc_msg_type
97srpc_service2request(int service) 97srpc_service2request(int service)
98{ 98{
99 switch (service) { 99 switch (service) {
@@ -128,13 +128,13 @@ srpc_service2request(int service)
128 } 128 }
129} 129}
130 130
131static inline srpc_msg_type_t 131static inline enum srpc_msg_type
132srpc_service2reply(int service) 132srpc_service2reply(int service)
133{ 133{
134 return srpc_service2request(service) + 1; 134 return srpc_service2request(service) + 1;
135} 135}
136 136
137typedef enum { 137enum srpc_event_type {
138 SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source) 138 SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source)
139 * received */ 139 * received */
140 SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */ 140 SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */
@@ -143,57 +143,58 @@ typedef enum {
143 SRPC_REPLY_SENT = 5, /* outgoing reply sent */ 143 SRPC_REPLY_SENT = 5, /* outgoing reply sent */
144 SRPC_REQUEST_RCVD = 6, /* incoming request received */ 144 SRPC_REQUEST_RCVD = 6, /* incoming request received */
145 SRPC_REQUEST_SENT = 7, /* outgoing request sent */ 145 SRPC_REQUEST_SENT = 7, /* outgoing request sent */
146} srpc_event_type_t; 146};
147 147
148/* RPC event */ 148/* RPC event */
149typedef struct { 149struct srpc_event {
150 srpc_event_type_t ev_type; /* what's up */ 150 enum srpc_event_type ev_type; /* what's up */
151 lnet_event_kind_t ev_lnet; /* LNet event type */ 151 lnet_event_kind_t ev_lnet; /* LNet event type */
152 int ev_fired; /* LNet event fired? */ 152 int ev_fired; /* LNet event fired? */
153 int ev_status; /* LNet event status */ 153 int ev_status; /* LNet event status */
154 void *ev_data; /* owning server/client RPC */ 154 void *ev_data; /* owning server/client RPC */
155} srpc_event_t; 155};
156 156
157typedef struct { 157/* bulk descriptor */
158struct srpc_bulk {
158 int bk_len; /* len of bulk data */ 159 int bk_len; /* len of bulk data */
159 lnet_handle_md_t bk_mdh; 160 lnet_handle_md_t bk_mdh;
160 int bk_sink; /* sink/source */ 161 int bk_sink; /* sink/source */
161 int bk_niov; /* # iov in bk_iovs */ 162 int bk_niov; /* # iov in bk_iovs */
162 lnet_kiov_t bk_iovs[0]; 163 lnet_kiov_t bk_iovs[0];
163} srpc_bulk_t; /* bulk descriptor */ 164};
164 165
165/* message buffer descriptor */ 166/* message buffer descriptor */
166typedef struct srpc_buffer { 167struct srpc_buffer {
167 struct list_head buf_list; /* chain on srpc_service::*_msgq */ 168 struct list_head buf_list; /* chain on srpc_service::*_msgq */
168 srpc_msg_t buf_msg; 169 struct srpc_msg buf_msg;
169 lnet_handle_md_t buf_mdh; 170 lnet_handle_md_t buf_mdh;
170 lnet_nid_t buf_self; 171 lnet_nid_t buf_self;
171 lnet_process_id_t buf_peer; 172 lnet_process_id_t buf_peer;
172} srpc_buffer_t; 173};
173 174
174struct swi_workitem; 175struct swi_workitem;
175typedef int (*swi_action_t) (struct swi_workitem *); 176typedef int (*swi_action_t) (struct swi_workitem *);
176 177
177typedef struct swi_workitem { 178struct swi_workitem {
178 struct cfs_wi_sched *swi_sched; 179 struct cfs_wi_sched *swi_sched;
179 cfs_workitem_t swi_workitem; 180 struct cfs_workitem swi_workitem;
180 swi_action_t swi_action; 181 swi_action_t swi_action;
181 int swi_state; 182 int swi_state;
182} swi_workitem_t; 183};
183 184
184/* server-side state of a RPC */ 185/* server-side state of a RPC */
185struct srpc_server_rpc { 186struct srpc_server_rpc {
186 /* chain on srpc_service::*_rpcq */ 187 /* chain on srpc_service::*_rpcq */
187 struct list_head srpc_list; 188 struct list_head srpc_list;
188 struct srpc_service_cd *srpc_scd; 189 struct srpc_service_cd *srpc_scd;
189 swi_workitem_t srpc_wi; 190 struct swi_workitem srpc_wi;
190 srpc_event_t srpc_ev; /* bulk/reply event */ 191 struct srpc_event srpc_ev; /* bulk/reply event */
191 lnet_nid_t srpc_self; 192 lnet_nid_t srpc_self;
192 lnet_process_id_t srpc_peer; 193 lnet_process_id_t srpc_peer;
193 srpc_msg_t srpc_replymsg; 194 struct srpc_msg srpc_replymsg;
194 lnet_handle_md_t srpc_replymdh; 195 lnet_handle_md_t srpc_replymdh;
195 srpc_buffer_t *srpc_reqstbuf; 196 struct srpc_buffer *srpc_reqstbuf;
196 srpc_bulk_t *srpc_bulk; 197 struct srpc_bulk *srpc_bulk;
197 198
198 unsigned int srpc_aborted; /* being given up */ 199 unsigned int srpc_aborted; /* being given up */
199 int srpc_status; 200 int srpc_status;
@@ -201,14 +202,14 @@ struct srpc_server_rpc {
201}; 202};
202 203
203/* client-side state of a RPC */ 204/* client-side state of a RPC */
204typedef struct srpc_client_rpc { 205struct srpc_client_rpc {
205 struct list_head crpc_list; /* chain on user's lists */ 206 struct list_head crpc_list; /* chain on user's lists */
206 spinlock_t crpc_lock; /* serialize */ 207 spinlock_t crpc_lock; /* serialize */
207 int crpc_service; 208 int crpc_service;
208 atomic_t crpc_refcount; 209 atomic_t crpc_refcount;
209 int crpc_timeout; /* # seconds to wait for reply */ 210 int crpc_timeout; /* # seconds to wait for reply */
210 struct stt_timer crpc_timer; 211 struct stt_timer crpc_timer;
211 swi_workitem_t crpc_wi; 212 struct swi_workitem crpc_wi;
212 lnet_process_id_t crpc_dest; 213 lnet_process_id_t crpc_dest;
213 214
214 void (*crpc_done)(struct srpc_client_rpc *); 215 void (*crpc_done)(struct srpc_client_rpc *);
@@ -221,20 +222,20 @@ typedef struct srpc_client_rpc {
221 unsigned int crpc_closed:1; /* completed */ 222 unsigned int crpc_closed:1; /* completed */
222 223
223 /* RPC events */ 224 /* RPC events */
224 srpc_event_t crpc_bulkev; /* bulk event */ 225 struct srpc_event crpc_bulkev; /* bulk event */
225 srpc_event_t crpc_reqstev; /* request event */ 226 struct srpc_event crpc_reqstev; /* request event */
226 srpc_event_t crpc_replyev; /* reply event */ 227 struct srpc_event crpc_replyev; /* reply event */
227 228
228 /* bulk, request(reqst), and reply exchanged on wire */ 229 /* bulk, request(reqst), and reply exchanged on wire */
229 srpc_msg_t crpc_reqstmsg; 230 struct srpc_msg crpc_reqstmsg;
230 srpc_msg_t crpc_replymsg; 231 struct srpc_msg crpc_replymsg;
231 lnet_handle_md_t crpc_reqstmdh; 232 lnet_handle_md_t crpc_reqstmdh;
232 lnet_handle_md_t crpc_replymdh; 233 lnet_handle_md_t crpc_replymdh;
233 srpc_bulk_t crpc_bulk; 234 struct srpc_bulk crpc_bulk;
234} srpc_client_rpc_t; 235};
235 236
236#define srpc_client_rpc_size(rpc) \ 237#define srpc_client_rpc_size(rpc) \
237offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov]) 238offsetof(struct srpc_client_rpc, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
238 239
239#define srpc_client_rpc_addref(rpc) \ 240#define srpc_client_rpc_addref(rpc) \
240do { \ 241do { \
@@ -266,13 +267,13 @@ struct srpc_service_cd {
266 /** backref to service */ 267 /** backref to service */
267 struct srpc_service *scd_svc; 268 struct srpc_service *scd_svc;
268 /** event buffer */ 269 /** event buffer */
269 srpc_event_t scd_ev; 270 struct srpc_event scd_ev;
270 /** free RPC descriptors */ 271 /** free RPC descriptors */
271 struct list_head scd_rpc_free; 272 struct list_head scd_rpc_free;
272 /** in-flight RPCs */ 273 /** in-flight RPCs */
273 struct list_head scd_rpc_active; 274 struct list_head scd_rpc_active;
274 /** workitem for posting buffer */ 275 /** workitem for posting buffer */
275 swi_workitem_t scd_buf_wi; 276 struct swi_workitem scd_buf_wi;
276 /** CPT id */ 277 /** CPT id */
277 int scd_cpt; 278 int scd_cpt;
278 /** error code for scd_buf_wi */ 279 /** error code for scd_buf_wi */
@@ -306,7 +307,7 @@ struct srpc_service_cd {
306#define SFW_FRWK_WI_MIN 16 307#define SFW_FRWK_WI_MIN 16
307#define SFW_FRWK_WI_MAX 256 308#define SFW_FRWK_WI_MAX 256
308 309
309typedef struct srpc_service { 310struct srpc_service {
310 int sv_id; /* service id */ 311 int sv_id; /* service id */
311 const char *sv_name; /* human readable name */ 312 const char *sv_name; /* human readable name */
312 int sv_wi_total; /* total server workitems */ 313 int sv_wi_total; /* total server workitems */
@@ -320,9 +321,9 @@ typedef struct srpc_service {
320 */ 321 */
321 int (*sv_handler)(struct srpc_server_rpc *); 322 int (*sv_handler)(struct srpc_server_rpc *);
322 int (*sv_bulk_ready)(struct srpc_server_rpc *, int); 323 int (*sv_bulk_ready)(struct srpc_server_rpc *, int);
323} srpc_service_t; 324};
324 325
325typedef struct { 326struct sfw_session {
326 struct list_head sn_list; /* chain on fw_zombie_sessions */ 327 struct list_head sn_list; /* chain on fw_zombie_sessions */
327 lst_sid_t sn_id; /* unique identifier */ 328 lst_sid_t sn_id; /* unique identifier */
328 unsigned int sn_timeout; /* # seconds' inactivity to expire */ 329 unsigned int sn_timeout; /* # seconds' inactivity to expire */
@@ -335,37 +336,37 @@ typedef struct {
335 atomic_t sn_brw_errors; 336 atomic_t sn_brw_errors;
336 atomic_t sn_ping_errors; 337 atomic_t sn_ping_errors;
337 unsigned long sn_started; 338 unsigned long sn_started;
338} sfw_session_t; 339};
339 340
340#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \ 341#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
341 (sid0).ses_stamp == (sid1).ses_stamp) 342 (sid0).ses_stamp == (sid1).ses_stamp)
342 343
343typedef struct { 344struct sfw_batch {
344 struct list_head bat_list; /* chain on sn_batches */ 345 struct list_head bat_list; /* chain on sn_batches */
345 lst_bid_t bat_id; /* batch id */ 346 lst_bid_t bat_id; /* batch id */
346 int bat_error; /* error code of batch */ 347 int bat_error; /* error code of batch */
347 sfw_session_t *bat_session; /* batch's session */ 348 struct sfw_session *bat_session; /* batch's session */
348 atomic_t bat_nactive; /* # of active tests */ 349 atomic_t bat_nactive; /* # of active tests */
349 struct list_head bat_tests; /* test instances */ 350 struct list_head bat_tests; /* test instances */
350} sfw_batch_t; 351};
351 352
352typedef struct { 353struct sfw_test_client_ops {
353 int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test 354 int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test
354 * client */ 355 * client */
355 void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test 356 void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test
356 * client */ 357 * client */
357 int (*tso_prep_rpc)(struct sfw_test_unit *tsu, 358 int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
358 lnet_process_id_t dest, 359 lnet_process_id_t dest,
359 srpc_client_rpc_t **rpc); /* prep a tests rpc */ 360 struct srpc_client_rpc **rpc); /* prep a tests rpc */
360 void (*tso_done_rpc)(struct sfw_test_unit *tsu, 361 void (*tso_done_rpc)(struct sfw_test_unit *tsu,
361 srpc_client_rpc_t *rpc); /* done a test rpc */ 362 struct srpc_client_rpc *rpc); /* done a test rpc */
362} sfw_test_client_ops_t; 363};
363 364
364typedef struct sfw_test_instance { 365struct sfw_test_instance {
365 struct list_head tsi_list; /* chain on batch */ 366 struct list_head tsi_list; /* chain on batch */
366 int tsi_service; /* test type */ 367 int tsi_service; /* test type */
367 sfw_batch_t *tsi_batch; /* batch */ 368 struct sfw_batch *tsi_batch; /* batch */
368 sfw_test_client_ops_t *tsi_ops; /* test client operation 369 struct sfw_test_client_ops *tsi_ops; /* test client operation
369 */ 370 */
370 371
371 /* public parameter for all test units */ 372 /* public parameter for all test units */
@@ -384,11 +385,11 @@ typedef struct sfw_test_instance {
384 struct list_head tsi_active_rpcs; /* active rpcs */ 385 struct list_head tsi_active_rpcs; /* active rpcs */
385 386
386 union { 387 union {
387 test_ping_req_t ping; /* ping parameter */ 388 struct test_ping_req ping; /* ping parameter */
388 test_bulk_req_t bulk_v0; /* bulk parameter */ 389 struct test_bulk_req bulk_v0; /* bulk parameter */
389 test_bulk_req_v1_t bulk_v1; /* bulk v1 parameter */ 390 struct test_bulk_req_v1 bulk_v1; /* bulk v1 parameter */
390 } tsi_u; 391 } tsi_u;
391} sfw_test_instance_t; 392};
392 393
393/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of 394/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
394 * pages are not used */ 395 * pages are not used */
@@ -397,57 +398,58 @@ typedef struct sfw_test_instance {
397#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) 398#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
398#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE) 399#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
399 400
400typedef struct sfw_test_unit { 401struct sfw_test_unit {
401 struct list_head tsu_list; /* chain on lst_test_instance */ 402 struct list_head tsu_list; /* chain on lst_test_instance */
402 lnet_process_id_t tsu_dest; /* id of dest node */ 403 lnet_process_id_t tsu_dest; /* id of dest node */
403 int tsu_loop; /* loop count of the test */ 404 int tsu_loop; /* loop count of the test */
404 sfw_test_instance_t *tsu_instance; /* pointer to test instance */ 405 struct sfw_test_instance *tsu_instance; /* pointer to test instance */
405 void *tsu_private; /* private data */ 406 void *tsu_private; /* private data */
406 swi_workitem_t tsu_worker; /* workitem of the test unit */ 407 struct swi_workitem tsu_worker; /* workitem of the test unit */
407} sfw_test_unit_t; 408};
408 409
409typedef struct sfw_test_case { 410struct sfw_test_case {
410 struct list_head tsc_list; /* chain on fw_tests */ 411 struct list_head tsc_list; /* chain on fw_tests */
411 srpc_service_t *tsc_srv_service; /* test service */ 412 struct srpc_service *tsc_srv_service; /* test service */
412 sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */ 413 struct sfw_test_client_ops *tsc_cli_ops; /* ops of test client */
413} sfw_test_case_t; 414};
414 415
415srpc_client_rpc_t * 416struct srpc_client_rpc *
416sfw_create_rpc(lnet_process_id_t peer, int service, 417sfw_create_rpc(lnet_process_id_t peer, int service,
417 unsigned features, int nbulkiov, int bulklen, 418 unsigned features, int nbulkiov, int bulklen,
418 void (*done)(srpc_client_rpc_t *), void *priv); 419 void (*done)(struct srpc_client_rpc *), void *priv);
419int sfw_create_test_rpc(sfw_test_unit_t *tsu, 420int sfw_create_test_rpc(struct sfw_test_unit *tsu,
420 lnet_process_id_t peer, unsigned features, 421 lnet_process_id_t peer, unsigned features,
421 int nblk, int blklen, srpc_client_rpc_t **rpc); 422 int nblk, int blklen, struct srpc_client_rpc **rpc);
422void sfw_abort_rpc(srpc_client_rpc_t *rpc); 423void sfw_abort_rpc(struct srpc_client_rpc *rpc);
423void sfw_post_rpc(srpc_client_rpc_t *rpc); 424void sfw_post_rpc(struct srpc_client_rpc *rpc);
424void sfw_client_rpc_done(srpc_client_rpc_t *rpc); 425void sfw_client_rpc_done(struct srpc_client_rpc *rpc);
425void sfw_unpack_message(srpc_msg_t *msg); 426void sfw_unpack_message(struct srpc_msg *msg);
426void sfw_free_pages(struct srpc_server_rpc *rpc); 427void sfw_free_pages(struct srpc_server_rpc *rpc);
427void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i); 428void sfw_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i);
428int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, 429int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
429 int sink); 430 int sink);
430int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply); 431int sfw_make_session(struct srpc_mksn_reqst *request,
432 struct srpc_mksn_reply *reply);
431 433
432srpc_client_rpc_t * 434struct srpc_client_rpc *
433srpc_create_client_rpc(lnet_process_id_t peer, int service, 435srpc_create_client_rpc(lnet_process_id_t peer, int service,
434 int nbulkiov, int bulklen, 436 int nbulkiov, int bulklen,
435 void (*rpc_done)(srpc_client_rpc_t *), 437 void (*rpc_done)(struct srpc_client_rpc *),
436 void (*rpc_fini)(srpc_client_rpc_t *), void *priv); 438 void (*rpc_fini)(struct srpc_client_rpc *), void *priv);
437void srpc_post_rpc(srpc_client_rpc_t *rpc); 439void srpc_post_rpc(struct srpc_client_rpc *rpc);
438void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why); 440void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why);
439void srpc_free_bulk(srpc_bulk_t *bk); 441void srpc_free_bulk(struct srpc_bulk *bk);
440srpc_bulk_t *srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, 442struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned bulk_npg,
441 int sink); 443 unsigned bulk_len, int sink);
442int srpc_send_rpc(swi_workitem_t *wi); 444int srpc_send_rpc(struct swi_workitem *wi);
443int srpc_send_reply(struct srpc_server_rpc *rpc); 445int srpc_send_reply(struct srpc_server_rpc *rpc);
444int srpc_add_service(srpc_service_t *sv); 446int srpc_add_service(struct srpc_service *sv);
445int srpc_remove_service(srpc_service_t *sv); 447int srpc_remove_service(struct srpc_service *sv);
446void srpc_shutdown_service(srpc_service_t *sv); 448void srpc_shutdown_service(struct srpc_service *sv);
447void srpc_abort_service(srpc_service_t *sv); 449void srpc_abort_service(struct srpc_service *sv);
448int srpc_finish_service(srpc_service_t *sv); 450int srpc_finish_service(struct srpc_service *sv);
449int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer); 451int srpc_service_add_buffers(struct srpc_service *sv, int nbuffer);
450void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer); 452void srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer);
451void srpc_get_counters(srpc_counters_t *cnt); 453void srpc_get_counters(srpc_counters_t *cnt);
452void srpc_set_counters(const srpc_counters_t *cnt); 454void srpc_set_counters(const srpc_counters_t *cnt);
453 455
@@ -461,15 +463,17 @@ srpc_serv_is_framework(struct srpc_service *svc)
461} 463}
462 464
463static inline int 465static inline int
464swi_wi_action(cfs_workitem_t *wi) 466swi_wi_action(struct cfs_workitem *wi)
465{ 467{
466 swi_workitem_t *swi = container_of(wi, swi_workitem_t, swi_workitem); 468 struct swi_workitem *swi;
469
470 swi = container_of(wi, struct swi_workitem, swi_workitem);
467 471
468 return swi->swi_action(swi); 472 return swi->swi_action(swi);
469} 473}
470 474
471static inline void 475static inline void
472swi_init_workitem(swi_workitem_t *swi, void *data, 476swi_init_workitem(struct swi_workitem *swi, void *data,
473 swi_action_t action, struct cfs_wi_sched *sched) 477 swi_action_t action, struct cfs_wi_sched *sched)
474{ 478{
475 swi->swi_sched = sched; 479 swi->swi_sched = sched;
@@ -479,19 +483,19 @@ swi_init_workitem(swi_workitem_t *swi, void *data,
479} 483}
480 484
481static inline void 485static inline void
482swi_schedule_workitem(swi_workitem_t *wi) 486swi_schedule_workitem(struct swi_workitem *wi)
483{ 487{
484 cfs_wi_schedule(wi->swi_sched, &wi->swi_workitem); 488 cfs_wi_schedule(wi->swi_sched, &wi->swi_workitem);
485} 489}
486 490
487static inline void 491static inline void
488swi_exit_workitem(swi_workitem_t *swi) 492swi_exit_workitem(struct swi_workitem *swi)
489{ 493{
490 cfs_wi_exit(swi->swi_sched, &swi->swi_workitem); 494 cfs_wi_exit(swi->swi_sched, &swi->swi_workitem);
491} 495}
492 496
493static inline int 497static inline int
494swi_deschedule_workitem(swi_workitem_t *swi) 498swi_deschedule_workitem(struct swi_workitem *swi)
495{ 499{
496 return cfs_wi_deschedule(swi->swi_sched, &swi->swi_workitem); 500 return cfs_wi_deschedule(swi->swi_sched, &swi->swi_workitem);
497} 501}
@@ -502,7 +506,7 @@ void sfw_shutdown(void);
502void srpc_shutdown(void); 506void srpc_shutdown(void);
503 507
504static inline void 508static inline void
505srpc_destroy_client_rpc(srpc_client_rpc_t *rpc) 509srpc_destroy_client_rpc(struct srpc_client_rpc *rpc)
506{ 510{
507 LASSERT(rpc); 511 LASSERT(rpc);
508 LASSERT(!srpc_event_pending(rpc)); 512 LASSERT(!srpc_event_pending(rpc));
@@ -515,14 +519,14 @@ srpc_destroy_client_rpc(srpc_client_rpc_t *rpc)
515} 519}
516 520
517static inline void 521static inline void
518srpc_init_client_rpc(srpc_client_rpc_t *rpc, lnet_process_id_t peer, 522srpc_init_client_rpc(struct srpc_client_rpc *rpc, lnet_process_id_t peer,
519 int service, int nbulkiov, int bulklen, 523 int service, int nbulkiov, int bulklen,
520 void (*rpc_done)(srpc_client_rpc_t *), 524 void (*rpc_done)(struct srpc_client_rpc *),
521 void (*rpc_fini)(srpc_client_rpc_t *), void *priv) 525 void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
522{ 526{
523 LASSERT(nbulkiov <= LNET_MAX_IOV); 527 LASSERT(nbulkiov <= LNET_MAX_IOV);
524 528
525 memset(rpc, 0, offsetof(srpc_client_rpc_t, 529 memset(rpc, 0, offsetof(struct srpc_client_rpc,
526 crpc_bulk.bk_iovs[nbulkiov])); 530 crpc_bulk.bk_iovs[nbulkiov]));
527 531
528 INIT_LIST_HEAD(&rpc->crpc_list); 532 INIT_LIST_HEAD(&rpc->crpc_list);
@@ -592,7 +596,7 @@ do { \
592} while (0) 596} while (0)
593 597
594static inline void 598static inline void
595srpc_wait_service_shutdown(srpc_service_t *sv) 599srpc_wait_service_shutdown(struct srpc_service *sv)
596{ 600{
597 int i = 2; 601 int i = 2;
598 602
@@ -607,16 +611,16 @@ srpc_wait_service_shutdown(srpc_service_t *sv)
607 } 611 }
608} 612}
609 613
610extern sfw_test_client_ops_t brw_test_client; 614extern struct sfw_test_client_ops brw_test_client;
611void brw_init_test_client(void); 615void brw_init_test_client(void);
612 616
613extern srpc_service_t brw_test_service; 617extern struct srpc_service brw_test_service;
614void brw_init_test_service(void); 618void brw_init_test_service(void);
615 619
616extern sfw_test_client_ops_t ping_test_client; 620extern struct sfw_test_client_ops ping_test_client;
617void ping_init_test_client(void); 621void ping_init_test_client(void);
618 622
619extern srpc_service_t ping_test_service; 623extern struct srpc_service ping_test_service;
620void ping_init_test_service(void); 624void ping_init_test_service(void);
621 625
622#endif /* __SELFTEST_SELFTEST_H__ */ 626#endif /* __SELFTEST_SELFTEST_H__ */
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index 8be52526ae5a..b6c4aae007af 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -49,7 +49,7 @@
49 * sorted by increasing expiry time. The number of slots is 2**7 (128), 49 * sorted by increasing expiry time. The number of slots is 2**7 (128),
50 * to cover a time period of 1024 seconds into the future before wrapping. 50 * to cover a time period of 1024 seconds into the future before wrapping.
51 */ 51 */
52#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */ 52#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
53#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL) 53#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL)
54#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1)) 54#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1))
55#define STTIMER_NSLOTS (1 << 7) 55#define STTIMER_NSLOTS (1 << 7)
@@ -170,20 +170,22 @@ stt_check_timers(unsigned long *last)
170static int 170static int
171stt_timer_main(void *arg) 171stt_timer_main(void *arg)
172{ 172{
173 int rc = 0;
174
173 cfs_block_allsigs(); 175 cfs_block_allsigs();
174 176
175 while (!stt_data.stt_shuttingdown) { 177 while (!stt_data.stt_shuttingdown) {
176 stt_check_timers(&stt_data.stt_prev_slot); 178 stt_check_timers(&stt_data.stt_prev_slot);
177 179
178 wait_event_timeout(stt_data.stt_waitq, 180 rc = wait_event_timeout(stt_data.stt_waitq,
179 stt_data.stt_shuttingdown, 181 stt_data.stt_shuttingdown,
180 cfs_time_seconds(STTIMER_SLOTTIME)); 182 cfs_time_seconds(STTIMER_SLOTTIME));
181 } 183 }
182 184
183 spin_lock(&stt_data.stt_lock); 185 spin_lock(&stt_data.stt_lock);
184 stt_data.stt_nthreads--; 186 stt_data.stt_nthreads--;
185 spin_unlock(&stt_data.stt_lock); 187 spin_unlock(&stt_data.stt_lock);
186 return 0; 188 return rc;
187} 189}
188 190
189static int 191static int
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 39269c3c56a6..3a4df626462f 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -66,6 +66,7 @@ static int seq_client_rpc(struct lu_client_seq *seq,
66 unsigned int debug_mask; 66 unsigned int debug_mask;
67 int rc; 67 int rc;
68 68
69 LASSERT(exp && !IS_ERR(exp));
69 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY, 70 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
70 LUSTRE_MDS_VERSION, SEQ_QUERY); 71 LUSTRE_MDS_VERSION, SEQ_QUERY);
71 if (!req) 72 if (!req)
@@ -101,19 +102,22 @@ static int seq_client_rpc(struct lu_client_seq *seq,
101 req->rq_no_delay = req->rq_no_resend = 1; 102 req->rq_no_delay = req->rq_no_resend = 1;
102 debug_mask = D_CONSOLE; 103 debug_mask = D_CONSOLE;
103 } else { 104 } else {
104 if (seq->lcs_type == LUSTRE_SEQ_METADATA) 105 if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
106 req->rq_reply_portal = MDC_REPLY_PORTAL;
105 req->rq_request_portal = SEQ_METADATA_PORTAL; 107 req->rq_request_portal = SEQ_METADATA_PORTAL;
106 else 108 } else {
109 req->rq_reply_portal = OSC_REPLY_PORTAL;
107 req->rq_request_portal = SEQ_DATA_PORTAL; 110 req->rq_request_portal = SEQ_DATA_PORTAL;
111 }
108 debug_mask = D_INFO; 112 debug_mask = D_INFO;
109 } 113 }
110 114
111 ptlrpc_at_set_req_timeout(req); 115 ptlrpc_at_set_req_timeout(req);
112 116
113 if (seq->lcs_type == LUSTRE_SEQ_METADATA) 117 if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
114 mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL); 118 mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
115 rc = ptlrpc_queue_wait(req); 119 rc = ptlrpc_queue_wait(req);
116 if (seq->lcs_type == LUSTRE_SEQ_METADATA) 120 if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
117 mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL); 121 mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
118 if (rc) 122 if (rc)
119 goto out_req; 123 goto out_req;
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 062f388cf38a..5a04e99d9249 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -178,8 +178,9 @@ restart_fixup:
178 if (n_range->lsr_end <= c_range->lsr_end) { 178 if (n_range->lsr_end <= c_range->lsr_end) {
179 *n_range = *c_range; 179 *n_range = *c_range;
180 fld_cache_entry_delete(cache, f_curr); 180 fld_cache_entry_delete(cache, f_curr);
181 } else 181 } else {
182 n_range->lsr_start = c_range->lsr_end; 182 n_range->lsr_start = c_range->lsr_end;
183 }
183 } 184 }
184 185
185 /* we could have overlap over next 186 /* we could have overlap over next
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index e8a3caf20c9b..75d6a48637a9 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -101,12 +101,6 @@ struct fld_cache {
101 unsigned int fci_no_shrink:1; 101 unsigned int fci_no_shrink:1;
102}; 102};
103 103
104enum fld_op {
105 FLD_CREATE = 0,
106 FLD_DELETE = 1,
107 FLD_LOOKUP = 2
108};
109
110enum { 104enum {
111 /* 4M of FLD cache will not hurt client a lot. */ 105 /* 4M of FLD cache will not hurt client a lot. */
112 FLD_SERVER_CACHE_SIZE = (4 * 0x100000), 106 FLD_SERVER_CACHE_SIZE = (4 * 0x100000),
@@ -126,7 +120,8 @@ enum {
126extern struct lu_fld_hash fld_hash[]; 120extern struct lu_fld_hash fld_hash[];
127 121
128int fld_client_rpc(struct obd_export *exp, 122int fld_client_rpc(struct obd_export *exp,
129 struct lu_seq_range *range, __u32 fld_op); 123 struct lu_seq_range *range, __u32 fld_op,
124 struct ptlrpc_request **reqp);
130 125
131extern struct lprocfs_vars fld_client_debugfs_list[]; 126extern struct lprocfs_vars fld_client_debugfs_list[];
132 127
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index a3d122d85c8d..304c0ec268c9 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -64,9 +64,9 @@ static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
64{ 64{
65 int rc; 65 int rc;
66 66
67 client_obd_list_lock(&cli->cl_loi_list_lock); 67 spin_lock(&cli->cl_loi_list_lock);
68 rc = list_empty(&mcw->mcw_entry); 68 rc = list_empty(&mcw->mcw_entry);
69 client_obd_list_unlock(&cli->cl_loi_list_lock); 69 spin_unlock(&cli->cl_loi_list_lock);
70 return rc; 70 return rc;
71}; 71};
72 72
@@ -75,15 +75,15 @@ static void fld_enter_request(struct client_obd *cli)
75 struct mdc_cache_waiter mcw; 75 struct mdc_cache_waiter mcw;
76 struct l_wait_info lwi = { 0 }; 76 struct l_wait_info lwi = { 0 };
77 77
78 client_obd_list_lock(&cli->cl_loi_list_lock); 78 spin_lock(&cli->cl_loi_list_lock);
79 if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) { 79 if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
80 list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters); 80 list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
81 init_waitqueue_head(&mcw.mcw_waitq); 81 init_waitqueue_head(&mcw.mcw_waitq);
82 client_obd_list_unlock(&cli->cl_loi_list_lock); 82 spin_unlock(&cli->cl_loi_list_lock);
83 l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi); 83 l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
84 } else { 84 } else {
85 cli->cl_r_in_flight++; 85 cli->cl_r_in_flight++;
86 client_obd_list_unlock(&cli->cl_loi_list_lock); 86 spin_unlock(&cli->cl_loi_list_lock);
87 } 87 }
88} 88}
89 89
@@ -92,10 +92,9 @@ static void fld_exit_request(struct client_obd *cli)
92 struct list_head *l, *tmp; 92 struct list_head *l, *tmp;
93 struct mdc_cache_waiter *mcw; 93 struct mdc_cache_waiter *mcw;
94 94
95 client_obd_list_lock(&cli->cl_loi_list_lock); 95 spin_lock(&cli->cl_loi_list_lock);
96 cli->cl_r_in_flight--; 96 cli->cl_r_in_flight--;
97 list_for_each_safe(l, tmp, &cli->cl_cache_waiters) { 97 list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
98
99 if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) { 98 if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
100 /* No free request slots anymore */ 99 /* No free request slots anymore */
101 break; 100 break;
@@ -106,7 +105,7 @@ static void fld_exit_request(struct client_obd *cli)
106 cli->cl_r_in_flight++; 105 cli->cl_r_in_flight++;
107 wake_up(&mcw->mcw_waitq); 106 wake_up(&mcw->mcw_waitq);
108 } 107 }
109 client_obd_list_unlock(&cli->cl_loi_list_lock); 108 spin_unlock(&cli->cl_loi_list_lock);
110} 109}
111 110
112static int fld_rrb_hash(struct lu_client_fld *fld, u64 seq) 111static int fld_rrb_hash(struct lu_client_fld *fld, u64 seq)
@@ -392,55 +391,82 @@ void fld_client_fini(struct lu_client_fld *fld)
392EXPORT_SYMBOL(fld_client_fini); 391EXPORT_SYMBOL(fld_client_fini);
393 392
394int fld_client_rpc(struct obd_export *exp, 393int fld_client_rpc(struct obd_export *exp,
395 struct lu_seq_range *range, __u32 fld_op) 394 struct lu_seq_range *range, __u32 fld_op,
395 struct ptlrpc_request **reqp)
396{ 396{
397 struct ptlrpc_request *req; 397 struct ptlrpc_request *req = NULL;
398 struct lu_seq_range *prange; 398 struct lu_seq_range *prange;
399 __u32 *op; 399 __u32 *op;
400 int rc; 400 int rc = 0;
401 struct obd_import *imp; 401 struct obd_import *imp;
402 402
403 LASSERT(exp); 403 LASSERT(exp);
404 404
405 imp = class_exp2cliimp(exp); 405 imp = class_exp2cliimp(exp);
406 req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, LUSTRE_MDS_VERSION, 406 switch (fld_op) {
407 FLD_QUERY); 407 case FLD_QUERY:
408 if (!req) 408 req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY,
409 return -ENOMEM; 409 LUSTRE_MDS_VERSION, FLD_QUERY);
410 410 if (!req)
411 op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC); 411 return -ENOMEM;
412 *op = fld_op; 412
413 /*
414 * XXX: only needed when talking to old server(< 2.6), it should
415 * be removed when < 2.6 server is not supported
416 */
417 op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
418 *op = FLD_LOOKUP;
419
420 if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS)
421 req->rq_allow_replay = 1;
422 break;
423 case FLD_READ:
424 req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_READ,
425 LUSTRE_MDS_VERSION, FLD_READ);
426 if (!req)
427 return -ENOMEM;
428
429 req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA,
430 RCL_SERVER, PAGE_SIZE);
431 break;
432 default:
433 rc = -EINVAL;
434 break;
435 }
436 if (rc)
437 return rc;
413 438
414 prange = req_capsule_client_get(&req->rq_pill, &RMF_FLD_MDFLD); 439 prange = req_capsule_client_get(&req->rq_pill, &RMF_FLD_MDFLD);
415 *prange = *range; 440 *prange = *range;
416
417 ptlrpc_request_set_replen(req); 441 ptlrpc_request_set_replen(req);
418 req->rq_request_portal = FLD_REQUEST_PORTAL; 442 req->rq_request_portal = FLD_REQUEST_PORTAL;
419 req->rq_reply_portal = MDC_REPLY_PORTAL; 443 req->rq_reply_portal = MDC_REPLY_PORTAL;
420 ptlrpc_at_set_req_timeout(req); 444 ptlrpc_at_set_req_timeout(req);
421 445
422 if (fld_op == FLD_LOOKUP &&
423 imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS)
424 req->rq_allow_replay = 1;
425
426 if (fld_op != FLD_LOOKUP)
427 mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
428 fld_enter_request(&exp->exp_obd->u.cli); 446 fld_enter_request(&exp->exp_obd->u.cli);
429 rc = ptlrpc_queue_wait(req); 447 rc = ptlrpc_queue_wait(req);
430 fld_exit_request(&exp->exp_obd->u.cli); 448 fld_exit_request(&exp->exp_obd->u.cli);
431 if (fld_op != FLD_LOOKUP)
432 mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
433 if (rc) 449 if (rc)
434 goto out_req; 450 goto out_req;
435 451
436 prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD); 452 if (fld_op == FLD_QUERY) {
437 if (!prange) { 453 prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD);
438 rc = -EFAULT; 454 if (!prange) {
439 goto out_req; 455 rc = -EFAULT;
456 goto out_req;
457 }
458 *range = *prange;
440 } 459 }
441 *range = *prange; 460
442out_req: 461out_req:
443 ptlrpc_req_finished(req); 462 if (rc || !reqp) {
463 ptlrpc_req_finished(req);
464 req = NULL;
465 }
466
467 if (reqp)
468 *reqp = req;
469
444 return rc; 470 return rc;
445} 471}
446 472
@@ -468,7 +494,7 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
468 494
469 res.lsr_start = seq; 495 res.lsr_start = seq;
470 fld_range_set_type(&res, flags); 496 fld_range_set_type(&res, flags);
471 rc = fld_client_rpc(target->ft_exp, &res, FLD_LOOKUP); 497 rc = fld_client_rpc(target->ft_exp, &res, FLD_QUERY, NULL);
472 498
473 if (rc == 0) { 499 if (rc == 0) {
474 *mds = res.lsr_index; 500 *mds = res.lsr_index;
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index fb971ded5a1b..d4c33dd110ab 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -82,7 +82,6 @@
82 * - i_mutex 82 * - i_mutex
83 * - PG_locked 83 * - PG_locked
84 * - cl_object_header::coh_page_guard 84 * - cl_object_header::coh_page_guard
85 * - cl_object_header::coh_lock_guard
86 * - lu_site::ls_guard 85 * - lu_site::ls_guard
87 * 86 *
88 * See the top comment in cl_object.c for the description of overall locking and 87 * See the top comment in cl_object.c for the description of overall locking and
@@ -98,9 +97,12 @@
98 * super-class definitions. 97 * super-class definitions.
99 */ 98 */
100#include "lu_object.h" 99#include "lu_object.h"
100#include <linux/atomic.h>
101#include "linux/lustre_compat25.h" 101#include "linux/lustre_compat25.h"
102#include <linux/mutex.h> 102#include <linux/mutex.h>
103#include <linux/radix-tree.h> 103#include <linux/radix-tree.h>
104#include <linux/spinlock.h>
105#include <linux/wait.h>
104 106
105struct inode; 107struct inode;
106 108
@@ -138,7 +140,7 @@ struct cl_device_operations {
138 * cl_req_slice_add(). 140 * cl_req_slice_add().
139 * 141 *
140 * \see osc_req_init(), lov_req_init(), lovsub_req_init() 142 * \see osc_req_init(), lov_req_init(), lovsub_req_init()
141 * \see ccc_req_init() 143 * \see vvp_req_init()
142 */ 144 */
143 int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev, 145 int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
144 struct cl_req *req); 146 struct cl_req *req);
@@ -147,7 +149,7 @@ struct cl_device_operations {
147/** 149/**
148 * Device in the client stack. 150 * Device in the client stack.
149 * 151 *
150 * \see ccc_device, lov_device, lovsub_device, osc_device 152 * \see vvp_device, lov_device, lovsub_device, osc_device
151 */ 153 */
152struct cl_device { 154struct cl_device {
153 /** Super-class. */ 155 /** Super-class. */
@@ -243,7 +245,7 @@ enum cl_attr_valid {
243 * be discarded from the memory, all its sub-objects are torn-down and 245 * be discarded from the memory, all its sub-objects are torn-down and
244 * destroyed too. 246 * destroyed too.
245 * 247 *
246 * \see ccc_object, lov_object, lovsub_object, osc_object 248 * \see vvp_object, lov_object, lovsub_object, osc_object
247 */ 249 */
248struct cl_object { 250struct cl_object {
249 /** super class */ 251 /** super class */
@@ -322,7 +324,7 @@ struct cl_object_operations {
322 * to be used instead of newly created. 324 * to be used instead of newly created.
323 */ 325 */
324 int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj, 326 int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
325 struct cl_page *page, struct page *vmpage); 327 struct cl_page *page, pgoff_t index);
326 /** 328 /**
327 * Initialize lock slice for this layer. Called top-to-bottom through 329 * Initialize lock slice for this layer. Called top-to-bottom through
328 * every object layer when a new cl_lock is instantiated. Layer 330 * every object layer when a new cl_lock is instantiated. Layer
@@ -383,11 +385,17 @@ struct cl_object_operations {
383 * object. Layers are supposed to fill parts of \a lvb that will be 385 * object. Layers are supposed to fill parts of \a lvb that will be
384 * shipped to the glimpse originator as a glimpse result. 386 * shipped to the glimpse originator as a glimpse result.
385 * 387 *
386 * \see ccc_object_glimpse(), lovsub_object_glimpse(), 388 * \see vvp_object_glimpse(), lovsub_object_glimpse(),
387 * \see osc_object_glimpse() 389 * \see osc_object_glimpse()
388 */ 390 */
389 int (*coo_glimpse)(const struct lu_env *env, 391 int (*coo_glimpse)(const struct lu_env *env,
390 const struct cl_object *obj, struct ost_lvb *lvb); 392 const struct cl_object *obj, struct ost_lvb *lvb);
393 /**
394 * Object prune method. Called when the layout is going to change on
395 * this object, therefore each layer has to clean up their cache,
396 * mainly pages and locks.
397 */
398 int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
391}; 399};
392 400
393/** 401/**
@@ -398,22 +406,6 @@ struct cl_object_header {
398 * here. 406 * here.
399 */ 407 */
400 struct lu_object_header coh_lu; 408 struct lu_object_header coh_lu;
401 /** \name locks
402 * \todo XXX move locks below to the separate cache-lines, they are
403 * mostly useless otherwise.
404 */
405 /** @{ */
406 /** Lock protecting page tree. */
407 spinlock_t coh_page_guard;
408 /** Lock protecting lock list. */
409 spinlock_t coh_lock_guard;
410 /** @} locks */
411 /** Radix tree of cl_page's, cached for this object. */
412 struct radix_tree_root coh_tree;
413 /** # of pages in radix tree. */
414 unsigned long coh_pages;
415 /** List of cl_lock's granted for this object. */
416 struct list_head coh_locks;
417 409
418 /** 410 /**
419 * Parent object. It is assumed that an object has a well-defined 411 * Parent object. It is assumed that an object has a well-defined
@@ -460,10 +452,6 @@ struct cl_object_header {
460 co_lu.lo_linkage) 452 co_lu.lo_linkage)
461/** @} cl_object */ 453/** @} cl_object */
462 454
463#ifndef pgoff_t
464#define pgoff_t unsigned long
465#endif
466
467#define CL_PAGE_EOF ((pgoff_t)~0ull) 455#define CL_PAGE_EOF ((pgoff_t)~0ull)
468 456
469/** \addtogroup cl_page cl_page 457/** \addtogroup cl_page cl_page
@@ -727,16 +715,10 @@ struct cl_page {
727 atomic_t cp_ref; 715 atomic_t cp_ref;
728 /** An object this page is a part of. Immutable after creation. */ 716 /** An object this page is a part of. Immutable after creation. */
729 struct cl_object *cp_obj; 717 struct cl_object *cp_obj;
730 /** Logical page index within the object. Immutable after creation. */
731 pgoff_t cp_index;
732 /** List of slices. Immutable after creation. */ 718 /** List of slices. Immutable after creation. */
733 struct list_head cp_layers; 719 struct list_head cp_layers;
734 /** Parent page, NULL for top-level page. Immutable after creation. */ 720 /** vmpage */
735 struct cl_page *cp_parent; 721 struct page *cp_vmpage;
736 /** Lower-layer page. NULL for bottommost page. Immutable after
737 * creation.
738 */
739 struct cl_page *cp_child;
740 /** 722 /**
741 * Page state. This field is const to avoid accidental update, it is 723 * Page state. This field is const to avoid accidental update, it is
742 * modified only internally within cl_page.c. Protected by a VM lock. 724 * modified only internally within cl_page.c. Protected by a VM lock.
@@ -787,10 +769,11 @@ struct cl_page {
787/** 769/**
788 * Per-layer part of cl_page. 770 * Per-layer part of cl_page.
789 * 771 *
790 * \see ccc_page, lov_page, osc_page 772 * \see vvp_page, lov_page, osc_page
791 */ 773 */
792struct cl_page_slice { 774struct cl_page_slice {
793 struct cl_page *cpl_page; 775 struct cl_page *cpl_page;
776 pgoff_t cpl_index;
794 /** 777 /**
795 * Object slice corresponding to this page slice. Immutable after 778 * Object slice corresponding to this page slice. Immutable after
796 * creation. 779 * creation.
@@ -804,16 +787,9 @@ struct cl_page_slice {
804/** 787/**
805 * Lock mode. For the client extent locks. 788 * Lock mode. For the client extent locks.
806 * 789 *
807 * \warning: cl_lock_mode_match() assumes particular ordering here.
808 * \ingroup cl_lock 790 * \ingroup cl_lock
809 */ 791 */
810enum cl_lock_mode { 792enum cl_lock_mode {
811 /**
812 * Mode of a lock that protects no data, and exists only as a
813 * placeholder. This is used for `glimpse' requests. A phantom lock
814 * might get promoted to real lock at some point.
815 */
816 CLM_PHANTOM,
817 CLM_READ, 793 CLM_READ,
818 CLM_WRITE, 794 CLM_WRITE,
819 CLM_GROUP 795 CLM_GROUP
@@ -846,11 +822,6 @@ struct cl_page_operations {
846 */ 822 */
847 823
848 /** 824 /**
849 * \return the underlying VM page. Optional.
850 */
851 struct page *(*cpo_vmpage)(const struct lu_env *env,
852 const struct cl_page_slice *slice);
853 /**
854 * Called when \a io acquires this page into the exclusive 825 * Called when \a io acquires this page into the exclusive
855 * ownership. When this method returns, it is guaranteed that the is 826 * ownership. When this method returns, it is guaranteed that the is
856 * not owned by other io, and no transfer is going on against 827 * not owned by other io, and no transfer is going on against
@@ -897,14 +868,6 @@ struct cl_page_operations {
897 void (*cpo_export)(const struct lu_env *env, 868 void (*cpo_export)(const struct lu_env *env,
898 const struct cl_page_slice *slice, int uptodate); 869 const struct cl_page_slice *slice, int uptodate);
899 /** 870 /**
900 * Unmaps page from the user space (if it is mapped).
901 *
902 * \see cl_page_unmap()
903 * \see vvp_page_unmap()
904 */
905 int (*cpo_unmap)(const struct lu_env *env,
906 const struct cl_page_slice *slice, struct cl_io *io);
907 /**
908 * Checks whether underlying VM page is locked (in the suitable 871 * Checks whether underlying VM page is locked (in the suitable
909 * sense). Used for assertions. 872 * sense). Used for assertions.
910 * 873 *
@@ -957,7 +920,7 @@ struct cl_page_operations {
957 */ 920 */
958 int (*cpo_is_under_lock)(const struct lu_env *env, 921 int (*cpo_is_under_lock)(const struct lu_env *env,
959 const struct cl_page_slice *slice, 922 const struct cl_page_slice *slice,
960 struct cl_io *io); 923 struct cl_io *io, pgoff_t *max);
961 924
962 /** 925 /**
963 * Optional debugging helper. Prints given page slice. 926 * Optional debugging helper. Prints given page slice.
@@ -1027,26 +990,6 @@ struct cl_page_operations {
1027 */ 990 */
1028 int (*cpo_make_ready)(const struct lu_env *env, 991 int (*cpo_make_ready)(const struct lu_env *env,
1029 const struct cl_page_slice *slice); 992 const struct cl_page_slice *slice);
1030 /**
1031 * Announce that this page is to be written out
1032 * opportunistically, that is, page is dirty, it is not
1033 * necessary to start write-out transfer right now, but
1034 * eventually page has to be written out.
1035 *
1036 * Main caller of this is the write path (see
1037 * vvp_io_commit_write()), using this method to build a
1038 * "transfer cache" from which large transfers are then
1039 * constructed by the req-formation engine.
1040 *
1041 * \todo XXX it would make sense to add page-age tracking
1042 * semantics here, and to oblige the req-formation engine to
1043 * send the page out not later than it is too old.
1044 *
1045 * \see cl_page_cache_add()
1046 */
1047 int (*cpo_cache_add)(const struct lu_env *env,
1048 const struct cl_page_slice *slice,
1049 struct cl_io *io);
1050 } io[CRT_NR]; 993 } io[CRT_NR];
1051 /** 994 /**
1052 * Tell transfer engine that only [to, from] part of a page should be 995 * Tell transfer engine that only [to, from] part of a page should be
@@ -1098,9 +1041,8 @@ struct cl_page_operations {
1098 */ 1041 */
1099#define CL_PAGE_DEBUG(mask, env, page, format, ...) \ 1042#define CL_PAGE_DEBUG(mask, env, page, format, ...) \
1100do { \ 1043do { \
1101 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
1102 \
1103 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ 1044 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
1045 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
1104 cl_page_print(env, &msgdata, lu_cdebug_printer, page); \ 1046 cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
1105 CDEBUG(mask, format, ## __VA_ARGS__); \ 1047 CDEBUG(mask, format, ## __VA_ARGS__); \
1106 } \ 1048 } \
@@ -1111,9 +1053,8 @@ do { \
1111 */ 1053 */
1112#define CL_PAGE_HEADER(mask, env, page, format, ...) \ 1054#define CL_PAGE_HEADER(mask, env, page, format, ...) \
1113do { \ 1055do { \
1114 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
1115 \
1116 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ 1056 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
1057 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
1117 cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \ 1058 cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
1118 CDEBUG(mask, format, ## __VA_ARGS__); \ 1059 CDEBUG(mask, format, ## __VA_ARGS__); \
1119 } \ 1060 } \
@@ -1130,6 +1071,12 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
1130#define cl_page_in_use(pg) __page_in_use(pg, 1) 1071#define cl_page_in_use(pg) __page_in_use(pg, 1)
1131#define cl_page_in_use_noref(pg) __page_in_use(pg, 0) 1072#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
1132 1073
1074static inline struct page *cl_page_vmpage(struct cl_page *page)
1075{
1076 LASSERT(page->cp_vmpage);
1077 return page->cp_vmpage;
1078}
1079
1133/** @} cl_page */ 1080/** @} cl_page */
1134 1081
1135/** \addtogroup cl_lock cl_lock 1082/** \addtogroup cl_lock cl_lock
@@ -1150,12 +1097,6 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
1150 * (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to 1097 * (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
1151 * cl_lock::cll_layers list through cl_lock_slice::cls_linkage. 1098 * cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
1152 * 1099 *
1153 * All locks for a given object are linked into cl_object_header::coh_locks
1154 * list (protected by cl_object_header::coh_lock_guard spin-lock) through
1155 * cl_lock::cll_linkage. Currently this list is not sorted in any way. We can
1156 * sort it in starting lock offset, or use altogether different data structure
1157 * like a tree.
1158 *
1159 * Typical cl_lock consists of the two layers: 1100 * Typical cl_lock consists of the two layers:
1160 * 1101 *
1161 * - vvp_lock (vvp specific data), and 1102 * - vvp_lock (vvp specific data), and
@@ -1177,111 +1118,29 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
1177 * 1118 *
1178 * LIFE CYCLE 1119 * LIFE CYCLE
1179 * 1120 *
1180 * cl_lock is reference counted. When reference counter drops to 0, lock is 1121 * cl_lock is a cacheless data container for the requirements of locks to
1181 * placed in the cache, except when lock is in CLS_FREEING state. CLS_FREEING 1122 * complete the IO. cl_lock is created before I/O starts and destroyed when the
1182 * lock is destroyed when last reference is released. Referencing between 1123 * I/O is complete.
1183 * top-lock and its sub-locks is described in the lov documentation module. 1124 *
1184 * 1125 * cl_lock depends on LDLM lock to fulfill lock semantics. LDLM lock is attached
1185 * STATE MACHINE 1126 * to cl_lock at OSC layer. LDLM lock is still cacheable.
1186 *
1187 * Also, cl_lock is a state machine. This requires some clarification. One of
1188 * the goals of client IO re-write was to make IO path non-blocking, or at
1189 * least to make it easier to make it non-blocking in the future. Here
1190 * `non-blocking' means that when a system call (read, write, truncate)
1191 * reaches a situation where it has to wait for a communication with the
1192 * server, it should --instead of waiting-- remember its current state and
1193 * switch to some other work. E.g,. instead of waiting for a lock enqueue,
1194 * client should proceed doing IO on the next stripe, etc. Obviously this is
1195 * rather radical redesign, and it is not planned to be fully implemented at
1196 * this time, instead we are putting some infrastructure in place, that would
1197 * make it easier to do asynchronous non-blocking IO easier in the
1198 * future. Specifically, where old locking code goes to sleep (waiting for
1199 * enqueue, for example), new code returns cl_lock_transition::CLO_WAIT. When
1200 * enqueue reply comes, its completion handler signals that lock state-machine
1201 * is ready to transit to the next state. There is some generic code in
1202 * cl_lock.c that sleeps, waiting for these signals. As a result, for users of
1203 * this cl_lock.c code, it looks like locking is done in normal blocking
1204 * fashion, and it the same time it is possible to switch to the non-blocking
1205 * locking (simply by returning cl_lock_transition::CLO_WAIT from cl_lock.c
1206 * functions).
1207 *
1208 * For a description of state machine states and transitions see enum
1209 * cl_lock_state.
1210 *
1211 * There are two ways to restrict a set of states which lock might move to:
1212 *
1213 * - placing a "hold" on a lock guarantees that lock will not be moved
1214 * into cl_lock_state::CLS_FREEING state until hold is released. Hold
1215 * can be only acquired on a lock that is not in
1216 * cl_lock_state::CLS_FREEING. All holds on a lock are counted in
1217 * cl_lock::cll_holds. Hold protects lock from cancellation and
1218 * destruction. Requests to cancel and destroy a lock on hold will be
1219 * recorded, but only honored when last hold on a lock is released;
1220 *
1221 * - placing a "user" on a lock guarantees that lock will not leave
1222 * cl_lock_state::CLS_NEW, cl_lock_state::CLS_QUEUING,
1223 * cl_lock_state::CLS_ENQUEUED and cl_lock_state::CLS_HELD set of
1224 * states, once it enters this set. That is, if a user is added onto a
1225 * lock in a state not from this set, it doesn't immediately enforce
1226 * lock to move to this set, but once lock enters this set it will
1227 * remain there until all users are removed. Lock users are counted in
1228 * cl_lock::cll_users.
1229 *
1230 * User is used to assure that lock is not canceled or destroyed while
1231 * it is being enqueued, or actively used by some IO.
1232 *
1233 * Currently, a user always comes with a hold (cl_lock_invariant()
1234 * checks that a number of holds is not less than a number of users).
1235 *
1236 * CONCURRENCY
1237 *
1238 * This is how lock state-machine operates. struct cl_lock contains a mutex
1239 * cl_lock::cll_guard that protects struct fields.
1240 *
1241 * - mutex is taken, and cl_lock::cll_state is examined.
1242 *
1243 * - for every state there are possible target states where lock can move
1244 * into. They are tried in order. Attempts to move into next state are
1245 * done by _try() functions in cl_lock.c:cl_{enqueue,unlock,wait}_try().
1246 *
1247 * - if the transition can be performed immediately, state is changed,
1248 * and mutex is released.
1249 *
1250 * - if the transition requires blocking, _try() function returns
1251 * cl_lock_transition::CLO_WAIT. Caller unlocks mutex and goes to
1252 * sleep, waiting for possibility of lock state change. It is woken
1253 * up when some event occurs, that makes lock state change possible
1254 * (e.g., the reception of the reply from the server), and repeats
1255 * the loop.
1256 *
1257 * Top-lock and sub-lock has separate mutexes and the latter has to be taken
1258 * first to avoid dead-lock.
1259 *
1260 * To see an example of interaction of all these issues, take a look at the
1261 * lov_cl.c:lov_lock_enqueue() function. It is called as a part of
1262 * cl_enqueue_try(), and tries to advance top-lock to ENQUEUED state, by
1263 * advancing state-machines of its sub-locks (lov_lock_enqueue_one()). Note
1264 * also, that it uses trylock to grab sub-lock mutex to avoid dead-lock. It
1265 * also has to handle CEF_ASYNC enqueue, when sub-locks enqueues have to be
1266 * done in parallel, rather than one after another (this is used for glimpse
1267 * locks, that cannot dead-lock).
1268 * 1127 *
1269 * INTERFACE AND USAGE 1128 * INTERFACE AND USAGE
1270 * 1129 *
1271 * struct cl_lock_operations provide a number of call-backs that are invoked 1130 * Two major methods are supported for cl_lock: clo_enqueue and clo_cancel. A
1272 * when events of interest occurs. Layers can intercept and handle glimpse, 1131 * cl_lock is enqueued by cl_lock_request(), which will call clo_enqueue()
1273 * blocking, cancel ASTs and a reception of the reply from the server. 1132 * methods for each layer to enqueue the lock. At the LOV layer, if a cl_lock
1133 * consists of multiple sub cl_locks, each sub locks will be enqueued
1134 * correspondingly. At OSC layer, the lock enqueue request will tend to reuse
1135 * cached LDLM lock; otherwise a new LDLM lock will have to be requested from
1136 * OST side.
1274 * 1137 *
1275 * One important difference with the old client locking model is that new 1138 * cl_lock_cancel() must be called to release a cl_lock after use. clo_cancel()
1276 * client has a representation for the top-lock, whereas in the old code only 1139 * method will be called for each layer to release the resource held by this
1277 * sub-locks existed as real data structures and file-level locks are 1140 * lock. At OSC layer, the reference count of LDLM lock, which is held at
1278 * represented by "request sets" that are created and destroyed on each and 1141 * clo_enqueue time, is released.
1279 * every lock creation.
1280 * 1142 *
1281 * Top-locks are cached, and can be found in the cache by the system calls. It 1143 * LDLM lock can only be canceled if there is no cl_lock using it.
1282 * is possible that top-lock is in cache, but some of its sub-locks were
1283 * canceled and destroyed. In that case top-lock has to be enqueued again
1284 * before it can be used.
1285 * 1144 *
1286 * Overall process of the locking during IO operation is as following: 1145 * Overall process of the locking during IO operation is as following:
1287 * 1146 *
@@ -1294,7 +1153,7 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
1294 * 1153 *
1295 * - when all locks are acquired, IO is performed; 1154 * - when all locks are acquired, IO is performed;
1296 * 1155 *
1297 * - locks are released into cache. 1156 * - locks are released after IO is complete.
1298 * 1157 *
1299 * Striping introduces major additional complexity into locking. The 1158 * Striping introduces major additional complexity into locking. The
1300 * fundamental problem is that it is generally unsafe to actively use (hold) 1159 * fundamental problem is that it is generally unsafe to actively use (hold)
@@ -1316,16 +1175,6 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
1316 * buf is a part of memory mapped Lustre file, a lock or locks protecting buf 1175 * buf is a part of memory mapped Lustre file, a lock or locks protecting buf
1317 * has to be held together with the usual lock on [offset, offset + count]. 1176 * has to be held together with the usual lock on [offset, offset + count].
1318 * 1177 *
1319 * As multi-stripe locks have to be allowed, it makes sense to cache them, so
1320 * that, for example, a sequence of O_APPEND writes can proceed quickly
1321 * without going down to the individual stripes to do lock matching. On the
1322 * other hand, multi-stripe locks shouldn't be used by normal read/write
1323 * calls. To achieve this, every layer can implement ->clo_fits_into() method,
1324 * that is called by lock matching code (cl_lock_lookup()), and that can be
1325 * used to selectively disable matching of certain locks for certain IOs. For
1326 * example, lov layer implements lov_lock_fits_into() that allow multi-stripe
1327 * locks to be matched only for truncates and O_APPEND writes.
1328 *
1329 * Interaction with DLM 1178 * Interaction with DLM
1330 * 1179 *
1331 * In the expected setup, cl_lock is ultimately backed up by a collection of 1180 * In the expected setup, cl_lock is ultimately backed up by a collection of
@@ -1356,295 +1205,27 @@ struct cl_lock_descr {
1356 __u32 cld_enq_flags; 1205 __u32 cld_enq_flags;
1357}; 1206};
1358 1207
1359#define DDESCR "%s(%d):[%lu, %lu]" 1208#define DDESCR "%s(%d):[%lu, %lu]:%x"
1360#define PDESCR(descr) \ 1209#define PDESCR(descr) \
1361 cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \ 1210 cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \
1362 (descr)->cld_start, (descr)->cld_end 1211 (descr)->cld_start, (descr)->cld_end, (descr)->cld_enq_flags
1363 1212
1364const char *cl_lock_mode_name(const enum cl_lock_mode mode); 1213const char *cl_lock_mode_name(const enum cl_lock_mode mode);
1365 1214
1366/** 1215/**
1367 * Lock state-machine states.
1368 *
1369 * \htmlonly
1370 * <pre>
1371 *
1372 * Possible state transitions:
1373 *
1374 * +------------------>NEW
1375 * | |
1376 * | | cl_enqueue_try()
1377 * | |
1378 * | cl_unuse_try() V
1379 * | +--------------QUEUING (*)
1380 * | | |
1381 * | | | cl_enqueue_try()
1382 * | | |
1383 * | | cl_unuse_try() V
1384 * sub-lock | +-------------ENQUEUED (*)
1385 * canceled | | |
1386 * | | | cl_wait_try()
1387 * | | |
1388 * | | (R)
1389 * | | |
1390 * | | V
1391 * | | HELD<---------+
1392 * | | | |
1393 * | | | | cl_use_try()
1394 * | | cl_unuse_try() | |
1395 * | | | |
1396 * | | V ---+
1397 * | +------------>INTRANSIT (D) <--+
1398 * | | |
1399 * | cl_unuse_try() | | cached lock found
1400 * | | | cl_use_try()
1401 * | | |
1402 * | V |
1403 * +------------------CACHED---------+
1404 * |
1405 * (C)
1406 * |
1407 * V
1408 * FREEING
1409 *
1410 * Legend:
1411 *
1412 * In states marked with (*) transition to the same state (i.e., a loop
1413 * in the diagram) is possible.
1414 *
1415 * (R) is the point where Receive call-back is invoked: it allows layers
1416 * to handle arrival of lock reply.
1417 *
1418 * (C) is the point where Cancellation call-back is invoked.
1419 *
1420 * (D) is the transit state which means the lock is changing.
1421 *
1422 * Transition to FREEING state is possible from any other state in the
1423 * diagram in case of unrecoverable error.
1424 * </pre>
1425 * \endhtmlonly
1426 *
1427 * These states are for individual cl_lock object. Top-lock and its sub-locks
1428 * can be in the different states. Another way to say this is that we have
1429 * nested state-machines.
1430 *
1431 * Separate QUEUING and ENQUEUED states are needed to support non-blocking
1432 * operation for locks with multiple sub-locks. Imagine lock on a file F, that
1433 * intersects 3 stripes S0, S1, and S2. To enqueue F client has to send
1434 * enqueue to S0, wait for its completion, then send enqueue for S1, wait for
1435 * its completion and at last enqueue lock for S2, and wait for its
1436 * completion. In that case, top-lock is in QUEUING state while S0, S1 are
1437 * handled, and is in ENQUEUED state after enqueue to S2 has been sent (note
1438 * that in this case, sub-locks move from state to state, and top-lock remains
1439 * in the same state).
1440 */
1441enum cl_lock_state {
1442 /**
1443 * Lock that wasn't yet enqueued
1444 */
1445 CLS_NEW,
1446 /**
1447 * Enqueue is in progress, blocking for some intermediate interaction
1448 * with the other side.
1449 */
1450 CLS_QUEUING,
1451 /**
1452 * Lock is fully enqueued, waiting for server to reply when it is
1453 * granted.
1454 */
1455 CLS_ENQUEUED,
1456 /**
1457 * Lock granted, actively used by some IO.
1458 */
1459 CLS_HELD,
1460 /**
1461 * This state is used to mark the lock is being used, or unused.
1462 * We need this state because the lock may have several sublocks,
1463 * so it's impossible to have an atomic way to bring all sublocks
1464 * into CLS_HELD state at use case, or all sublocks to CLS_CACHED
1465 * at unuse case.
1466 * If a thread is referring to a lock, and it sees the lock is in this
1467 * state, it must wait for the lock.
1468 * See state diagram for details.
1469 */
1470 CLS_INTRANSIT,
1471 /**
1472 * Lock granted, not used.
1473 */
1474 CLS_CACHED,
1475 /**
1476 * Lock is being destroyed.
1477 */
1478 CLS_FREEING,
1479 CLS_NR
1480};
1481
1482enum cl_lock_flags {
1483 /**
1484 * lock has been cancelled. This flag is never cleared once set (by
1485 * cl_lock_cancel0()).
1486 */
1487 CLF_CANCELLED = 1 << 0,
1488 /** cancellation is pending for this lock. */
1489 CLF_CANCELPEND = 1 << 1,
1490 /** destruction is pending for this lock. */
1491 CLF_DOOMED = 1 << 2,
1492 /** from enqueue RPC reply upcall. */
1493 CLF_FROM_UPCALL = 1 << 3,
1494};
1495
1496/**
1497 * Lock closure.
1498 *
1499 * Lock closure is a collection of locks (both top-locks and sub-locks) that
1500 * might be updated in a result of an operation on a certain lock (which lock
1501 * this is a closure of).
1502 *
1503 * Closures are needed to guarantee dead-lock freedom in the presence of
1504 *
1505 * - nested state-machines (top-lock state-machine composed of sub-lock
1506 * state-machines), and
1507 *
1508 * - shared sub-locks.
1509 *
1510 * Specifically, many operations, such as lock enqueue, wait, unlock,
1511 * etc. start from a top-lock, and then operate on a sub-locks of this
1512 * top-lock, holding a top-lock mutex. When sub-lock state changes as a result
1513 * of such operation, this change has to be propagated to all top-locks that
1514 * share this sub-lock. Obviously, no natural lock ordering (e.g.,
1515 * top-to-bottom or bottom-to-top) captures this scenario, so try-locking has
1516 * to be used. Lock closure systematizes this try-and-repeat logic.
1517 */
1518struct cl_lock_closure {
1519 /**
1520 * Lock that is mutexed when closure construction is started. When
1521 * closure in is `wait' mode (cl_lock_closure::clc_wait), mutex on
1522 * origin is released before waiting.
1523 */
1524 struct cl_lock *clc_origin;
1525 /**
1526 * List of enclosed locks, so far. Locks are linked here through
1527 * cl_lock::cll_inclosure.
1528 */
1529 struct list_head clc_list;
1530 /**
1531 * True iff closure is in a `wait' mode. This determines what
1532 * cl_lock_enclosure() does when a lock L to be added to the closure
1533 * is currently mutexed by some other thread.
1534 *
1535 * If cl_lock_closure::clc_wait is not set, then closure construction
1536 * fails with CLO_REPEAT immediately.
1537 *
1538 * In wait mode, cl_lock_enclosure() waits until next attempt to build
1539 * a closure might succeed. To this end it releases an origin mutex
1540 * (cl_lock_closure::clc_origin), that has to be the only lock mutex
1541 * owned by the current thread, and then waits on L mutex (by grabbing
1542 * it and immediately releasing), before returning CLO_REPEAT to the
1543 * caller.
1544 */
1545 int clc_wait;
1546 /** Number of locks in the closure. */
1547 int clc_nr;
1548};
1549
1550/**
1551 * Layered client lock. 1216 * Layered client lock.
1552 */ 1217 */
1553struct cl_lock { 1218struct cl_lock {
1554 /** Reference counter. */
1555 atomic_t cll_ref;
1556 /** List of slices. Immutable after creation. */ 1219 /** List of slices. Immutable after creation. */
1557 struct list_head cll_layers; 1220 struct list_head cll_layers;
1558 /** 1221 /** lock attribute, extent, cl_object, etc. */
1559 * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
1560 * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
1561 */
1562 struct list_head cll_linkage;
1563 /**
1564 * Parameters of this lock. Protected by
1565 * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
1566 * cl_lock::cll_guard. Modified only on lock creation and in
1567 * cl_lock_modify().
1568 */
1569 struct cl_lock_descr cll_descr; 1222 struct cl_lock_descr cll_descr;
1570 /** Protected by cl_lock::cll_guard. */
1571 enum cl_lock_state cll_state;
1572 /** signals state changes. */
1573 wait_queue_head_t cll_wq;
1574 /**
1575 * Recursive lock, most fields in cl_lock{} are protected by this.
1576 *
1577 * Locking rules: this mutex is never held across network
1578 * communication, except when lock is being canceled.
1579 *
1580 * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
1581 * on a top-lock. Other direction is implemented through a
1582 * try-lock-repeat loop. Mutices of unrelated locks can be taken only
1583 * by try-locking.
1584 *
1585 * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
1586 */
1587 struct mutex cll_guard;
1588 struct task_struct *cll_guarder;
1589 int cll_depth;
1590
1591 /**
1592 * the owner for INTRANSIT state
1593 */
1594 struct task_struct *cll_intransit_owner;
1595 int cll_error;
1596 /**
1597 * Number of holds on a lock. A hold prevents a lock from being
1598 * canceled and destroyed. Protected by cl_lock::cll_guard.
1599 *
1600 * \see cl_lock_hold(), cl_lock_unhold(), cl_lock_release()
1601 */
1602 int cll_holds;
1603 /**
1604 * Number of lock users. Valid in cl_lock_state::CLS_HELD state
1605 * only. Lock user pins lock in CLS_HELD state. Protected by
1606 * cl_lock::cll_guard.
1607 *
1608 * \see cl_wait(), cl_unuse().
1609 */
1610 int cll_users;
1611 /**
1612 * Flag bit-mask. Values from enum cl_lock_flags. Updates are
1613 * protected by cl_lock::cll_guard.
1614 */
1615 unsigned long cll_flags;
1616 /**
1617 * A linkage into a list of locks in a closure.
1618 *
1619 * \see cl_lock_closure
1620 */
1621 struct list_head cll_inclosure;
1622 /**
1623 * Confict lock at queuing time.
1624 */
1625 struct cl_lock *cll_conflict;
1626 /**
1627 * A list of references to this lock, for debugging.
1628 */
1629 struct lu_ref cll_reference;
1630 /**
1631 * A list of holds on this lock, for debugging.
1632 */
1633 struct lu_ref cll_holders;
1634 /**
1635 * A reference for cl_lock::cll_descr::cld_obj. For debugging.
1636 */
1637 struct lu_ref_link cll_obj_ref;
1638#ifdef CONFIG_LOCKDEP
1639 /* "dep_map" name is assumed by lockdep.h macros. */
1640 struct lockdep_map dep_map;
1641#endif
1642}; 1223};
1643 1224
1644/** 1225/**
1645 * Per-layer part of cl_lock 1226 * Per-layer part of cl_lock
1646 * 1227 *
1647 * \see ccc_lock, lov_lock, lovsub_lock, osc_lock 1228 * \see vvp_lock, lov_lock, lovsub_lock, osc_lock
1648 */ 1229 */
1649struct cl_lock_slice { 1230struct cl_lock_slice {
1650 struct cl_lock *cls_lock; 1231 struct cl_lock *cls_lock;
@@ -1658,174 +1239,36 @@ struct cl_lock_slice {
1658}; 1239};
1659 1240
1660/** 1241/**
1661 * Possible (non-error) return values of ->clo_{enqueue,wait,unlock}().
1662 *
1663 * NOTE: lov_subresult() depends on ordering here.
1664 */
1665enum cl_lock_transition {
1666 /** operation cannot be completed immediately. Wait for state change. */
1667 CLO_WAIT = 1,
1668 /** operation had to release lock mutex, restart. */
1669 CLO_REPEAT = 2,
1670 /** lower layer re-enqueued. */
1671 CLO_REENQUEUED = 3,
1672};
1673
1674/**
1675 * 1242 *
1676 * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops 1243 * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
1677 */ 1244 */
1678struct cl_lock_operations { 1245struct cl_lock_operations {
1679 /**
1680 * \name statemachine
1681 *
1682 * State machine transitions. These 3 methods are called to transfer
1683 * lock from one state to another, as described in the commentary
1684 * above enum #cl_lock_state.
1685 *
1686 * \retval 0 this layer has nothing more to do to before
1687 * transition to the target state happens;
1688 *
1689 * \retval CLO_REPEAT method had to release and re-acquire cl_lock
1690 * mutex, repeat invocation of transition method
1691 * across all layers;
1692 *
1693 * \retval CLO_WAIT this layer cannot move to the target state
1694 * immediately, as it has to wait for certain event
1695 * (e.g., the communication with the server). It
1696 * is guaranteed, that when the state transfer
1697 * becomes possible, cl_lock::cll_wq wait-queue
1698 * is signaled. Caller can wait for this event by
1699 * calling cl_lock_state_wait();
1700 *
1701 * \retval -ve failure, abort state transition, move the lock
1702 * into cl_lock_state::CLS_FREEING state, and set
1703 * cl_lock::cll_error.
1704 *
1705 * Once all layers voted to agree to transition (by returning 0), lock
1706 * is moved into corresponding target state. All state transition
1707 * methods are optional.
1708 */
1709 /** @{ */ 1246 /** @{ */
1710 /** 1247 /**
1711 * Attempts to enqueue the lock. Called top-to-bottom. 1248 * Attempts to enqueue the lock. Called top-to-bottom.
1712 * 1249 *
1713 * \see ccc_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(), 1250 * \retval 0 this layer has enqueued the lock successfully
1251 * \retval >0 this layer has enqueued the lock, but need to wait on
1252 * @anchor for resources
1253 * \retval -ve failure
1254 *
1255 * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
1714 * \see osc_lock_enqueue() 1256 * \see osc_lock_enqueue()
1715 */ 1257 */
1716 int (*clo_enqueue)(const struct lu_env *env, 1258 int (*clo_enqueue)(const struct lu_env *env,
1717 const struct cl_lock_slice *slice, 1259 const struct cl_lock_slice *slice,
1718 struct cl_io *io, __u32 enqflags); 1260 struct cl_io *io, struct cl_sync_io *anchor);
1719 /** 1261 /**
1720 * Attempts to wait for enqueue result. Called top-to-bottom. 1262 * Cancel a lock, release its DLM lock ref, while does not cancel the
1721 * 1263 * DLM lock
1722 * \see ccc_lock_wait(), lov_lock_wait(), osc_lock_wait()
1723 */
1724 int (*clo_wait)(const struct lu_env *env,
1725 const struct cl_lock_slice *slice);
1726 /**
1727 * Attempts to unlock the lock. Called bottom-to-top. In addition to
1728 * usual return values of lock state-machine methods, this can return
1729 * -ESTALE to indicate that lock cannot be returned to the cache, and
1730 * has to be re-initialized.
1731 * unuse is a one-shot operation, so it must NOT return CLO_WAIT.
1732 *
1733 * \see ccc_lock_unuse(), lov_lock_unuse(), osc_lock_unuse()
1734 */
1735 int (*clo_unuse)(const struct lu_env *env,
1736 const struct cl_lock_slice *slice);
1737 /**
1738 * Notifies layer that cached lock is started being used.
1739 *
1740 * \pre lock->cll_state == CLS_CACHED
1741 *
1742 * \see lov_lock_use(), osc_lock_use()
1743 */
1744 int (*clo_use)(const struct lu_env *env,
1745 const struct cl_lock_slice *slice);
1746 /** @} statemachine */
1747 /**
1748 * A method invoked when lock state is changed (as a result of state
1749 * transition). This is used, for example, to track when the state of
1750 * a sub-lock changes, to propagate this change to the corresponding
1751 * top-lock. Optional
1752 *
1753 * \see lovsub_lock_state()
1754 */
1755 void (*clo_state)(const struct lu_env *env,
1756 const struct cl_lock_slice *slice,
1757 enum cl_lock_state st);
1758 /**
1759 * Returns true, iff given lock is suitable for the given io, idea
1760 * being, that there are certain "unsafe" locks, e.g., ones acquired
1761 * for O_APPEND writes, that we don't want to re-use for a normal
1762 * write, to avoid the danger of cascading evictions. Optional. Runs
1763 * under cl_object_header::coh_lock_guard.
1764 *
1765 * XXX this should take more information about lock needed by
1766 * io. Probably lock description or something similar.
1767 *
1768 * \see lov_fits_into()
1769 */
1770 int (*clo_fits_into)(const struct lu_env *env,
1771 const struct cl_lock_slice *slice,
1772 const struct cl_lock_descr *need,
1773 const struct cl_io *io);
1774 /**
1775 * \name ast
1776 * Asynchronous System Traps. All of then are optional, all are
1777 * executed bottom-to-top.
1778 */
1779 /** @{ */
1780
1781 /**
1782 * Cancellation callback. Cancel a lock voluntarily, or under
1783 * the request of server.
1784 */ 1264 */
1785 void (*clo_cancel)(const struct lu_env *env, 1265 void (*clo_cancel)(const struct lu_env *env,
1786 const struct cl_lock_slice *slice); 1266 const struct cl_lock_slice *slice);
1787 /** 1267 /** @} */
1788 * Lock weighting ast. Executed to estimate how precious this lock
1789 * is. The sum of results across all layers is used to determine
1790 * whether lock worth keeping in cache given present memory usage.
1791 *
1792 * \see osc_lock_weigh(), vvp_lock_weigh(), lovsub_lock_weigh().
1793 */
1794 unsigned long (*clo_weigh)(const struct lu_env *env,
1795 const struct cl_lock_slice *slice);
1796 /** @} ast */
1797
1798 /**
1799 * \see lovsub_lock_closure()
1800 */
1801 int (*clo_closure)(const struct lu_env *env,
1802 const struct cl_lock_slice *slice,
1803 struct cl_lock_closure *closure);
1804 /**
1805 * Executed bottom-to-top when lock description changes (e.g., as a
1806 * result of server granting more generous lock than was requested).
1807 *
1808 * \see lovsub_lock_modify()
1809 */
1810 int (*clo_modify)(const struct lu_env *env,
1811 const struct cl_lock_slice *slice,
1812 const struct cl_lock_descr *updated);
1813 /**
1814 * Notifies layers (bottom-to-top) that lock is going to be
1815 * destroyed. Responsibility of layers is to prevent new references on
1816 * this lock from being acquired once this method returns.
1817 *
1818 * This can be called multiple times due to the races.
1819 *
1820 * \see cl_lock_delete()
1821 * \see osc_lock_delete(), lovsub_lock_delete()
1822 */
1823 void (*clo_delete)(const struct lu_env *env,
1824 const struct cl_lock_slice *slice);
1825 /** 1268 /**
1826 * Destructor. Frees resources and the slice. 1269 * Destructor. Frees resources and the slice.
1827 * 1270 *
1828 * \see ccc_lock_fini(), lov_lock_fini(), lovsub_lock_fini(), 1271 * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
1829 * \see osc_lock_fini() 1272 * \see osc_lock_fini()
1830 */ 1273 */
1831 void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice); 1274 void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
@@ -2016,7 +1459,7 @@ enum cl_io_state {
2016 * This is usually embedded into layer session data, rather than allocated 1459 * This is usually embedded into layer session data, rather than allocated
2017 * dynamically. 1460 * dynamically.
2018 * 1461 *
2019 * \see vvp_io, lov_io, osc_io, ccc_io 1462 * \see vvp_io, lov_io, osc_io
2020 */ 1463 */
2021struct cl_io_slice { 1464struct cl_io_slice {
2022 struct cl_io *cis_io; 1465 struct cl_io *cis_io;
@@ -2031,6 +1474,8 @@ struct cl_io_slice {
2031 struct list_head cis_linkage; 1474 struct list_head cis_linkage;
2032}; 1475};
2033 1476
1477typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
1478 struct cl_page *);
2034/** 1479/**
2035 * Per-layer io operations. 1480 * Per-layer io operations.
2036 * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops 1481 * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
@@ -2114,7 +1559,7 @@ struct cl_io_operations {
2114 void (*cio_fini)(const struct lu_env *env, 1559 void (*cio_fini)(const struct lu_env *env,
2115 const struct cl_io_slice *slice); 1560 const struct cl_io_slice *slice);
2116 } op[CIT_OP_NR]; 1561 } op[CIT_OP_NR];
2117 struct { 1562
2118 /** 1563 /**
2119 * Submit pages from \a queue->c2_qin for IO, and move 1564 * Submit pages from \a queue->c2_qin for IO, and move
2120 * successfully submitted pages into \a queue->c2_qout. Return 1565 * successfully submitted pages into \a queue->c2_qout. Return
@@ -2127,7 +1572,15 @@ struct cl_io_operations {
2127 const struct cl_io_slice *slice, 1572 const struct cl_io_slice *slice,
2128 enum cl_req_type crt, 1573 enum cl_req_type crt,
2129 struct cl_2queue *queue); 1574 struct cl_2queue *queue);
2130 } req_op[CRT_NR]; 1575 /**
1576 * Queue async page for write.
1577 * The difference between cio_submit and cio_queue is that
1578 * cio_submit is for urgent request.
1579 */
1580 int (*cio_commit_async)(const struct lu_env *env,
1581 const struct cl_io_slice *slice,
1582 struct cl_page_list *queue, int from, int to,
1583 cl_commit_cbt cb);
2131 /** 1584 /**
2132 * Read missing page. 1585 * Read missing page.
2133 * 1586 *
@@ -2140,31 +1593,6 @@ struct cl_io_operations {
2140 const struct cl_io_slice *slice, 1593 const struct cl_io_slice *slice,
2141 const struct cl_page_slice *page); 1594 const struct cl_page_slice *page);
2142 /** 1595 /**
2143 * Prepare write of a \a page. Called bottom-to-top by a top-level
2144 * cl_io_operations::op[CIT_WRITE]::cio_start() to prepare page for
2145 * get data from user-level buffer.
2146 *
2147 * \pre io->ci_type == CIT_WRITE
2148 *
2149 * \see vvp_io_prepare_write(), lov_io_prepare_write(),
2150 * osc_io_prepare_write().
2151 */
2152 int (*cio_prepare_write)(const struct lu_env *env,
2153 const struct cl_io_slice *slice,
2154 const struct cl_page_slice *page,
2155 unsigned from, unsigned to);
2156 /**
2157 *
2158 * \pre io->ci_type == CIT_WRITE
2159 *
2160 * \see vvp_io_commit_write(), lov_io_commit_write(),
2161 * osc_io_commit_write().
2162 */
2163 int (*cio_commit_write)(const struct lu_env *env,
2164 const struct cl_io_slice *slice,
2165 const struct cl_page_slice *page,
2166 unsigned from, unsigned to);
2167 /**
2168 * Optional debugging helper. Print given io slice. 1596 * Optional debugging helper. Print given io slice.
2169 */ 1597 */
2170 int (*cio_print)(const struct lu_env *env, void *cookie, 1598 int (*cio_print)(const struct lu_env *env, void *cookie,
@@ -2216,9 +1644,13 @@ enum cl_enq_flags {
2216 */ 1644 */
2217 CEF_AGL = 0x00000020, 1645 CEF_AGL = 0x00000020,
2218 /** 1646 /**
1647 * enqueue a lock to test DLM lock existence.
1648 */
1649 CEF_PEEK = 0x00000040,
1650 /**
2219 * mask of enq_flags. 1651 * mask of enq_flags.
2220 */ 1652 */
2221 CEF_MASK = 0x0000003f, 1653 CEF_MASK = 0x0000007f,
2222}; 1654};
2223 1655
2224/** 1656/**
@@ -2228,12 +1660,12 @@ enum cl_enq_flags {
2228struct cl_io_lock_link { 1660struct cl_io_lock_link {
2229 /** linkage into one of cl_lockset lists. */ 1661 /** linkage into one of cl_lockset lists. */
2230 struct list_head cill_linkage; 1662 struct list_head cill_linkage;
2231 struct cl_lock_descr cill_descr; 1663 struct cl_lock cill_lock;
2232 struct cl_lock *cill_lock;
2233 /** optional destructor */ 1664 /** optional destructor */
2234 void (*cill_fini)(const struct lu_env *env, 1665 void (*cill_fini)(const struct lu_env *env,
2235 struct cl_io_lock_link *link); 1666 struct cl_io_lock_link *link);
2236}; 1667};
1668#define cill_descr cill_lock.cll_descr
2237 1669
2238/** 1670/**
2239 * Lock-set represents a collection of locks, that io needs at a 1671 * Lock-set represents a collection of locks, that io needs at a
@@ -2267,8 +1699,6 @@ struct cl_io_lock_link {
2267struct cl_lockset { 1699struct cl_lockset {
2268 /** locks to be acquired. */ 1700 /** locks to be acquired. */
2269 struct list_head cls_todo; 1701 struct list_head cls_todo;
2270 /** locks currently being processed. */
2271 struct list_head cls_curr;
2272 /** locks acquired. */ 1702 /** locks acquired. */
2273 struct list_head cls_done; 1703 struct list_head cls_done;
2274}; 1704};
@@ -2632,9 +2062,7 @@ struct cl_site {
2632 * and top-locks (and top-pages) are accounted here. 2062 * and top-locks (and top-pages) are accounted here.
2633 */ 2063 */
2634 struct cache_stats cs_pages; 2064 struct cache_stats cs_pages;
2635 struct cache_stats cs_locks;
2636 atomic_t cs_pages_state[CPS_NR]; 2065 atomic_t cs_pages_state[CPS_NR];
2637 atomic_t cs_locks_state[CLS_NR];
2638}; 2066};
2639 2067
2640int cl_site_init(struct cl_site *s, struct cl_device *top); 2068int cl_site_init(struct cl_site *s, struct cl_device *top);
@@ -2725,7 +2153,7 @@ static inline void cl_device_fini(struct cl_device *d)
2725} 2153}
2726 2154
2727void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice, 2155void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
2728 struct cl_object *obj, 2156 struct cl_object *obj, pgoff_t index,
2729 const struct cl_page_operations *ops); 2157 const struct cl_page_operations *ops);
2730void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice, 2158void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
2731 struct cl_object *obj, 2159 struct cl_object *obj,
@@ -2758,7 +2186,7 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
2758 struct ost_lvb *lvb); 2186 struct ost_lvb *lvb);
2759int cl_conf_set(const struct lu_env *env, struct cl_object *obj, 2187int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
2760 const struct cl_object_conf *conf); 2188 const struct cl_object_conf *conf);
2761void cl_object_prune(const struct lu_env *env, struct cl_object *obj); 2189int cl_object_prune(const struct lu_env *env, struct cl_object *obj);
2762void cl_object_kill(const struct lu_env *env, struct cl_object *obj); 2190void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
2763 2191
2764/** 2192/**
@@ -2772,7 +2200,7 @@ static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
2772static inline void cl_object_page_init(struct cl_object *clob, int size) 2200static inline void cl_object_page_init(struct cl_object *clob, int size)
2773{ 2201{
2774 clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize; 2202 clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
2775 cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8); 2203 cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
2776} 2204}
2777 2205
2778static inline void *cl_object_page_slice(struct cl_object *clob, 2206static inline void *cl_object_page_slice(struct cl_object *clob,
@@ -2781,6 +2209,16 @@ static inline void *cl_object_page_slice(struct cl_object *clob,
2781 return (void *)((char *)page + clob->co_slice_off); 2209 return (void *)((char *)page + clob->co_slice_off);
2782} 2210}
2783 2211
2212/**
2213 * Return refcount of cl_object.
2214 */
2215static inline int cl_object_refc(struct cl_object *clob)
2216{
2217 struct lu_object_header *header = clob->co_lu.lo_header;
2218
2219 return atomic_read(&header->loh_ref);
2220}
2221
2784/** @} cl_object */ 2222/** @} cl_object */
2785 2223
2786/** \defgroup cl_page cl_page 2224/** \defgroup cl_page cl_page
@@ -2794,28 +2232,20 @@ enum {
2794}; 2232};
2795 2233
2796/* callback of cl_page_gang_lookup() */ 2234/* callback of cl_page_gang_lookup() */
2797typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *,
2798 struct cl_page *, void *);
2799int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
2800 struct cl_io *io, pgoff_t start, pgoff_t end,
2801 cl_page_gang_cb_t cb, void *cbdata);
2802struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index);
2803struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj, 2235struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj,
2804 pgoff_t idx, struct page *vmpage, 2236 pgoff_t idx, struct page *vmpage,
2805 enum cl_page_type type); 2237 enum cl_page_type type);
2806struct cl_page *cl_page_find_sub(const struct lu_env *env, 2238struct cl_page *cl_page_alloc(const struct lu_env *env,
2807 struct cl_object *obj, 2239 struct cl_object *o, pgoff_t ind,
2808 pgoff_t idx, struct page *vmpage, 2240 struct page *vmpage,
2809 struct cl_page *parent); 2241 enum cl_page_type type);
2810void cl_page_get(struct cl_page *page); 2242void cl_page_get(struct cl_page *page);
2811void cl_page_put(const struct lu_env *env, struct cl_page *page); 2243void cl_page_put(const struct lu_env *env, struct cl_page *page);
2812void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer, 2244void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer,
2813 const struct cl_page *pg); 2245 const struct cl_page *pg);
2814void cl_page_header_print(const struct lu_env *env, void *cookie, 2246void cl_page_header_print(const struct lu_env *env, void *cookie,
2815 lu_printer_t printer, const struct cl_page *pg); 2247 lu_printer_t printer, const struct cl_page *pg);
2816struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page);
2817struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj); 2248struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj);
2818struct cl_page *cl_page_top(struct cl_page *page);
2819 2249
2820const struct cl_page_slice *cl_page_at(const struct cl_page *page, 2250const struct cl_page_slice *cl_page_at(const struct cl_page *page,
2821 const struct lu_device_type *dtype); 2251 const struct lu_device_type *dtype);
@@ -2872,12 +2302,10 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
2872void cl_page_discard(const struct lu_env *env, struct cl_io *io, 2302void cl_page_discard(const struct lu_env *env, struct cl_io *io,
2873 struct cl_page *pg); 2303 struct cl_page *pg);
2874void cl_page_delete(const struct lu_env *env, struct cl_page *pg); 2304void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
2875int cl_page_unmap(const struct lu_env *env, struct cl_io *io,
2876 struct cl_page *pg);
2877int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg); 2305int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg);
2878void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate); 2306void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate);
2879int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, 2307int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
2880 struct cl_page *page); 2308 struct cl_page *page, pgoff_t *max_index);
2881loff_t cl_offset(const struct cl_object *obj, pgoff_t idx); 2309loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
2882pgoff_t cl_index(const struct cl_object *obj, loff_t offset); 2310pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
2883int cl_page_size(const struct cl_object *obj); 2311int cl_page_size(const struct cl_object *obj);
@@ -2890,138 +2318,66 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2890 const struct cl_lock_descr *descr); 2318 const struct cl_lock_descr *descr);
2891/* @} helper */ 2319/* @} helper */
2892 2320
2321/**
2322 * Data structure managing a client's cached pages. A count of
2323 * "unstable" pages is maintained, and an LRU of clean pages is
2324 * maintained. "unstable" pages are pages pinned by the ptlrpc
2325 * layer for recovery purposes.
2326 */
2327struct cl_client_cache {
2328 /**
2329 * # of users (OSCs)
2330 */
2331 atomic_t ccc_users;
2332 /**
2333 * # of threads are doing shrinking
2334 */
2335 unsigned int ccc_lru_shrinkers;
2336 /**
2337 * # of LRU entries available
2338 */
2339 atomic_t ccc_lru_left;
2340 /**
2341 * List of entities(OSCs) for this LRU cache
2342 */
2343 struct list_head ccc_lru;
2344 /**
2345 * Max # of LRU entries
2346 */
2347 unsigned long ccc_lru_max;
2348 /**
2349 * Lock to protect ccc_lru list
2350 */
2351 spinlock_t ccc_lru_lock;
2352 /**
2353 * # of unstable pages for this mount point
2354 */
2355 atomic_t ccc_unstable_nr;
2356 /**
2357 * Waitq for awaiting unstable pages to reach zero.
2358 * Used at umounting time and signaled on BRW commit
2359 */
2360 wait_queue_head_t ccc_unstable_waitq;
2361
2362};
2363
2893/** @} cl_page */ 2364/** @} cl_page */
2894 2365
2895/** \defgroup cl_lock cl_lock 2366/** \defgroup cl_lock cl_lock
2896 * @{ 2367 * @{
2897 */ 2368 */
2898 2369
2899struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io, 2370int cl_lock_request(const struct lu_env *env, struct cl_io *io,
2900 const struct cl_lock_descr *need, 2371 struct cl_lock *lock);
2901 const char *scope, const void *source); 2372int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
2902struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io, 2373 const struct cl_io *io);
2903 const struct cl_lock_descr *need, 2374void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock);
2904 const char *scope, const void *source);
2905struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2906 const struct cl_lock_descr *need,
2907 const char *scope, const void *source);
2908struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
2909 struct cl_object *obj, pgoff_t index,
2910 struct cl_lock *except, int pending,
2911 int canceld);
2912static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
2913 struct cl_object *obj,
2914 struct cl_page *page,
2915 struct cl_lock *except,
2916 int pending, int canceld)
2917{
2918 LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj));
2919 return cl_lock_at_pgoff(env, obj, page->cp_index, except,
2920 pending, canceld);
2921}
2922
2923const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock, 2375const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
2924 const struct lu_device_type *dtype); 2376 const struct lu_device_type *dtype);
2925 2377void cl_lock_release(const struct lu_env *env, struct cl_lock *lock);
2926void cl_lock_get(struct cl_lock *lock); 2378int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
2927void cl_lock_get_trust(struct cl_lock *lock); 2379 struct cl_lock *lock, struct cl_sync_io *anchor);
2928void cl_lock_put(const struct lu_env *env, struct cl_lock *lock);
2929void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2930 const char *scope, const void *source);
2931void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
2932 const char *scope, const void *source);
2933void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2934 const char *scope, const void *source);
2935void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2936 const char *scope, const void *source);
2937void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock);
2938void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock);
2939
2940int cl_lock_is_intransit(struct cl_lock *lock);
2941
2942int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock,
2943 int keep_mutex);
2944
2945/** \name statemachine statemachine
2946 * Interface to lock state machine consists of 3 parts:
2947 *
2948 * - "try" functions that attempt to effect a state transition. If state
2949 * transition is not possible right now (e.g., if it has to wait for some
2950 * asynchronous event to occur), these functions return
2951 * cl_lock_transition::CLO_WAIT.
2952 *
2953 * - "non-try" functions that implement synchronous blocking interface on
2954 * top of non-blocking "try" functions. These functions repeatedly call
2955 * corresponding "try" versions, and if state transition is not possible
2956 * immediately, wait for lock state change.
2957 *
2958 * - methods from cl_lock_operations, called by "try" functions. Lock can
2959 * be advanced to the target state only when all layers voted that they
2960 * are ready for this transition. "Try" functions call methods under lock
2961 * mutex. If a layer had to release a mutex, it re-acquires it and returns
2962 * cl_lock_transition::CLO_REPEAT, causing "try" function to call all
2963 * layers again.
2964 *
2965 * TRY NON-TRY METHOD FINAL STATE
2966 *
2967 * cl_enqueue_try() cl_enqueue() cl_lock_operations::clo_enqueue() CLS_ENQUEUED
2968 *
2969 * cl_wait_try() cl_wait() cl_lock_operations::clo_wait() CLS_HELD
2970 *
2971 * cl_unuse_try() cl_unuse() cl_lock_operations::clo_unuse() CLS_CACHED
2972 *
2973 * cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD
2974 *
2975 * @{
2976 */
2977
2978int cl_wait(const struct lu_env *env, struct cl_lock *lock);
2979void cl_unuse(const struct lu_env *env, struct cl_lock *lock);
2980int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
2981 struct cl_io *io, __u32 flags);
2982int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock);
2983int cl_wait_try(const struct lu_env *env, struct cl_lock *lock);
2984int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic);
2985
2986/** @} statemachine */
2987
2988void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock);
2989int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock);
2990void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
2991 enum cl_lock_state state);
2992int cl_queue_match(const struct list_head *queue,
2993 const struct cl_lock_descr *need);
2994
2995void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock);
2996void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock);
2997int cl_lock_is_mutexed(struct cl_lock *lock);
2998int cl_lock_nr_mutexed(const struct lu_env *env);
2999int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock);
3000int cl_lock_ext_match(const struct cl_lock_descr *has,
3001 const struct cl_lock_descr *need);
3002int cl_lock_descr_match(const struct cl_lock_descr *has,
3003 const struct cl_lock_descr *need);
3004int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need);
3005int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
3006 const struct cl_lock_descr *desc);
3007
3008void cl_lock_closure_init(const struct lu_env *env,
3009 struct cl_lock_closure *closure,
3010 struct cl_lock *origin, int wait);
3011void cl_lock_closure_fini(struct cl_lock_closure *closure);
3012int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
3013 struct cl_lock_closure *closure);
3014void cl_lock_disclosure(const struct lu_env *env,
3015 struct cl_lock_closure *closure);
3016int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
3017 struct cl_lock_closure *closure);
3018
3019void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock); 2380void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
3020void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock);
3021void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error);
3022void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait);
3023
3024unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
3025 2381
3026/** @} cl_lock */ 2382/** @} cl_lock */
3027 2383
@@ -3050,15 +2406,14 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
3050 struct cl_lock_descr *descr); 2406 struct cl_lock_descr *descr);
3051int cl_io_read_page(const struct lu_env *env, struct cl_io *io, 2407int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
3052 struct cl_page *page); 2408 struct cl_page *page);
3053int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
3054 struct cl_page *page, unsigned from, unsigned to);
3055int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
3056 struct cl_page *page, unsigned from, unsigned to);
3057int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, 2409int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
3058 enum cl_req_type iot, struct cl_2queue *queue); 2410 enum cl_req_type iot, struct cl_2queue *queue);
3059int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, 2411int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
3060 enum cl_req_type iot, struct cl_2queue *queue, 2412 enum cl_req_type iot, struct cl_2queue *queue,
3061 long timeout); 2413 long timeout);
2414int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
2415 struct cl_page_list *queue, int from, int to,
2416 cl_commit_cbt cb);
3062int cl_io_is_going(const struct lu_env *env); 2417int cl_io_is_going(const struct lu_env *env);
3063 2418
3064/** 2419/**
@@ -3114,6 +2469,12 @@ static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
3114 return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch); 2469 return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
3115} 2470}
3116 2471
2472static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
2473{
2474 LASSERT(plist->pl_nr > 0);
2475 return list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
2476}
2477
3117/** 2478/**
3118 * Iterate over pages in a page list. 2479 * Iterate over pages in a page list.
3119 */ 2480 */
@@ -3130,9 +2491,14 @@ void cl_page_list_init(struct cl_page_list *plist);
3130void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page); 2491void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page);
3131void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src, 2492void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
3132 struct cl_page *page); 2493 struct cl_page *page);
2494void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
2495 struct cl_page *page);
3133void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head); 2496void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head);
2497void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
2498 struct cl_page *page);
3134void cl_page_list_disown(const struct lu_env *env, 2499void cl_page_list_disown(const struct lu_env *env,
3135 struct cl_io *io, struct cl_page_list *plist); 2500 struct cl_io *io, struct cl_page_list *plist);
2501void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist);
3136 2502
3137void cl_2queue_init(struct cl_2queue *queue); 2503void cl_2queue_init(struct cl_2queue *queue);
3138void cl_2queue_disown(const struct lu_env *env, 2504void cl_2queue_disown(const struct lu_env *env,
@@ -3177,13 +2543,18 @@ struct cl_sync_io {
3177 atomic_t csi_barrier; 2543 atomic_t csi_barrier;
3178 /** completion to be signaled when transfer is complete. */ 2544 /** completion to be signaled when transfer is complete. */
3179 wait_queue_head_t csi_waitq; 2545 wait_queue_head_t csi_waitq;
2546 /** callback to invoke when this IO is finished */
2547 void (*csi_end_io)(const struct lu_env *,
2548 struct cl_sync_io *);
3180}; 2549};
3181 2550
3182void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages); 2551void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
3183int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io, 2552 void (*end)(const struct lu_env *, struct cl_sync_io *));
3184 struct cl_page_list *queue, struct cl_sync_io *anchor, 2553int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
3185 long timeout); 2554 long timeout);
3186void cl_sync_io_note(struct cl_sync_io *anchor, int ioret); 2555void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
2556 int ioret);
2557void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
3187 2558
3188/** @} cl_sync_io */ 2559/** @} cl_sync_io */
3189 2560
@@ -3241,6 +2612,9 @@ void *cl_env_reenter(void);
3241void cl_env_reexit(void *cookie); 2612void cl_env_reexit(void *cookie);
3242void cl_env_implant(struct lu_env *env, int *refcheck); 2613void cl_env_implant(struct lu_env *env, int *refcheck);
3243void cl_env_unplant(struct lu_env *env, int *refcheck); 2614void cl_env_unplant(struct lu_env *env, int *refcheck);
2615unsigned int cl_env_cache_purge(unsigned int nr);
2616struct lu_env *cl_env_percpu_get(void);
2617void cl_env_percpu_put(struct lu_env *env);
3244 2618
3245/** @} cl_env */ 2619/** @} cl_env */
3246 2620
diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h
deleted file mode 100644
index 5d839a9f789f..000000000000
--- a/drivers/staging/lustre/lustre/include/lclient.h
+++ /dev/null
@@ -1,408 +0,0 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26/*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32/*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * Definitions shared between vvp and liblustre, and other clients in the
37 * future.
38 *
39 * Author: Oleg Drokin <oleg.drokin@sun.com>
40 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 */
42
43#ifndef LCLIENT_H
44#define LCLIENT_H
45
46blkcnt_t dirty_cnt(struct inode *inode);
47
48int cl_glimpse_size0(struct inode *inode, int agl);
49int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
50 struct inode *inode, struct cl_object *clob, int agl);
51
52static inline int cl_glimpse_size(struct inode *inode)
53{
54 return cl_glimpse_size0(inode, 0);
55}
56
57static inline int cl_agl(struct inode *inode)
58{
59 return cl_glimpse_size0(inode, 1);
60}
61
62/**
63 * Locking policy for setattr.
64 */
65enum ccc_setattr_lock_type {
66 /** Locking is done by server */
67 SETATTR_NOLOCK,
68 /** Extent lock is enqueued */
69 SETATTR_EXTENT_LOCK,
70 /** Existing local extent lock is used */
71 SETATTR_MATCH_LOCK
72};
73
74/**
75 * IO state private to vvp or slp layers.
76 */
77struct ccc_io {
78 /** super class */
79 struct cl_io_slice cui_cl;
80 struct cl_io_lock_link cui_link;
81 /**
82 * I/O vector information to or from which read/write is going.
83 */
84 struct iov_iter *cui_iter;
85 /**
86 * Total size for the left IO.
87 */
88 size_t cui_tot_count;
89
90 union {
91 struct {
92 enum ccc_setattr_lock_type cui_local_lock;
93 } setattr;
94 } u;
95 /**
96 * True iff io is processing glimpse right now.
97 */
98 int cui_glimpse;
99 /**
100 * Layout version when this IO is initialized
101 */
102 __u32 cui_layout_gen;
103 /**
104 * File descriptor against which IO is done.
105 */
106 struct ll_file_data *cui_fd;
107 struct kiocb *cui_iocb;
108};
109
110/**
111 * True, if \a io is a normal io, False for splice_{read,write}.
112 * must be implemented in arch specific code.
113 */
114int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
115
116extern struct lu_context_key ccc_key;
117extern struct lu_context_key ccc_session_key;
118
119struct ccc_thread_info {
120 struct cl_lock_descr cti_descr;
121 struct cl_io cti_io;
122 struct cl_attr cti_attr;
123};
124
125static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
126{
127 struct ccc_thread_info *info;
128
129 info = lu_context_key_get(&env->le_ctx, &ccc_key);
130 LASSERT(info);
131 return info;
132}
133
134static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
135{
136 struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
137
138 memset(attr, 0, sizeof(*attr));
139 return attr;
140}
141
142static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
143{
144 struct cl_io *io = &ccc_env_info(env)->cti_io;
145
146 memset(io, 0, sizeof(*io));
147 return io;
148}
149
150struct ccc_session {
151 struct ccc_io cs_ios;
152};
153
154static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
155{
156 struct ccc_session *ses;
157
158 ses = lu_context_key_get(env->le_ses, &ccc_session_key);
159 LASSERT(ses);
160 return ses;
161}
162
163static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
164{
165 return &ccc_env_session(env)->cs_ios;
166}
167
168/**
169 * ccc-private object state.
170 */
171struct ccc_object {
172 struct cl_object_header cob_header;
173 struct cl_object cob_cl;
174 struct inode *cob_inode;
175
176 /**
177 * A list of dirty pages pending IO in the cache. Used by
178 * SOM. Protected by ll_inode_info::lli_lock.
179 *
180 * \see ccc_page::cpg_pending_linkage
181 */
182 struct list_head cob_pending_list;
183
184 /**
185 * Access this counter is protected by inode->i_sem. Now that
186 * the lifetime of transient pages must be covered by inode sem,
187 * we don't need to hold any lock..
188 */
189 int cob_transient_pages;
190 /**
191 * Number of outstanding mmaps on this file.
192 *
193 * \see ll_vm_open(), ll_vm_close().
194 */
195 atomic_t cob_mmap_cnt;
196
197 /**
198 * various flags
199 * cob_discard_page_warned
200 * if pages belonging to this object are discarded when a client
201 * is evicted, some debug info will be printed, this flag will be set
202 * during processing the first discarded page, then avoid flooding
203 * debug message for lots of discarded pages.
204 *
205 * \see ll_dirty_page_discard_warn.
206 */
207 unsigned int cob_discard_page_warned:1;
208};
209
210/**
211 * ccc-private page state.
212 */
213struct ccc_page {
214 struct cl_page_slice cpg_cl;
215 int cpg_defer_uptodate;
216 int cpg_ra_used;
217 int cpg_write_queued;
218 /**
219 * Non-empty iff this page is already counted in
220 * ccc_object::cob_pending_list. Protected by
221 * ccc_object::cob_pending_guard. This list is only used as a flag,
222 * that is, never iterated through, only checked for list_empty(), but
223 * having a list is useful for debugging.
224 */
225 struct list_head cpg_pending_linkage;
226 /** VM page */
227 struct page *cpg_page;
228};
229
230static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
231{
232 return container_of(slice, struct ccc_page, cpg_cl);
233}
234
235struct ccc_device {
236 struct cl_device cdv_cl;
237 struct super_block *cdv_sb;
238 struct cl_device *cdv_next;
239};
240
241struct ccc_lock {
242 struct cl_lock_slice clk_cl;
243};
244
245struct ccc_req {
246 struct cl_req_slice crq_cl;
247};
248
249void *ccc_key_init (const struct lu_context *ctx,
250 struct lu_context_key *key);
251void ccc_key_fini (const struct lu_context *ctx,
252 struct lu_context_key *key, void *data);
253void *ccc_session_key_init(const struct lu_context *ctx,
254 struct lu_context_key *key);
255void ccc_session_key_fini(const struct lu_context *ctx,
256 struct lu_context_key *key, void *data);
257
258int ccc_device_init (const struct lu_env *env,
259 struct lu_device *d,
260 const char *name, struct lu_device *next);
261struct lu_device *ccc_device_fini (const struct lu_env *env,
262 struct lu_device *d);
263struct lu_device *ccc_device_alloc(const struct lu_env *env,
264 struct lu_device_type *t,
265 struct lustre_cfg *cfg,
266 const struct lu_device_operations *luops,
267 const struct cl_device_operations *clops);
268struct lu_device *ccc_device_free (const struct lu_env *env,
269 struct lu_device *d);
270struct lu_object *ccc_object_alloc(const struct lu_env *env,
271 const struct lu_object_header *hdr,
272 struct lu_device *dev,
273 const struct cl_object_operations *clops,
274 const struct lu_object_operations *luops);
275
276int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
277 struct cl_req *req);
278void ccc_umount(const struct lu_env *env, struct cl_device *dev);
279int ccc_global_init(struct lu_device_type *device_type);
280void ccc_global_fini(struct lu_device_type *device_type);
281int ccc_object_init0(const struct lu_env *env, struct ccc_object *vob,
282 const struct cl_object_conf *conf);
283int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
284 const struct lu_object_conf *conf);
285void ccc_object_free(const struct lu_env *env, struct lu_object *obj);
286int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
287 struct cl_lock *lock, const struct cl_io *io,
288 const struct cl_lock_operations *lkops);
289int ccc_object_glimpse(const struct lu_env *env,
290 const struct cl_object *obj, struct ost_lvb *lvb);
291struct page *ccc_page_vmpage(const struct lu_env *env,
292 const struct cl_page_slice *slice);
293int ccc_page_is_under_lock(const struct lu_env *env,
294 const struct cl_page_slice *slice, struct cl_io *io);
295int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
296int ccc_transient_page_prep(const struct lu_env *env,
297 const struct cl_page_slice *slice,
298 struct cl_io *io);
299void ccc_lock_delete(const struct lu_env *env,
300 const struct cl_lock_slice *slice);
301void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
302int ccc_lock_enqueue(const struct lu_env *env,
303 const struct cl_lock_slice *slice,
304 struct cl_io *io, __u32 enqflags);
305int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
306int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
307int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
308int ccc_lock_fits_into(const struct lu_env *env,
309 const struct cl_lock_slice *slice,
310 const struct cl_lock_descr *need,
311 const struct cl_io *io);
312void ccc_lock_state(const struct lu_env *env,
313 const struct cl_lock_slice *slice,
314 enum cl_lock_state state);
315
316int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
317 __u32 enqflags, enum cl_lock_mode mode,
318 pgoff_t start, pgoff_t end);
319int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
320 __u32 enqflags, enum cl_lock_mode mode,
321 loff_t start, loff_t end);
322void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
323void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
324 size_t nob);
325void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
326 struct cl_io *io);
327int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
328 struct cl_io *io, loff_t start, size_t count, int *exceed);
329void ccc_req_completion(const struct lu_env *env,
330 const struct cl_req_slice *slice, int ioret);
331void ccc_req_attr_set(const struct lu_env *env,
332 const struct cl_req_slice *slice,
333 const struct cl_object *obj,
334 struct cl_req_attr *oa, u64 flags);
335
336struct lu_device *ccc2lu_dev (struct ccc_device *vdv);
337struct lu_object *ccc2lu (struct ccc_object *vob);
338struct ccc_device *lu2ccc_dev (const struct lu_device *d);
339struct ccc_device *cl2ccc_dev (const struct cl_device *d);
340struct ccc_object *lu2ccc (const struct lu_object *obj);
341struct ccc_object *cl2ccc (const struct cl_object *obj);
342struct ccc_lock *cl2ccc_lock (const struct cl_lock_slice *slice);
343struct ccc_io *cl2ccc_io (const struct lu_env *env,
344 const struct cl_io_slice *slice);
345struct ccc_req *cl2ccc_req (const struct cl_req_slice *slice);
346struct page *cl2vm_page (const struct cl_page_slice *slice);
347struct inode *ccc_object_inode(const struct cl_object *obj);
348struct ccc_object *cl_inode2ccc (struct inode *inode);
349
350int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
351
352int ccc_object_invariant(const struct cl_object *obj);
353int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
354void cl_inode_fini(struct inode *inode);
355int cl_local_size(struct inode *inode);
356
357__u16 ll_dirent_type_get(struct lu_dirent *ent);
358__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
359__u32 cl_fid_build_gen(const struct lu_fid *fid);
360
361# define CLOBINVRNT(env, clob, expr) \
362 ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
363
364int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
365int cl_ocd_update(struct obd_device *host,
366 struct obd_device *watched,
367 enum obd_notify_event ev, void *owner, void *data);
368
369struct ccc_grouplock {
370 struct lu_env *cg_env;
371 struct cl_io *cg_io;
372 struct cl_lock *cg_lock;
373 unsigned long cg_gid;
374};
375
376int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
377 struct ccc_grouplock *cg);
378void cl_put_grouplock(struct ccc_grouplock *cg);
379
380/**
381 * New interfaces to get and put lov_stripe_md from lov layer. This violates
382 * layering because lov_stripe_md is supposed to be a private data in lov.
383 *
384 * NB: If you find you have to use these interfaces for your new code, please
385 * think about it again. These interfaces may be removed in the future for
386 * better layering.
387 */
388struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
389void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
390int lov_read_and_clear_async_rc(struct cl_object *clob);
391
392struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
393void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
394
395/**
396 * Data structure managing a client's cached clean pages. An LRU of
397 * pages is maintained, along with other statistics.
398 */
399struct cl_client_cache {
400 atomic_t ccc_users; /* # of users (OSCs) of this data */
401 struct list_head ccc_lru; /* LRU list of cached clean pages */
402 spinlock_t ccc_lru_lock; /* lock for list */
403 atomic_t ccc_lru_left; /* # of LRU entries available */
404 unsigned long ccc_lru_max; /* Max # of LRU entries possible */
405 unsigned int ccc_lru_shrinkers; /* # of threads reclaiming */
406};
407
408#endif /*LCLIENT_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h
deleted file mode 100644
index 3907bf4ce07c..000000000000
--- a/drivers/staging/lustre/lustre/include/linux/obd.h
+++ /dev/null
@@ -1,125 +0,0 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26/*
27 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32/*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 */
36
37#ifndef __LINUX_OBD_H
38#define __LINUX_OBD_H
39
40#ifndef __OBD_H
41#error Do not #include this file directly. #include <obd.h> instead
42#endif
43
44#include "../obd_support.h"
45
46#include <linux/fs.h>
47#include <linux/list.h>
48#include <linux/sched.h> /* for struct task_struct, for current.h */
49#include <linux/mount.h>
50
51#include "../lustre_intent.h"
52
53struct ll_iattr {
54 struct iattr iattr;
55 unsigned int ia_attr_flags;
56};
57
58#define CLIENT_OBD_LIST_LOCK_DEBUG 1
59
60struct client_obd_lock {
61 spinlock_t lock;
62
63 unsigned long time;
64 struct task_struct *task;
65 const char *func;
66 int line;
67};
68
69static inline void __client_obd_list_lock(struct client_obd_lock *lock,
70 const char *func, int line)
71{
72 unsigned long cur = jiffies;
73
74 while (1) {
75 if (spin_trylock(&lock->lock)) {
76 LASSERT(!lock->task);
77 lock->task = current;
78 lock->func = func;
79 lock->line = line;
80 lock->time = jiffies;
81 break;
82 }
83
84 if (time_before(cur + 5 * HZ, jiffies) &&
85 time_before(lock->time + 5 * HZ, jiffies)) {
86 struct task_struct *task = lock->task;
87
88 if (!task)
89 continue;
90
91 LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n",
92 current->comm, current->pid,
93 lock, task->comm, task->pid,
94 lock->func, lock->line,
95 (jiffies - lock->time) / HZ);
96 LCONSOLE_WARN("====== for current process =====\n");
97 dump_stack();
98 LCONSOLE_WARN("====== end =======\n");
99 set_current_state(TASK_UNINTERRUPTIBLE);
100 schedule_timeout(1000 * HZ);
101 }
102 cpu_relax();
103 }
104}
105
106#define client_obd_list_lock(lock) \
107 __client_obd_list_lock(lock, __func__, __LINE__)
108
109static inline void client_obd_list_unlock(struct client_obd_lock *lock)
110{
111 LASSERT(lock->task);
112 lock->task = NULL;
113 lock->time = jiffies;
114 spin_unlock(&lock->lock);
115}
116
117static inline void client_obd_list_lock_init(struct client_obd_lock *lock)
118{
119 spin_lock_init(&lock->lock);
120}
121
122static inline void client_obd_list_lock_done(struct client_obd_lock *lock)
123{}
124
125#endif /* __LINUX_OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 242bb1ef6245..2816512185af 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -198,7 +198,6 @@ typedef int (*lu_printer_t)(const struct lu_env *env,
198 * Operations specific for particular lu_object. 198 * Operations specific for particular lu_object.
199 */ 199 */
200struct lu_object_operations { 200struct lu_object_operations {
201
202 /** 201 /**
203 * Allocate lower-layer parts of the object by calling 202 * Allocate lower-layer parts of the object by calling
204 * lu_device_operations::ldo_object_alloc() of the corresponding 203 * lu_device_operations::ldo_object_alloc() of the corresponding
@@ -656,21 +655,21 @@ static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
656 * @{ 655 * @{
657 */ 656 */
658 657
659int lu_site_init (struct lu_site *s, struct lu_device *d); 658int lu_site_init(struct lu_site *s, struct lu_device *d);
660void lu_site_fini (struct lu_site *s); 659void lu_site_fini(struct lu_site *s);
661int lu_site_init_finish (struct lu_site *s); 660int lu_site_init_finish(struct lu_site *s);
662void lu_stack_fini (const struct lu_env *env, struct lu_device *top); 661void lu_stack_fini(const struct lu_env *env, struct lu_device *top);
663void lu_device_get (struct lu_device *d); 662void lu_device_get(struct lu_device *d);
664void lu_device_put (struct lu_device *d); 663void lu_device_put(struct lu_device *d);
665int lu_device_init (struct lu_device *d, struct lu_device_type *t); 664int lu_device_init(struct lu_device *d, struct lu_device_type *t);
666void lu_device_fini (struct lu_device *d); 665void lu_device_fini(struct lu_device *d);
667int lu_object_header_init(struct lu_object_header *h); 666int lu_object_header_init(struct lu_object_header *h);
668void lu_object_header_fini(struct lu_object_header *h); 667void lu_object_header_fini(struct lu_object_header *h);
669int lu_object_init (struct lu_object *o, 668int lu_object_init(struct lu_object *o,
670 struct lu_object_header *h, struct lu_device *d); 669 struct lu_object_header *h, struct lu_device *d);
671void lu_object_fini (struct lu_object *o); 670void lu_object_fini(struct lu_object *o);
672void lu_object_add_top (struct lu_object_header *h, struct lu_object *o); 671void lu_object_add_top(struct lu_object_header *h, struct lu_object *o);
673void lu_object_add (struct lu_object *before, struct lu_object *o); 672void lu_object_add(struct lu_object *before, struct lu_object *o);
674 673
675/** 674/**
676 * Helpers to initialize and finalize device types. 675 * Helpers to initialize and finalize device types.
@@ -781,9 +780,8 @@ int lu_cdebug_printer(const struct lu_env *env,
781 */ 780 */
782#define LU_OBJECT_DEBUG(mask, env, object, format, ...) \ 781#define LU_OBJECT_DEBUG(mask, env, object, format, ...) \
783do { \ 782do { \
784 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
785 \
786 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ 783 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
784 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
787 lu_object_print(env, &msgdata, lu_cdebug_printer, object);\ 785 lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
788 CDEBUG(mask, format, ## __VA_ARGS__); \ 786 CDEBUG(mask, format, ## __VA_ARGS__); \
789 } \ 787 } \
@@ -794,9 +792,8 @@ do { \
794 */ 792 */
795#define LU_OBJECT_HEADER(mask, env, object, format, ...) \ 793#define LU_OBJECT_HEADER(mask, env, object, format, ...) \
796do { \ 794do { \
797 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
798 \
799 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ 795 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
796 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
800 lu_object_header_print(env, &msgdata, lu_cdebug_printer,\ 797 lu_object_header_print(env, &msgdata, lu_cdebug_printer,\
801 (object)->lo_header); \ 798 (object)->lo_header); \
802 lu_cdebug_printer(env, &msgdata, "\n"); \ 799 lu_cdebug_printer(env, &msgdata, "\n"); \
@@ -1007,6 +1004,10 @@ enum lu_context_tag {
1007 */ 1004 */
1008 LCT_LOCAL = 1 << 7, 1005 LCT_LOCAL = 1 << 7,
1009 /** 1006 /**
1007 * session for server thread
1008 **/
1009 LCT_SERVER_SESSION = BIT(8),
1010 /**
1010 * Set when at least one of keys, having values in this context has 1011 * Set when at least one of keys, having values in this context has
1011 * non-NULL lu_context_key::lct_exit() method. This is used to 1012 * non-NULL lu_context_key::lct_exit() method. This is used to
1012 * optimize lu_context_exit() call. 1013 * optimize lu_context_exit() call.
@@ -1118,7 +1119,7 @@ struct lu_context_key {
1118 { \ 1119 { \
1119 type *value; \ 1120 type *value; \
1120 \ 1121 \
1121 CLASSERT(PAGE_SIZE >= sizeof (*value)); \ 1122 CLASSERT(PAGE_SIZE >= sizeof(*value)); \
1122 \ 1123 \
1123 value = kzalloc(sizeof(*value), GFP_NOFS); \ 1124 value = kzalloc(sizeof(*value), GFP_NOFS); \
1124 if (!value) \ 1125 if (!value) \
@@ -1154,12 +1155,12 @@ do { \
1154 (key)->lct_owner = THIS_MODULE; \ 1155 (key)->lct_owner = THIS_MODULE; \
1155} while (0) 1156} while (0)
1156 1157
1157int lu_context_key_register(struct lu_context_key *key); 1158int lu_context_key_register(struct lu_context_key *key);
1158void lu_context_key_degister(struct lu_context_key *key); 1159void lu_context_key_degister(struct lu_context_key *key);
1159void *lu_context_key_get (const struct lu_context *ctx, 1160void *lu_context_key_get(const struct lu_context *ctx,
1160 const struct lu_context_key *key); 1161 const struct lu_context_key *key);
1161void lu_context_key_quiesce (struct lu_context_key *key); 1162void lu_context_key_quiesce(struct lu_context_key *key);
1162void lu_context_key_revive (struct lu_context_key *key); 1163void lu_context_key_revive(struct lu_context_key *key);
1163 1164
1164/* 1165/*
1165 * LU_KEY_INIT_GENERIC() has to be a macro to correctly determine an 1166 * LU_KEY_INIT_GENERIC() has to be a macro to correctly determine an
@@ -1216,21 +1217,21 @@ void lu_context_key_revive (struct lu_context_key *key);
1216 LU_TYPE_START(mod, __VA_ARGS__); \ 1217 LU_TYPE_START(mod, __VA_ARGS__); \
1217 LU_TYPE_STOP(mod, __VA_ARGS__) 1218 LU_TYPE_STOP(mod, __VA_ARGS__)
1218 1219
1219int lu_context_init (struct lu_context *ctx, __u32 tags); 1220int lu_context_init(struct lu_context *ctx, __u32 tags);
1220void lu_context_fini (struct lu_context *ctx); 1221void lu_context_fini(struct lu_context *ctx);
1221void lu_context_enter (struct lu_context *ctx); 1222void lu_context_enter(struct lu_context *ctx);
1222void lu_context_exit (struct lu_context *ctx); 1223void lu_context_exit(struct lu_context *ctx);
1223int lu_context_refill(struct lu_context *ctx); 1224int lu_context_refill(struct lu_context *ctx);
1224 1225
1225/* 1226/*
1226 * Helper functions to operate on multiple keys. These are used by the default 1227 * Helper functions to operate on multiple keys. These are used by the default
1227 * device type operations, defined by LU_TYPE_INIT_FINI(). 1228 * device type operations, defined by LU_TYPE_INIT_FINI().
1228 */ 1229 */
1229 1230
1230int lu_context_key_register_many(struct lu_context_key *k, ...); 1231int lu_context_key_register_many(struct lu_context_key *k, ...);
1231void lu_context_key_degister_many(struct lu_context_key *k, ...); 1232void lu_context_key_degister_many(struct lu_context_key *k, ...);
1232void lu_context_key_revive_many (struct lu_context_key *k, ...); 1233void lu_context_key_revive_many(struct lu_context_key *k, ...);
1233void lu_context_key_quiesce_many (struct lu_context_key *k, ...); 1234void lu_context_key_quiesce_many(struct lu_context_key *k, ...);
1234 1235
1235/** 1236/**
1236 * Environment. 1237 * Environment.
@@ -1246,9 +1247,9 @@ struct lu_env {
1246 struct lu_context *le_ses; 1247 struct lu_context *le_ses;
1247}; 1248};
1248 1249
1249int lu_env_init (struct lu_env *env, __u32 tags); 1250int lu_env_init(struct lu_env *env, __u32 tags);
1250void lu_env_fini (struct lu_env *env); 1251void lu_env_fini(struct lu_env *env);
1251int lu_env_refill(struct lu_env *env); 1252int lu_env_refill(struct lu_env *env);
1252 1253
1253/** @} lu_context */ 1254/** @} lu_context */
1254 1255
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 5aae1d06a5fa..9c53c1792dc8 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -183,6 +183,12 @@ struct lu_seq_range {
183 __u32 lsr_flags; 183 __u32 lsr_flags;
184}; 184};
185 185
186struct lu_seq_range_array {
187 __u32 lsra_count;
188 __u32 lsra_padding;
189 struct lu_seq_range lsra_lsr[0];
190};
191
186#define LU_SEQ_RANGE_MDT 0x0 192#define LU_SEQ_RANGE_MDT 0x0
187#define LU_SEQ_RANGE_OST 0x1 193#define LU_SEQ_RANGE_OST 0x1
188#define LU_SEQ_RANGE_ANY 0x3 194#define LU_SEQ_RANGE_ANY 0x3
@@ -578,7 +584,7 @@ static inline __u64 ostid_seq(const struct ost_id *ostid)
578 if (fid_seq_is_mdt0(ostid->oi.oi_seq)) 584 if (fid_seq_is_mdt0(ostid->oi.oi_seq))
579 return FID_SEQ_OST_MDT0; 585 return FID_SEQ_OST_MDT0;
580 586
581 if (fid_seq_is_default(ostid->oi.oi_seq)) 587 if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
582 return FID_SEQ_LOV_DEFAULT; 588 return FID_SEQ_LOV_DEFAULT;
583 589
584 if (fid_is_idif(&ostid->oi_fid)) 590 if (fid_is_idif(&ostid->oi_fid))
@@ -590,9 +596,12 @@ static inline __u64 ostid_seq(const struct ost_id *ostid)
590/* extract OST objid from a wire ost_id (id/seq) pair */ 596/* extract OST objid from a wire ost_id (id/seq) pair */
591static inline __u64 ostid_id(const struct ost_id *ostid) 597static inline __u64 ostid_id(const struct ost_id *ostid)
592{ 598{
593 if (fid_seq_is_mdt0(ostid_seq(ostid))) 599 if (fid_seq_is_mdt0(ostid->oi.oi_seq))
594 return ostid->oi.oi_id & IDIF_OID_MASK; 600 return ostid->oi.oi_id & IDIF_OID_MASK;
595 601
602 if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
603 return ostid->oi.oi_id;
604
596 if (fid_is_idif(&ostid->oi_fid)) 605 if (fid_is_idif(&ostid->oi_fid))
597 return fid_idif_id(fid_seq(&ostid->oi_fid), 606 return fid_idif_id(fid_seq(&ostid->oi_fid),
598 fid_oid(&ostid->oi_fid), 0); 607 fid_oid(&ostid->oi_fid), 0);
@@ -636,12 +645,22 @@ static inline void ostid_set_seq_llog(struct ost_id *oi)
636 */ 645 */
637static inline void ostid_set_id(struct ost_id *oi, __u64 oid) 646static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
638{ 647{
639 if (fid_seq_is_mdt0(ostid_seq(oi))) { 648 if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
640 if (oid >= IDIF_MAX_OID) { 649 if (oid >= IDIF_MAX_OID) {
641 CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi)); 650 CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
642 return; 651 return;
643 } 652 }
644 oi->oi.oi_id = oid; 653 oi->oi.oi_id = oid;
654 } else if (fid_is_idif(&oi->oi_fid)) {
655 if (oid >= IDIF_MAX_OID) {
656 CERROR("Bad %llu to set "DOSTID"\n",
657 oid, POSTID(oi));
658 return;
659 }
660 oi->oi_fid.f_seq = fid_idif_seq(oid,
661 fid_idif_ost_idx(&oi->oi_fid));
662 oi->oi_fid.f_oid = oid;
663 oi->oi_fid.f_ver = oid >> 48;
645 } else { 664 } else {
646 if (oid > OBIF_MAX_OID) { 665 if (oid > OBIF_MAX_OID) {
647 CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi)); 666 CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
@@ -651,25 +670,31 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
651 } 670 }
652} 671}
653 672
654static inline void ostid_inc_id(struct ost_id *oi) 673static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
655{ 674{
656 if (fid_seq_is_mdt0(ostid_seq(oi))) { 675 if (unlikely(fid_seq_is_igif(fid->f_seq))) {
657 if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) { 676 CERROR("bad IGIF, "DFID"\n", PFID(fid));
658 CERROR("Bad inc "DOSTID"\n", POSTID(oi)); 677 return -EBADF;
659 return; 678 }
679
680 if (fid_is_idif(fid)) {
681 if (oid >= IDIF_MAX_OID) {
682 CERROR("Too large OID %#llx to set IDIF "DFID"\n",
683 (unsigned long long)oid, PFID(fid));
684 return -EBADF;
660 } 685 }
661 oi->oi.oi_id++; 686 fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
687 fid->f_oid = oid;
688 fid->f_ver = oid >> 48;
662 } else { 689 } else {
663 oi->oi_fid.f_oid++; 690 if (oid > OBIF_MAX_OID) {
691 CERROR("Too large OID %#llx to set REG "DFID"\n",
692 (unsigned long long)oid, PFID(fid));
693 return -EBADF;
694 }
695 fid->f_oid = oid;
664 } 696 }
665} 697 return 0;
666
667static inline void ostid_dec_id(struct ost_id *oi)
668{
669 if (fid_seq_is_mdt0(ostid_seq(oi)))
670 oi->oi.oi_id--;
671 else
672 oi->oi_fid.f_oid--;
673} 698}
674 699
675/** 700/**
@@ -684,30 +709,34 @@ static inline void ostid_dec_id(struct ost_id *oi)
684static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, 709static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
685 __u32 ost_idx) 710 __u32 ost_idx)
686{ 711{
712 __u64 seq = ostid_seq(ostid);
713
687 if (ost_idx > 0xffff) { 714 if (ost_idx > 0xffff) {
688 CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid), 715 CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
689 ost_idx); 716 ost_idx);
690 return -EBADF; 717 return -EBADF;
691 } 718 }
692 719
693 if (fid_seq_is_mdt0(ostid_seq(ostid))) { 720 if (fid_seq_is_mdt0(seq)) {
721 __u64 oid = ostid_id(ostid);
722
694 /* This is a "legacy" (old 1.x/2.early) OST object in "group 0" 723 /* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
695 * that we map into the IDIF namespace. It allows up to 2^48 724 * that we map into the IDIF namespace. It allows up to 2^48
696 * objects per OST, as this is the object namespace that has 725 * objects per OST, as this is the object namespace that has
697 * been in production for years. This can handle create rates 726 * been in production for years. This can handle create rates
698 * of 1M objects/s/OST for 9 years, or combinations thereof. 727 * of 1M objects/s/OST for 9 years, or combinations thereof.
699 */ 728 */
700 if (ostid_id(ostid) >= IDIF_MAX_OID) { 729 if (oid >= IDIF_MAX_OID) {
701 CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n", 730 CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
702 POSTID(ostid), ost_idx); 731 POSTID(ostid), ost_idx);
703 return -EBADF; 732 return -EBADF;
704 } 733 }
705 fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx); 734 fid->f_seq = fid_idif_seq(oid, ost_idx);
706 /* truncate to 32 bits by assignment */ 735 /* truncate to 32 bits by assignment */
707 fid->f_oid = ostid_id(ostid); 736 fid->f_oid = oid;
708 /* in theory, not currently used */ 737 /* in theory, not currently used */
709 fid->f_ver = ostid_id(ostid) >> 48; 738 fid->f_ver = oid >> 48;
710 } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ { 739 } else if (likely(!fid_seq_is_default(seq))) {
711 /* This is either an IDIF object, which identifies objects across 740 /* This is either an IDIF object, which identifies objects across
712 * all OSTs, or a regular FID. The IDIF namespace maps legacy 741 * all OSTs, or a regular FID. The IDIF namespace maps legacy
713 * OST objects into the FID namespace. In both cases, we just 742 * OST objects into the FID namespace. In both cases, we just
@@ -1001,8 +1030,9 @@ static inline int lu_dirent_calc_size(int namelen, __u16 attr)
1001 1030
1002 size = (sizeof(struct lu_dirent) + namelen + align) & ~align; 1031 size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
1003 size += sizeof(struct luda_type); 1032 size += sizeof(struct luda_type);
1004 } else 1033 } else {
1005 size = sizeof(struct lu_dirent) + namelen; 1034 size = sizeof(struct lu_dirent) + namelen;
1035 }
1006 1036
1007 return (size + 7) & ~7; 1037 return (size + 7) & ~7;
1008} 1038}
@@ -1256,6 +1286,9 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
1256#define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */ 1286#define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */
1257#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* flock deadlock detection */ 1287#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* flock deadlock detection */
1258#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/ 1288#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/
1289#define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack
1290 * name in request
1291 */
1259 1292
1260/* XXX README XXX: 1293/* XXX README XXX:
1261 * Please DO NOT add flag values here before first ensuring that this same 1294 * Please DO NOT add flag values here before first ensuring that this same
@@ -1428,6 +1461,8 @@ enum obdo_flags {
1428 */ 1461 */
1429 OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */ 1462 OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
1430 OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */ 1463 OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
1464 OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */
1465 OBD_FL_SHORT_IO = 0x00400000, /* short io request */
1431 1466
1432 /* Note that while these checksum values are currently separate bits, 1467 /* Note that while these checksum values are currently separate bits,
1433 * in 2.x we can actually allow all values from 1-31 if we wanted. 1468 * in 2.x we can actually allow all values from 1-31 if we wanted.
@@ -1525,6 +1560,11 @@ static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
1525 oi->oi.oi_seq = seq; 1560 oi->oi.oi_seq = seq;
1526} 1561}
1527 1562
1563static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
1564{
1565 oi->oi.oi_id = oid;
1566}
1567
1528static inline __u64 lmm_oi_id(struct ost_id *oi) 1568static inline __u64 lmm_oi_id(struct ost_id *oi)
1529{ 1569{
1530 return oi->oi.oi_id; 1570 return oi->oi.oi_id;
@@ -1732,6 +1772,11 @@ void lustre_swab_obd_statfs(struct obd_statfs *os);
1732#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */ 1772#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
1733#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */ 1773#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
1734#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */ 1774#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
1775#define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server
1776 * that the client is running low on
1777 * space for unstable pages; asking
1778 * it to sync quickly
1779 */
1735 1780
1736#define OBD_OBJECT_EOF 0xffffffffffffffffULL 1781#define OBD_OBJECT_EOF 0xffffffffffffffffULL
1737 1782
@@ -2436,6 +2481,7 @@ struct mdt_rec_reint {
2436 2481
2437void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr); 2482void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
2438 2483
2484/* lmv structures */
2439struct lmv_desc { 2485struct lmv_desc {
2440 __u32 ld_tgt_count; /* how many MDS's */ 2486 __u32 ld_tgt_count; /* how many MDS's */
2441 __u32 ld_active_tgt_count; /* how many active */ 2487 __u32 ld_active_tgt_count; /* how many active */
@@ -2460,7 +2506,6 @@ struct lmv_stripe_md {
2460 struct lu_fid mea_ids[0]; 2506 struct lu_fid mea_ids[0];
2461}; 2507};
2462 2508
2463/* lmv structures */
2464#define MEA_MAGIC_LAST_CHAR 0xb2221ca1 2509#define MEA_MAGIC_LAST_CHAR 0xb2221ca1
2465#define MEA_MAGIC_ALL_CHARS 0xb222a11c 2510#define MEA_MAGIC_ALL_CHARS 0xb222a11c
2466#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b 2511#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
@@ -2470,9 +2515,10 @@ struct lmv_stripe_md {
2470#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL 2515#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
2471 2516
2472enum fld_rpc_opc { 2517enum fld_rpc_opc {
2473 FLD_QUERY = 900, 2518 FLD_QUERY = 900,
2519 FLD_READ = 901,
2474 FLD_LAST_OPC, 2520 FLD_LAST_OPC,
2475 FLD_FIRST_OPC = FLD_QUERY 2521 FLD_FIRST_OPC = FLD_QUERY
2476}; 2522};
2477 2523
2478enum seq_rpc_opc { 2524enum seq_rpc_opc {
@@ -2486,6 +2532,12 @@ enum seq_op {
2486 SEQ_ALLOC_META = 1 2532 SEQ_ALLOC_META = 1
2487}; 2533};
2488 2534
2535enum fld_op {
2536 FLD_CREATE = 0,
2537 FLD_DELETE = 1,
2538 FLD_LOOKUP = 2,
2539};
2540
2489/* 2541/*
2490 * LOV data structures 2542 * LOV data structures
2491 */ 2543 */
@@ -2582,6 +2634,8 @@ struct ldlm_extent {
2582 __u64 gid; 2634 __u64 gid;
2583}; 2635};
2584 2636
2637#define LDLM_GID_ANY ((__u64)-1)
2638
2585static inline int ldlm_extent_overlap(struct ldlm_extent *ex1, 2639static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
2586 struct ldlm_extent *ex2) 2640 struct ldlm_extent *ex2)
2587{ 2641{
@@ -3304,7 +3358,7 @@ struct getinfo_fid2path {
3304 char gf_path[0]; 3358 char gf_path[0];
3305} __packed; 3359} __packed;
3306 3360
3307void lustre_swab_fid2path (struct getinfo_fid2path *gf); 3361void lustre_swab_fid2path(struct getinfo_fid2path *gf);
3308 3362
3309enum { 3363enum {
3310 LAYOUT_INTENT_ACCESS = 0, 3364 LAYOUT_INTENT_ACCESS = 0,
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 276906e646f5..59ba48ac31a7 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -193,37 +193,37 @@ struct ost_id {
193 * *INFO - set/get lov_user_mds_data 193 * *INFO - set/get lov_user_mds_data
194 */ 194 */
195/* see <lustre_lib.h> for ioctl numberss 101-150 */ 195/* see <lustre_lib.h> for ioctl numberss 101-150 */
196#define LL_IOC_GETFLAGS _IOR ('f', 151, long) 196#define LL_IOC_GETFLAGS _IOR('f', 151, long)
197#define LL_IOC_SETFLAGS _IOW ('f', 152, long) 197#define LL_IOC_SETFLAGS _IOW('f', 152, long)
198#define LL_IOC_CLRFLAGS _IOW ('f', 153, long) 198#define LL_IOC_CLRFLAGS _IOW('f', 153, long)
199/* LL_IOC_LOV_SETSTRIPE: See also OBD_IOC_LOV_SETSTRIPE */ 199/* LL_IOC_LOV_SETSTRIPE: See also OBD_IOC_LOV_SETSTRIPE */
200#define LL_IOC_LOV_SETSTRIPE _IOW ('f', 154, long) 200#define LL_IOC_LOV_SETSTRIPE _IOW('f', 154, long)
201/* LL_IOC_LOV_GETSTRIPE: See also OBD_IOC_LOV_GETSTRIPE */ 201/* LL_IOC_LOV_GETSTRIPE: See also OBD_IOC_LOV_GETSTRIPE */
202#define LL_IOC_LOV_GETSTRIPE _IOW ('f', 155, long) 202#define LL_IOC_LOV_GETSTRIPE _IOW('f', 155, long)
203/* LL_IOC_LOV_SETEA: See also OBD_IOC_LOV_SETEA */ 203/* LL_IOC_LOV_SETEA: See also OBD_IOC_LOV_SETEA */
204#define LL_IOC_LOV_SETEA _IOW ('f', 156, long) 204#define LL_IOC_LOV_SETEA _IOW('f', 156, long)
205#define LL_IOC_RECREATE_OBJ _IOW ('f', 157, long) 205#define LL_IOC_RECREATE_OBJ _IOW('f', 157, long)
206#define LL_IOC_RECREATE_FID _IOW ('f', 157, struct lu_fid) 206#define LL_IOC_RECREATE_FID _IOW('f', 157, struct lu_fid)
207#define LL_IOC_GROUP_LOCK _IOW ('f', 158, long) 207#define LL_IOC_GROUP_LOCK _IOW('f', 158, long)
208#define LL_IOC_GROUP_UNLOCK _IOW ('f', 159, long) 208#define LL_IOC_GROUP_UNLOCK _IOW('f', 159, long)
209/* LL_IOC_QUOTACHECK: See also OBD_IOC_QUOTACHECK */ 209/* LL_IOC_QUOTACHECK: See also OBD_IOC_QUOTACHECK */
210#define LL_IOC_QUOTACHECK _IOW ('f', 160, int) 210#define LL_IOC_QUOTACHECK _IOW('f', 160, int)
211/* LL_IOC_POLL_QUOTACHECK: See also OBD_IOC_POLL_QUOTACHECK */ 211/* LL_IOC_POLL_QUOTACHECK: See also OBD_IOC_POLL_QUOTACHECK */
212#define LL_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *) 212#define LL_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *)
213/* LL_IOC_QUOTACTL: See also OBD_IOC_QUOTACTL */ 213/* LL_IOC_QUOTACTL: See also OBD_IOC_QUOTACTL */
214#define LL_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl) 214#define LL_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
215#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *) 215#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *)
216#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *) 216#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *)
217#define LL_IOC_FLUSHCTX _IOW ('f', 166, long) 217#define LL_IOC_FLUSHCTX _IOW('f', 166, long)
218#define LL_IOC_RMTACL _IOW ('f', 167, long) 218#define LL_IOC_RMTACL _IOW('f', 167, long)
219#define LL_IOC_GETOBDCOUNT _IOR ('f', 168, long) 219#define LL_IOC_GETOBDCOUNT _IOR('f', 168, long)
220#define LL_IOC_LLOOP_ATTACH _IOWR('f', 169, long) 220#define LL_IOC_LLOOP_ATTACH _IOWR('f', 169, long)
221#define LL_IOC_LLOOP_DETACH _IOWR('f', 170, long) 221#define LL_IOC_LLOOP_DETACH _IOWR('f', 170, long)
222#define LL_IOC_LLOOP_INFO _IOWR('f', 171, struct lu_fid) 222#define LL_IOC_LLOOP_INFO _IOWR('f', 171, struct lu_fid)
223#define LL_IOC_LLOOP_DETACH_BYDEV _IOWR('f', 172, long) 223#define LL_IOC_LLOOP_DETACH_BYDEV _IOWR('f', 172, long)
224#define LL_IOC_PATH2FID _IOR ('f', 173, long) 224#define LL_IOC_PATH2FID _IOR('f', 173, long)
225#define LL_IOC_GET_CONNECT_FLAGS _IOWR('f', 174, __u64 *) 225#define LL_IOC_GET_CONNECT_FLAGS _IOWR('f', 174, __u64 *)
226#define LL_IOC_GET_MDTIDX _IOR ('f', 175, int) 226#define LL_IOC_GET_MDTIDX _IOR('f', 175, int)
227 227
228/* see <lustre_lib.h> for ioctl numbers 177-210 */ 228/* see <lustre_lib.h> for ioctl numbers 177-210 */
229 229
@@ -676,7 +676,12 @@ static inline const char *changelog_type2str(int type)
676#define CLF_UNLINK_HSM_EXISTS 0x0002 /* File has something in HSM */ 676#define CLF_UNLINK_HSM_EXISTS 0x0002 /* File has something in HSM */
677 /* HSM cleaning needed */ 677 /* HSM cleaning needed */
678/* Flags for rename */ 678/* Flags for rename */
679#define CLF_RENAME_LAST 0x0001 /* rename unlink last hardlink of target */ 679#define CLF_RENAME_LAST 0x0001 /* rename unlink last hardlink of
680 * target
681 */
682#define CLF_RENAME_LAST_EXISTS 0x0002 /* rename unlink last hardlink of target
683 * has an archive in backend
684 */
680 685
681/* Flags for HSM */ 686/* Flags for HSM */
682/* 12b used (from high weight to low weight): 687/* 12b used (from high weight to low weight):
@@ -833,9 +838,8 @@ struct ioc_data_version {
833 __u64 idv_flags; /* See LL_DV_xxx */ 838 __u64 idv_flags; /* See LL_DV_xxx */
834}; 839};
835 840
836#define LL_DV_NOFLUSH 0x01 /* Do not take READ EXTENT LOCK before sampling 841#define LL_DV_RD_FLUSH BIT(0) /* Flush dirty pages from clients */
837 * version. Dirty caches are left unchanged. 842#define LL_DV_WR_FLUSH BIT(1) /* Flush all caching pages from clients */
838 */
839 843
840#ifndef offsetof 844#ifndef offsetof
841# define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb))) 845# define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb)))
@@ -1095,12 +1099,12 @@ struct hsm_action_list {
1095 __u32 padding1; 1099 __u32 padding1;
1096 char hal_fsname[0]; /* null-terminated */ 1100 char hal_fsname[0]; /* null-terminated */
1097 /* struct hsm_action_item[hal_count] follows, aligned on 8-byte 1101 /* struct hsm_action_item[hal_count] follows, aligned on 8-byte
1098 * boundaries. See hai_zero 1102 * boundaries. See hai_first
1099 */ 1103 */
1100} __packed; 1104} __packed;
1101 1105
1102#ifndef HAVE_CFS_SIZE_ROUND 1106#ifndef HAVE_CFS_SIZE_ROUND
1103static inline int cfs_size_round (int val) 1107static inline int cfs_size_round(int val)
1104{ 1108{
1105 return (val + 7) & (~0x7); 1109 return (val + 7) & (~0x7);
1106} 1110}
@@ -1109,7 +1113,7 @@ static inline int cfs_size_round (int val)
1109#endif 1113#endif
1110 1114
1111/* Return pointer to first hai in action list */ 1115/* Return pointer to first hai in action list */
1112static inline struct hsm_action_item *hai_zero(struct hsm_action_list *hal) 1116static inline struct hsm_action_item *hai_first(struct hsm_action_list *hal)
1113{ 1117{
1114 return (struct hsm_action_item *)(hal->hal_fsname + 1118 return (struct hsm_action_item *)(hal->hal_fsname +
1115 cfs_size_round(strlen(hal-> \ 1119 cfs_size_round(strlen(hal-> \
@@ -1131,7 +1135,7 @@ static inline int hal_size(struct hsm_action_list *hal)
1131 struct hsm_action_item *hai; 1135 struct hsm_action_item *hai;
1132 1136
1133 sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname) + 1); 1137 sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname) + 1);
1134 hai = hai_zero(hal); 1138 hai = hai_first(hal);
1135 for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai)) 1139 for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai))
1136 sz += cfs_size_round(hai->hai_len); 1140 sz += cfs_size_round(hai->hai_len);
1137 1141
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
index bb16ae980b98..e229e91f7f56 100644
--- a/drivers/staging/lustre/lustre/include/lustre_cfg.h
+++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h
@@ -161,7 +161,7 @@ static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index)
161 int offset; 161 int offset;
162 int bufcount; 162 int bufcount;
163 163
164 LASSERT (index >= 0); 164 LASSERT(index >= 0);
165 165
166 bufcount = lcfg->lcfg_bufcount; 166 bufcount = lcfg->lcfg_bufcount;
167 if (index >= bufcount) 167 if (index >= bufcount)
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 95fd36063f55..b36821ffb252 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -130,7 +130,6 @@ struct lustre_sb_info {
130 struct lustre_mount_data *lsi_lmd; /* mount command info */ 130 struct lustre_mount_data *lsi_lmd; /* mount command info */
131 struct ll_sb_info *lsi_llsbi; /* add'l client sbi info */ 131 struct ll_sb_info *lsi_llsbi; /* add'l client sbi info */
132 struct dt_device *lsi_dt_dev; /* dt device to access disk fs*/ 132 struct dt_device *lsi_dt_dev; /* dt device to access disk fs*/
133 struct vfsmount *lsi_srv_mnt; /* the one server mount */
134 atomic_t lsi_mounts; /* references to the srv_mnt */ 133 atomic_t lsi_mounts; /* references to the srv_mnt */
135 char lsi_svname[MTI_NAME_MAXLEN]; 134 char lsi_svname[MTI_NAME_MAXLEN];
136 char lsi_osd_obdname[64]; 135 char lsi_osd_obdname[64];
@@ -158,7 +157,6 @@ struct lustre_sb_info {
158struct lustre_mount_info { 157struct lustre_mount_info {
159 char *lmi_name; 158 char *lmi_name;
160 struct super_block *lmi_sb; 159 struct super_block *lmi_sb;
161 struct vfsmount *lmi_mnt;
162 struct list_head lmi_list_chain; 160 struct list_head lmi_list_chain;
163}; 161};
164 162
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 8b0364f71129..9cade144faca 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -71,6 +71,7 @@ struct obd_device;
71 */ 71 */
72enum ldlm_error { 72enum ldlm_error {
73 ELDLM_OK = 0, 73 ELDLM_OK = 0,
74 ELDLM_LOCK_MATCHED = 1,
74 75
75 ELDLM_LOCK_CHANGED = 300, 76 ELDLM_LOCK_CHANGED = 300,
76 ELDLM_LOCK_ABORTED = 301, 77 ELDLM_LOCK_ABORTED = 301,
@@ -269,7 +270,7 @@ struct ldlm_pool {
269 struct completion pl_kobj_unregister; 270 struct completion pl_kobj_unregister;
270}; 271};
271 272
272typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock); 273typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock);
273 274
274/** 275/**
275 * LVB operations. 276 * LVB operations.
@@ -446,8 +447,11 @@ struct ldlm_namespace {
446 /** Limit of parallel AST RPC count. */ 447 /** Limit of parallel AST RPC count. */
447 unsigned ns_max_parallel_ast; 448 unsigned ns_max_parallel_ast;
448 449
449 /** Callback to cancel locks before replaying it during recovery. */ 450 /**
450 ldlm_cancel_for_recovery ns_cancel_for_recovery; 451 * Callback to check if a lock is good to be canceled by ELC or
452 * during recovery.
453 */
454 ldlm_cancel_cbt ns_cancel;
451 455
452 /** LDLM lock stats */ 456 /** LDLM lock stats */
453 struct lprocfs_stats *ns_stats; 457 struct lprocfs_stats *ns_stats;
@@ -479,9 +483,9 @@ static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
479} 483}
480 484
481static inline void ns_register_cancel(struct ldlm_namespace *ns, 485static inline void ns_register_cancel(struct ldlm_namespace *ns,
482 ldlm_cancel_for_recovery arg) 486 ldlm_cancel_cbt arg)
483{ 487{
484 ns->ns_cancel_for_recovery = arg; 488 ns->ns_cancel = arg;
485} 489}
486 490
487struct ldlm_lock; 491struct ldlm_lock;
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index 7f2ba2ffe0eb..e7e0c21a9b40 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -37,17 +37,11 @@
37/** l_flags bits marked as "gone" bits */ 37/** l_flags bits marked as "gone" bits */
38#define LDLM_FL_GONE_MASK 0x0006004000000000ULL 38#define LDLM_FL_GONE_MASK 0x0006004000000000ULL
39 39
40/** l_flags bits marked as "hide_lock" bits */
41#define LDLM_FL_HIDE_LOCK_MASK 0x0000206400000000ULL
42
43/** l_flags bits marked as "inherit" bits */ 40/** l_flags bits marked as "inherit" bits */
44#define LDLM_FL_INHERIT_MASK 0x0000000000800000ULL 41#define LDLM_FL_INHERIT_MASK 0x0000000000800000ULL
45 42
46/** l_flags bits marked as "local_only" bits */ 43/** l_flags bits marked as "off_wire" bits */
47#define LDLM_FL_LOCAL_ONLY_MASK 0x00FFFFFF00000000ULL 44#define LDLM_FL_OFF_WIRE_MASK 0x00FFFFFF00000000ULL
48
49/** l_flags bits marked as "on_wire" bits */
50#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F932FULL
51 45
52/** extent, mode, or resource changed */ 46/** extent, mode, or resource changed */
53#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL /* bit 0 */ 47#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL /* bit 0 */
@@ -204,7 +198,7 @@
204#define ldlm_set_cancel(_l) LDLM_SET_FLAG((_l), 1ULL << 36) 198#define ldlm_set_cancel(_l) LDLM_SET_FLAG((_l), 1ULL << 36)
205#define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36) 199#define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36)
206 200
207/** whatever it might mean */ 201/** whatever it might mean -- never transmitted? */
208#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL /* bit 37 */ 202#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL /* bit 37 */
209#define ldlm_is_local_only(_l) LDLM_TEST_FLAG((_l), 1ULL << 37) 203#define ldlm_is_local_only(_l) LDLM_TEST_FLAG((_l), 1ULL << 37)
210#define ldlm_set_local_only(_l) LDLM_SET_FLAG((_l), 1ULL << 37) 204#define ldlm_set_local_only(_l) LDLM_SET_FLAG((_l), 1ULL << 37)
@@ -287,18 +281,18 @@
287 * has canceled this lock and is waiting for rpc_lock which is taken by 281 * has canceled this lock and is waiting for rpc_lock which is taken by
288 * the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in 282 * the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in
289 * the lock to prevent the Early Lock Cancel (ELC) code from cancelling it. 283 * the lock to prevent the Early Lock Cancel (ELC) code from cancelling it.
290 *
291 * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
292 * dropped to let ldlm_callback_handler() return EINVAL to the server. It
293 * is used when ELC RPC is already prepared and is waiting for rpc_lock,
294 * too late to send a separate CANCEL RPC.
295 */ 284 */
296#define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */ 285#define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */
297#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46) 286#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46)
298#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46) 287#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46)
299#define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46) 288#define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46)
300 289
301/** whatever it might mean */ 290/**
291 * Set by ldlm_cancel_callback() when lock cache is dropped to let
292 * ldlm_callback_handler() return EINVAL to the server. It is used when
293 * ELC RPC is already prepared and is waiting for rpc_lock, too late to
294 * send a separate CANCEL RPC.
295 */
302#define LDLM_FL_BL_DONE 0x0000800000000000ULL /* bit 47 */ 296#define LDLM_FL_BL_DONE 0x0000800000000000ULL /* bit 47 */
303#define ldlm_is_bl_done(_l) LDLM_TEST_FLAG((_l), 1ULL << 47) 297#define ldlm_is_bl_done(_l) LDLM_TEST_FLAG((_l), 1ULL << 47)
304#define ldlm_set_bl_done(_l) LDLM_SET_FLAG((_l), 1ULL << 47) 298#define ldlm_set_bl_done(_l) LDLM_SET_FLAG((_l), 1ULL << 47)
@@ -381,104 +375,16 @@
381/** test for ldlm_lock flag bit set */ 375/** test for ldlm_lock flag bit set */
382#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0) 376#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
383 377
378/** multi-bit test: are any of mask bits set? */
379#define LDLM_HAVE_MASK(_l, _m) ((_l)->l_flags & LDLM_FL_##_m##_MASK)
380
384/** set a ldlm_lock flag bit */ 381/** set a ldlm_lock flag bit */
385#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b)) 382#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
386 383
387/** clear a ldlm_lock flag bit */ 384/** clear a ldlm_lock flag bit */
388#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b)) 385#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
389 386
390/** Mask of flags inherited from parent lock when doing intents. */
391#define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK
392
393/** Mask of Flags sent in AST lock_flags to map into the receiving lock. */
394#define LDLM_AST_FLAGS LDLM_FL_AST_MASK
395
396/** @} subgroup */ 387/** @} subgroup */
397/** @} group */ 388/** @} group */
398#ifdef WIRESHARK_COMPILE 389
399static int hf_lustre_ldlm_fl_lock_changed = -1;
400static int hf_lustre_ldlm_fl_block_granted = -1;
401static int hf_lustre_ldlm_fl_block_conv = -1;
402static int hf_lustre_ldlm_fl_block_wait = -1;
403static int hf_lustre_ldlm_fl_ast_sent = -1;
404static int hf_lustre_ldlm_fl_replay = -1;
405static int hf_lustre_ldlm_fl_intent_only = -1;
406static int hf_lustre_ldlm_fl_has_intent = -1;
407static int hf_lustre_ldlm_fl_flock_deadlock = -1;
408static int hf_lustre_ldlm_fl_discard_data = -1;
409static int hf_lustre_ldlm_fl_no_timeout = -1;
410static int hf_lustre_ldlm_fl_block_nowait = -1;
411static int hf_lustre_ldlm_fl_test_lock = -1;
412static int hf_lustre_ldlm_fl_cancel_on_block = -1;
413static int hf_lustre_ldlm_fl_deny_on_contention = -1;
414static int hf_lustre_ldlm_fl_ast_discard_data = -1;
415static int hf_lustre_ldlm_fl_fail_loc = -1;
416static int hf_lustre_ldlm_fl_skipped = -1;
417static int hf_lustre_ldlm_fl_cbpending = -1;
418static int hf_lustre_ldlm_fl_wait_noreproc = -1;
419static int hf_lustre_ldlm_fl_cancel = -1;
420static int hf_lustre_ldlm_fl_local_only = -1;
421static int hf_lustre_ldlm_fl_failed = -1;
422static int hf_lustre_ldlm_fl_canceling = -1;
423static int hf_lustre_ldlm_fl_local = -1;
424static int hf_lustre_ldlm_fl_lvb_ready = -1;
425static int hf_lustre_ldlm_fl_kms_ignore = -1;
426static int hf_lustre_ldlm_fl_cp_reqd = -1;
427static int hf_lustre_ldlm_fl_cleaned = -1;
428static int hf_lustre_ldlm_fl_atomic_cb = -1;
429static int hf_lustre_ldlm_fl_bl_ast = -1;
430static int hf_lustre_ldlm_fl_bl_done = -1;
431static int hf_lustre_ldlm_fl_no_lru = -1;
432static int hf_lustre_ldlm_fl_fail_notified = -1;
433static int hf_lustre_ldlm_fl_destroyed = -1;
434static int hf_lustre_ldlm_fl_server_lock = -1;
435static int hf_lustre_ldlm_fl_res_locked = -1;
436static int hf_lustre_ldlm_fl_waited = -1;
437static int hf_lustre_ldlm_fl_ns_srv = -1;
438static int hf_lustre_ldlm_fl_excl = -1;
439
440const value_string lustre_ldlm_flags_vals[] = {
441 {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
442 {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
443 {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
444 {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
445 {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
446 {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
447 {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
448 {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
449 {LDLM_FL_FLOCK_DEADLOCK, "LDLM_FL_FLOCK_DEADLOCK"},
450 {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
451 {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
452 {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
453 {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
454 {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
455 {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
456 {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
457 {LDLM_FL_FAIL_LOC, "LDLM_FL_FAIL_LOC"},
458 {LDLM_FL_SKIPPED, "LDLM_FL_SKIPPED"},
459 {LDLM_FL_CBPENDING, "LDLM_FL_CBPENDING"},
460 {LDLM_FL_WAIT_NOREPROC, "LDLM_FL_WAIT_NOREPROC"},
461 {LDLM_FL_CANCEL, "LDLM_FL_CANCEL"},
462 {LDLM_FL_LOCAL_ONLY, "LDLM_FL_LOCAL_ONLY"},
463 {LDLM_FL_FAILED, "LDLM_FL_FAILED"},
464 {LDLM_FL_CANCELING, "LDLM_FL_CANCELING"},
465 {LDLM_FL_LOCAL, "LDLM_FL_LOCAL"},
466 {LDLM_FL_LVB_READY, "LDLM_FL_LVB_READY"},
467 {LDLM_FL_KMS_IGNORE, "LDLM_FL_KMS_IGNORE"},
468 {LDLM_FL_CP_REQD, "LDLM_FL_CP_REQD"},
469 {LDLM_FL_CLEANED, "LDLM_FL_CLEANED"},
470 {LDLM_FL_ATOMIC_CB, "LDLM_FL_ATOMIC_CB"},
471 {LDLM_FL_BL_AST, "LDLM_FL_BL_AST"},
472 {LDLM_FL_BL_DONE, "LDLM_FL_BL_DONE"},
473 {LDLM_FL_NO_LRU, "LDLM_FL_NO_LRU"},
474 {LDLM_FL_FAIL_NOTIFIED, "LDLM_FL_FAIL_NOTIFIED"},
475 {LDLM_FL_DESTROYED, "LDLM_FL_DESTROYED"},
476 {LDLM_FL_SERVER_LOCK, "LDLM_FL_SERVER_LOCK"},
477 {LDLM_FL_RES_LOCKED, "LDLM_FL_RES_LOCKED"},
478 {LDLM_FL_WAITED, "LDLM_FL_WAITED"},
479 {LDLM_FL_NS_SRV, "LDLM_FL_NS_SRV"},
480 {LDLM_FL_EXCL, "LDLM_FL_EXCL"},
481 { 0, NULL }
482};
483#endif /* WIRESHARK_COMPILE */
484#endif /* LDLM_ALL_FLAGS_MASK */ 390#endif /* LDLM_ALL_FLAGS_MASK */
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index ab4a92390a43..12e8b585c2b4 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -308,10 +308,10 @@ static inline int fid_seq_in_fldb(__u64 seq)
308 fid_seq_is_root(seq) || fid_seq_is_dot(seq); 308 fid_seq_is_root(seq) || fid_seq_is_dot(seq);
309} 309}
310 310
311static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq) 311static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq, __u32 ost_idx)
312{ 312{
313 if (fid_seq_is_mdt0(seq)) { 313 if (fid_seq_is_mdt0(seq)) {
314 fid->f_seq = fid_idif_seq(0, 0); 314 fid->f_seq = fid_idif_seq(0, ost_idx);
315 } else { 315 } else {
316 LASSERTF(fid_seq_is_norm(seq) || fid_seq_is_echo(seq) || 316 LASSERTF(fid_seq_is_norm(seq) || fid_seq_is_echo(seq) ||
317 fid_seq_is_idif(seq), "%#llx\n", seq); 317 fid_seq_is_idif(seq), "%#llx\n", seq);
@@ -498,19 +498,6 @@ static inline void ostid_build_res_name(struct ost_id *oi,
498 } 498 }
499} 499}
500 500
501static inline void ostid_res_name_to_id(struct ost_id *oi,
502 struct ldlm_res_id *name)
503{
504 if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_SEQ_OFF])) {
505 /* old resid */
506 ostid_set_seq(oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
507 ostid_set_id(oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
508 } else {
509 /* new resid */
510 fid_extract_from_res_name(&oi->oi_fid, name);
511 }
512}
513
514/** 501/**
515 * Return true if the resource is for the object identified by this id & group. 502 * Return true if the resource is for the object identified by this id & group.
516 */ 503 */
@@ -546,7 +533,8 @@ static inline void ost_fid_build_resid(const struct lu_fid *fid,
546} 533}
547 534
548static inline void ost_fid_from_resid(struct lu_fid *fid, 535static inline void ost_fid_from_resid(struct lu_fid *fid,
549 const struct ldlm_res_id *name) 536 const struct ldlm_res_id *name,
537 int ost_idx)
550{ 538{
551 if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_VER_OID_OFF])) { 539 if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_VER_OID_OFF])) {
552 /* old resid */ 540 /* old resid */
@@ -554,7 +542,7 @@ static inline void ost_fid_from_resid(struct lu_fid *fid,
554 542
555 ostid_set_seq(&oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]); 543 ostid_set_seq(&oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
556 ostid_set_id(&oi, name->name[LUSTRE_RES_ID_SEQ_OFF]); 544 ostid_set_id(&oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
557 ostid_to_fid(fid, &oi, 0); 545 ostid_to_fid(fid, &oi, ost_idx);
558 } else { 546 } else {
559 /* new resid */ 547 /* new resid */
560 fid_extract_from_res_name(fid, name); 548 fid_extract_from_res_name(fid, name);
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index dac2d84d8266..8325c82b3ebf 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -109,7 +109,7 @@ static inline char *ptlrpc_import_state_name(enum lustre_imp_state state)
109 "RECOVER", "FULL", "EVICTED", 109 "RECOVER", "FULL", "EVICTED",
110 }; 110 };
111 111
112 LASSERT (state <= LUSTRE_IMP_EVICTED); 112 LASSERT(state <= LUSTRE_IMP_EVICTED);
113 return import_state_names[state]; 113 return import_state_names[state];
114} 114}
115 115
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index f2223d55850a..00b976766aef 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -280,16 +280,16 @@ static inline void obd_ioctl_freedata(char *buf, int len)
280#define OBD_IOC_DATA_TYPE long 280#define OBD_IOC_DATA_TYPE long
281 281
282#define OBD_IOC_CREATE _IOWR('f', 101, OBD_IOC_DATA_TYPE) 282#define OBD_IOC_CREATE _IOWR('f', 101, OBD_IOC_DATA_TYPE)
283#define OBD_IOC_DESTROY _IOW ('f', 104, OBD_IOC_DATA_TYPE) 283#define OBD_IOC_DESTROY _IOW('f', 104, OBD_IOC_DATA_TYPE)
284#define OBD_IOC_PREALLOCATE _IOWR('f', 105, OBD_IOC_DATA_TYPE) 284#define OBD_IOC_PREALLOCATE _IOWR('f', 105, OBD_IOC_DATA_TYPE)
285 285
286#define OBD_IOC_SETATTR _IOW ('f', 107, OBD_IOC_DATA_TYPE) 286#define OBD_IOC_SETATTR _IOW('f', 107, OBD_IOC_DATA_TYPE)
287#define OBD_IOC_GETATTR _IOWR ('f', 108, OBD_IOC_DATA_TYPE) 287#define OBD_IOC_GETATTR _IOWR ('f', 108, OBD_IOC_DATA_TYPE)
288#define OBD_IOC_READ _IOWR('f', 109, OBD_IOC_DATA_TYPE) 288#define OBD_IOC_READ _IOWR('f', 109, OBD_IOC_DATA_TYPE)
289#define OBD_IOC_WRITE _IOWR('f', 110, OBD_IOC_DATA_TYPE) 289#define OBD_IOC_WRITE _IOWR('f', 110, OBD_IOC_DATA_TYPE)
290 290
291#define OBD_IOC_STATFS _IOWR('f', 113, OBD_IOC_DATA_TYPE) 291#define OBD_IOC_STATFS _IOWR('f', 113, OBD_IOC_DATA_TYPE)
292#define OBD_IOC_SYNC _IOW ('f', 114, OBD_IOC_DATA_TYPE) 292#define OBD_IOC_SYNC _IOW('f', 114, OBD_IOC_DATA_TYPE)
293#define OBD_IOC_READ2 _IOWR('f', 115, OBD_IOC_DATA_TYPE) 293#define OBD_IOC_READ2 _IOWR('f', 115, OBD_IOC_DATA_TYPE)
294#define OBD_IOC_FORMAT _IOWR('f', 116, OBD_IOC_DATA_TYPE) 294#define OBD_IOC_FORMAT _IOWR('f', 116, OBD_IOC_DATA_TYPE)
295#define OBD_IOC_PARTITION _IOWR('f', 117, OBD_IOC_DATA_TYPE) 295#define OBD_IOC_PARTITION _IOWR('f', 117, OBD_IOC_DATA_TYPE)
@@ -308,13 +308,13 @@ static inline void obd_ioctl_freedata(char *buf, int len)
308#define OBD_IOC_GETDTNAME OBD_IOC_GETNAME 308#define OBD_IOC_GETDTNAME OBD_IOC_GETNAME
309 309
310#define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 132, OBD_IOC_DATA_TYPE) 310#define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 132, OBD_IOC_DATA_TYPE)
311#define OBD_IOC_CLIENT_RECOVER _IOW ('f', 133, OBD_IOC_DATA_TYPE) 311#define OBD_IOC_CLIENT_RECOVER _IOW('f', 133, OBD_IOC_DATA_TYPE)
312#define OBD_IOC_PING_TARGET _IOW ('f', 136, OBD_IOC_DATA_TYPE) 312#define OBD_IOC_PING_TARGET _IOW('f', 136, OBD_IOC_DATA_TYPE)
313 313
314#define OBD_IOC_DEC_FS_USE_COUNT _IO ('f', 139) 314#define OBD_IOC_DEC_FS_USE_COUNT _IO ('f', 139)
315#define OBD_IOC_NO_TRANSNO _IOW ('f', 140, OBD_IOC_DATA_TYPE) 315#define OBD_IOC_NO_TRANSNO _IOW('f', 140, OBD_IOC_DATA_TYPE)
316#define OBD_IOC_SET_READONLY _IOW ('f', 141, OBD_IOC_DATA_TYPE) 316#define OBD_IOC_SET_READONLY _IOW('f', 141, OBD_IOC_DATA_TYPE)
317#define OBD_IOC_ABORT_RECOVERY _IOR ('f', 142, OBD_IOC_DATA_TYPE) 317#define OBD_IOC_ABORT_RECOVERY _IOR('f', 142, OBD_IOC_DATA_TYPE)
318 318
319#define OBD_IOC_ROOT_SQUASH _IOWR('f', 143, OBD_IOC_DATA_TYPE) 319#define OBD_IOC_ROOT_SQUASH _IOWR('f', 143, OBD_IOC_DATA_TYPE)
320 320
@@ -324,27 +324,27 @@ static inline void obd_ioctl_freedata(char *buf, int len)
324 324
325#define OBD_IOC_CLOSE_UUID _IOWR ('f', 147, OBD_IOC_DATA_TYPE) 325#define OBD_IOC_CLOSE_UUID _IOWR ('f', 147, OBD_IOC_DATA_TYPE)
326 326
327#define OBD_IOC_CHANGELOG_SEND _IOW ('f', 148, OBD_IOC_DATA_TYPE) 327#define OBD_IOC_CHANGELOG_SEND _IOW('f', 148, OBD_IOC_DATA_TYPE)
328#define OBD_IOC_GETDEVICE _IOWR ('f', 149, OBD_IOC_DATA_TYPE) 328#define OBD_IOC_GETDEVICE _IOWR ('f', 149, OBD_IOC_DATA_TYPE)
329#define OBD_IOC_FID2PATH _IOWR ('f', 150, OBD_IOC_DATA_TYPE) 329#define OBD_IOC_FID2PATH _IOWR ('f', 150, OBD_IOC_DATA_TYPE)
330/* see also <lustre/lustre_user.h> for ioctls 151-153 */ 330/* see also <lustre/lustre_user.h> for ioctls 151-153 */
331/* OBD_IOC_LOV_SETSTRIPE: See also LL_IOC_LOV_SETSTRIPE */ 331/* OBD_IOC_LOV_SETSTRIPE: See also LL_IOC_LOV_SETSTRIPE */
332#define OBD_IOC_LOV_SETSTRIPE _IOW ('f', 154, OBD_IOC_DATA_TYPE) 332#define OBD_IOC_LOV_SETSTRIPE _IOW('f', 154, OBD_IOC_DATA_TYPE)
333/* OBD_IOC_LOV_GETSTRIPE: See also LL_IOC_LOV_GETSTRIPE */ 333/* OBD_IOC_LOV_GETSTRIPE: See also LL_IOC_LOV_GETSTRIPE */
334#define OBD_IOC_LOV_GETSTRIPE _IOW ('f', 155, OBD_IOC_DATA_TYPE) 334#define OBD_IOC_LOV_GETSTRIPE _IOW('f', 155, OBD_IOC_DATA_TYPE)
335/* OBD_IOC_LOV_SETEA: See also LL_IOC_LOV_SETEA */ 335/* OBD_IOC_LOV_SETEA: See also LL_IOC_LOV_SETEA */
336#define OBD_IOC_LOV_SETEA _IOW ('f', 156, OBD_IOC_DATA_TYPE) 336#define OBD_IOC_LOV_SETEA _IOW('f', 156, OBD_IOC_DATA_TYPE)
337/* see <lustre/lustre_user.h> for ioctls 157-159 */ 337/* see <lustre/lustre_user.h> for ioctls 157-159 */
338/* OBD_IOC_QUOTACHECK: See also LL_IOC_QUOTACHECK */ 338/* OBD_IOC_QUOTACHECK: See also LL_IOC_QUOTACHECK */
339#define OBD_IOC_QUOTACHECK _IOW ('f', 160, int) 339#define OBD_IOC_QUOTACHECK _IOW('f', 160, int)
340/* OBD_IOC_POLL_QUOTACHECK: See also LL_IOC_POLL_QUOTACHECK */ 340/* OBD_IOC_POLL_QUOTACHECK: See also LL_IOC_POLL_QUOTACHECK */
341#define OBD_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *) 341#define OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *)
342/* OBD_IOC_QUOTACTL: See also LL_IOC_QUOTACTL */ 342/* OBD_IOC_QUOTACTL: See also LL_IOC_QUOTACTL */
343#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl) 343#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
344/* see also <lustre/lustre_user.h> for ioctls 163-176 */ 344/* see also <lustre/lustre_user.h> for ioctls 163-176 */
345#define OBD_IOC_CHANGELOG_REG _IOW ('f', 177, struct obd_ioctl_data) 345#define OBD_IOC_CHANGELOG_REG _IOW('f', 177, struct obd_ioctl_data)
346#define OBD_IOC_CHANGELOG_DEREG _IOW ('f', 178, struct obd_ioctl_data) 346#define OBD_IOC_CHANGELOG_DEREG _IOW('f', 178, struct obd_ioctl_data)
347#define OBD_IOC_CHANGELOG_CLEAR _IOW ('f', 179, struct obd_ioctl_data) 347#define OBD_IOC_CHANGELOG_CLEAR _IOW('f', 179, struct obd_ioctl_data)
348#define OBD_IOC_RECORD _IOWR('f', 180, OBD_IOC_DATA_TYPE) 348#define OBD_IOC_RECORD _IOWR('f', 180, OBD_IOC_DATA_TYPE)
349#define OBD_IOC_ENDRECORD _IOWR('f', 181, OBD_IOC_DATA_TYPE) 349#define OBD_IOC_ENDRECORD _IOWR('f', 181, OBD_IOC_DATA_TYPE)
350#define OBD_IOC_PARSE _IOWR('f', 182, OBD_IOC_DATA_TYPE) 350#define OBD_IOC_PARSE _IOWR('f', 182, OBD_IOC_DATA_TYPE)
@@ -352,7 +352,7 @@ static inline void obd_ioctl_freedata(char *buf, int len)
352#define OBD_IOC_PROCESS_CFG _IOWR('f', 184, OBD_IOC_DATA_TYPE) 352#define OBD_IOC_PROCESS_CFG _IOWR('f', 184, OBD_IOC_DATA_TYPE)
353#define OBD_IOC_DUMP_LOG _IOWR('f', 185, OBD_IOC_DATA_TYPE) 353#define OBD_IOC_DUMP_LOG _IOWR('f', 185, OBD_IOC_DATA_TYPE)
354#define OBD_IOC_CLEAR_LOG _IOWR('f', 186, OBD_IOC_DATA_TYPE) 354#define OBD_IOC_CLEAR_LOG _IOWR('f', 186, OBD_IOC_DATA_TYPE)
355#define OBD_IOC_PARAM _IOW ('f', 187, OBD_IOC_DATA_TYPE) 355#define OBD_IOC_PARAM _IOW('f', 187, OBD_IOC_DATA_TYPE)
356#define OBD_IOC_POOL _IOWR('f', 188, OBD_IOC_DATA_TYPE) 356#define OBD_IOC_POOL _IOWR('f', 188, OBD_IOC_DATA_TYPE)
357#define OBD_IOC_REPLACE_NIDS _IOWR('f', 189, OBD_IOC_DATA_TYPE) 357#define OBD_IOC_REPLACE_NIDS _IOWR('f', 189, OBD_IOC_DATA_TYPE)
358 358
@@ -522,6 +522,28 @@ struct l_wait_info {
522 sigmask(SIGTERM) | sigmask(SIGQUIT) | \ 522 sigmask(SIGTERM) | sigmask(SIGQUIT) | \
523 sigmask(SIGALRM)) 523 sigmask(SIGALRM))
524 524
525/**
526 * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
527 * waiting threads, which is not always desirable because all threads will
528 * be waken up again and again, even user only needs a few of them to be
529 * active most time. This is not good for performance because cache can
530 * be polluted by different threads.
531 *
532 * LIFO list can resolve this problem because we always wakeup the most
533 * recent active thread by default.
534 *
535 * NB: please don't call non-exclusive & exclusive wait on the same
536 * waitq if add_wait_queue_exclusive_head is used.
537 */
538#define add_wait_queue_exclusive_head(waitq, link) \
539{ \
540 unsigned long flags; \
541 \
542 spin_lock_irqsave(&((waitq)->lock), flags); \
543 __add_wait_queue_exclusive(waitq, link); \
544 spin_unlock_irqrestore(&((waitq)->lock), flags); \
545}
546
525/* 547/*
526 * wait for @condition to become true, but no longer than timeout, specified 548 * wait for @condition to become true, but no longer than timeout, specified
527 * by @info. 549 * by @info.
@@ -578,7 +600,7 @@ do { \
578 \ 600 \
579 if (condition) \ 601 if (condition) \
580 break; \ 602 break; \
581 if (cfs_signal_pending()) { \ 603 if (signal_pending(current)) { \
582 if (info->lwi_on_signal && \ 604 if (info->lwi_on_signal && \
583 (__timeout == 0 || __allow_intr)) { \ 605 (__timeout == 0 || __allow_intr)) { \
584 if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \ 606 if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index af77eb359c43..f267ff8a6ec8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -64,9 +64,27 @@ struct obd_export;
64struct ptlrpc_request; 64struct ptlrpc_request;
65struct obd_device; 65struct obd_device;
66 66
67/**
68 * Serializes in-flight MDT-modifying RPC requests to preserve idempotency.
69 *
70 * This mutex is used to implement execute-once semantics on the MDT.
71 * The MDT stores the last transaction ID and result for every client in
72 * its last_rcvd file. If the client doesn't get a reply, it can safely
73 * resend the request and the MDT will reconstruct the reply being aware
74 * that the request has already been executed. Without this lock,
75 * execution status of concurrent in-flight requests would be
76 * overwritten.
77 *
78 * This design limits the extent to which we can keep a full pipeline of
79 * in-flight requests from a single client. This limitation could be
80 * overcome by allowing multiple slots per client in the last_rcvd file.
81 */
67struct mdc_rpc_lock { 82struct mdc_rpc_lock {
83 /** Lock protecting in-flight RPC concurrency. */
68 struct mutex rpcl_mutex; 84 struct mutex rpcl_mutex;
85 /** Intent associated with currently executing request. */
69 struct lookup_intent *rpcl_it; 86 struct lookup_intent *rpcl_it;
87 /** Used for MDS/RPC load testing purposes. */
70 int rpcl_fakes; 88 int rpcl_fakes;
71}; 89};
72 90
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 69586a522eb7..a7973d5de168 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -1327,7 +1327,9 @@ struct ptlrpc_request {
1327 /* allow the req to be sent if the import is in recovery 1327 /* allow the req to be sent if the import is in recovery
1328 * status 1328 * status
1329 */ 1329 */
1330 rq_allow_replay:1; 1330 rq_allow_replay:1,
1331 /* bulk request, sent to server, but uncommitted */
1332 rq_unstable:1;
1331 1333
1332 unsigned int rq_nr_resend; 1334 unsigned int rq_nr_resend;
1333 1335
diff --git a/drivers/staging/lustre/lustre/include/lustre_param.h b/drivers/staging/lustre/lustre/include/lustre_param.h
index 383fe6febe4b..a42cf90c1cd8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_param.h
+++ b/drivers/staging/lustre/lustre/include/lustre_param.h
@@ -89,6 +89,7 @@ int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh);
89 89
90/* Prefixes for parameters handled by obd's proc methods (XXX_process_config) */ 90/* Prefixes for parameters handled by obd's proc methods (XXX_process_config) */
91#define PARAM_OST "ost." 91#define PARAM_OST "ost."
92#define PARAM_OSD "osd."
92#define PARAM_OSC "osc." 93#define PARAM_OSC "osc."
93#define PARAM_MDT "mdt." 94#define PARAM_MDT "mdt."
94#define PARAM_MDD "mdd." 95#define PARAM_MDD "mdd."
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index b2e67fcf9ef1..0aac4391ea16 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -137,6 +137,7 @@ extern struct req_format RQF_MGS_CONFIG_READ;
137/* fid/fld req_format */ 137/* fid/fld req_format */
138extern struct req_format RQF_SEQ_QUERY; 138extern struct req_format RQF_SEQ_QUERY;
139extern struct req_format RQF_FLD_QUERY; 139extern struct req_format RQF_FLD_QUERY;
140extern struct req_format RQF_FLD_READ;
140/* MDS req_format */ 141/* MDS req_format */
141extern struct req_format RQF_MDS_CONNECT; 142extern struct req_format RQF_MDS_CONNECT;
142extern struct req_format RQF_MDS_DISCONNECT; 143extern struct req_format RQF_MDS_DISCONNECT;
@@ -199,7 +200,7 @@ extern struct req_format RQF_OST_BRW_READ;
199extern struct req_format RQF_OST_BRW_WRITE; 200extern struct req_format RQF_OST_BRW_WRITE;
200extern struct req_format RQF_OST_STATFS; 201extern struct req_format RQF_OST_STATFS;
201extern struct req_format RQF_OST_SET_GRANT_INFO; 202extern struct req_format RQF_OST_SET_GRANT_INFO;
202extern struct req_format RQF_OST_GET_INFO_GENERIC; 203extern struct req_format RQF_OST_GET_INFO;
203extern struct req_format RQF_OST_GET_INFO_LAST_ID; 204extern struct req_format RQF_OST_GET_INFO_LAST_ID;
204extern struct req_format RQF_OST_GET_INFO_LAST_FID; 205extern struct req_format RQF_OST_GET_INFO_LAST_FID;
205extern struct req_format RQF_OST_SET_INFO_LAST_FID; 206extern struct req_format RQF_OST_SET_INFO_LAST_FID;
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 4264d97650ec..2d926e0ee647 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -37,7 +37,7 @@
37#ifndef __OBD_H 37#ifndef __OBD_H
38#define __OBD_H 38#define __OBD_H
39 39
40#include "linux/obd.h" 40#include <linux/spinlock.h>
41 41
42#define IOC_OSC_TYPE 'h' 42#define IOC_OSC_TYPE 'h'
43#define IOC_OSC_MIN_NR 20 43#define IOC_OSC_MIN_NR 20
@@ -54,6 +54,7 @@
54#include "lustre_export.h" 54#include "lustre_export.h"
55#include "lustre_fid.h" 55#include "lustre_fid.h"
56#include "lustre_fld.h" 56#include "lustre_fld.h"
57#include "lustre_intent.h"
57 58
58#define MAX_OBD_DEVICES 8192 59#define MAX_OBD_DEVICES 8192
59 60
@@ -165,9 +166,6 @@ struct obd_info {
165 obd_enqueue_update_f oi_cb_up; 166 obd_enqueue_update_f oi_cb_up;
166}; 167};
167 168
168void lov_stripe_lock(struct lov_stripe_md *md);
169void lov_stripe_unlock(struct lov_stripe_md *md);
170
171struct obd_type { 169struct obd_type {
172 struct list_head typ_chain; 170 struct list_head typ_chain;
173 struct obd_ops *typ_dt_ops; 171 struct obd_ops *typ_dt_ops;
@@ -293,14 +291,10 @@ struct client_obd {
293 * blocking everywhere, but we don't want to slow down fast-path of 291 * blocking everywhere, but we don't want to slow down fast-path of
294 * our main platform.) 292 * our main platform.)
295 * 293 *
296 * Exact type of ->cl_loi_list_lock is defined in arch/obd.h together
297 * with client_obd_list_{un,}lock() and
298 * client_obd_list_lock_{init,done}() functions.
299 *
300 * NB by Jinshan: though field names are still _loi_, but actually 294 * NB by Jinshan: though field names are still _loi_, but actually
301 * osc_object{}s are in the list. 295 * osc_object{}s are in the list.
302 */ 296 */
303 struct client_obd_lock cl_loi_list_lock; 297 spinlock_t cl_loi_list_lock;
304 struct list_head cl_loi_ready_list; 298 struct list_head cl_loi_ready_list;
305 struct list_head cl_loi_hp_ready_list; 299 struct list_head cl_loi_hp_ready_list;
306 struct list_head cl_loi_write_list; 300 struct list_head cl_loi_write_list;
@@ -327,7 +321,8 @@ struct client_obd {
327 atomic_t cl_lru_shrinkers; 321 atomic_t cl_lru_shrinkers;
328 atomic_t cl_lru_in_list; 322 atomic_t cl_lru_in_list;
329 struct list_head cl_lru_list; /* lru page list */ 323 struct list_head cl_lru_list; /* lru page list */
330 struct client_obd_lock cl_lru_list_lock; /* page list protector */ 324 spinlock_t cl_lru_list_lock; /* page list protector */
325 atomic_t cl_unstable_count;
331 326
332 /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */ 327 /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
333 atomic_t cl_destroy_in_flight; 328 atomic_t cl_destroy_in_flight;
@@ -364,6 +359,7 @@ struct client_obd {
364 359
365 /* ptlrpc work for writeback in ptlrpcd context */ 360 /* ptlrpc work for writeback in ptlrpcd context */
366 void *cl_writeback_work; 361 void *cl_writeback_work;
362 void *cl_lru_work;
367 /* hash tables for osc_quota_info */ 363 /* hash tables for osc_quota_info */
368 struct cfs_hash *cl_quota_hash[MAXQUOTAS]; 364 struct cfs_hash *cl_quota_hash[MAXQUOTAS];
369}; 365};
@@ -391,45 +387,9 @@ struct ost_pool {
391 struct rw_semaphore op_rw_sem; /* to protect ost_pool use */ 387 struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
392}; 388};
393 389
394/* Round-robin allocator data */
395struct lov_qos_rr {
396 __u32 lqr_start_idx; /* start index of new inode */
397 __u32 lqr_offset_idx; /* aliasing for start_idx */
398 int lqr_start_count; /* reseed counter */
399 struct ost_pool lqr_pool; /* round-robin optimized list */
400 unsigned long lqr_dirty:1; /* recalc round-robin list */
401};
402
403/* allow statfs data caching for 1 second */ 390/* allow statfs data caching for 1 second */
404#define OBD_STATFS_CACHE_SECONDS 1 391#define OBD_STATFS_CACHE_SECONDS 1
405 392
406struct lov_statfs_data {
407 struct obd_info lsd_oi;
408 struct obd_statfs lsd_statfs;
409};
410
411/* Stripe placement optimization */
412struct lov_qos {
413 struct list_head lq_oss_list; /* list of OSSs that targets use */
414 struct rw_semaphore lq_rw_sem;
415 __u32 lq_active_oss_count;
416 unsigned int lq_prio_free; /* priority for free space */
417 unsigned int lq_threshold_rr;/* priority for rr */
418 struct lov_qos_rr lq_rr; /* round robin qos data */
419 unsigned long lq_dirty:1, /* recalc qos data */
420 lq_same_space:1,/* the ost's all have approx.
421 * the same space avail
422 */
423 lq_reset:1, /* zero current penalties */
424 lq_statfs_in_progress:1; /* statfs op in
425 progress */
426 /* qos statfs data */
427 struct lov_statfs_data *lq_statfs_data;
428 wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs
429 * requests completion
430 */
431};
432
433struct lov_tgt_desc { 393struct lov_tgt_desc {
434 struct list_head ltd_kill; 394 struct list_head ltd_kill;
435 struct obd_uuid ltd_uuid; 395 struct obd_uuid ltd_uuid;
@@ -442,25 +402,6 @@ struct lov_tgt_desc {
442 ltd_reap:1; /* should this target be deleted */ 402 ltd_reap:1; /* should this target be deleted */
443}; 403};
444 404
445/* Pool metadata */
446#define pool_tgt_size(_p) _p->pool_obds.op_size
447#define pool_tgt_count(_p) _p->pool_obds.op_count
448#define pool_tgt_array(_p) _p->pool_obds.op_array
449#define pool_tgt_rw_sem(_p) _p->pool_obds.op_rw_sem
450
451struct pool_desc {
452 char pool_name[LOV_MAXPOOLNAME + 1]; /* name of pool */
453 struct ost_pool pool_obds; /* pool members */
454 atomic_t pool_refcount; /* pool ref. counter */
455 struct lov_qos_rr pool_rr; /* round robin qos */
456 struct hlist_node pool_hash; /* access by poolname */
457 struct list_head pool_list; /* serial access */
458 struct dentry *pool_debugfs_entry; /* file in debugfs */
459 struct obd_device *pool_lobd; /* obd of the lov/lod to which
460 * this pool belongs
461 */
462};
463
464struct lov_obd { 405struct lov_obd {
465 struct lov_desc desc; 406 struct lov_desc desc;
466 struct lov_tgt_desc **lov_tgts; /* sparse array */ 407 struct lov_tgt_desc **lov_tgts; /* sparse array */
@@ -468,8 +409,6 @@ struct lov_obd {
468 struct mutex lov_lock; 409 struct mutex lov_lock;
469 struct obd_connect_data lov_ocd; 410 struct obd_connect_data lov_ocd;
470 atomic_t lov_refcount; 411 atomic_t lov_refcount;
471 __u32 lov_tgt_count; /* how many OBD's */
472 __u32 lov_active_tgt_count; /* how many active */
473 __u32 lov_death_row;/* tgts scheduled to be deleted */ 412 __u32 lov_death_row;/* tgts scheduled to be deleted */
474 __u32 lov_tgt_size; /* size of tgts array */ 413 __u32 lov_tgt_size; /* size of tgts array */
475 int lov_connects; 414 int lov_connects;
@@ -479,7 +418,7 @@ struct lov_obd {
479 struct dentry *lov_pool_debugfs_entry; 418 struct dentry *lov_pool_debugfs_entry;
480 enum lustre_sec_part lov_sp_me; 419 enum lustre_sec_part lov_sp_me;
481 420
482 /* Cached LRU pages from upper layer */ 421 /* Cached LRU and unstable data from upper layer */
483 void *lov_cache; 422 void *lov_cache;
484 423
485 struct rw_semaphore lov_notify_lock; 424 struct rw_semaphore lov_notify_lock;
@@ -511,7 +450,7 @@ struct lmv_obd {
511 struct obd_uuid cluuid; 450 struct obd_uuid cluuid;
512 struct obd_export *exp; 451 struct obd_export *exp;
513 452
514 struct mutex init_mutex; 453 struct mutex lmv_init_mutex;
515 int connected; 454 int connected;
516 int max_easize; 455 int max_easize;
517 int max_def_easize; 456 int max_def_easize;
diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h
index 637fa22110a4..f6c18df906a8 100644
--- a/drivers/staging/lustre/lustre/include/obd_cksum.h
+++ b/drivers/staging/lustre/lustre/include/obd_cksum.h
@@ -35,6 +35,7 @@
35#ifndef __OBD_CKSUM 35#ifndef __OBD_CKSUM
36#define __OBD_CKSUM 36#define __OBD_CKSUM
37#include "../../include/linux/libcfs/libcfs.h" 37#include "../../include/linux/libcfs/libcfs.h"
38#include "../../include/linux/libcfs/libcfs_crypto.h"
38#include "lustre/lustre_idl.h" 39#include "lustre/lustre_idl.h"
39 40
40static inline unsigned char cksum_obd2cfs(enum cksum_type cksum_type) 41static inline unsigned char cksum_obd2cfs(enum cksum_type cksum_type)
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 706869f8c98f..32863bcb30b9 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -477,7 +477,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
477 struct lu_context session_ctx; 477 struct lu_context session_ctx;
478 struct lu_env env; 478 struct lu_env env;
479 479
480 lu_context_init(&session_ctx, LCT_SESSION); 480 lu_context_init(&session_ctx, LCT_SESSION | LCT_SERVER_SESSION);
481 session_ctx.lc_thread = NULL; 481 session_ctx.lc_thread = NULL;
482 lu_context_enter(&session_ctx); 482 lu_context_enter(&session_ctx);
483 483
@@ -490,8 +490,9 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
490 obd->obd_lu_dev = d; 490 obd->obd_lu_dev = d;
491 d->ld_obd = obd; 491 d->ld_obd = obd;
492 rc = 0; 492 rc = 0;
493 } else 493 } else {
494 rc = PTR_ERR(d); 494 rc = PTR_ERR(d);
495 }
495 } 496 }
496 lu_context_exit(&session_ctx); 497 lu_context_exit(&session_ctx);
497 lu_context_fini(&session_ctx); 498 lu_context_fini(&session_ctx);
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index f8ee3a3254ba..60034d39b00d 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -58,6 +58,7 @@ extern int at_early_margin;
58extern int at_extra; 58extern int at_extra;
59extern unsigned int obd_sync_filter; 59extern unsigned int obd_sync_filter;
60extern unsigned int obd_max_dirty_pages; 60extern unsigned int obd_max_dirty_pages;
61extern atomic_t obd_unstable_pages;
61extern atomic_t obd_dirty_pages; 62extern atomic_t obd_dirty_pages;
62extern atomic_t obd_dirty_transit_pages; 63extern atomic_t obd_dirty_transit_pages;
63extern char obd_jobid_var[]; 64extern char obd_jobid_var[];
@@ -289,6 +290,7 @@ extern char obd_jobid_var[];
289#define OBD_FAIL_OST_ENOINO 0x229 290#define OBD_FAIL_OST_ENOINO 0x229
290#define OBD_FAIL_OST_DQACQ_NET 0x230 291#define OBD_FAIL_OST_DQACQ_NET 0x230
291#define OBD_FAIL_OST_STATFS_EINPROGRESS 0x231 292#define OBD_FAIL_OST_STATFS_EINPROGRESS 0x231
293#define OBD_FAIL_OST_SET_INFO_NET 0x232
292 294
293#define OBD_FAIL_LDLM 0x300 295#define OBD_FAIL_LDLM 0x300
294#define OBD_FAIL_LDLM_NAMESPACE_NEW 0x301 296#define OBD_FAIL_LDLM_NAMESPACE_NEW 0x301
@@ -319,6 +321,7 @@ extern char obd_jobid_var[];
319#define OBD_FAIL_LDLM_AGL_DELAY 0x31a 321#define OBD_FAIL_LDLM_AGL_DELAY 0x31a
320#define OBD_FAIL_LDLM_AGL_NOLOCK 0x31b 322#define OBD_FAIL_LDLM_AGL_NOLOCK 0x31b
321#define OBD_FAIL_LDLM_OST_LVB 0x31c 323#define OBD_FAIL_LDLM_OST_LVB 0x31c
324#define OBD_FAIL_LDLM_ENQUEUE_HANG 0x31d
322 325
323/* LOCKLESS IO */ 326/* LOCKLESS IO */
324#define OBD_FAIL_LDLM_SET_CONTENTION 0x385 327#define OBD_FAIL_LDLM_SET_CONTENTION 0x385
@@ -426,6 +429,7 @@ extern char obd_jobid_var[];
426 429
427#define OBD_FAIL_FLD 0x1100 430#define OBD_FAIL_FLD 0x1100
428#define OBD_FAIL_FLD_QUERY_NET 0x1101 431#define OBD_FAIL_FLD_QUERY_NET 0x1101
432#define OBD_FAIL_FLD_READ_NET 0x1102
429 433
430#define OBD_FAIL_SEC_CTX 0x1200 434#define OBD_FAIL_SEC_CTX 0x1200
431#define OBD_FAIL_SEC_CTX_INIT_NET 0x1201 435#define OBD_FAIL_SEC_CTX_INIT_NET 0x1201
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
deleted file mode 100644
index 96141d17d07f..000000000000
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ /dev/null
@@ -1,1203 +0,0 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26/*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2015, Intel Corporation.
31 */
32/*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * cl code shared between vvp and liblustre (and other Lustre clients in the
37 * future).
38 *
39 * Author: Nikita Danilov <nikita.danilov@sun.com>
40 */
41
42#define DEBUG_SUBSYSTEM S_LLITE
43
44#include "../../include/linux/libcfs/libcfs.h"
45# include <linux/fs.h>
46# include <linux/sched.h>
47# include <linux/mm.h>
48# include <linux/quotaops.h>
49# include <linux/highmem.h>
50# include <linux/pagemap.h>
51# include <linux/rbtree.h>
52
53#include "../include/obd.h"
54#include "../include/obd_support.h"
55#include "../include/lustre_fid.h"
56#include "../include/lustre_lite.h"
57#include "../include/lustre_dlm.h"
58#include "../include/lustre_ver.h"
59#include "../include/lustre_mdc.h"
60#include "../include/cl_object.h"
61
62#include "../include/lclient.h"
63
64#include "../llite/llite_internal.h"
65
66static const struct cl_req_operations ccc_req_ops;
67
68/*
69 * ccc_ prefix stands for "Common Client Code".
70 */
71
72static struct kmem_cache *ccc_lock_kmem;
73static struct kmem_cache *ccc_object_kmem;
74static struct kmem_cache *ccc_thread_kmem;
75static struct kmem_cache *ccc_session_kmem;
76static struct kmem_cache *ccc_req_kmem;
77
78static struct lu_kmem_descr ccc_caches[] = {
79 {
80 .ckd_cache = &ccc_lock_kmem,
81 .ckd_name = "ccc_lock_kmem",
82 .ckd_size = sizeof(struct ccc_lock)
83 },
84 {
85 .ckd_cache = &ccc_object_kmem,
86 .ckd_name = "ccc_object_kmem",
87 .ckd_size = sizeof(struct ccc_object)
88 },
89 {
90 .ckd_cache = &ccc_thread_kmem,
91 .ckd_name = "ccc_thread_kmem",
92 .ckd_size = sizeof(struct ccc_thread_info),
93 },
94 {
95 .ckd_cache = &ccc_session_kmem,
96 .ckd_name = "ccc_session_kmem",
97 .ckd_size = sizeof(struct ccc_session)
98 },
99 {
100 .ckd_cache = &ccc_req_kmem,
101 .ckd_name = "ccc_req_kmem",
102 .ckd_size = sizeof(struct ccc_req)
103 },
104 {
105 .ckd_cache = NULL
106 }
107};
108
109/*****************************************************************************
110 *
111 * Vvp device and device type functions.
112 *
113 */
114
115void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key)
116{
117 struct ccc_thread_info *info;
118
119 info = kmem_cache_zalloc(ccc_thread_kmem, GFP_NOFS);
120 if (!info)
121 info = ERR_PTR(-ENOMEM);
122 return info;
123}
124
125void ccc_key_fini(const struct lu_context *ctx,
126 struct lu_context_key *key, void *data)
127{
128 struct ccc_thread_info *info = data;
129
130 kmem_cache_free(ccc_thread_kmem, info);
131}
132
133void *ccc_session_key_init(const struct lu_context *ctx,
134 struct lu_context_key *key)
135{
136 struct ccc_session *session;
137
138 session = kmem_cache_zalloc(ccc_session_kmem, GFP_NOFS);
139 if (!session)
140 session = ERR_PTR(-ENOMEM);
141 return session;
142}
143
144void ccc_session_key_fini(const struct lu_context *ctx,
145 struct lu_context_key *key, void *data)
146{
147 struct ccc_session *session = data;
148
149 kmem_cache_free(ccc_session_kmem, session);
150}
151
152struct lu_context_key ccc_key = {
153 .lct_tags = LCT_CL_THREAD,
154 .lct_init = ccc_key_init,
155 .lct_fini = ccc_key_fini
156};
157
158struct lu_context_key ccc_session_key = {
159 .lct_tags = LCT_SESSION,
160 .lct_init = ccc_session_key_init,
161 .lct_fini = ccc_session_key_fini
162};
163
164/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
165/* LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key); */
166
167int ccc_device_init(const struct lu_env *env, struct lu_device *d,
168 const char *name, struct lu_device *next)
169{
170 struct ccc_device *vdv;
171 int rc;
172
173 vdv = lu2ccc_dev(d);
174 vdv->cdv_next = lu2cl_dev(next);
175
176 LASSERT(d->ld_site && next->ld_type);
177 next->ld_site = d->ld_site;
178 rc = next->ld_type->ldt_ops->ldto_device_init(
179 env, next, next->ld_type->ldt_name, NULL);
180 if (rc == 0) {
181 lu_device_get(next);
182 lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
183 }
184 return rc;
185}
186
187struct lu_device *ccc_device_fini(const struct lu_env *env,
188 struct lu_device *d)
189{
190 return cl2lu_dev(lu2ccc_dev(d)->cdv_next);
191}
192
193struct lu_device *ccc_device_alloc(const struct lu_env *env,
194 struct lu_device_type *t,
195 struct lustre_cfg *cfg,
196 const struct lu_device_operations *luops,
197 const struct cl_device_operations *clops)
198{
199 struct ccc_device *vdv;
200 struct lu_device *lud;
201 struct cl_site *site;
202 int rc;
203
204 vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
205 if (!vdv)
206 return ERR_PTR(-ENOMEM);
207
208 lud = &vdv->cdv_cl.cd_lu_dev;
209 cl_device_init(&vdv->cdv_cl, t);
210 ccc2lu_dev(vdv)->ld_ops = luops;
211 vdv->cdv_cl.cd_ops = clops;
212
213 site = kzalloc(sizeof(*site), GFP_NOFS);
214 if (site) {
215 rc = cl_site_init(site, &vdv->cdv_cl);
216 if (rc == 0)
217 rc = lu_site_init_finish(&site->cs_lu);
218 else {
219 LASSERT(!lud->ld_site);
220 CERROR("Cannot init lu_site, rc %d.\n", rc);
221 kfree(site);
222 }
223 } else
224 rc = -ENOMEM;
225 if (rc != 0) {
226 ccc_device_free(env, lud);
227 lud = ERR_PTR(rc);
228 }
229 return lud;
230}
231
232struct lu_device *ccc_device_free(const struct lu_env *env,
233 struct lu_device *d)
234{
235 struct ccc_device *vdv = lu2ccc_dev(d);
236 struct cl_site *site = lu2cl_site(d->ld_site);
237 struct lu_device *next = cl2lu_dev(vdv->cdv_next);
238
239 if (d->ld_site) {
240 cl_site_fini(site);
241 kfree(site);
242 }
243 cl_device_fini(lu2cl_dev(d));
244 kfree(vdv);
245 return next;
246}
247
248int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
249 struct cl_req *req)
250{
251 struct ccc_req *vrq;
252 int result;
253
254 vrq = kmem_cache_zalloc(ccc_req_kmem, GFP_NOFS);
255 if (vrq) {
256 cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
257 result = 0;
258 } else
259 result = -ENOMEM;
260 return result;
261}
262
263/**
264 * An `emergency' environment used by ccc_inode_fini() when cl_env_get()
265 * fails. Access to this environment is serialized by ccc_inode_fini_guard
266 * mutex.
267 */
268static struct lu_env *ccc_inode_fini_env;
269
270/**
271 * A mutex serializing calls to slp_inode_fini() under extreme memory
272 * pressure, when environments cannot be allocated.
273 */
274static DEFINE_MUTEX(ccc_inode_fini_guard);
275static int dummy_refcheck;
276
277int ccc_global_init(struct lu_device_type *device_type)
278{
279 int result;
280
281 result = lu_kmem_init(ccc_caches);
282 if (result)
283 return result;
284
285 result = lu_device_type_init(device_type);
286 if (result)
287 goto out_kmem;
288
289 ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
290 LCT_REMEMBER|LCT_NOREF);
291 if (IS_ERR(ccc_inode_fini_env)) {
292 result = PTR_ERR(ccc_inode_fini_env);
293 goto out_device;
294 }
295
296 ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
297 return 0;
298out_device:
299 lu_device_type_fini(device_type);
300out_kmem:
301 lu_kmem_fini(ccc_caches);
302 return result;
303}
304
305void ccc_global_fini(struct lu_device_type *device_type)
306{
307 if (ccc_inode_fini_env) {
308 cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
309 ccc_inode_fini_env = NULL;
310 }
311 lu_device_type_fini(device_type);
312 lu_kmem_fini(ccc_caches);
313}
314
315/*****************************************************************************
316 *
317 * Object operations.
318 *
319 */
320
321struct lu_object *ccc_object_alloc(const struct lu_env *env,
322 const struct lu_object_header *unused,
323 struct lu_device *dev,
324 const struct cl_object_operations *clops,
325 const struct lu_object_operations *luops)
326{
327 struct ccc_object *vob;
328 struct lu_object *obj;
329
330 vob = kmem_cache_zalloc(ccc_object_kmem, GFP_NOFS);
331 if (vob) {
332 struct cl_object_header *hdr;
333
334 obj = ccc2lu(vob);
335 hdr = &vob->cob_header;
336 cl_object_header_init(hdr);
337 lu_object_init(obj, &hdr->coh_lu, dev);
338 lu_object_add_top(&hdr->coh_lu, obj);
339
340 vob->cob_cl.co_ops = clops;
341 obj->lo_ops = luops;
342 } else
343 obj = NULL;
344 return obj;
345}
346
347int ccc_object_init0(const struct lu_env *env,
348 struct ccc_object *vob,
349 const struct cl_object_conf *conf)
350{
351 vob->cob_inode = conf->coc_inode;
352 vob->cob_transient_pages = 0;
353 cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
354 return 0;
355}
356
357int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
358 const struct lu_object_conf *conf)
359{
360 struct ccc_device *dev = lu2ccc_dev(obj->lo_dev);
361 struct ccc_object *vob = lu2ccc(obj);
362 struct lu_object *below;
363 struct lu_device *under;
364 int result;
365
366 under = &dev->cdv_next->cd_lu_dev;
367 below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
368 if (below) {
369 const struct cl_object_conf *cconf;
370
371 cconf = lu2cl_conf(conf);
372 INIT_LIST_HEAD(&vob->cob_pending_list);
373 lu_object_add(obj, below);
374 result = ccc_object_init0(env, vob, cconf);
375 } else
376 result = -ENOMEM;
377 return result;
378}
379
380void ccc_object_free(const struct lu_env *env, struct lu_object *obj)
381{
382 struct ccc_object *vob = lu2ccc(obj);
383
384 lu_object_fini(obj);
385 lu_object_header_fini(obj->lo_header);
386 kmem_cache_free(ccc_object_kmem, vob);
387}
388
389int ccc_lock_init(const struct lu_env *env,
390 struct cl_object *obj, struct cl_lock *lock,
391 const struct cl_io *unused,
392 const struct cl_lock_operations *lkops)
393{
394 struct ccc_lock *clk;
395 int result;
396
397 CLOBINVRNT(env, obj, ccc_object_invariant(obj));
398
399 clk = kmem_cache_zalloc(ccc_lock_kmem, GFP_NOFS);
400 if (clk) {
401 cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
402 result = 0;
403 } else
404 result = -ENOMEM;
405 return result;
406}
407
408int ccc_object_glimpse(const struct lu_env *env,
409 const struct cl_object *obj, struct ost_lvb *lvb)
410{
411 struct inode *inode = ccc_object_inode(obj);
412
413 lvb->lvb_mtime = cl_inode_mtime(inode);
414 lvb->lvb_atime = cl_inode_atime(inode);
415 lvb->lvb_ctime = cl_inode_ctime(inode);
416 /*
417 * LU-417: Add dirty pages block count lest i_blocks reports 0, some
418 * "cp" or "tar" on remote node may think it's a completely sparse file
419 * and skip it.
420 */
421 if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
422 lvb->lvb_blocks = dirty_cnt(inode);
423 return 0;
424}
425
426static void ccc_object_size_lock(struct cl_object *obj)
427{
428 struct inode *inode = ccc_object_inode(obj);
429
430 ll_inode_size_lock(inode);
431 cl_object_attr_lock(obj);
432}
433
434static void ccc_object_size_unlock(struct cl_object *obj)
435{
436 struct inode *inode = ccc_object_inode(obj);
437
438 cl_object_attr_unlock(obj);
439 ll_inode_size_unlock(inode);
440}
441
442/*****************************************************************************
443 *
444 * Page operations.
445 *
446 */
447
448struct page *ccc_page_vmpage(const struct lu_env *env,
449 const struct cl_page_slice *slice)
450{
451 return cl2vm_page(slice);
452}
453
454int ccc_page_is_under_lock(const struct lu_env *env,
455 const struct cl_page_slice *slice,
456 struct cl_io *io)
457{
458 struct ccc_io *cio = ccc_env_io(env);
459 struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
460 struct cl_page *page = slice->cpl_page;
461
462 int result;
463
464 if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
465 io->ci_type == CIT_FAULT) {
466 if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
467 result = -EBUSY;
468 else {
469 desc->cld_start = page->cp_index;
470 desc->cld_end = page->cp_index;
471 desc->cld_obj = page->cp_obj;
472 desc->cld_mode = CLM_READ;
473 result = cl_queue_match(&io->ci_lockset.cls_done,
474 desc) ? -EBUSY : 0;
475 }
476 } else
477 result = 0;
478 return result;
479}
480
481int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
482{
483 /*
484 * Cached read?
485 */
486 LBUG();
487 return 0;
488}
489
490int ccc_transient_page_prep(const struct lu_env *env,
491 const struct cl_page_slice *slice,
492 struct cl_io *unused)
493{
494 /* transient page should always be sent. */
495 return 0;
496}
497
498/*****************************************************************************
499 *
500 * Lock operations.
501 *
502 */
503
504void ccc_lock_delete(const struct lu_env *env,
505 const struct cl_lock_slice *slice)
506{
507 CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
508}
509
510void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
511{
512 struct ccc_lock *clk = cl2ccc_lock(slice);
513
514 kmem_cache_free(ccc_lock_kmem, clk);
515}
516
517int ccc_lock_enqueue(const struct lu_env *env,
518 const struct cl_lock_slice *slice,
519 struct cl_io *unused, __u32 enqflags)
520{
521 CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
522 return 0;
523}
524
525int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice)
526{
527 CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
528 return 0;
529}
530
531int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
532{
533 CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
534 return 0;
535}
536
537int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice)
538{
539 CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
540 return 0;
541}
542
543/**
544 * Implementation of cl_lock_operations::clo_fits_into() methods for ccc
545 * layer. This function is executed every time io finds an existing lock in
546 * the lock cache while creating new lock. This function has to decide whether
547 * cached lock "fits" into io.
548 *
549 * \param slice lock to be checked
550 * \param io IO that wants a lock.
551 *
552 * \see lov_lock_fits_into().
553 */
554int ccc_lock_fits_into(const struct lu_env *env,
555 const struct cl_lock_slice *slice,
556 const struct cl_lock_descr *need,
557 const struct cl_io *io)
558{
559 const struct cl_lock *lock = slice->cls_lock;
560 const struct cl_lock_descr *descr = &lock->cll_descr;
561 const struct ccc_io *cio = ccc_env_io(env);
562 int result;
563
564 /*
565 * Work around DLM peculiarity: it assumes that glimpse
566 * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns reads lock
567 * when asked for LCK_PW lock with LDLM_FL_HAS_INTENT flag set. Make
568 * sure that glimpse doesn't get CLM_WRITE top-lock, so that it
569 * doesn't enqueue CLM_WRITE sub-locks.
570 */
571 if (cio->cui_glimpse)
572 result = descr->cld_mode != CLM_WRITE;
573
574 /*
575 * Also, don't match incomplete write locks for read, otherwise read
576 * would enqueue missing sub-locks in the write mode.
577 */
578 else if (need->cld_mode != descr->cld_mode)
579 result = lock->cll_state >= CLS_ENQUEUED;
580 else
581 result = 1;
582 return result;
583}
584
585/**
586 * Implements cl_lock_operations::clo_state() method for ccc layer, invoked
587 * whenever lock state changes. Transfers object attributes, that might be
588 * updated as a result of lock acquiring into inode.
589 */
590void ccc_lock_state(const struct lu_env *env,
591 const struct cl_lock_slice *slice,
592 enum cl_lock_state state)
593{
594 struct cl_lock *lock = slice->cls_lock;
595
596 /*
597 * Refresh inode attributes when the lock is moving into CLS_HELD
598 * state, and only when this is a result of real enqueue, rather than
599 * of finding lock in the cache.
600 */
601 if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
602 struct cl_object *obj;
603 struct inode *inode;
604
605 obj = slice->cls_obj;
606 inode = ccc_object_inode(obj);
607
608 /* vmtruncate() sets the i_size
609 * under both a DLM lock and the
610 * ll_inode_size_lock(). If we don't get the
611 * ll_inode_size_lock() here we can match the DLM lock and
612 * reset i_size. generic_file_write can then trust the
613 * stale i_size when doing appending writes and effectively
614 * cancel the result of the truncate. Getting the
615 * ll_inode_size_lock() after the enqueue maintains the DLM
616 * -> ll_inode_size_lock() acquiring order.
617 */
618 if (lock->cll_descr.cld_start == 0 &&
619 lock->cll_descr.cld_end == CL_PAGE_EOF)
620 cl_merge_lvb(env, inode);
621 }
622}
623
624/*****************************************************************************
625 *
626 * io operations.
627 *
628 */
629
630int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
631 __u32 enqflags, enum cl_lock_mode mode,
632 pgoff_t start, pgoff_t end)
633{
634 struct ccc_io *cio = ccc_env_io(env);
635 struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
636 struct cl_object *obj = io->ci_obj;
637
638 CLOBINVRNT(env, obj, ccc_object_invariant(obj));
639
640 CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
641
642 memset(&cio->cui_link, 0, sizeof(cio->cui_link));
643
644 if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
645 descr->cld_mode = CLM_GROUP;
646 descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid;
647 } else {
648 descr->cld_mode = mode;
649 }
650 descr->cld_obj = obj;
651 descr->cld_start = start;
652 descr->cld_end = end;
653 descr->cld_enq_flags = enqflags;
654
655 cl_io_lock_add(env, io, &cio->cui_link);
656 return 0;
657}
658
659void ccc_io_update_iov(const struct lu_env *env,
660 struct ccc_io *cio, struct cl_io *io)
661{
662 size_t size = io->u.ci_rw.crw_count;
663
664 if (!cl_is_normalio(env, io) || !cio->cui_iter)
665 return;
666
667 iov_iter_truncate(cio->cui_iter, size);
668}
669
670int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
671 __u32 enqflags, enum cl_lock_mode mode,
672 loff_t start, loff_t end)
673{
674 struct cl_object *obj = io->ci_obj;
675
676 return ccc_io_one_lock_index(env, io, enqflags, mode,
677 cl_index(obj, start), cl_index(obj, end));
678}
679
680void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
681{
682 CLOBINVRNT(env, ios->cis_io->ci_obj,
683 ccc_object_invariant(ios->cis_io->ci_obj));
684}
685
686void ccc_io_advance(const struct lu_env *env,
687 const struct cl_io_slice *ios,
688 size_t nob)
689{
690 struct ccc_io *cio = cl2ccc_io(env, ios);
691 struct cl_io *io = ios->cis_io;
692 struct cl_object *obj = ios->cis_io->ci_obj;
693
694 CLOBINVRNT(env, obj, ccc_object_invariant(obj));
695
696 if (!cl_is_normalio(env, io))
697 return;
698
699 iov_iter_reexpand(cio->cui_iter, cio->cui_tot_count -= nob);
700}
701
702/**
703 * Helper function that if necessary adjusts file size (inode->i_size), when
704 * position at the offset \a pos is accessed. File size can be arbitrary stale
705 * on a Lustre client, but client at least knows KMS. If accessed area is
706 * inside [0, KMS], set file size to KMS, otherwise glimpse file size.
707 *
708 * Locking: cl_isize_lock is used to serialize changes to inode size and to
709 * protect consistency between inode size and cl_object
710 * attributes. cl_object_size_lock() protects consistency between cl_attr's of
711 * top-object and sub-objects.
712 */
713int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
714 struct cl_io *io, loff_t start, size_t count, int *exceed)
715{
716 struct cl_attr *attr = ccc_env_thread_attr(env);
717 struct inode *inode = ccc_object_inode(obj);
718 loff_t pos = start + count - 1;
719 loff_t kms;
720 int result;
721
722 /*
723 * Consistency guarantees: following possibilities exist for the
724 * relation between region being accessed and real file size at this
725 * moment:
726 *
727 * (A): the region is completely inside of the file;
728 *
729 * (B-x): x bytes of region are inside of the file, the rest is
730 * outside;
731 *
732 * (C): the region is completely outside of the file.
733 *
734 * This classification is stable under DLM lock already acquired by
735 * the caller, because to change the class, other client has to take
736 * DLM lock conflicting with our lock. Also, any updates to ->i_size
737 * by other threads on this client are serialized by
738 * ll_inode_size_lock(). This guarantees that short reads are handled
739 * correctly in the face of concurrent writes and truncates.
740 */
741 ccc_object_size_lock(obj);
742 result = cl_object_attr_get(env, obj, attr);
743 if (result == 0) {
744 kms = attr->cat_kms;
745 if (pos > kms) {
746 /*
747 * A glimpse is necessary to determine whether we
748 * return a short read (B) or some zeroes at the end
749 * of the buffer (C)
750 */
751 ccc_object_size_unlock(obj);
752 result = cl_glimpse_lock(env, io, inode, obj, 0);
753 if (result == 0 && exceed) {
754 /* If objective page index exceed end-of-file
755 * page index, return directly. Do not expect
756 * kernel will check such case correctly.
757 * linux-2.6.18-128.1.1 miss to do that.
758 * --bug 17336
759 */
760 loff_t size = cl_isize_read(inode);
761 loff_t cur_index = start >> PAGE_SHIFT;
762 loff_t size_index = (size - 1) >>
763 PAGE_SHIFT;
764
765 if ((size == 0 && cur_index != 0) ||
766 size_index < cur_index)
767 *exceed = 1;
768 }
769 return result;
770 }
771 /*
772 * region is within kms and, hence, within real file
773 * size (A). We need to increase i_size to cover the
774 * read region so that generic_file_read() will do its
775 * job, but that doesn't mean the kms size is
776 * _correct_, it is only the _minimum_ size. If
777 * someone does a stat they will get the correct size
778 * which will always be >= the kms value here.
779 * b=11081
780 */
781 if (cl_isize_read(inode) < kms) {
782 cl_isize_write_nolock(inode, kms);
783 CDEBUG(D_VFSTRACE,
784 DFID" updating i_size %llu\n",
785 PFID(lu_object_fid(&obj->co_lu)),
786 (__u64)cl_isize_read(inode));
787
788 }
789 }
790 ccc_object_size_unlock(obj);
791 return result;
792}
793
794/*****************************************************************************
795 *
796 * Transfer operations.
797 *
798 */
799
800void ccc_req_completion(const struct lu_env *env,
801 const struct cl_req_slice *slice, int ioret)
802{
803 struct ccc_req *vrq;
804
805 if (ioret > 0)
806 cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
807
808 vrq = cl2ccc_req(slice);
809 kmem_cache_free(ccc_req_kmem, vrq);
810}
811
812/**
813 * Implementation of struct cl_req_operations::cro_attr_set() for ccc
814 * layer. ccc is responsible for
815 *
816 * - o_[mac]time
817 *
818 * - o_mode
819 *
820 * - o_parent_seq
821 *
822 * - o_[ug]id
823 *
824 * - o_parent_oid
825 *
826 * - o_parent_ver
827 *
828 * - o_ioepoch,
829 *
830 */
831void ccc_req_attr_set(const struct lu_env *env,
832 const struct cl_req_slice *slice,
833 const struct cl_object *obj,
834 struct cl_req_attr *attr, u64 flags)
835{
836 struct inode *inode;
837 struct obdo *oa;
838 u32 valid_flags;
839
840 oa = attr->cra_oa;
841 inode = ccc_object_inode(obj);
842 valid_flags = OBD_MD_FLTYPE;
843
844 if (slice->crs_req->crq_type == CRT_WRITE) {
845 if (flags & OBD_MD_FLEPOCH) {
846 oa->o_valid |= OBD_MD_FLEPOCH;
847 oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
848 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
849 OBD_MD_FLUID | OBD_MD_FLGID;
850 }
851 }
852 obdo_from_inode(oa, inode, valid_flags & flags);
853 obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
854 memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
855 JOBSTATS_JOBID_SIZE);
856}
857
858static const struct cl_req_operations ccc_req_ops = {
859 .cro_attr_set = ccc_req_attr_set,
860 .cro_completion = ccc_req_completion
861};
862
863int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
864{
865 struct lu_env *env;
866 struct cl_io *io;
867 int result;
868 int refcheck;
869
870 env = cl_env_get(&refcheck);
871 if (IS_ERR(env))
872 return PTR_ERR(env);
873
874 io = ccc_env_thread_io(env);
875 io->ci_obj = cl_i2info(inode)->lli_clob;
876
877 io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
878 io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
879 io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
880 io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
881 io->u.ci_setattr.sa_valid = attr->ia_valid;
882
883again:
884 if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
885 struct ccc_io *cio = ccc_env_io(env);
886
887 if (attr->ia_valid & ATTR_FILE)
888 /* populate the file descriptor for ftruncate to honor
889 * group lock - see LU-787
890 */
891 cio->cui_fd = cl_iattr2fd(inode, attr);
892
893 result = cl_io_loop(env, io);
894 } else {
895 result = io->ci_result;
896 }
897 cl_io_fini(env, io);
898 if (unlikely(io->ci_need_restart))
899 goto again;
900 /* HSM import case: file is released, cannot be restored
901 * no need to fail except if restore registration failed
902 * with -ENODATA
903 */
904 if (result == -ENODATA && io->ci_restore_needed &&
905 io->ci_result != -ENODATA)
906 result = 0;
907 cl_env_put(env, &refcheck);
908 return result;
909}
910
911/*****************************************************************************
912 *
913 * Type conversions.
914 *
915 */
916
917struct lu_device *ccc2lu_dev(struct ccc_device *vdv)
918{
919 return &vdv->cdv_cl.cd_lu_dev;
920}
921
922struct ccc_device *lu2ccc_dev(const struct lu_device *d)
923{
924 return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev);
925}
926
927struct ccc_device *cl2ccc_dev(const struct cl_device *d)
928{
929 return container_of0(d, struct ccc_device, cdv_cl);
930}
931
932struct lu_object *ccc2lu(struct ccc_object *vob)
933{
934 return &vob->cob_cl.co_lu;
935}
936
937struct ccc_object *lu2ccc(const struct lu_object *obj)
938{
939 return container_of0(obj, struct ccc_object, cob_cl.co_lu);
940}
941
942struct ccc_object *cl2ccc(const struct cl_object *obj)
943{
944 return container_of0(obj, struct ccc_object, cob_cl);
945}
946
947struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
948{
949 return container_of(slice, struct ccc_lock, clk_cl);
950}
951
952struct ccc_io *cl2ccc_io(const struct lu_env *env,
953 const struct cl_io_slice *slice)
954{
955 struct ccc_io *cio;
956
957 cio = container_of(slice, struct ccc_io, cui_cl);
958 LASSERT(cio == ccc_env_io(env));
959 return cio;
960}
961
962struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
963{
964 return container_of0(slice, struct ccc_req, crq_cl);
965}
966
967struct page *cl2vm_page(const struct cl_page_slice *slice)
968{
969 return cl2ccc_page(slice)->cpg_page;
970}
971
972/*****************************************************************************
973 *
974 * Accessors.
975 *
976 */
977int ccc_object_invariant(const struct cl_object *obj)
978{
979 struct inode *inode = ccc_object_inode(obj);
980 struct cl_inode_info *lli = cl_i2info(inode);
981
982 return (S_ISREG(cl_inode_mode(inode)) ||
983 /* i_mode of unlinked inode is zeroed. */
984 cl_inode_mode(inode) == 0) && lli->lli_clob == obj;
985}
986
987struct inode *ccc_object_inode(const struct cl_object *obj)
988{
989 return cl2ccc(obj)->cob_inode;
990}
991
/**
 * Initialize or update CLIO structures for regular files when new
 * meta-data arrives from the server.
 *
 * Allocates and inserts the cl_object directly when the inode has none
 * yet (new inode, I_NEW); otherwise pushes the new layout into the
 * existing object via cl_conf_set().
 *
 * \param inode regular file inode
 * \param md new file metadata from MDS
 *
 * \retval 0 on success, negative errno on failure.
 */
int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
{
	struct lu_env *env;
	struct cl_inode_info *lli;
	struct cl_object *clob;
	struct lu_site *site;
	struct lu_fid *fid;
	struct cl_object_conf conf = {
		.coc_inode = inode,
		.u = {
			.coc_md = md
		}
	};
	int result = 0;
	int refcheck;

	LASSERT(md->body->valid & OBD_MD_FLID);
	LASSERT(S_ISREG(cl_inode_mode(inode)));

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	site = cl_i2sbi(inode)->ll_site;
	lli = cl_i2info(inode);
	fid = &lli->lli_fid;
	LASSERT(fid_is_sane(fid));

	if (!lli->lli_clob) {
		/* clob is slave of inode, empty lli_clob means for new inode,
		 * there is no clob in cache with the given fid, so it is
		 * unnecessary to perform lookup-alloc-lookup-insert, just
		 * alloc and insert directly.
		 */
		LASSERT(inode->i_state & I_NEW);
		conf.coc_lu.loc_flags = LOC_F_NEW;
		clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
				      fid, &conf);
		if (!IS_ERR(clob)) {
			/*
			 * No locking is necessary, as new inode is
			 * locked by I_NEW bit.
			 */
			lli->lli_clob = clob;
			lli->lli_has_smd = lsm_has_objects(md->lsm);
			lu_object_ref_add(&clob->co_lu, "inode", inode);
		} else
			result = PTR_ERR(clob);
	} else {
		/* object already attached: update its configuration */
		result = cl_conf_set(env, lli->lli_clob, &conf);
	}

	cl_env_put(env, &refcheck);

	if (result != 0)
		CERROR("Failure to initialize cl object "DFID": %d\n",
		       PFID(fid), result);
	return result;
}
1060
/**
 * Wait for others drop their references of the object at first, then we drop
 * the last one, which will lead to the object be destroyed immediately.
 * Must be called after cl_object_kill() against this object.
 *
 * The reason we want to do this is: destroying top object will wait for sub
 * objects being destroyed first, so we can't let bottom layer (e.g. from ASTs)
 * to initiate top object destroying which may deadlock. See bz22520.
 */
static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
{
	struct lu_object_header *header = obj->co_lu.lo_header;
	wait_queue_t waiter;

	if (unlikely(atomic_read(&header->loh_ref) != 1)) {
		struct lu_site *site = obj->co_lu.lo_dev->ld_site;
		struct lu_site_bkt_data *bkt;

		/* park on the site bucket's queue until ours is the only
		 * remaining reference
		 */
		bkt = lu_site_bkt_from_fid(site, &header->loh_fid);

		init_waitqueue_entry(&waiter, current);
		add_wait_queue(&bkt->lsb_marche_funebre, &waiter);

		while (1) {
			/* set state before re-checking loh_ref so a wakeup
			 * between the check and schedule() is not lost
			 */
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&header->loh_ref) == 1)
				break;
			schedule();
		}

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
	}

	cl_object_put(env, obj);
}
1097
/**
 * Detach and destroy the cl_object attached to \a inode when the inode
 * is being torn down.
 *
 * If no lu_env can be obtained (cl_env_get() fails), falls back to the
 * pre-allocated ccc_inode_fini_env, serialized by ccc_inode_fini_guard.
 */
void cl_inode_fini(struct inode *inode)
{
	struct lu_env *env;
	struct cl_inode_info *lli = cl_i2info(inode);
	struct cl_object *clob = lli->lli_clob;
	int refcheck;
	int emergency;

	if (clob) {
		void *cookie;

		cookie = cl_env_reenter();
		env = cl_env_get(&refcheck);
		emergency = IS_ERR(env);
		if (emergency) {
			/* borrow the shared, pre-allocated environment */
			mutex_lock(&ccc_inode_fini_guard);
			LASSERT(ccc_inode_fini_env);
			cl_env_implant(ccc_inode_fini_env, &refcheck);
			env = ccc_inode_fini_env;
		}
		/*
		 * cl_object cache is a slave to inode cache (which, in turn
		 * is a slave to dentry cache), don't keep cl_object in memory
		 * when its master is evicted.
		 */
		cl_object_kill(env, clob);
		lu_object_ref_del(&clob->co_lu, "inode", inode);
		cl_object_put_last(env, clob);
		lli->lli_clob = NULL;
		if (emergency) {
			cl_env_unplant(ccc_inode_fini_env, &refcheck);
			mutex_unlock(&ccc_inode_fini_guard);
		} else
			cl_env_put(env, &refcheck);
		cl_env_reexit(cookie);
	}
}
1135
1136/**
1137 * return IF_* type for given lu_dirent entry.
1138 * IF_* flag shld be converted to particular OS file type in
1139 * platform llite module.
1140 */
1141__u16 ll_dirent_type_get(struct lu_dirent *ent)
1142{
1143 __u16 type = 0;
1144 struct luda_type *lt;
1145 int len = 0;
1146
1147 if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
1148 const unsigned align = sizeof(struct luda_type) - 1;
1149
1150 len = le16_to_cpu(ent->lde_namelen);
1151 len = (len + align) & ~align;
1152 lt = (void *)ent->lde_name + len;
1153 type = IFTODT(le16_to_cpu(lt->lt_type));
1154 }
1155 return type;
1156}
1157
1158/**
1159 * build inode number from passed @fid
1160 */
1161__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
1162{
1163 if (BITS_PER_LONG == 32 || api32)
1164 return fid_flatten32(fid);
1165 else
1166 return fid_flatten(fid);
1167}
1168
1169/**
1170 * build inode generation from passed @fid. If our FID overflows the 32-bit
1171 * inode number then return a non-zero generation to distinguish them.
1172 */
1173__u32 cl_fid_build_gen(const struct lu_fid *fid)
1174{
1175 __u32 gen;
1176
1177 if (fid_is_igif(fid)) {
1178 gen = lu_igif_gen(fid);
1179 return gen;
1180 }
1181
1182 gen = fid_flatten(fid) >> 32;
1183 return gen;
1184}
1185
1186/* lsm is unreliable after hsm implementation as layout can be changed at
1187 * any time. This is only to support old, non-clio-ized interfaces. It will
1188 * cause deadlock if clio operations are called with this extra layout refcount
1189 * because in case the layout changed during the IO, ll_layout_refresh() will
1190 * have to wait for the refcount to become zero to destroy the older layout.
1191 *
1192 * Notice that the lsm returned by this function may not be valid unless called
1193 * inside layout lock - MDS_INODELOCK_LAYOUT.
1194 */
1195struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
1196{
1197 return lov_lsm_get(cl_i2info(inode)->lli_clob);
1198}
1199
1200inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
1201{
1202 lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
1203}
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
index e5d1344e817a..621323f6ee60 100644
--- a/drivers/staging/lustre/lustre/ldlm/l_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/l_lock.c
@@ -54,7 +54,7 @@ struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
54 54
55 lock_res(lock->l_resource); 55 lock_res(lock->l_resource);
56 56
57 lock->l_flags |= LDLM_FL_RES_LOCKED; 57 ldlm_set_res_locked(lock);
58 return lock->l_resource; 58 return lock->l_resource;
59} 59}
60EXPORT_SYMBOL(lock_res_and_lock); 60EXPORT_SYMBOL(lock_res_and_lock);
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(lock_res_and_lock);
65void unlock_res_and_lock(struct ldlm_lock *lock) 65void unlock_res_and_lock(struct ldlm_lock *lock)
66{ 66{
67 /* on server-side resource of lock doesn't change */ 67 /* on server-side resource of lock doesn't change */
68 lock->l_flags &= ~LDLM_FL_RES_LOCKED; 68 ldlm_clear_res_locked(lock);
69 69
70 unlock_res(lock->l_resource); 70 unlock_res(lock->l_resource);
71 spin_unlock(&lock->l_lock); 71 spin_unlock(&lock->l_lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index a803e200f206..cf1f1783632f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -75,12 +75,12 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
75 * just after we finish and take our lock into account in its 75 * just after we finish and take our lock into account in its
76 * calculation of the kms 76 * calculation of the kms
77 */ 77 */
78 lock->l_flags |= LDLM_FL_KMS_IGNORE; 78 ldlm_set_kms_ignore(lock);
79 79
80 list_for_each(tmp, &res->lr_granted) { 80 list_for_each(tmp, &res->lr_granted) {
81 lck = list_entry(tmp, struct ldlm_lock, l_res_link); 81 lck = list_entry(tmp, struct ldlm_lock, l_res_link);
82 82
83 if (lck->l_flags & LDLM_FL_KMS_IGNORE) 83 if (ldlm_is_kms_ignore(lck))
84 continue; 84 continue;
85 85
86 if (lck->l_policy_data.l_extent.end >= old_kms) 86 if (lck->l_policy_data.l_extent.end >= old_kms)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index b88b78606aee..349bfcc9b331 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -101,8 +101,7 @@ ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
101 LASSERT(hlist_unhashed(&lock->l_exp_flock_hash)); 101 LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
102 102
103 list_del_init(&lock->l_res_link); 103 list_del_init(&lock->l_res_link);
104 if (flags == LDLM_FL_WAIT_NOREPROC && 104 if (flags == LDLM_FL_WAIT_NOREPROC && !ldlm_is_failed(lock)) {
105 !(lock->l_flags & LDLM_FL_FAILED)) {
106 /* client side - set a flag to prevent sending a CANCEL */ 105 /* client side - set a flag to prevent sending a CANCEL */
107 lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING; 106 lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
108 107
@@ -436,7 +435,7 @@ ldlm_flock_interrupted_wait(void *data)
436 lock_res_and_lock(lock); 435 lock_res_and_lock(lock);
437 436
438 /* client side - set flag to prevent lock from being put on LRU list */ 437 /* client side - set flag to prevent lock from being put on LRU list */
439 lock->l_flags |= LDLM_FL_CBPENDING; 438 ldlm_set_cbpending(lock);
440 unlock_res_and_lock(lock); 439 unlock_res_and_lock(lock);
441} 440}
442 441
@@ -520,30 +519,29 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
520granted: 519granted:
521 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10); 520 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
522 521
523 if (lock->l_flags & LDLM_FL_DESTROYED) { 522 if (ldlm_is_failed(lock)) {
524 LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
525 return 0;
526 }
527
528 if (lock->l_flags & LDLM_FL_FAILED) {
529 LDLM_DEBUG(lock, "client-side enqueue waking up: failed"); 523 LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
530 return -EIO; 524 return -EIO;
531 } 525 }
532 526
533 if (rc) {
534 LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
535 rc);
536 return rc;
537 }
538
539 LDLM_DEBUG(lock, "client-side enqueue granted"); 527 LDLM_DEBUG(lock, "client-side enqueue granted");
540 528
541 lock_res_and_lock(lock); 529 lock_res_and_lock(lock);
542 530
531 /*
532 * Protect against race where lock could have been just destroyed
533 * due to overlap in ldlm_process_flock_lock().
534 */
535 if (ldlm_is_destroyed(lock)) {
536 unlock_res_and_lock(lock);
537 LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
538 return 0;
539 }
540
543 /* ldlm_lock_enqueue() has already placed lock on the granted list. */ 541 /* ldlm_lock_enqueue() has already placed lock on the granted list. */
544 list_del_init(&lock->l_res_link); 542 list_del_init(&lock->l_res_link);
545 543
546 if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) { 544 if (ldlm_is_flock_deadlock(lock)) {
547 LDLM_DEBUG(lock, "client-side enqueue deadlock received"); 545 LDLM_DEBUG(lock, "client-side enqueue deadlock received");
548 rc = -EDEADLK; 546 rc = -EDEADLK;
549 } else if (flags & LDLM_FL_TEST_LOCK) { 547 } else if (flags & LDLM_FL_TEST_LOCK) {
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index e21373e7306f..32f227f37799 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -95,9 +95,10 @@ enum {
95 LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */ 95 LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */
96 LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */ 96 LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
97 LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */ 97 LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */
98 LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither 98 LDLM_CANCEL_NO_WAIT = 1 << 4, /* Cancel locks w/o blocking (neither
99 * sending nor waiting for any rpcs) 99 * sending nor waiting for any rpcs)
100 */ 100 */
101 LDLM_CANCEL_LRUR_NO_WAIT = 1 << 5, /* LRUR + NO_WAIT */
101}; 102};
102 103
103int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, 104int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
@@ -145,7 +146,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
145void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode); 146void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
146int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list, 147int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
147 enum ldlm_desc_ast_t ast_type); 148 enum ldlm_desc_ast_t ast_type);
148int ldlm_lock_remove_from_lru(struct ldlm_lock *lock); 149int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use);
150#define ldlm_lock_remove_from_lru(lock) ldlm_lock_remove_from_lru_check(lock, 0)
149int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock); 151int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
150void ldlm_lock_destroy_nolock(struct ldlm_lock *lock); 152void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);
151 153
@@ -216,8 +218,6 @@ enum ldlm_policy_res {
216 LDLM_POLICY_SKIP_LOCK 218 LDLM_POLICY_SKIP_LOCK
217}; 219};
218 220
219typedef enum ldlm_policy_res ldlm_policy_res_t;
220
221#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v) 221#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v)
222#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; } 222#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; }
223#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v) 223#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v)
@@ -305,9 +305,10 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
305 int ret = 0; 305 int ret = 0;
306 306
307 lock_res_and_lock(lock); 307 lock_res_and_lock(lock);
308 if (((lock->l_req_mode == lock->l_granted_mode) && 308 if ((lock->l_req_mode == lock->l_granted_mode) &&
309 !(lock->l_flags & LDLM_FL_CP_REQD)) || 309 !ldlm_is_cp_reqd(lock))
310 (lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCEL))) 310 ret = 1;
311 else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
311 ret = 1; 312 ret = 1;
312 unlock_res_and_lock(lock); 313 unlock_res_and_lock(lock);
313 314
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 7dd7df59aa1f..b4ffbe2fc4ed 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -314,7 +314,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
314 INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list); 314 INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
315 INIT_LIST_HEAD(&cli->cl_loi_write_list); 315 INIT_LIST_HEAD(&cli->cl_loi_write_list);
316 INIT_LIST_HEAD(&cli->cl_loi_read_list); 316 INIT_LIST_HEAD(&cli->cl_loi_read_list);
317 client_obd_list_lock_init(&cli->cl_loi_list_lock); 317 spin_lock_init(&cli->cl_loi_list_lock);
318 atomic_set(&cli->cl_pending_w_pages, 0); 318 atomic_set(&cli->cl_pending_w_pages, 0);
319 atomic_set(&cli->cl_pending_r_pages, 0); 319 atomic_set(&cli->cl_pending_r_pages, 0);
320 cli->cl_r_in_flight = 0; 320 cli->cl_r_in_flight = 0;
@@ -333,7 +333,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
333 atomic_set(&cli->cl_lru_busy, 0); 333 atomic_set(&cli->cl_lru_busy, 0);
334 atomic_set(&cli->cl_lru_in_list, 0); 334 atomic_set(&cli->cl_lru_in_list, 0);
335 INIT_LIST_HEAD(&cli->cl_lru_list); 335 INIT_LIST_HEAD(&cli->cl_lru_list);
336 client_obd_list_lock_init(&cli->cl_lru_list_lock); 336 spin_lock_init(&cli->cl_lru_list_lock);
337 atomic_set(&cli->cl_unstable_count, 0);
337 338
338 init_waitqueue_head(&cli->cl_destroy_waitq); 339 init_waitqueue_head(&cli->cl_destroy_waitq);
339 atomic_set(&cli->cl_destroy_in_flight, 0); 340 atomic_set(&cli->cl_destroy_in_flight, 0);
@@ -355,6 +356,12 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
355 cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES, 356 cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
356 LNET_MTU >> PAGE_SHIFT); 357 LNET_MTU >> PAGE_SHIFT);
357 358
359 /*
360 * set cl_chunkbits default value to PAGE_CACHE_SHIFT,
361 * it will be updated at OSC connection time.
362 */
363 cli->cl_chunkbits = PAGE_SHIFT;
364
358 if (!strcmp(name, LUSTRE_MDC_NAME)) { 365 if (!strcmp(name, LUSTRE_MDC_NAME)) {
359 cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT; 366 cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
360 } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) { 367 } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
@@ -429,7 +436,6 @@ err_ldlm:
429 ldlm_put_ref(); 436 ldlm_put_ref();
430err: 437err:
431 return rc; 438 return rc;
432
433} 439}
434EXPORT_SYMBOL(client_obd_setup); 440EXPORT_SYMBOL(client_obd_setup);
435 441
@@ -438,6 +444,7 @@ int client_obd_cleanup(struct obd_device *obddev)
438 ldlm_namespace_free_post(obddev->obd_namespace); 444 ldlm_namespace_free_post(obddev->obd_namespace);
439 obddev->obd_namespace = NULL; 445 obddev->obd_namespace = NULL;
440 446
447 obd_cleanup_client_import(obddev);
441 LASSERT(!obddev->u.cli.cl_import); 448 LASSERT(!obddev->u.cli.cl_import);
442 449
443 ldlm_put_ref(); 450 ldlm_put_ref();
@@ -748,6 +755,7 @@ int ldlm_error2errno(enum ldlm_error error)
748 755
749 switch (error) { 756 switch (error) {
750 case ELDLM_OK: 757 case ELDLM_OK:
758 case ELDLM_LOCK_MATCHED:
751 result = 0; 759 result = 0;
752 break; 760 break;
753 case ELDLM_LOCK_CHANGED: 761 case ELDLM_LOCK_CHANGED:
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index ecd65a7a3dc9..bff94ea12d6f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -185,7 +185,7 @@ void ldlm_lock_put(struct ldlm_lock *lock)
185 "final lock_put on destroyed lock, freeing it."); 185 "final lock_put on destroyed lock, freeing it.");
186 186
187 res = lock->l_resource; 187 res = lock->l_resource;
188 LASSERT(lock->l_flags & LDLM_FL_DESTROYED); 188 LASSERT(ldlm_is_destroyed(lock));
189 LASSERT(list_empty(&lock->l_res_link)); 189 LASSERT(list_empty(&lock->l_res_link));
190 LASSERT(list_empty(&lock->l_pending_chain)); 190 LASSERT(list_empty(&lock->l_pending_chain));
191 191
@@ -229,15 +229,25 @@ int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
229 229
230/** 230/**
231 * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first. 231 * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
232 *
233 * If \a last_use is non-zero, it will remove the lock from LRU only if
234 * it matches lock's l_last_used.
235 *
236 * \retval 0 if \a last_use is set, the lock is not in LRU list or \a last_use
237 * doesn't match lock's l_last_used;
238 * otherwise, the lock hasn't been in the LRU list.
239 * \retval 1 the lock was in LRU list and removed.
232 */ 240 */
233int ldlm_lock_remove_from_lru(struct ldlm_lock *lock) 241int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use)
234{ 242{
235 struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); 243 struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
236 int rc; 244 int rc = 0;
237 245
238 spin_lock(&ns->ns_lock); 246 spin_lock(&ns->ns_lock);
239 rc = ldlm_lock_remove_from_lru_nolock(lock); 247 if (last_use == 0 || last_use == lock->l_last_used)
248 rc = ldlm_lock_remove_from_lru_nolock(lock);
240 spin_unlock(&ns->ns_lock); 249 spin_unlock(&ns->ns_lock);
250
241 return rc; 251 return rc;
242} 252}
243 253
@@ -252,8 +262,7 @@ static void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
252 LASSERT(list_empty(&lock->l_lru)); 262 LASSERT(list_empty(&lock->l_lru));
253 LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); 263 LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
254 list_add_tail(&lock->l_lru, &ns->ns_unused_list); 264 list_add_tail(&lock->l_lru, &ns->ns_unused_list);
255 if (lock->l_flags & LDLM_FL_SKIPPED) 265 ldlm_clear_skipped(lock);
256 lock->l_flags &= ~LDLM_FL_SKIPPED;
257 LASSERT(ns->ns_nr_unused >= 0); 266 LASSERT(ns->ns_nr_unused >= 0);
258 ns->ns_nr_unused++; 267 ns->ns_nr_unused++;
259} 268}
@@ -318,11 +327,11 @@ static int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
318 LBUG(); 327 LBUG();
319 } 328 }
320 329
321 if (lock->l_flags & LDLM_FL_DESTROYED) { 330 if (ldlm_is_destroyed(lock)) {
322 LASSERT(list_empty(&lock->l_lru)); 331 LASSERT(list_empty(&lock->l_lru));
323 return 0; 332 return 0;
324 } 333 }
325 lock->l_flags |= LDLM_FL_DESTROYED; 334 ldlm_set_destroyed(lock);
326 335
327 if (lock->l_export && lock->l_export->exp_lock_hash) { 336 if (lock->l_export && lock->l_export->exp_lock_hash) {
328 /* NB: it's safe to call cfs_hash_del() even lock isn't 337 /* NB: it's safe to call cfs_hash_del() even lock isn't
@@ -544,7 +553,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
544 /* It's unlikely but possible that someone marked the lock as 553 /* It's unlikely but possible that someone marked the lock as
545 * destroyed after we did handle2object on it 554 * destroyed after we did handle2object on it
546 */ 555 */
547 if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) { 556 if (flags == 0 && !ldlm_is_destroyed(lock)) {
548 lu_ref_add(&lock->l_reference, "handle", current); 557 lu_ref_add(&lock->l_reference, "handle", current);
549 return lock; 558 return lock;
550 } 559 }
@@ -554,21 +563,22 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
554 LASSERT(lock->l_resource); 563 LASSERT(lock->l_resource);
555 564
556 lu_ref_add_atomic(&lock->l_reference, "handle", current); 565 lu_ref_add_atomic(&lock->l_reference, "handle", current);
557 if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) { 566 if (unlikely(ldlm_is_destroyed(lock))) {
558 unlock_res_and_lock(lock); 567 unlock_res_and_lock(lock);
559 CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock); 568 CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
560 LDLM_LOCK_PUT(lock); 569 LDLM_LOCK_PUT(lock);
561 return NULL; 570 return NULL;
562 } 571 }
563 572
564 if (flags && (lock->l_flags & flags)) { 573 if (flags) {
565 unlock_res_and_lock(lock); 574 if (lock->l_flags & flags) {
566 LDLM_LOCK_PUT(lock); 575 unlock_res_and_lock(lock);
567 return NULL; 576 LDLM_LOCK_PUT(lock);
568 } 577 return NULL;
578 }
569 579
570 if (flags)
571 lock->l_flags |= flags; 580 lock->l_flags |= flags;
581 }
572 582
573 unlock_res_and_lock(lock); 583 unlock_res_and_lock(lock);
574 return lock; 584 return lock;
@@ -599,14 +609,14 @@ EXPORT_SYMBOL(ldlm_lock2desc);
599static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, 609static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
600 struct list_head *work_list) 610 struct list_head *work_list)
601{ 611{
602 if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) { 612 if (!ldlm_is_ast_sent(lock)) {
603 LDLM_DEBUG(lock, "lock incompatible; sending blocking AST."); 613 LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
604 lock->l_flags |= LDLM_FL_AST_SENT; 614 ldlm_set_ast_sent(lock);
605 /* If the enqueuing client said so, tell the AST recipient to 615 /* If the enqueuing client said so, tell the AST recipient to
606 * discard dirty data, rather than writing back. 616 * discard dirty data, rather than writing back.
607 */ 617 */
608 if (new->l_flags & LDLM_FL_AST_DISCARD_DATA) 618 if (ldlm_is_ast_discard_data(new))
609 lock->l_flags |= LDLM_FL_DISCARD_DATA; 619 ldlm_set_discard_data(lock);
610 LASSERT(list_empty(&lock->l_bl_ast)); 620 LASSERT(list_empty(&lock->l_bl_ast));
611 list_add(&lock->l_bl_ast, work_list); 621 list_add(&lock->l_bl_ast, work_list);
612 LDLM_LOCK_GET(lock); 622 LDLM_LOCK_GET(lock);
@@ -621,8 +631,8 @@ static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
621static void ldlm_add_cp_work_item(struct ldlm_lock *lock, 631static void ldlm_add_cp_work_item(struct ldlm_lock *lock,
622 struct list_head *work_list) 632 struct list_head *work_list)
623{ 633{
624 if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) { 634 if (!ldlm_is_cp_reqd(lock)) {
625 lock->l_flags |= LDLM_FL_CP_REQD; 635 ldlm_set_cp_reqd(lock);
626 LDLM_DEBUG(lock, "lock granted; sending completion AST."); 636 LDLM_DEBUG(lock, "lock granted; sending completion AST.");
627 LASSERT(list_empty(&lock->l_cp_ast)); 637 LASSERT(list_empty(&lock->l_cp_ast));
628 list_add(&lock->l_cp_ast, work_list); 638 list_add(&lock->l_cp_ast, work_list);
@@ -657,7 +667,7 @@ void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
657 struct ldlm_lock *lock; 667 struct ldlm_lock *lock;
658 668
659 lock = ldlm_handle2lock(lockh); 669 lock = ldlm_handle2lock(lockh);
660 LASSERT(lock); 670 LASSERTF(lock, "Non-existing lock: %llx\n", lockh->cookie);
661 ldlm_lock_addref_internal(lock, mode); 671 ldlm_lock_addref_internal(lock, mode);
662 LDLM_LOCK_PUT(lock); 672 LDLM_LOCK_PUT(lock);
663} 673}
@@ -704,7 +714,7 @@ int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
704 if (lock) { 714 if (lock) {
705 lock_res_and_lock(lock); 715 lock_res_and_lock(lock);
706 if (lock->l_readers != 0 || lock->l_writers != 0 || 716 if (lock->l_readers != 0 || lock->l_writers != 0 ||
707 !(lock->l_flags & LDLM_FL_CBPENDING)) { 717 !ldlm_is_cbpending(lock)) {
708 ldlm_lock_addref_internal_nolock(lock, mode); 718 ldlm_lock_addref_internal_nolock(lock, mode);
709 result = 0; 719 result = 0;
710 } 720 }
@@ -770,17 +780,17 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
770 780
771 ldlm_lock_decref_internal_nolock(lock, mode); 781 ldlm_lock_decref_internal_nolock(lock, mode);
772 782
773 if (lock->l_flags & LDLM_FL_LOCAL && 783 if (ldlm_is_local(lock) &&
774 !lock->l_readers && !lock->l_writers) { 784 !lock->l_readers && !lock->l_writers) {
775 /* If this is a local lock on a server namespace and this was 785 /* If this is a local lock on a server namespace and this was
776 * the last reference, cancel the lock. 786 * the last reference, cancel the lock.
777 */ 787 */
778 CDEBUG(D_INFO, "forcing cancel of local lock\n"); 788 CDEBUG(D_INFO, "forcing cancel of local lock\n");
779 lock->l_flags |= LDLM_FL_CBPENDING; 789 ldlm_set_cbpending(lock);
780 } 790 }
781 791
782 if (!lock->l_readers && !lock->l_writers && 792 if (!lock->l_readers && !lock->l_writers &&
783 (lock->l_flags & LDLM_FL_CBPENDING)) { 793 ldlm_is_cbpending(lock)) {
784 /* If we received a blocked AST and this was the last reference, 794 /* If we received a blocked AST and this was the last reference,
785 * run the callback. 795 * run the callback.
786 */ 796 */
@@ -791,16 +801,14 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
791 ldlm_lock_remove_from_lru(lock); 801 ldlm_lock_remove_from_lru(lock);
792 unlock_res_and_lock(lock); 802 unlock_res_and_lock(lock);
793 803
794 if (lock->l_flags & LDLM_FL_FAIL_LOC) 804 if (ldlm_is_fail_loc(lock))
795 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE); 805 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
796 806
797 if ((lock->l_flags & LDLM_FL_ATOMIC_CB) || 807 if (ldlm_is_atomic_cb(lock) ||
798 ldlm_bl_to_thread_lock(ns, NULL, lock) != 0) 808 ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
799 ldlm_handle_bl_callback(ns, NULL, lock); 809 ldlm_handle_bl_callback(ns, NULL, lock);
800 } else if (!lock->l_readers && !lock->l_writers && 810 } else if (!lock->l_readers && !lock->l_writers &&
801 !(lock->l_flags & LDLM_FL_NO_LRU) && 811 !ldlm_is_no_lru(lock) && !ldlm_is_bl_ast(lock)) {
802 !(lock->l_flags & LDLM_FL_BL_AST)) {
803
804 LDLM_DEBUG(lock, "add lock into lru list"); 812 LDLM_DEBUG(lock, "add lock into lru list");
805 813
806 /* If this is a client-side namespace and this was the last 814 /* If this is a client-side namespace and this was the last
@@ -809,7 +817,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
809 ldlm_lock_add_to_lru(lock); 817 ldlm_lock_add_to_lru(lock);
810 unlock_res_and_lock(lock); 818 unlock_res_and_lock(lock);
811 819
812 if (lock->l_flags & LDLM_FL_FAIL_LOC) 820 if (ldlm_is_fail_loc(lock))
813 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE); 821 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
814 822
815 /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE 823 /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
@@ -853,7 +861,7 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
853 861
854 LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); 862 LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
855 lock_res_and_lock(lock); 863 lock_res_and_lock(lock);
856 lock->l_flags |= LDLM_FL_CBPENDING; 864 ldlm_set_cbpending(lock);
857 unlock_res_and_lock(lock); 865 unlock_res_and_lock(lock);
858 ldlm_lock_decref_internal(lock, mode); 866 ldlm_lock_decref_internal(lock, mode);
859 LDLM_LOCK_PUT(lock); 867 LDLM_LOCK_PUT(lock);
@@ -971,7 +979,7 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
971 ldlm_resource_dump(D_INFO, res); 979 ldlm_resource_dump(D_INFO, res);
972 LDLM_DEBUG(lock, "About to add lock:"); 980 LDLM_DEBUG(lock, "About to add lock:");
973 981
974 if (lock->l_flags & LDLM_FL_DESTROYED) { 982 if (ldlm_is_destroyed(lock)) {
975 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n"); 983 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
976 return; 984 return;
977 } 985 }
@@ -1073,10 +1081,9 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
1073 * whose parents already hold a lock so forward progress 1081 * whose parents already hold a lock so forward progress
1074 * can still happen. 1082 * can still happen.
1075 */ 1083 */
1076 if (lock->l_flags & LDLM_FL_CBPENDING && 1084 if (ldlm_is_cbpending(lock) && !(flags & LDLM_FL_CBPENDING))
1077 !(flags & LDLM_FL_CBPENDING))
1078 continue; 1085 continue;
1079 if (!unref && lock->l_flags & LDLM_FL_CBPENDING && 1086 if (!unref && ldlm_is_cbpending(lock) &&
1080 lock->l_readers == 0 && lock->l_writers == 0) 1087 lock->l_readers == 0 && lock->l_writers == 0)
1081 continue; 1088 continue;
1082 1089
@@ -1092,6 +1099,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
1092 1099
1093 if (unlikely(match == LCK_GROUP) && 1100 if (unlikely(match == LCK_GROUP) &&
1094 lock->l_resource->lr_type == LDLM_EXTENT && 1101 lock->l_resource->lr_type == LDLM_EXTENT &&
1102 policy->l_extent.gid != LDLM_GID_ANY &&
1095 lock->l_policy_data.l_extent.gid != policy->l_extent.gid) 1103 lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
1096 continue; 1104 continue;
1097 1105
@@ -1104,11 +1112,10 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
1104 policy->l_inodebits.bits)) 1112 policy->l_inodebits.bits))
1105 continue; 1113 continue;
1106 1114
1107 if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK)) 1115 if (!unref && LDLM_HAVE_MASK(lock, GONE))
1108 continue; 1116 continue;
1109 1117
1110 if ((flags & LDLM_FL_LOCAL_ONLY) && 1118 if ((flags & LDLM_FL_LOCAL_ONLY) && !ldlm_is_local(lock))
1111 !(lock->l_flags & LDLM_FL_LOCAL))
1112 continue; 1119 continue;
1113 1120
1114 if (flags & LDLM_FL_TEST_LOCK) { 1121 if (flags & LDLM_FL_TEST_LOCK) {
@@ -1142,7 +1149,7 @@ EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
1142 */ 1149 */
1143void ldlm_lock_allow_match_locked(struct ldlm_lock *lock) 1150void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
1144{ 1151{
1145 lock->l_flags |= LDLM_FL_LVB_READY; 1152 ldlm_set_lvb_ready(lock);
1146 wake_up_all(&lock->l_waitq); 1153 wake_up_all(&lock->l_waitq);
1147} 1154}
1148EXPORT_SYMBOL(ldlm_lock_allow_match_locked); 1155EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
@@ -1243,8 +1250,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
1243 1250
1244 if (lock) { 1251 if (lock) {
1245 ldlm_lock2handle(lock, lockh); 1252 ldlm_lock2handle(lock, lockh);
1246 if ((flags & LDLM_FL_LVB_READY) && 1253 if ((flags & LDLM_FL_LVB_READY) && !ldlm_is_lvb_ready(lock)) {
1247 (!(lock->l_flags & LDLM_FL_LVB_READY))) {
1248 __u64 wait_flags = LDLM_FL_LVB_READY | 1254 __u64 wait_flags = LDLM_FL_LVB_READY |
1249 LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED; 1255 LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
1250 struct l_wait_info lwi; 1256 struct l_wait_info lwi;
@@ -1271,7 +1277,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
1271 l_wait_event(lock->l_waitq, 1277 l_wait_event(lock->l_waitq,
1272 lock->l_flags & wait_flags, 1278 lock->l_flags & wait_flags,
1273 &lwi); 1279 &lwi);
1274 if (!(lock->l_flags & LDLM_FL_LVB_READY)) { 1280 if (!ldlm_is_lvb_ready(lock)) {
1275 if (flags & LDLM_FL_TEST_LOCK) 1281 if (flags & LDLM_FL_TEST_LOCK)
1276 LDLM_LOCK_RELEASE(lock); 1282 LDLM_LOCK_RELEASE(lock);
1277 else 1283 else
@@ -1325,10 +1331,10 @@ enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
1325 lock = ldlm_handle2lock(lockh); 1331 lock = ldlm_handle2lock(lockh);
1326 if (lock) { 1332 if (lock) {
1327 lock_res_and_lock(lock); 1333 lock_res_and_lock(lock);
1328 if (lock->l_flags & LDLM_FL_GONE_MASK) 1334 if (LDLM_HAVE_MASK(lock, GONE))
1329 goto out; 1335 goto out;
1330 1336
1331 if (lock->l_flags & LDLM_FL_CBPENDING && 1337 if (ldlm_is_cbpending(lock) &&
1332 lock->l_readers == 0 && lock->l_writers == 0) 1338 lock->l_readers == 0 && lock->l_writers == 0)
1333 goto out; 1339 goto out;
1334 1340
@@ -1542,7 +1548,8 @@ enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
1542 /* Some flags from the enqueue want to make it into the AST, via the 1548 /* Some flags from the enqueue want to make it into the AST, via the
1543 * lock's l_flags. 1549 * lock's l_flags.
1544 */ 1550 */
1545 lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA; 1551 if (*flags & LDLM_FL_AST_DISCARD_DATA)
1552 ldlm_set_ast_discard_data(lock);
1546 1553
1547 /* 1554 /*
1548 * This distinction between local lock trees is very important; a client 1555 * This distinction between local lock trees is very important; a client
@@ -1581,7 +1588,7 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
1581 lock_res_and_lock(lock); 1588 lock_res_and_lock(lock);
1582 list_del_init(&lock->l_bl_ast); 1589 list_del_init(&lock->l_bl_ast);
1583 1590
1584 LASSERT(lock->l_flags & LDLM_FL_AST_SENT); 1591 LASSERT(ldlm_is_ast_sent(lock));
1585 LASSERT(lock->l_bl_ast_run == 0); 1592 LASSERT(lock->l_bl_ast_run == 0);
1586 LASSERT(lock->l_blocking_lock); 1593 LASSERT(lock->l_blocking_lock);
1587 lock->l_bl_ast_run++; 1594 lock->l_bl_ast_run++;
@@ -1628,12 +1635,12 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
1628 /* nobody should touch l_cp_ast */ 1635 /* nobody should touch l_cp_ast */
1629 lock_res_and_lock(lock); 1636 lock_res_and_lock(lock);
1630 list_del_init(&lock->l_cp_ast); 1637 list_del_init(&lock->l_cp_ast);
1631 LASSERT(lock->l_flags & LDLM_FL_CP_REQD); 1638 LASSERT(ldlm_is_cp_reqd(lock));
1632 /* save l_completion_ast since it can be changed by 1639 /* save l_completion_ast since it can be changed by
1633 * mds_intent_policy(), see bug 14225 1640 * mds_intent_policy(), see bug 14225
1634 */ 1641 */
1635 completion_callback = lock->l_completion_ast; 1642 completion_callback = lock->l_completion_ast;
1636 lock->l_flags &= ~LDLM_FL_CP_REQD; 1643 ldlm_clear_cp_reqd(lock);
1637 unlock_res_and_lock(lock); 1644 unlock_res_and_lock(lock);
1638 1645
1639 if (completion_callback) 1646 if (completion_callback)
@@ -1778,8 +1785,8 @@ out:
1778void ldlm_cancel_callback(struct ldlm_lock *lock) 1785void ldlm_cancel_callback(struct ldlm_lock *lock)
1779{ 1786{
1780 check_res_locked(lock->l_resource); 1787 check_res_locked(lock->l_resource);
1781 if (!(lock->l_flags & LDLM_FL_CANCEL)) { 1788 if (!ldlm_is_cancel(lock)) {
1782 lock->l_flags |= LDLM_FL_CANCEL; 1789 ldlm_set_cancel(lock);
1783 if (lock->l_blocking_ast) { 1790 if (lock->l_blocking_ast) {
1784 unlock_res_and_lock(lock); 1791 unlock_res_and_lock(lock);
1785 lock->l_blocking_ast(lock, NULL, lock->l_ast_data, 1792 lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
@@ -1789,7 +1796,7 @@ void ldlm_cancel_callback(struct ldlm_lock *lock)
1789 LDLM_DEBUG(lock, "no blocking ast"); 1796 LDLM_DEBUG(lock, "no blocking ast");
1790 } 1797 }
1791 } 1798 }
1792 lock->l_flags |= LDLM_FL_BL_DONE; 1799 ldlm_set_bl_done(lock);
1793} 1800}
1794 1801
1795/** 1802/**
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index ebe9042adb25..ab739f079a48 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -124,10 +124,10 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
124 LDLM_DEBUG(lock, "client blocking AST callback handler"); 124 LDLM_DEBUG(lock, "client blocking AST callback handler");
125 125
126 lock_res_and_lock(lock); 126 lock_res_and_lock(lock);
127 lock->l_flags |= LDLM_FL_CBPENDING; 127 ldlm_set_cbpending(lock);
128 128
129 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) 129 if (ldlm_is_cancel_on_block(lock))
130 lock->l_flags |= LDLM_FL_CANCEL; 130 ldlm_set_cancel(lock);
131 131
132 do_ast = !lock->l_readers && !lock->l_writers; 132 do_ast = !lock->l_readers && !lock->l_writers;
133 unlock_res_and_lock(lock); 133 unlock_res_and_lock(lock);
@@ -172,7 +172,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
172 set_current_state(TASK_INTERRUPTIBLE); 172 set_current_state(TASK_INTERRUPTIBLE);
173 schedule_timeout(to); 173 schedule_timeout(to);
174 if (lock->l_granted_mode == lock->l_req_mode || 174 if (lock->l_granted_mode == lock->l_req_mode ||
175 lock->l_flags & LDLM_FL_DESTROYED) 175 ldlm_is_destroyed(lock))
176 break; 176 break;
177 } 177 }
178 } 178 }
@@ -215,7 +215,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
215 } 215 }
216 216
217 lock_res_and_lock(lock); 217 lock_res_and_lock(lock);
218 if ((lock->l_flags & LDLM_FL_DESTROYED) || 218 if (ldlm_is_destroyed(lock) ||
219 lock->l_granted_mode == lock->l_req_mode) { 219 lock->l_granted_mode == lock->l_req_mode) {
220 /* bug 11300: the lock has already been granted */ 220 /* bug 11300: the lock has already been granted */
221 unlock_res_and_lock(lock); 221 unlock_res_and_lock(lock);
@@ -291,7 +291,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
291out: 291out:
292 if (rc < 0) { 292 if (rc < 0) {
293 lock_res_and_lock(lock); 293 lock_res_and_lock(lock);
294 lock->l_flags |= LDLM_FL_FAILED; 294 ldlm_set_failed(lock);
295 unlock_res_and_lock(lock); 295 unlock_res_and_lock(lock);
296 wake_up(&lock->l_waitq); 296 wake_up(&lock->l_waitq);
297 } 297 }
@@ -360,8 +360,7 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
360 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool; 360 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
361 361
362 spin_lock(&blp->blp_lock); 362 spin_lock(&blp->blp_lock);
363 if (blwi->blwi_lock && 363 if (blwi->blwi_lock && ldlm_is_discard_data(blwi->blwi_lock)) {
364 blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
365 /* add LDLM_FL_DISCARD_DATA requests to the priority list */ 364 /* add LDLM_FL_DISCARD_DATA requests to the priority list */
366 list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list); 365 list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
367 } else { 366 } else {
@@ -626,23 +625,22 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
626 return 0; 625 return 0;
627 } 626 }
628 627
629 if ((lock->l_flags & LDLM_FL_FAIL_LOC) && 628 if (ldlm_is_fail_loc(lock) &&
630 lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) 629 lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
631 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE); 630 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
632 631
633 /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */ 632 /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
634 lock_res_and_lock(lock); 633 lock_res_and_lock(lock);
635 lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags & 634 lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
636 LDLM_AST_FLAGS); 635 LDLM_FL_AST_MASK);
637 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) { 636 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
638 /* If somebody cancels lock and cache is already dropped, 637 /* If somebody cancels lock and cache is already dropped,
639 * or lock is failed before cp_ast received on client, 638 * or lock is failed before cp_ast received on client,
640 * we can tell the server we have no lock. Otherwise, we 639 * we can tell the server we have no lock. Otherwise, we
641 * should send cancel after dropping the cache. 640 * should send cancel after dropping the cache.
642 */ 641 */
643 if (((lock->l_flags & LDLM_FL_CANCELING) && 642 if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
644 (lock->l_flags & LDLM_FL_BL_DONE)) || 643 ldlm_is_failed(lock)) {
645 (lock->l_flags & LDLM_FL_FAILED)) {
646 LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n", 644 LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n",
647 dlm_req->lock_handle[0].cookie); 645 dlm_req->lock_handle[0].cookie);
648 unlock_res_and_lock(lock); 646 unlock_res_and_lock(lock);
@@ -656,7 +654,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
656 * Let ldlm_cancel_lru() be fast. 654 * Let ldlm_cancel_lru() be fast.
657 */ 655 */
658 ldlm_lock_remove_from_lru(lock); 656 ldlm_lock_remove_from_lru(lock);
659 lock->l_flags |= LDLM_FL_BL_AST; 657 ldlm_set_bl_ast(lock);
660 } 658 }
661 unlock_res_and_lock(lock); 659 unlock_res_and_lock(lock);
662 660
@@ -674,7 +672,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
674 case LDLM_BL_CALLBACK: 672 case LDLM_BL_CALLBACK:
675 CDEBUG(D_INODE, "blocking ast\n"); 673 CDEBUG(D_INODE, "blocking ast\n");
676 req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK); 674 req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
677 if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) { 675 if (!ldlm_is_cancel_on_block(lock)) {
678 rc = ldlm_callback_reply(req, 0); 676 rc = ldlm_callback_reply(req, 0);
679 if (req->rq_no_reply || rc) 677 if (req->rq_no_reply || rc)
680 ldlm_callback_errmsg(req, "Normal process", rc, 678 ldlm_callback_errmsg(req, "Normal process", rc,
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 74e193e52cd6..107314e284a0 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -153,7 +153,7 @@ static int ldlm_completion_tail(struct ldlm_lock *lock)
153 long delay; 153 long delay;
154 int result; 154 int result;
155 155
156 if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) { 156 if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
157 LDLM_DEBUG(lock, "client-side enqueue: destroyed"); 157 LDLM_DEBUG(lock, "client-side enqueue: destroyed");
158 result = -EIO; 158 result = -EIO;
159 } else { 159 } else {
@@ -252,7 +252,7 @@ noreproc:
252 252
253 lwd.lwd_lock = lock; 253 lwd.lwd_lock = lock;
254 254
255 if (lock->l_flags & LDLM_FL_NO_TIMEOUT) { 255 if (ldlm_is_no_timeout(lock)) {
256 LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT"); 256 LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
257 lwi = LWI_INTR(interrupted_completion_wait, &lwd); 257 lwi = LWI_INTR(interrupted_completion_wait, &lwd);
258 } else { 258 } else {
@@ -269,7 +269,7 @@ noreproc:
269 269
270 if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST, 270 if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
271 OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) { 271 OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
272 lock->l_flags |= LDLM_FL_FAIL_LOC; 272 ldlm_set_fail_loc(lock);
273 rc = -EINTR; 273 rc = -EINTR;
274 } else { 274 } else {
275 /* Go to sleep until the lock is granted or cancelled. */ 275 /* Go to sleep until the lock is granted or cancelled. */
@@ -296,7 +296,7 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
296 lock_res_and_lock(lock); 296 lock_res_and_lock(lock);
297 /* Check that lock is not granted or failed, we might race. */ 297 /* Check that lock is not granted or failed, we might race. */
298 if ((lock->l_req_mode != lock->l_granted_mode) && 298 if ((lock->l_req_mode != lock->l_granted_mode) &&
299 !(lock->l_flags & LDLM_FL_FAILED)) { 299 !ldlm_is_failed(lock)) {
300 /* Make sure that this lock will not be found by raced 300 /* Make sure that this lock will not be found by raced
301 * bl_ast and -EINVAL reply is sent to server anyways. 301 * bl_ast and -EINVAL reply is sent to server anyways.
302 * bug 17645 302 * bug 17645
@@ -347,7 +347,6 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
347 struct ldlm_lock *lock; 347 struct ldlm_lock *lock;
348 struct ldlm_reply *reply; 348 struct ldlm_reply *reply;
349 int cleanup_phase = 1; 349 int cleanup_phase = 1;
350 int size = 0;
351 350
352 lock = ldlm_handle2lock(lockh); 351 lock = ldlm_handle2lock(lockh);
353 /* ldlm_cli_enqueue is holding a reference on this lock. */ 352 /* ldlm_cli_enqueue is holding a reference on this lock. */
@@ -375,8 +374,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
375 goto cleanup; 374 goto cleanup;
376 } 375 }
377 376
378 if (lvb_len != 0) { 377 if (lvb_len > 0) {
379 LASSERT(lvb); 378 int size = 0;
380 379
381 size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, 380 size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
382 RCL_SERVER); 381 RCL_SERVER);
@@ -390,12 +389,13 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
390 rc = -EINVAL; 389 rc = -EINVAL;
391 goto cleanup; 390 goto cleanup;
392 } 391 }
392 lvb_len = size;
393 } 393 }
394 394
395 if (rc == ELDLM_LOCK_ABORTED) { 395 if (rc == ELDLM_LOCK_ABORTED) {
396 if (lvb_len != 0) 396 if (lvb_len > 0 && lvb)
397 rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER, 397 rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
398 lvb, size); 398 lvb, lvb_len);
399 if (rc == 0) 399 if (rc == 0)
400 rc = ELDLM_LOCK_ABORTED; 400 rc = ELDLM_LOCK_ABORTED;
401 goto cleanup; 401 goto cleanup;
@@ -421,7 +421,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
421 421
422 *flags = ldlm_flags_from_wire(reply->lock_flags); 422 *flags = ldlm_flags_from_wire(reply->lock_flags);
423 lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags & 423 lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
424 LDLM_INHERIT_FLAGS); 424 LDLM_FL_INHERIT_MASK);
425 /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match() 425 /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
426 * to wait with no timeout as well 426 * to wait with no timeout as well
427 */ 427 */
@@ -489,7 +489,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
489 /* If the lock has already been granted by a completion AST, don't 489 /* If the lock has already been granted by a completion AST, don't
490 * clobber the LVB with an older one. 490 * clobber the LVB with an older one.
491 */ 491 */
492 if (lvb_len != 0) { 492 if (lvb_len > 0) {
493 /* We must lock or a racing completion might update lvb without 493 /* We must lock or a racing completion might update lvb without
494 * letting us know and we'll clobber the correct value. 494 * letting us know and we'll clobber the correct value.
495 * Cannot unlock after the check either, as that still leaves 495 * Cannot unlock after the check either, as that still leaves
@@ -498,7 +498,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
498 lock_res_and_lock(lock); 498 lock_res_and_lock(lock);
499 if (lock->l_req_mode != lock->l_granted_mode) 499 if (lock->l_req_mode != lock->l_granted_mode)
500 rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER, 500 rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
501 lock->l_lvb_data, size); 501 lock->l_lvb_data, lvb_len);
502 unlock_res_and_lock(lock); 502 unlock_res_and_lock(lock);
503 if (rc < 0) { 503 if (rc < 0) {
504 cleanup_phase = 1; 504 cleanup_phase = 1;
@@ -518,7 +518,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
518 } 518 }
519 } 519 }
520 520
521 if (lvb_len && lvb) { 521 if (lvb_len > 0 && lvb) {
522 /* Copy the LVB here, and not earlier, because the completion 522 /* Copy the LVB here, and not earlier, because the completion
523 * AST (if any) can override what we got in the reply 523 * AST (if any) can override what we got in the reply
524 */ 524 */
@@ -601,7 +601,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
601 avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff); 601 avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
602 602
603 flags = ns_connect_lru_resize(ns) ? 603 flags = ns_connect_lru_resize(ns) ?
604 LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED; 604 LDLM_CANCEL_LRUR_NO_WAIT : LDLM_CANCEL_AGED;
605 to_free = !ns_connect_lru_resize(ns) && 605 to_free = !ns_connect_lru_resize(ns) &&
606 opc == LDLM_ENQUEUE ? 1 : 0; 606 opc == LDLM_ENQUEUE ? 1 : 0;
607 607
@@ -821,12 +821,11 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
821 LDLM_DEBUG(lock, "client-side cancel"); 821 LDLM_DEBUG(lock, "client-side cancel");
822 /* Set this flag to prevent others from getting new references*/ 822 /* Set this flag to prevent others from getting new references*/
823 lock_res_and_lock(lock); 823 lock_res_and_lock(lock);
824 lock->l_flags |= LDLM_FL_CBPENDING; 824 ldlm_set_cbpending(lock);
825 local_only = !!(lock->l_flags & 825 local_only = !!(lock->l_flags &
826 (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK)); 826 (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
827 ldlm_cancel_callback(lock); 827 ldlm_cancel_callback(lock);
828 rc = (lock->l_flags & LDLM_FL_BL_AST) ? 828 rc = ldlm_is_bl_ast(lock) ? LDLM_FL_BL_AST : LDLM_FL_CANCELING;
829 LDLM_FL_BL_AST : LDLM_FL_CANCELING;
830 unlock_res_and_lock(lock); 829 unlock_res_and_lock(lock);
831 830
832 if (local_only) { 831 if (local_only) {
@@ -1131,31 +1130,30 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
1131 * dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g. 1130 * dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g.
1132 * readahead requests, ...) 1131 * readahead requests, ...)
1133 */ 1132 */
1134static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, 1133static enum ldlm_policy_res
1135 struct ldlm_lock *lock, 1134ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
1136 int unused, int added, 1135 int unused, int added, int count)
1137 int count)
1138{ 1136{
1139 ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK; 1137 enum ldlm_policy_res result = LDLM_POLICY_CANCEL_LOCK;
1140 ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
1141
1142 lock_res_and_lock(lock);
1143 1138
1144 /* don't check added & count since we want to process all locks 1139 /* don't check added & count since we want to process all locks
1145 * from unused list 1140 * from unused list.
1141 * It's fine to not take lock to access lock->l_resource since
1142 * the lock has already been granted so it won't change.
1146 */ 1143 */
1147 switch (lock->l_resource->lr_type) { 1144 switch (lock->l_resource->lr_type) {
1148 case LDLM_EXTENT: 1145 case LDLM_EXTENT:
1149 case LDLM_IBITS: 1146 case LDLM_IBITS:
1150 if (cb && cb(lock)) 1147 if (ns->ns_cancel && ns->ns_cancel(lock) != 0)
1151 break; 1148 break;
1152 default: 1149 default:
1153 result = LDLM_POLICY_SKIP_LOCK; 1150 result = LDLM_POLICY_SKIP_LOCK;
1154 lock->l_flags |= LDLM_FL_SKIPPED; 1151 lock_res_and_lock(lock);
1152 ldlm_set_skipped(lock);
1153 unlock_res_and_lock(lock);
1155 break; 1154 break;
1156 } 1155 }
1157 1156
1158 unlock_res_and_lock(lock);
1159 return result; 1157 return result;
1160} 1158}
1161 1159
@@ -1168,10 +1166,10 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
1168 * 1166 *
1169 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU 1167 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1170 */ 1168 */
1171static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, 1169static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
1172 struct ldlm_lock *lock, 1170 struct ldlm_lock *lock,
1173 int unused, int added, 1171 int unused, int added,
1174 int count) 1172 int count)
1175{ 1173{
1176 unsigned long cur = cfs_time_current(); 1174 unsigned long cur = cfs_time_current();
1177 struct ldlm_pool *pl = &ns->ns_pool; 1175 struct ldlm_pool *pl = &ns->ns_pool;
@@ -1196,8 +1194,13 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
1196 /* Stop when SLV is not yet come from server or lv is smaller than 1194 /* Stop when SLV is not yet come from server or lv is smaller than
1197 * it is. 1195 * it is.
1198 */ 1196 */
1199 return (slv == 0 || lv < slv) ? 1197 if (slv == 0 || lv < slv)
1200 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; 1198 return LDLM_POLICY_KEEP_LOCK;
1199
1200 if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
1201 return LDLM_POLICY_KEEP_LOCK;
1202
1203 return LDLM_POLICY_CANCEL_LOCK;
1201} 1204}
1202 1205
1203/** 1206/**
@@ -1209,10 +1212,10 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
1209 * 1212 *
1210 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU 1213 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1211 */ 1214 */
1212static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns, 1215static enum ldlm_policy_res ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
1213 struct ldlm_lock *lock, 1216 struct ldlm_lock *lock,
1214 int unused, int added, 1217 int unused, int added,
1215 int count) 1218 int count)
1216{ 1219{
1217 /* Stop LRU processing when we reach past @count or have checked all 1220 /* Stop LRU processing when we reach past @count or have checked all
1218 * locks in LRU. 1221 * locks in LRU.
@@ -1230,16 +1233,35 @@ static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
1230 * 1233 *
1231 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU 1234 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1232 */ 1235 */
1233static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns, 1236static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
1234 struct ldlm_lock *lock, 1237 struct ldlm_lock *lock,
1235 int unused, int added, 1238 int unused, int added,
1236 int count) 1239 int count)
1237{ 1240{
1238 /* Stop LRU processing if young lock is found and we reach past count */ 1241 if ((added >= count) &&
1239 return ((added >= count) && 1242 time_before(cfs_time_current(),
1240 time_before(cfs_time_current(), 1243 cfs_time_add(lock->l_last_used, ns->ns_max_age)))
1241 cfs_time_add(lock->l_last_used, ns->ns_max_age))) ? 1244 return LDLM_POLICY_KEEP_LOCK;
1242 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; 1245
1246 if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
1247 return LDLM_POLICY_KEEP_LOCK;
1248
1249 return LDLM_POLICY_CANCEL_LOCK;
1250}
1251
1252static enum ldlm_policy_res
1253ldlm_cancel_lrur_no_wait_policy(struct ldlm_namespace *ns,
1254 struct ldlm_lock *lock,
1255 int unused, int added,
1256 int count)
1257{
1258 enum ldlm_policy_res result;
1259
1260 result = ldlm_cancel_lrur_policy(ns, lock, unused, added, count);
1261 if (result == LDLM_POLICY_KEEP_LOCK)
1262 return result;
1263
1264 return ldlm_cancel_no_wait_policy(ns, lock, unused, added, count);
1243} 1265}
1244 1266
1245/** 1267/**
@@ -1251,10 +1273,9 @@ static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
1251 * 1273 *
1252 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU 1274 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1253 */ 1275 */
1254static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns, 1276static enum ldlm_policy_res
1255 struct ldlm_lock *lock, 1277ldlm_cancel_default_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
1256 int unused, int added, 1278 int unused, int added, int count)
1257 int count)
1258{ 1279{
1259 /* Stop LRU processing when we reach past count or have checked all 1280 /* Stop LRU processing when we reach past count or have checked all
1260 * locks in LRU. 1281 * locks in LRU.
@@ -1263,7 +1284,8 @@ static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
1263 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; 1284 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
1264} 1285}
1265 1286
1266typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *, 1287typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)(
1288 struct ldlm_namespace *,
1267 struct ldlm_lock *, int, 1289 struct ldlm_lock *, int,
1268 int, int); 1290 int, int);
1269 1291
@@ -1281,6 +1303,8 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
1281 return ldlm_cancel_lrur_policy; 1303 return ldlm_cancel_lrur_policy;
1282 else if (flags & LDLM_CANCEL_PASSED) 1304 else if (flags & LDLM_CANCEL_PASSED)
1283 return ldlm_cancel_passed_policy; 1305 return ldlm_cancel_passed_policy;
1306 else if (flags & LDLM_CANCEL_LRUR_NO_WAIT)
1307 return ldlm_cancel_lrur_no_wait_policy;
1284 } else { 1308 } else {
1285 if (flags & LDLM_CANCEL_AGED) 1309 if (flags & LDLM_CANCEL_AGED)
1286 return ldlm_cancel_aged_policy; 1310 return ldlm_cancel_aged_policy;
@@ -1329,6 +1353,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
1329 ldlm_cancel_lru_policy_t pf; 1353 ldlm_cancel_lru_policy_t pf;
1330 struct ldlm_lock *lock, *next; 1354 struct ldlm_lock *lock, *next;
1331 int added = 0, unused, remained; 1355 int added = 0, unused, remained;
1356 int no_wait = flags & (LDLM_CANCEL_NO_WAIT | LDLM_CANCEL_LRUR_NO_WAIT);
1332 1357
1333 spin_lock(&ns->ns_lock); 1358 spin_lock(&ns->ns_lock);
1334 unused = ns->ns_nr_unused; 1359 unused = ns->ns_nr_unused;
@@ -1341,7 +1366,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
1341 LASSERT(pf); 1366 LASSERT(pf);
1342 1367
1343 while (!list_empty(&ns->ns_unused_list)) { 1368 while (!list_empty(&ns->ns_unused_list)) {
1344 ldlm_policy_res_t result; 1369 enum ldlm_policy_res result;
1370 time_t last_use = 0;
1345 1371
1346 /* all unused locks */ 1372 /* all unused locks */
1347 if (remained-- <= 0) 1373 if (remained-- <= 0)
@@ -1354,17 +1380,20 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
1354 list_for_each_entry_safe(lock, next, &ns->ns_unused_list, 1380 list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
1355 l_lru) { 1381 l_lru) {
1356 /* No locks which got blocking requests. */ 1382 /* No locks which got blocking requests. */
1357 LASSERT(!(lock->l_flags & LDLM_FL_BL_AST)); 1383 LASSERT(!ldlm_is_bl_ast(lock));
1358 1384
1359 if (flags & LDLM_CANCEL_NO_WAIT && 1385 if (no_wait && ldlm_is_skipped(lock))
1360 lock->l_flags & LDLM_FL_SKIPPED)
1361 /* already processed */ 1386 /* already processed */
1362 continue; 1387 continue;
1363 1388
1389 last_use = lock->l_last_used;
1390 if (last_use == cfs_time_current())
1391 continue;
1392
1364 /* Somebody is already doing CANCEL. No need for this 1393 /* Somebody is already doing CANCEL. No need for this
1365 * lock in LRU, do not traverse it again. 1394 * lock in LRU, do not traverse it again.
1366 */ 1395 */
1367 if (!(lock->l_flags & LDLM_FL_CANCELING)) 1396 if (!ldlm_is_canceling(lock))
1368 break; 1397 break;
1369 1398
1370 ldlm_lock_remove_from_lru_nolock(lock); 1399 ldlm_lock_remove_from_lru_nolock(lock);
@@ -1407,12 +1436,14 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
1407 1436
1408 lock_res_and_lock(lock); 1437 lock_res_and_lock(lock);
1409 /* Check flags again under the lock. */ 1438 /* Check flags again under the lock. */
1410 if ((lock->l_flags & LDLM_FL_CANCELING) || 1439 if (ldlm_is_canceling(lock) ||
1411 (ldlm_lock_remove_from_lru(lock) == 0)) { 1440 (ldlm_lock_remove_from_lru_check(lock, last_use) == 0)) {
1412 /* Another thread is removing lock from LRU, or 1441 /* Another thread is removing lock from LRU, or
1413 * somebody is already doing CANCEL, or there 1442 * somebody is already doing CANCEL, or there
1414 * is a blocking request which will send cancel 1443 * is a blocking request which will send cancel
1415 * by itself, or the lock is no longer unused. 1444 * by itself, or the lock is no longer unused or
1445 * the lock has been used since the pf() call and
1446 * pages could be put under it.
1416 */ 1447 */
1417 unlock_res_and_lock(lock); 1448 unlock_res_and_lock(lock);
1418 lu_ref_del(&lock->l_reference, 1449 lu_ref_del(&lock->l_reference,
@@ -1429,7 +1460,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
1429 * where while we are doing cancel here, server is also 1460 * where while we are doing cancel here, server is also
1430 * silently cancelling this lock. 1461 * silently cancelling this lock.
1431 */ 1462 */
1432 lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK; 1463 ldlm_clear_cancel_on_block(lock);
1433 1464
1434 /* Setting the CBPENDING flag is a little misleading, 1465 /* Setting the CBPENDING flag is a little misleading,
1435 * but prevents an important race; namely, once 1466 * but prevents an important race; namely, once
@@ -1526,8 +1557,7 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
1526 /* If somebody is already doing CANCEL, or blocking AST came, 1557 /* If somebody is already doing CANCEL, or blocking AST came,
1527 * skip this lock. 1558 * skip this lock.
1528 */ 1559 */
1529 if (lock->l_flags & LDLM_FL_BL_AST || 1560 if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock))
1530 lock->l_flags & LDLM_FL_CANCELING)
1531 continue; 1561 continue;
1532 1562
1533 if (lockmode_compat(lock->l_granted_mode, mode)) 1563 if (lockmode_compat(lock->l_granted_mode, mode))
@@ -1771,7 +1801,6 @@ static void ldlm_namespace_foreach(struct ldlm_namespace *ns,
1771 1801
1772 cfs_hash_for_each_nolock(ns->ns_rs_hash, 1802 cfs_hash_for_each_nolock(ns->ns_rs_hash,
1773 ldlm_res_iter_helper, &helper); 1803 ldlm_res_iter_helper, &helper);
1774
1775} 1804}
1776 1805
1777/* non-blocking function to manipulate a lock whose cb_data is being put away. 1806/* non-blocking function to manipulate a lock whose cb_data is being put away.
@@ -1887,7 +1916,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
1887 int flags; 1916 int flags;
1888 1917
1889 /* Bug 11974: Do not replay a lock which is actively being canceled */ 1918 /* Bug 11974: Do not replay a lock which is actively being canceled */
1890 if (lock->l_flags & LDLM_FL_CANCELING) { 1919 if (ldlm_is_canceling(lock)) {
1891 LDLM_DEBUG(lock, "Not replaying canceled lock:"); 1920 LDLM_DEBUG(lock, "Not replaying canceled lock:");
1892 return 0; 1921 return 0;
1893 } 1922 }
@@ -1896,7 +1925,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
1896 * server might have long dropped it, but notification of that event was 1925 * server might have long dropped it, but notification of that event was
1897 * lost by network. (and server granted conflicting lock already) 1926 * lost by network. (and server granted conflicting lock already)
1898 */ 1927 */
1899 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) { 1928 if (ldlm_is_cancel_on_block(lock)) {
1900 LDLM_DEBUG(lock, "Not replaying reply-less lock:"); 1929 LDLM_DEBUG(lock, "Not replaying reply-less lock:");
1901 ldlm_lock_cancel(lock); 1930 ldlm_lock_cancel(lock);
1902 return 0; 1931 return 0;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 9dede87ad0a3..e99c89c34cd0 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -124,9 +124,15 @@ int ldlm_debugfs_setup(void)
124 } 124 }
125 125
126 rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL); 126 rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
127 if (rc) {
128 CERROR("LProcFS failed in ldlm-init\n");
129 goto err_svc;
130 }
127 131
128 return 0; 132 return 0;
129 133
134err_svc:
135 ldebugfs_remove(&ldlm_svc_debugfs_dir);
130err_ns: 136err_ns:
131 ldebugfs_remove(&ldlm_ns_debugfs_dir); 137 ldebugfs_remove(&ldlm_ns_debugfs_dir);
132err_type: 138err_type:
@@ -758,12 +764,12 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
758 list_for_each(tmp, q) { 764 list_for_each(tmp, q) {
759 lock = list_entry(tmp, struct ldlm_lock, 765 lock = list_entry(tmp, struct ldlm_lock,
760 l_res_link); 766 l_res_link);
761 if (lock->l_flags & LDLM_FL_CLEANED) { 767 if (ldlm_is_cleaned(lock)) {
762 lock = NULL; 768 lock = NULL;
763 continue; 769 continue;
764 } 770 }
765 LDLM_LOCK_GET(lock); 771 LDLM_LOCK_GET(lock);
766 lock->l_flags |= LDLM_FL_CLEANED; 772 ldlm_set_cleaned(lock);
767 break; 773 break;
768 } 774 }
769 775
@@ -775,13 +781,13 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
775 /* Set CBPENDING so nothing in the cancellation path 781 /* Set CBPENDING so nothing in the cancellation path
776 * can match this lock. 782 * can match this lock.
777 */ 783 */
778 lock->l_flags |= LDLM_FL_CBPENDING; 784 ldlm_set_cbpending(lock);
779 lock->l_flags |= LDLM_FL_FAILED; 785 ldlm_set_failed(lock);
780 lock->l_flags |= flags; 786 lock->l_flags |= flags;
781 787
782 /* ... without sending a CANCEL message for local_only. */ 788 /* ... without sending a CANCEL message for local_only. */
783 if (local_only) 789 if (local_only)
784 lock->l_flags |= LDLM_FL_LOCAL_ONLY; 790 ldlm_set_local_only(lock);
785 791
786 if (local_only && (lock->l_readers || lock->l_writers)) { 792 if (local_only && (lock->l_readers || lock->l_writers)) {
787 /* This is a little bit gross, but much better than the 793 /* This is a little bit gross, but much better than the
@@ -1275,7 +1281,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1275 1281
1276 LDLM_DEBUG(lock, "About to add this lock:\n"); 1282 LDLM_DEBUG(lock, "About to add this lock:\n");
1277 1283
1278 if (lock->l_flags & LDLM_FL_DESTROYED) { 1284 if (ldlm_is_destroyed(lock)) {
1279 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n"); 1285 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1280 return; 1286 return;
1281 } 1287 }
@@ -1400,3 +1406,4 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
1400 LDLM_DEBUG_LIMIT(level, lock, "###"); 1406 LDLM_DEBUG_LIMIT(level, lock, "###");
1401 } 1407 }
1402} 1408}
1409EXPORT_SYMBOL(ldlm_resource_dump);
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index 9ac29e718da3..2ce10ff01b80 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -4,7 +4,8 @@ lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
4 rw.o namei.o symlink.o llite_mmap.o \ 4 rw.o namei.o symlink.o llite_mmap.o \
5 xattr.o xattr_cache.o remote_perm.o llite_rmtacl.o \ 5 xattr.o xattr_cache.o remote_perm.o llite_rmtacl.o \
6 rw26.o super25.o statahead.o \ 6 rw26.o super25.o statahead.o \
7 ../lclient/glimpse.o ../lclient/lcommon_cl.o ../lclient/lcommon_misc.o \ 7 glimpse.o lcommon_cl.o lcommon_misc.o \
8 vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o lproc_llite.o 8 vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o vvp_req.o \
9 lproc_llite.o
9 10
10llite_lloop-y := lloop.o 11llite_lloop-y := lloop.o
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index dd1c827013b9..1b6f82a1a435 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -108,11 +108,8 @@ static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
108 108
109static inline int return_if_equal(struct ldlm_lock *lock, void *data) 109static inline int return_if_equal(struct ldlm_lock *lock, void *data)
110{ 110{
111 if ((lock->l_flags & 111 return (ldlm_is_canceling(lock) && ldlm_is_discard_data(lock)) ?
112 (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA)) == 112 LDLM_ITER_CONTINUE : LDLM_ITER_STOP;
113 (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA))
114 return LDLM_ITER_CONTINUE;
115 return LDLM_ITER_STOP;
116} 113}
117 114
118/* find any ldlm lock of the inode in mdc and lov 115/* find any ldlm lock of the inode in mdc and lov
@@ -253,8 +250,8 @@ void ll_invalidate_aliases(struct inode *inode)
253{ 250{
254 struct dentry *dentry; 251 struct dentry *dentry;
255 252
256 CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n", 253 CDEBUG(D_INODE, "marking dentries for ino "DFID"(%p) invalid\n",
257 inode->i_ino, inode->i_generation, inode); 254 PFID(ll_inode2fid(inode)), inode);
258 255
259 ll_lock_dcache(inode); 256 ll_lock_dcache(inode);
260 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { 257 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
@@ -289,8 +286,8 @@ void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
289 if (it->d.lustre.it_lock_mode && inode) { 286 if (it->d.lustre.it_lock_mode && inode) {
290 struct ll_sb_info *sbi = ll_i2sbi(inode); 287 struct ll_sb_info *sbi = ll_i2sbi(inode);
291 288
292 CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n", 289 CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)\n",
293 inode, inode->i_ino, inode->i_generation); 290 PFID(ll_inode2fid(inode)), inode);
294 ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL); 291 ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
295 } 292 }
296 293
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 7a0a67f3c4e7..4b00d1ac84fb 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -158,11 +158,16 @@ static int ll_dir_filler(void *_hash, struct page *page0)
158 int i; 158 int i;
159 int rc; 159 int rc;
160 160
161 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash %llu\n", 161 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) hash %llu\n",
162 inode->i_ino, inode->i_generation, inode, hash); 162 PFID(ll_inode2fid(inode)), inode, hash);
163 163
164 LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES); 164 LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES);
165 165
166 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
167 LUSTRE_OPC_ANY, NULL);
168 if (IS_ERR(op_data))
169 return PTR_ERR(op_data);
170
166 page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS); 171 page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
167 if (page_pool) { 172 if (page_pool) {
168 page_pool[0] = page0; 173 page_pool[0] = page0;
@@ -177,8 +182,6 @@ static int ll_dir_filler(void *_hash, struct page *page0)
177 page_pool[npages] = page; 182 page_pool[npages] = page;
178 } 183 }
179 184
180 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
181 LUSTRE_OPC_ANY, NULL);
182 op_data->op_npages = npages; 185 op_data->op_npages = npages;
183 op_data->op_offset = hash; 186 op_data->op_offset = hash;
184 rc = md_readpage(exp, op_data, page_pool, &request); 187 rc = md_readpage(exp, op_data, page_pool, &request);
@@ -190,7 +193,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
190 body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY); 193 body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
191 /* Checked by mdc_readpage() */ 194 /* Checked by mdc_readpage() */
192 if (body->valid & OBD_MD_FLSIZE) 195 if (body->valid & OBD_MD_FLSIZE)
193 cl_isize_write(inode, body->size); 196 i_size_write(inode, body->size);
194 197
195 nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1) 198 nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
196 >> PAGE_SHIFT; 199 >> PAGE_SHIFT;
@@ -372,8 +375,8 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
372 return ERR_PTR(rc); 375 return ERR_PTR(rc);
373 } 376 }
374 377
375 CDEBUG(D_INODE, "setting lr_lvb_inode to inode %p (%lu/%u)\n", 378 CDEBUG(D_INODE, "setting lr_lvb_inode to inode "DFID"(%p)\n",
376 dir, dir->i_ino, dir->i_generation); 379 PFID(ll_inode2fid(dir)), dir);
377 md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, 380 md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
378 &it.d.lustre.it_lock_handle, dir, NULL); 381 &it.d.lustre.it_lock_handle, dir, NULL);
379 } else { 382 } else {
@@ -468,6 +471,28 @@ fail:
468 goto out_unlock; 471 goto out_unlock;
469} 472}
470 473
474/**
475 * return IF_* type for given lu_dirent entry.
476 * IF_* flag shld be converted to particular OS file type in
477 * platform llite module.
478 */
479static __u16 ll_dirent_type_get(struct lu_dirent *ent)
480{
481 __u16 type = 0;
482 struct luda_type *lt;
483 int len = 0;
484
485 if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
486 const unsigned int align = sizeof(struct luda_type) - 1;
487
488 len = le16_to_cpu(ent->lde_namelen);
489 len = (len + align) & ~align;
490 lt = (void *)ent->lde_name + len;
491 type = IFTODT(le16_to_cpu(lt->lt_type));
492 }
493 return type;
494}
495
471int ll_dir_read(struct inode *inode, struct dir_context *ctx) 496int ll_dir_read(struct inode *inode, struct dir_context *ctx)
472{ 497{
473 struct ll_inode_info *info = ll_i2info(inode); 498 struct ll_inode_info *info = ll_i2info(inode);
@@ -589,15 +614,16 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
589 struct inode *inode = file_inode(filp); 614 struct inode *inode = file_inode(filp);
590 struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp); 615 struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp);
591 struct ll_sb_info *sbi = ll_i2sbi(inode); 616 struct ll_sb_info *sbi = ll_i2sbi(inode);
617 __u64 pos = lfd ? lfd->lfd_pos : 0;
592 int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH; 618 int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
593 int api32 = ll_need_32bit_api(sbi); 619 int api32 = ll_need_32bit_api(sbi);
594 int rc; 620 int rc;
595 621
596 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu 32bit_api %d\n", 622 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) pos %lu/%llu 32bit_api %d\n",
597 inode->i_ino, inode->i_generation, 623 PFID(ll_inode2fid(inode)), inode, (unsigned long)pos,
598 inode, (unsigned long)lfd->lfd_pos, i_size_read(inode), api32); 624 i_size_read(inode), api32);
599 625
600 if (lfd->lfd_pos == MDS_DIR_END_OFF) { 626 if (pos == MDS_DIR_END_OFF) {
601 /* 627 /*
602 * end-of-file. 628 * end-of-file.
603 */ 629 */
@@ -605,9 +631,10 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
605 goto out; 631 goto out;
606 } 632 }
607 633
608 ctx->pos = lfd->lfd_pos; 634 ctx->pos = pos;
609 rc = ll_dir_read(inode, ctx); 635 rc = ll_dir_read(inode, ctx);
610 lfd->lfd_pos = ctx->pos; 636 if (lfd)
637 lfd->lfd_pos = ctx->pos;
611 if (ctx->pos == MDS_DIR_END_OFF) { 638 if (ctx->pos == MDS_DIR_END_OFF) {
612 if (api32) 639 if (api32)
613 ctx->pos = LL_DIR_END_OFF_32BIT; 640 ctx->pos = LL_DIR_END_OFF_32BIT;
@@ -804,9 +831,8 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
804 rc = md_getattr(sbi->ll_md_exp, op_data, &req); 831 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
805 ll_finish_md_op_data(op_data); 832 ll_finish_md_op_data(op_data);
806 if (rc < 0) { 833 if (rc < 0) {
807 CDEBUG(D_INFO, "md_getattr failed on inode %lu/%u: rc %d\n", 834 CDEBUG(D_INFO, "md_getattr failed on inode "DFID": rc %d\n",
808 inode->i_ino, 835 PFID(ll_inode2fid(inode)), rc);
809 inode->i_generation, rc);
810 goto out; 836 goto out;
811 } 837 }
812 838
@@ -916,7 +942,7 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
916 } 942 }
917 943
918 /* Read current file data version */ 944 /* Read current file data version */
919 rc = ll_data_version(inode, &data_version, 1); 945 rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
920 iput(inode); 946 iput(inode);
921 if (rc != 0) { 947 if (rc != 0) {
922 CDEBUG(D_HSM, "Could not read file data version of " 948 CDEBUG(D_HSM, "Could not read file data version of "
@@ -936,6 +962,9 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
936 } 962 }
937 963
938progress: 964progress:
965 /* On error, the request should be considered as completed */
966 if (hpk.hpk_errval > 0)
967 hpk.hpk_flags |= HP_FLAG_COMPLETED;
939 rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk), 968 rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
940 &hpk, NULL); 969 &hpk, NULL);
941 970
@@ -997,8 +1026,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
997 goto progress; 1026 goto progress;
998 } 1027 }
999 1028
1000 rc = ll_data_version(inode, &data_version, 1029 rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
1001 copy->hc_hai.hai_action == HSMA_ARCHIVE);
1002 iput(inode); 1030 iput(inode);
1003 if (rc) { 1031 if (rc) {
1004 CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n"); 1032 CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
@@ -1033,7 +1061,6 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
1033 /* hpk_errval must be >= 0 */ 1061 /* hpk_errval must be >= 0 */
1034 hpk.hpk_errval = EBUSY; 1062 hpk.hpk_errval = EBUSY;
1035 } 1063 }
1036
1037 } 1064 }
1038 1065
1039progress: 1066progress:
@@ -1242,8 +1269,8 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1242 struct obd_ioctl_data *data; 1269 struct obd_ioctl_data *data;
1243 int rc = 0; 1270 int rc = 0;
1244 1271
1245 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n", 1272 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), cmd=%#x\n",
1246 inode->i_ino, inode->i_generation, inode, cmd); 1273 PFID(ll_inode2fid(inode)), inode, cmd);
1247 1274
1248 /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */ 1275 /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
1249 if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */ 1276 if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
@@ -1362,7 +1389,6 @@ out_free:
1362lmv_out_free: 1389lmv_out_free:
1363 obd_ioctl_freedata(buf, len); 1390 obd_ioctl_freedata(buf, len);
1364 return rc; 1391 return rc;
1365
1366 } 1392 }
1367 case LL_IOC_LOV_SETSTRIPE: { 1393 case LL_IOC_LOV_SETSTRIPE: {
1368 struct lov_user_md_v3 lumv3; 1394 struct lov_user_md_v3 lumv3;
@@ -1474,8 +1500,9 @@ free_lmv:
1474 cmd == LL_IOC_MDC_GETINFO)) { 1500 cmd == LL_IOC_MDC_GETINFO)) {
1475 rc = 0; 1501 rc = 0;
1476 goto skip_lmm; 1502 goto skip_lmm;
1477 } else 1503 } else {
1478 goto out_req; 1504 goto out_req;
1505 }
1479 } 1506 }
1480 1507
1481 if (cmd == IOC_MDC_GETFILESTRIPE || 1508 if (cmd == IOC_MDC_GETFILESTRIPE ||
@@ -1688,15 +1715,16 @@ out_quotactl:
1688 return ll_flush_ctx(inode); 1715 return ll_flush_ctx(inode);
1689#ifdef CONFIG_FS_POSIX_ACL 1716#ifdef CONFIG_FS_POSIX_ACL
1690 case LL_IOC_RMTACL: { 1717 case LL_IOC_RMTACL: {
1691 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) { 1718 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
1692 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 1719 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
1693 1720
1694 rc = rct_add(&sbi->ll_rct, current_pid(), arg); 1721 rc = rct_add(&sbi->ll_rct, current_pid(), arg);
1695 if (!rc) 1722 if (!rc)
1696 fd->fd_flags |= LL_FILE_RMTACL; 1723 fd->fd_flags |= LL_FILE_RMTACL;
1697 return rc; 1724 return rc;
1698 } else 1725 } else {
1699 return 0; 1726 return 0;
1727 }
1700 } 1728 }
1701#endif 1729#endif
1702 case LL_IOC_GETOBDCOUNT: { 1730 case LL_IOC_GETOBDCOUNT: {
@@ -1817,6 +1845,9 @@ out_quotactl:
1817 return rc; 1845 return rc;
1818 } 1846 }
1819 case LL_IOC_HSM_CT_START: 1847 case LL_IOC_HSM_CT_START:
1848 if (!capable(CFS_CAP_SYS_ADMIN))
1849 return -EPERM;
1850
1820 rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg, 1851 rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
1821 sizeof(struct lustre_kernelcomm)); 1852 sizeof(struct lustre_kernelcomm));
1822 return rc; 1853 return rc;
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index cf619af3caf5..f47f2acaf90c 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -45,6 +45,7 @@
45#include "../include/lustre_lite.h" 45#include "../include/lustre_lite.h"
46#include <linux/pagemap.h> 46#include <linux/pagemap.h>
47#include <linux/file.h> 47#include <linux/file.h>
48#include <linux/mount.h>
48#include "llite_internal.h" 49#include "llite_internal.h"
49#include "../include/lustre/ll_fiemap.h" 50#include "../include/lustre/ll_fiemap.h"
50 51
@@ -87,8 +88,7 @@ void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
87 op_data->op_attr.ia_ctime = inode->i_ctime; 88 op_data->op_attr.ia_ctime = inode->i_ctime;
88 op_data->op_attr.ia_size = i_size_read(inode); 89 op_data->op_attr.ia_size = i_size_read(inode);
89 op_data->op_attr_blocks = inode->i_blocks; 90 op_data->op_attr_blocks = inode->i_blocks;
90 ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = 91 op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
91 ll_inode_to_ext_flags(inode->i_flags);
92 op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch; 92 op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch;
93 if (fh) 93 if (fh)
94 op_data->op_handle = *fh; 94 op_data->op_handle = *fh;
@@ -170,13 +170,15 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
170 */ 170 */
171 rc = ll_som_update(inode, op_data); 171 rc = ll_som_update(inode, op_data);
172 if (rc) { 172 if (rc) {
173 CERROR("inode %lu mdc Size-on-MDS update failed: rc = %d\n", 173 CERROR("%s: inode "DFID" mdc Size-on-MDS update failed: rc = %d\n",
174 inode->i_ino, rc); 174 ll_i2mdexp(inode)->exp_obd->obd_name,
175 PFID(ll_inode2fid(inode)), rc);
175 rc = 0; 176 rc = 0;
176 } 177 }
177 } else if (rc) { 178 } else if (rc) {
178 CERROR("inode %lu mdc close failed: rc = %d\n", 179 CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
179 inode->i_ino, rc); 180 ll_i2mdexp(inode)->exp_obd->obd_name,
181 PFID(ll_inode2fid(inode)), rc);
180 } 182 }
181 183
182 /* DATA_MODIFIED flag was successfully sent on close, cancel data 184 /* DATA_MODIFIED flag was successfully sent on close, cancel data
@@ -278,7 +280,7 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
278 280
279 /* clear group lock, if present */ 281 /* clear group lock, if present */
280 if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED)) 282 if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
281 ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid); 283 ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid);
282 284
283 if (fd->fd_lease_och) { 285 if (fd->fd_lease_och) {
284 bool lease_broken; 286 bool lease_broken;
@@ -343,8 +345,8 @@ int ll_file_release(struct inode *inode, struct file *file)
343 struct ll_inode_info *lli = ll_i2info(inode); 345 struct ll_inode_info *lli = ll_i2info(inode);
344 int rc; 346 int rc;
345 347
346 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino, 348 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
347 inode->i_generation, inode); 349 PFID(ll_inode2fid(inode)), inode);
348 350
349#ifdef CONFIG_FS_POSIX_ACL 351#ifdef CONFIG_FS_POSIX_ACL
350 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) { 352 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
@@ -543,8 +545,8 @@ int ll_file_open(struct inode *inode, struct file *file)
543 struct ll_file_data *fd; 545 struct ll_file_data *fd;
544 int rc = 0, opendir_set = 0; 546 int rc = 0, opendir_set = 0;
545 547
546 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), flags %o\n", inode->i_ino, 548 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), flags %o\n",
547 inode->i_generation, inode, file->f_flags); 549 PFID(ll_inode2fid(inode)), inode, file->f_flags);
548 550
549 it = file->private_data; /* XXX: compat macro */ 551 it = file->private_data; /* XXX: compat macro */
550 file->private_data = NULL; /* prevent ll_local_open assertion */ 552 file->private_data = NULL; /* prevent ll_local_open assertion */
@@ -677,7 +679,9 @@ restart:
677 if (rc) 679 if (rc)
678 goto out_och_free; 680 goto out_och_free;
679 681
680 LASSERT(it_disposition(it, DISP_ENQ_OPEN_REF)); 682 LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
683 "inode %p: disposition %x, status %d\n", inode,
684 it_disposition(it, ~0), it->d.lustre.it_status);
681 685
682 rc = ll_local_open(file, it, fd, *och_p); 686 rc = ll_local_open(file, it, fd, *och_p);
683 if (rc) 687 if (rc)
@@ -875,16 +879,19 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
875 return och; 879 return och;
876 880
877out_close: 881out_close:
878 rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL); 882 /* Cancel open lock */
879 if (rc2)
880 CERROR("Close openhandle returned %d\n", rc2);
881
882 /* cancel open lock */
883 if (it.d.lustre.it_lock_mode != 0) { 883 if (it.d.lustre.it_lock_mode != 0) {
884 ldlm_lock_decref_and_cancel(&och->och_lease_handle, 884 ldlm_lock_decref_and_cancel(&och->och_lease_handle,
885 it.d.lustre.it_lock_mode); 885 it.d.lustre.it_lock_mode);
886 it.d.lustre.it_lock_mode = 0; 886 it.d.lustre.it_lock_mode = 0;
887 och->och_lease_handle.cookie = 0ULL;
887 } 888 }
889 rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
890 if (rc2 < 0)
891 CERROR("%s: error closing file "DFID": %d\n",
892 ll_get_fsname(inode->i_sb, NULL, 0),
893 PFID(&ll_i2info(inode)->lli_fid), rc2);
894 och = NULL; /* och has been freed in ll_close_inode_openhandle() */
888out_release_it: 895out_release_it:
889 ll_intent_release(&it); 896 ll_intent_release(&it);
890out: 897out:
@@ -908,7 +915,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
908 lock_res_and_lock(lock); 915 lock_res_and_lock(lock);
909 cancelled = ldlm_is_cancel(lock); 916 cancelled = ldlm_is_cancel(lock);
910 unlock_res_and_lock(lock); 917 unlock_res_and_lock(lock);
911 ldlm_lock_put(lock); 918 LDLM_LOCK_PUT(lock);
912 } 919 }
913 920
914 CDEBUG(D_INODE, "lease for " DFID " broken? %d\n", 921 CDEBUG(D_INODE, "lease for " DFID " broken? %d\n",
@@ -926,7 +933,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
926 933
927/* Fills the obdo with the attributes for the lsm */ 934/* Fills the obdo with the attributes for the lsm */
928static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp, 935static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
929 struct obdo *obdo, __u64 ioepoch, int sync) 936 struct obdo *obdo, __u64 ioepoch, int dv_flags)
930{ 937{
931 struct ptlrpc_request_set *set; 938 struct ptlrpc_request_set *set;
932 struct obd_info oinfo = { }; 939 struct obd_info oinfo = { };
@@ -945,9 +952,11 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
945 OBD_MD_FLMTIME | OBD_MD_FLCTIME | 952 OBD_MD_FLMTIME | OBD_MD_FLCTIME |
946 OBD_MD_FLGROUP | OBD_MD_FLEPOCH | 953 OBD_MD_FLGROUP | OBD_MD_FLEPOCH |
947 OBD_MD_FLDATAVERSION; 954 OBD_MD_FLDATAVERSION;
948 if (sync) { 955 if (dv_flags & (LL_DV_WR_FLUSH | LL_DV_RD_FLUSH)) {
949 oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS; 956 oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS;
950 oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK; 957 oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK;
958 if (dv_flags & LL_DV_WR_FLUSH)
959 oinfo.oi_oa->o_flags |= OBD_FL_FLUSH;
951 } 960 }
952 961
953 set = ptlrpc_prep_set(); 962 set = ptlrpc_prep_set();
@@ -960,11 +969,16 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
960 rc = ptlrpc_set_wait(set); 969 rc = ptlrpc_set_wait(set);
961 ptlrpc_set_destroy(set); 970 ptlrpc_set_destroy(set);
962 } 971 }
963 if (rc == 0) 972 if (rc == 0) {
964 oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | 973 oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
965 OBD_MD_FLATIME | OBD_MD_FLMTIME | 974 OBD_MD_FLATIME | OBD_MD_FLMTIME |
966 OBD_MD_FLCTIME | OBD_MD_FLSIZE | 975 OBD_MD_FLCTIME | OBD_MD_FLSIZE |
967 OBD_MD_FLDATAVERSION); 976 OBD_MD_FLDATAVERSION | OBD_MD_FLFLAGS);
977 if (dv_flags & LL_DV_WR_FLUSH &&
978 !(oinfo.oi_oa->o_valid & OBD_MD_FLFLAGS &&
979 oinfo.oi_oa->o_flags & OBD_FL_FLUSH))
980 return -ENOTSUPP;
981 }
968 return rc; 982 return rc;
969} 983}
970 984
@@ -980,7 +994,7 @@ int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
980 994
981 lsm = ccc_inode_lsm_get(inode); 995 lsm = ccc_inode_lsm_get(inode);
982 rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode), 996 rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
983 obdo, ioepoch, sync); 997 obdo, ioepoch, sync ? LL_DV_RD_FLUSH : 0);
984 if (rc == 0) { 998 if (rc == 0) {
985 struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi; 999 struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi;
986 1000
@@ -994,50 +1008,57 @@ int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
994 return rc; 1008 return rc;
995} 1009}
996 1010
997int ll_merge_lvb(const struct lu_env *env, struct inode *inode) 1011int ll_merge_attr(const struct lu_env *env, struct inode *inode)
998{ 1012{
999 struct ll_inode_info *lli = ll_i2info(inode); 1013 struct ll_inode_info *lli = ll_i2info(inode);
1000 struct cl_object *obj = lli->lli_clob; 1014 struct cl_object *obj = lli->lli_clob;
1001 struct cl_attr *attr = ccc_env_thread_attr(env); 1015 struct cl_attr *attr = vvp_env_thread_attr(env);
1002 struct ost_lvb lvb; 1016 s64 atime;
1017 s64 mtime;
1018 s64 ctime;
1003 int rc = 0; 1019 int rc = 0;
1004 1020
1005 ll_inode_size_lock(inode); 1021 ll_inode_size_lock(inode);
1022
1006 /* merge timestamps the most recently obtained from mds with 1023 /* merge timestamps the most recently obtained from mds with
1007 * timestamps obtained from osts 1024 * timestamps obtained from osts
1008 */ 1025 */
1009 LTIME_S(inode->i_atime) = lli->lli_lvb.lvb_atime; 1026 LTIME_S(inode->i_atime) = lli->lli_atime;
1010 LTIME_S(inode->i_mtime) = lli->lli_lvb.lvb_mtime; 1027 LTIME_S(inode->i_mtime) = lli->lli_mtime;
1011 LTIME_S(inode->i_ctime) = lli->lli_lvb.lvb_ctime; 1028 LTIME_S(inode->i_ctime) = lli->lli_ctime;
1012 1029
1013 lvb.lvb_size = i_size_read(inode); 1030 mtime = LTIME_S(inode->i_mtime);
1014 lvb.lvb_blocks = inode->i_blocks; 1031 atime = LTIME_S(inode->i_atime);
1015 lvb.lvb_mtime = LTIME_S(inode->i_mtime); 1032 ctime = LTIME_S(inode->i_ctime);
1016 lvb.lvb_atime = LTIME_S(inode->i_atime);
1017 lvb.lvb_ctime = LTIME_S(inode->i_ctime);
1018 1033
1019 cl_object_attr_lock(obj); 1034 cl_object_attr_lock(obj);
1020 rc = cl_object_attr_get(env, obj, attr); 1035 rc = cl_object_attr_get(env, obj, attr);
1021 cl_object_attr_unlock(obj); 1036 cl_object_attr_unlock(obj);
1022 1037
1023 if (rc == 0) { 1038 if (rc != 0)
1024 if (lvb.lvb_atime < attr->cat_atime) 1039 goto out_size_unlock;
1025 lvb.lvb_atime = attr->cat_atime;
1026 if (lvb.lvb_ctime < attr->cat_ctime)
1027 lvb.lvb_ctime = attr->cat_ctime;
1028 if (lvb.lvb_mtime < attr->cat_mtime)
1029 lvb.lvb_mtime = attr->cat_mtime;
1030 1040
1031 CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n", 1041 if (atime < attr->cat_atime)
1032 PFID(&lli->lli_fid), attr->cat_size); 1042 atime = attr->cat_atime;
1033 cl_isize_write_nolock(inode, attr->cat_size);
1034 1043
1035 inode->i_blocks = attr->cat_blocks; 1044 if (ctime < attr->cat_ctime)
1045 ctime = attr->cat_ctime;
1036 1046
1037 LTIME_S(inode->i_mtime) = lvb.lvb_mtime; 1047 if (mtime < attr->cat_mtime)
1038 LTIME_S(inode->i_atime) = lvb.lvb_atime; 1048 mtime = attr->cat_mtime;
1039 LTIME_S(inode->i_ctime) = lvb.lvb_ctime; 1049
1040 } 1050 CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
1051 PFID(&lli->lli_fid), attr->cat_size);
1052
1053 i_size_write(inode, attr->cat_size);
1054
1055 inode->i_blocks = attr->cat_blocks;
1056
1057 LTIME_S(inode->i_mtime) = mtime;
1058 LTIME_S(inode->i_atime) = atime;
1059 LTIME_S(inode->i_ctime) = ctime;
1060
1061out_size_unlock:
1041 ll_inode_size_unlock(inode); 1062 ll_inode_size_unlock(inode);
1042 1063
1043 return rc; 1064 return rc;
@@ -1120,47 +1141,48 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
1120 struct cl_io *io; 1141 struct cl_io *io;
1121 ssize_t result; 1142 ssize_t result;
1122 1143
1144 CDEBUG(D_VFSTRACE, "file: %s, type: %d ppos: %llu, count: %zd\n",
1145 file->f_path.dentry->d_name.name, iot, *ppos, count);
1146
1123restart: 1147restart:
1124 io = ccc_env_thread_io(env); 1148 io = vvp_env_thread_io(env);
1125 ll_io_init(io, file, iot == CIT_WRITE); 1149 ll_io_init(io, file, iot == CIT_WRITE);
1126 1150
1127 if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) { 1151 if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
1128 struct vvp_io *vio = vvp_env_io(env); 1152 struct vvp_io *vio = vvp_env_io(env);
1129 struct ccc_io *cio = ccc_env_io(env);
1130 int write_mutex_locked = 0; 1153 int write_mutex_locked = 0;
1131 1154
1132 cio->cui_fd = LUSTRE_FPRIVATE(file); 1155 vio->vui_fd = LUSTRE_FPRIVATE(file);
1133 vio->cui_io_subtype = args->via_io_subtype; 1156 vio->vui_io_subtype = args->via_io_subtype;
1134 1157
1135 switch (vio->cui_io_subtype) { 1158 switch (vio->vui_io_subtype) {
1136 case IO_NORMAL: 1159 case IO_NORMAL:
1137 cio->cui_iter = args->u.normal.via_iter; 1160 vio->vui_iter = args->u.normal.via_iter;
1138 cio->cui_iocb = args->u.normal.via_iocb; 1161 vio->vui_iocb = args->u.normal.via_iocb;
1139 if ((iot == CIT_WRITE) && 1162 if ((iot == CIT_WRITE) &&
1140 !(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) { 1163 !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
1141 if (mutex_lock_interruptible(&lli-> 1164 if (mutex_lock_interruptible(&lli->
1142 lli_write_mutex)) { 1165 lli_write_mutex)) {
1143 result = -ERESTARTSYS; 1166 result = -ERESTARTSYS;
1144 goto out; 1167 goto out;
1145 } 1168 }
1146 write_mutex_locked = 1; 1169 write_mutex_locked = 1;
1147 } else if (iot == CIT_READ) {
1148 down_read(&lli->lli_trunc_sem);
1149 } 1170 }
1171 down_read(&lli->lli_trunc_sem);
1150 break; 1172 break;
1151 case IO_SPLICE: 1173 case IO_SPLICE:
1152 vio->u.splice.cui_pipe = args->u.splice.via_pipe; 1174 vio->u.splice.vui_pipe = args->u.splice.via_pipe;
1153 vio->u.splice.cui_flags = args->u.splice.via_flags; 1175 vio->u.splice.vui_flags = args->u.splice.via_flags;
1154 break; 1176 break;
1155 default: 1177 default:
1156 CERROR("Unknown IO type - %u\n", vio->cui_io_subtype); 1178 CERROR("Unknown IO type - %u\n", vio->vui_io_subtype);
1157 LBUG(); 1179 LBUG();
1158 } 1180 }
1159 result = cl_io_loop(env, io); 1181 result = cl_io_loop(env, io);
1182 if (args->via_io_subtype == IO_NORMAL)
1183 up_read(&lli->lli_trunc_sem);
1160 if (write_mutex_locked) 1184 if (write_mutex_locked)
1161 mutex_unlock(&lli->lli_write_mutex); 1185 mutex_unlock(&lli->lli_write_mutex);
1162 else if (args->via_io_subtype == IO_NORMAL && iot == CIT_READ)
1163 up_read(&lli->lli_trunc_sem);
1164 } else { 1186 } else {
1165 /* cl_io_rw_init() handled IO */ 1187 /* cl_io_rw_init() handled IO */
1166 result = io->ci_result; 1188 result = io->ci_result;
@@ -1197,6 +1219,7 @@ out:
1197 fd->fd_write_failed = true; 1219 fd->fd_write_failed = true;
1198 } 1220 }
1199 } 1221 }
1222 CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
1200 1223
1201 return result; 1224 return result;
1202} 1225}
@@ -1212,7 +1235,7 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1212 if (IS_ERR(env)) 1235 if (IS_ERR(env))
1213 return PTR_ERR(env); 1236 return PTR_ERR(env);
1214 1237
1215 args = vvp_env_args(env, IO_NORMAL); 1238 args = ll_env_args(env, IO_NORMAL);
1216 args->u.normal.via_iter = to; 1239 args->u.normal.via_iter = to;
1217 args->u.normal.via_iocb = iocb; 1240 args->u.normal.via_iocb = iocb;
1218 1241
@@ -1236,7 +1259,7 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1236 if (IS_ERR(env)) 1259 if (IS_ERR(env))
1237 return PTR_ERR(env); 1260 return PTR_ERR(env);
1238 1261
1239 args = vvp_env_args(env, IO_NORMAL); 1262 args = ll_env_args(env, IO_NORMAL);
1240 args->u.normal.via_iter = from; 1263 args->u.normal.via_iter = from;
1241 args->u.normal.via_iocb = iocb; 1264 args->u.normal.via_iocb = iocb;
1242 1265
@@ -1262,7 +1285,7 @@ static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
1262 if (IS_ERR(env)) 1285 if (IS_ERR(env))
1263 return PTR_ERR(env); 1286 return PTR_ERR(env);
1264 1287
1265 args = vvp_env_args(env, IO_SPLICE); 1288 args = ll_env_args(env, IO_SPLICE);
1266 args->u.splice.via_pipe = pipe; 1289 args->u.splice.via_pipe = pipe;
1267 args->u.splice.via_flags = flags; 1290 args->u.splice.via_flags = flags;
1268 1291
@@ -1354,7 +1377,8 @@ static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
1354} 1377}
1355 1378
1356int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, 1379int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
1357 int flags, struct lov_user_md *lum, int lum_size) 1380 __u64 flags, struct lov_user_md *lum,
1381 int lum_size)
1358{ 1382{
1359 struct lov_stripe_md *lsm = NULL; 1383 struct lov_stripe_md *lsm = NULL;
1360 struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags}; 1384 struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
@@ -1363,8 +1387,8 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
1363 lsm = ccc_inode_lsm_get(inode); 1387 lsm = ccc_inode_lsm_get(inode);
1364 if (lsm) { 1388 if (lsm) {
1365 ccc_inode_lsm_put(inode, lsm); 1389 ccc_inode_lsm_put(inode, lsm);
1366 CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n", 1390 CDEBUG(D_IOCTL, "stripe already exists for inode "DFID"\n",
1367 inode->i_ino); 1391 PFID(ll_inode2fid(inode)));
1368 rc = -EEXIST; 1392 rc = -EEXIST;
1369 goto out; 1393 goto out;
1370 } 1394 }
@@ -1478,7 +1502,7 @@ out:
1478static int ll_lov_setea(struct inode *inode, struct file *file, 1502static int ll_lov_setea(struct inode *inode, struct file *file,
1479 unsigned long arg) 1503 unsigned long arg)
1480{ 1504{
1481 int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE; 1505 __u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
1482 struct lov_user_md *lump; 1506 struct lov_user_md *lump;
1483 int lum_size = sizeof(struct lov_user_md) + 1507 int lum_size = sizeof(struct lov_user_md) +
1484 sizeof(struct lov_user_ost_data); 1508 sizeof(struct lov_user_ost_data);
@@ -1512,7 +1536,7 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file,
1512 struct lov_user_md_v1 __user *lumv1p = (void __user *)arg; 1536 struct lov_user_md_v1 __user *lumv1p = (void __user *)arg;
1513 struct lov_user_md_v3 __user *lumv3p = (void __user *)arg; 1537 struct lov_user_md_v3 __user *lumv3p = (void __user *)arg;
1514 int lum_size, rc; 1538 int lum_size, rc;
1515 int flags = FMODE_WRITE; 1539 __u64 flags = FMODE_WRITE;
1516 1540
1517 /* first try with v1 which is smaller than v3 */ 1541 /* first try with v1 which is smaller than v3 */
1518 lum_size = sizeof(struct lov_user_md_v1); 1542 lum_size = sizeof(struct lov_user_md_v1);
@@ -1561,7 +1585,7 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
1561{ 1585{
1562 struct ll_inode_info *lli = ll_i2info(inode); 1586 struct ll_inode_info *lli = ll_i2info(inode);
1563 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 1587 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
1564 struct ccc_grouplock grouplock; 1588 struct ll_grouplock grouplock;
1565 int rc; 1589 int rc;
1566 1590
1567 if (arg == 0) { 1591 if (arg == 0) {
@@ -1575,14 +1599,14 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
1575 spin_lock(&lli->lli_lock); 1599 spin_lock(&lli->lli_lock);
1576 if (fd->fd_flags & LL_FILE_GROUP_LOCKED) { 1600 if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
1577 CWARN("group lock already existed with gid %lu\n", 1601 CWARN("group lock already existed with gid %lu\n",
1578 fd->fd_grouplock.cg_gid); 1602 fd->fd_grouplock.lg_gid);
1579 spin_unlock(&lli->lli_lock); 1603 spin_unlock(&lli->lli_lock);
1580 return -EINVAL; 1604 return -EINVAL;
1581 } 1605 }
1582 LASSERT(!fd->fd_grouplock.cg_lock); 1606 LASSERT(!fd->fd_grouplock.lg_lock);
1583 spin_unlock(&lli->lli_lock); 1607 spin_unlock(&lli->lli_lock);
1584 1608
1585 rc = cl_get_grouplock(cl_i2info(inode)->lli_clob, 1609 rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
1586 arg, (file->f_flags & O_NONBLOCK), &grouplock); 1610 arg, (file->f_flags & O_NONBLOCK), &grouplock);
1587 if (rc) 1611 if (rc)
1588 return rc; 1612 return rc;
@@ -1608,7 +1632,7 @@ static int ll_put_grouplock(struct inode *inode, struct file *file,
1608{ 1632{
1609 struct ll_inode_info *lli = ll_i2info(inode); 1633 struct ll_inode_info *lli = ll_i2info(inode);
1610 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 1634 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
1611 struct ccc_grouplock grouplock; 1635 struct ll_grouplock grouplock;
1612 1636
1613 spin_lock(&lli->lli_lock); 1637 spin_lock(&lli->lli_lock);
1614 if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) { 1638 if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
@@ -1616,11 +1640,11 @@ static int ll_put_grouplock(struct inode *inode, struct file *file,
1616 CWARN("no group lock held\n"); 1640 CWARN("no group lock held\n");
1617 return -EINVAL; 1641 return -EINVAL;
1618 } 1642 }
1619 LASSERT(fd->fd_grouplock.cg_lock); 1643 LASSERT(fd->fd_grouplock.lg_lock);
1620 1644
1621 if (fd->fd_grouplock.cg_gid != arg) { 1645 if (fd->fd_grouplock.lg_gid != arg) {
1622 CWARN("group lock %lu doesn't match current id %lu\n", 1646 CWARN("group lock %lu doesn't match current id %lu\n",
1623 arg, fd->fd_grouplock.cg_gid); 1647 arg, fd->fd_grouplock.lg_gid);
1624 spin_unlock(&lli->lli_lock); 1648 spin_unlock(&lli->lli_lock);
1625 return -EINVAL; 1649 return -EINVAL;
1626 } 1650 }
@@ -1861,11 +1885,12 @@ error:
1861 * This value is computed using stripe object version on OST. 1885 * This value is computed using stripe object version on OST.
1862 * Version is computed using server side locking. 1886 * Version is computed using server side locking.
1863 * 1887 *
1864 * @param extent_lock Take extent lock. Not needed if a process is already 1888 * @param sync if do sync on the OST side;
1865 * holding the OST object group locks. 1889 * 0: no sync
1890 * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
1891 * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
1866 */ 1892 */
1867int ll_data_version(struct inode *inode, __u64 *data_version, 1893int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
1868 int extent_lock)
1869{ 1894{
1870 struct lov_stripe_md *lsm = NULL; 1895 struct lov_stripe_md *lsm = NULL;
1871 struct ll_sb_info *sbi = ll_i2sbi(inode); 1896 struct ll_sb_info *sbi = ll_i2sbi(inode);
@@ -1887,7 +1912,7 @@ int ll_data_version(struct inode *inode, __u64 *data_version,
1887 goto out; 1912 goto out;
1888 } 1913 }
1889 1914
1890 rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, extent_lock); 1915 rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, flags);
1891 if (rc == 0) { 1916 if (rc == 0) {
1892 if (!(obdo->o_valid & OBD_MD_FLDATAVERSION)) 1917 if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
1893 rc = -EOPNOTSUPP; 1918 rc = -EOPNOTSUPP;
@@ -1923,7 +1948,7 @@ int ll_hsm_release(struct inode *inode)
1923 } 1948 }
1924 1949
1925 /* Grab latest data_version and [am]time values */ 1950 /* Grab latest data_version and [am]time values */
1926 rc = ll_data_version(inode, &data_version, 1); 1951 rc = ll_data_version(inode, &data_version, LL_DV_WR_FLUSH);
1927 if (rc != 0) 1952 if (rc != 0)
1928 goto out; 1953 goto out;
1929 1954
@@ -1933,7 +1958,7 @@ int ll_hsm_release(struct inode *inode)
1933 goto out; 1958 goto out;
1934 } 1959 }
1935 1960
1936 ll_merge_lvb(env, inode); 1961 ll_merge_attr(env, inode);
1937 cl_env_nested_put(&nest, env); 1962 cl_env_nested_put(&nest, env);
1938 1963
1939 /* Release the file. 1964 /* Release the file.
@@ -2227,8 +2252,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2227 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 2252 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
2228 int flags, rc; 2253 int flags, rc;
2229 2254
2230 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%x\n", inode->i_ino, 2255 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),cmd=%x\n",
2231 inode->i_generation, inode, cmd); 2256 PFID(ll_inode2fid(inode)), inode, cmd);
2232 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1); 2257 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
2233 2258
2234 /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */ 2259 /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
@@ -2331,9 +2356,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2331 if (copy_from_user(&idv, (char __user *)arg, sizeof(idv))) 2356 if (copy_from_user(&idv, (char __user *)arg, sizeof(idv)))
2332 return -EFAULT; 2357 return -EFAULT;
2333 2358
2334 rc = ll_data_version(inode, &idv.idv_version, 2359 idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH;
2335 !(idv.idv_flags & LL_DV_NOFLUSH)); 2360 rc = ll_data_version(inode, &idv.idv_version, idv.idv_flags);
2336
2337 if (rc == 0 && copy_to_user((char __user *)arg, &idv, 2361 if (rc == 0 && copy_to_user((char __user *)arg, &idv,
2338 sizeof(idv))) 2362 sizeof(idv)))
2339 return -EFAULT; 2363 return -EFAULT;
@@ -2499,7 +2523,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2499 rc = och->och_flags & 2523 rc = och->och_flags &
2500 (FMODE_READ | FMODE_WRITE); 2524 (FMODE_READ | FMODE_WRITE);
2501 unlock_res_and_lock(lock); 2525 unlock_res_and_lock(lock);
2502 ldlm_lock_put(lock); 2526 LDLM_LOCK_PUT(lock);
2503 } 2527 }
2504 } 2528 }
2505 mutex_unlock(&lli->lli_och_mutex); 2529 mutex_unlock(&lli->lli_och_mutex);
@@ -2537,9 +2561,8 @@ static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
2537 2561
2538 retval = offset + ((origin == SEEK_END) ? i_size_read(inode) : 2562 retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
2539 (origin == SEEK_CUR) ? file->f_pos : 0); 2563 (origin == SEEK_CUR) ? file->f_pos : 0);
2540 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), to=%llu=%#llx(%d)\n", 2564 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), to=%llu=%#llx(%d)\n",
2541 inode->i_ino, inode->i_generation, inode, retval, retval, 2565 PFID(ll_inode2fid(inode)), inode, retval, retval, origin);
2542 origin);
2543 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1); 2566 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1);
2544 2567
2545 if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) { 2568 if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) {
@@ -2603,8 +2626,8 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
2603 if (IS_ERR(env)) 2626 if (IS_ERR(env))
2604 return PTR_ERR(env); 2627 return PTR_ERR(env);
2605 2628
2606 io = ccc_env_thread_io(env); 2629 io = vvp_env_thread_io(env);
2607 io->ci_obj = cl_i2info(inode)->lli_clob; 2630 io->ci_obj = ll_i2info(inode)->lli_clob;
2608 io->ci_ignore_layout = ignore_layout; 2631 io->ci_ignore_layout = ignore_layout;
2609 2632
2610 /* initialize parameters for sync */ 2633 /* initialize parameters for sync */
@@ -2634,8 +2657,8 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2634 struct ptlrpc_request *req; 2657 struct ptlrpc_request *req;
2635 int rc, err; 2658 int rc, err;
2636 2659
2637 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino, 2660 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2638 inode->i_generation, inode); 2661 PFID(ll_inode2fid(inode)), inode);
2639 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1); 2662 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1);
2640 2663
2641 rc = filemap_write_and_wait_range(inode->i_mapping, start, end); 2664 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
@@ -2693,8 +2716,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
2693 int rc; 2716 int rc;
2694 int rc2 = 0; 2717 int rc2 = 0;
2695 2718
2696 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n", 2719 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
2697 inode->i_ino, file_lock); 2720 PFID(ll_inode2fid(inode)), file_lock);
2698 2721
2699 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1); 2722 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1);
2700 2723
@@ -2777,9 +2800,9 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
2777 if (IS_ERR(op_data)) 2800 if (IS_ERR(op_data))
2778 return PTR_ERR(op_data); 2801 return PTR_ERR(op_data);
2779 2802
2780 CDEBUG(D_DLMTRACE, "inode=%lu, pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n", 2803 CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
2781 inode->i_ino, flock.l_flock.pid, flags, einfo.ei_mode, 2804 PFID(ll_inode2fid(inode)), flock.l_flock.pid, flags,
2782 flock.l_flock.start, flock.l_flock.end); 2805 einfo.ei_mode, flock.l_flock.start, flock.l_flock.end);
2783 2806
2784 rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL, 2807 rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL,
2785 op_data, &lockh, &flock, 0, NULL /* req */, flags); 2808 op_data, &lockh, &flock, 0, NULL /* req */, flags);
@@ -2901,8 +2924,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
2901 struct obd_export *exp; 2924 struct obd_export *exp;
2902 int rc = 0; 2925 int rc = 0;
2903 2926
2904 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%pd\n", 2927 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%pd\n",
2905 inode->i_ino, inode->i_generation, inode, dentry); 2928 PFID(ll_inode2fid(inode)), inode, dentry);
2906 2929
2907 exp = ll_i2mdexp(inode); 2930 exp = ll_i2mdexp(inode);
2908 2931
@@ -2998,9 +3021,9 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
2998 3021
2999 /* if object isn't regular file, don't validate size */ 3022 /* if object isn't regular file, don't validate size */
3000 if (!S_ISREG(inode->i_mode)) { 3023 if (!S_ISREG(inode->i_mode)) {
3001 LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_lvb.lvb_atime; 3024 LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_atime;
3002 LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_lvb.lvb_mtime; 3025 LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime;
3003 LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_lvb.lvb_ctime; 3026 LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime;
3004 } else { 3027 } else {
3005 /* In case of restore, the MDT has the right size and has 3028 /* In case of restore, the MDT has the right size and has
3006 * already send it back without granting the layout lock, 3029 * already send it back without granting the layout lock,
@@ -3124,8 +3147,8 @@ int ll_inode_permission(struct inode *inode, int mask)
3124 return rc; 3147 return rc;
3125 } 3148 }
3126 3149
3127 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), inode mode %x mask %o\n", 3150 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
3128 inode->i_ino, inode->i_generation, inode, inode->i_mode, mask); 3151 PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
3129 3152
3130 if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT) 3153 if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
3131 return lustre_check_remote_perm(inode, mask); 3154 return lustre_check_remote_perm(inode, mask);
@@ -3335,10 +3358,10 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
3335 int rc; 3358 int rc;
3336 3359
3337 CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n", 3360 CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
3338 PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY), 3361 PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
3339 lock->l_lvb_data, lock->l_lvb_len); 3362 lock->l_lvb_data, lock->l_lvb_len);
3340 3363
3341 if (lock->l_lvb_data && (lock->l_flags & LDLM_FL_LVB_READY)) 3364 if (lock->l_lvb_data && ldlm_is_lvb_ready(lock))
3342 return 0; 3365 return 0;
3343 3366
3344 /* if layout lock was granted right away, the layout is returned 3367 /* if layout lock was granted right away, the layout is returned
@@ -3415,14 +3438,14 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
3415 LASSERT(lock); 3438 LASSERT(lock);
3416 LASSERT(ldlm_has_layout(lock)); 3439 LASSERT(ldlm_has_layout(lock));
3417 3440
3418 LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d", 3441 LDLM_DEBUG(lock, "File "DFID"(%p) being reconfigured: %d",
3419 inode, PFID(&lli->lli_fid), reconf); 3442 PFID(&lli->lli_fid), inode, reconf);
3420 3443
3421 /* in case this is a caching lock and reinstate with new inode */ 3444 /* in case this is a caching lock and reinstate with new inode */
3422 md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL); 3445 md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL);
3423 3446
3424 lock_res_and_lock(lock); 3447 lock_res_and_lock(lock);
3425 lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY); 3448 lvb_ready = ldlm_is_lvb_ready(lock);
3426 unlock_res_and_lock(lock); 3449 unlock_res_and_lock(lock);
3427 /* checking lvb_ready is racy but this is okay. The worst case is 3450 /* checking lvb_ready is racy but this is okay. The worst case is
3428 * that multi processes may configure the file on the same time. 3451 * that multi processes may configure the file on the same time.
@@ -3487,9 +3510,9 @@ out:
3487 3510
3488 /* wait for IO to complete if it's still being used. */ 3511 /* wait for IO to complete if it's still being used. */
3489 if (wait_layout) { 3512 if (wait_layout) {
3490 CDEBUG(D_INODE, "%s: %p/" DFID " wait for layout reconf.\n", 3513 CDEBUG(D_INODE, "%s: "DFID"(%p) wait for layout reconf\n",
3491 ll_get_fsname(inode->i_sb, NULL, 0), 3514 ll_get_fsname(inode->i_sb, NULL, 0),
3492 inode, PFID(&lli->lli_fid)); 3515 PFID(&lli->lli_fid), inode);
3493 3516
3494 memset(&conf, 0, sizeof(conf)); 3517 memset(&conf, 0, sizeof(conf));
3495 conf.coc_opc = OBJECT_CONF_WAIT; 3518 conf.coc_opc = OBJECT_CONF_WAIT;
@@ -3498,7 +3521,8 @@ out:
3498 if (rc == 0) 3521 if (rc == 0)
3499 rc = -EAGAIN; 3522 rc = -EAGAIN;
3500 3523
3501 CDEBUG(D_INODE, "file: " DFID " waiting layout return: %d.\n", 3524 CDEBUG(D_INODE, "%s: file="DFID" waiting layout return: %d.\n",
3525 ll_get_fsname(inode->i_sb, NULL, 0),
3502 PFID(&lli->lli_fid), rc); 3526 PFID(&lli->lli_fid), rc);
3503 } 3527 }
3504 return rc; 3528 return rc;
@@ -3571,9 +3595,9 @@ again:
3571 it.it_op = IT_LAYOUT; 3595 it.it_op = IT_LAYOUT;
3572 lockh.cookie = 0ULL; 3596 lockh.cookie = 0ULL;
3573 3597
3574 LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/" DFID "", 3598 LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file "DFID"(%p)",
3575 ll_get_fsname(inode->i_sb, NULL, 0), inode, 3599 ll_get_fsname(inode->i_sb, NULL, 0),
3576 PFID(&lli->lli_fid)); 3600 PFID(&lli->lli_fid), inode);
3577 3601
3578 rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh, 3602 rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
3579 NULL, 0, NULL, 0); 3603 NULL, 0, NULL, 0);
@@ -3601,7 +3625,7 @@ again:
3601/** 3625/**
3602 * This function send a restore request to the MDT 3626 * This function send a restore request to the MDT
3603 */ 3627 */
3604int ll_layout_restore(struct inode *inode) 3628int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)
3605{ 3629{
3606 struct hsm_user_request *hur; 3630 struct hsm_user_request *hur;
3607 int len, rc; 3631 int len, rc;
@@ -3617,9 +3641,10 @@ int ll_layout_restore(struct inode *inode)
3617 hur->hur_request.hr_flags = 0; 3641 hur->hur_request.hr_flags = 0;
3618 memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid, 3642 memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
3619 sizeof(hur->hur_user_item[0].hui_fid)); 3643 sizeof(hur->hur_user_item[0].hui_fid));
3620 hur->hur_user_item[0].hui_extent.length = -1; 3644 hur->hur_user_item[0].hui_extent.offset = offset;
3645 hur->hur_user_item[0].hui_extent.length = length;
3621 hur->hur_request.hr_itemcount = 1; 3646 hur->hur_request.hr_itemcount = 1;
3622 rc = obd_iocontrol(LL_IOC_HSM_REQUEST, cl_i2sbi(inode)->ll_md_exp, 3647 rc = obd_iocontrol(LL_IOC_HSM_REQUEST, ll_i2sbi(inode)->ll_md_exp,
3623 len, hur, NULL); 3648 len, hur, NULL);
3624 kfree(hur); 3649 kfree(hur);
3625 return rc; 3650 return rc;
diff --git a/drivers/staging/lustre/lustre/lclient/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index c4e8a0878ac8..d8ea75424e2f 100644
--- a/drivers/staging/lustre/lustre/lclient/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -52,7 +52,6 @@
52#include <linux/file.h> 52#include <linux/file.h>
53 53
54#include "../include/cl_object.h" 54#include "../include/cl_object.h"
55#include "../include/lclient.h"
56#include "../llite/llite_internal.h" 55#include "../llite/llite_internal.h"
57 56
58static const struct cl_lock_descr whole_file = { 57static const struct cl_lock_descr whole_file = {
@@ -70,14 +69,14 @@ static const struct cl_lock_descr whole_file = {
70blkcnt_t dirty_cnt(struct inode *inode) 69blkcnt_t dirty_cnt(struct inode *inode)
71{ 70{
72 blkcnt_t cnt = 0; 71 blkcnt_t cnt = 0;
73 struct ccc_object *vob = cl_inode2ccc(inode); 72 struct vvp_object *vob = cl_inode2vvp(inode);
74 void *results[1]; 73 void *results[1];
75 74
76 if (inode->i_mapping) 75 if (inode->i_mapping)
77 cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->page_tree, 76 cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->page_tree,
78 results, 0, 1, 77 results, 0, 1,
79 PAGECACHE_TAG_DIRTY); 78 PAGECACHE_TAG_DIRTY);
80 if (cnt == 0 && atomic_read(&vob->cob_mmap_cnt) > 0) 79 if (cnt == 0 && atomic_read(&vob->vob_mmap_cnt) > 0)
81 cnt = 1; 80 cnt = 1;
82 81
83 return (cnt > 0) ? 1 : 0; 82 return (cnt > 0) ? 1 : 0;
@@ -86,17 +85,17 @@ blkcnt_t dirty_cnt(struct inode *inode)
86int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io, 85int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
87 struct inode *inode, struct cl_object *clob, int agl) 86 struct inode *inode, struct cl_object *clob, int agl)
88{ 87{
89 struct cl_lock_descr *descr = &ccc_env_info(env)->cti_descr; 88 struct ll_inode_info *lli = ll_i2info(inode);
90 struct cl_inode_info *lli = cl_i2info(inode);
91 const struct lu_fid *fid = lu_object_fid(&clob->co_lu); 89 const struct lu_fid *fid = lu_object_fid(&clob->co_lu);
92 struct ccc_io *cio = ccc_env_io(env);
93 struct cl_lock *lock;
94 int result; 90 int result;
95 91
96 result = 0; 92 result = 0;
97 if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) { 93 if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) {
98 CDEBUG(D_DLMTRACE, "Glimpsing inode "DFID"\n", PFID(fid)); 94 CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid));
99 if (lli->lli_has_smd) { 95 if (lli->lli_has_smd) {
96 struct cl_lock *lock = vvp_env_lock(env);
97 struct cl_lock_descr *descr = &lock->cll_descr;
98
100 /* NOTE: this looks like DLM lock request, but it may 99 /* NOTE: this looks like DLM lock request, but it may
101 * not be one. Due to CEF_ASYNC flag (translated 100 * not be one. Due to CEF_ASYNC flag (translated
102 * to LDLM_FL_HAS_INTENT by osc), this is 101 * to LDLM_FL_HAS_INTENT by osc), this is
@@ -113,11 +112,10 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
113 */ 112 */
114 *descr = whole_file; 113 *descr = whole_file;
115 descr->cld_obj = clob; 114 descr->cld_obj = clob;
116 descr->cld_mode = CLM_PHANTOM; 115 descr->cld_mode = CLM_READ;
117 descr->cld_enq_flags = CEF_ASYNC | CEF_MUST; 116 descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
118 if (agl) 117 if (agl)
119 descr->cld_enq_flags |= CEF_AGL; 118 descr->cld_enq_flags |= CEF_AGL;
120 cio->cui_glimpse = 1;
121 /* 119 /*
122 * CEF_ASYNC is used because glimpse sub-locks cannot 120 * CEF_ASYNC is used because glimpse sub-locks cannot
123 * deadlock (because they never conflict with other 121 * deadlock (because they never conflict with other
@@ -126,21 +124,13 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
126 * CEF_MUST protects glimpse lock from conversion into 124 * CEF_MUST protects glimpse lock from conversion into
127 * a lockless mode. 125 * a lockless mode.
128 */ 126 */
129 lock = cl_lock_request(env, io, descr, "glimpse", 127 result = cl_lock_request(env, io, lock);
130 current); 128 if (result < 0)
131 cio->cui_glimpse = 0; 129 return result;
132
133 if (!lock)
134 return 0;
135
136 if (IS_ERR(lock))
137 return PTR_ERR(lock);
138 130
139 LASSERT(agl == 0); 131 if (!agl) {
140 result = cl_wait(env, lock); 132 ll_merge_attr(env, inode);
141 if (result == 0) { 133 if (i_size_read(inode) > 0 &&
142 cl_merge_lvb(env, inode);
143 if (cl_isize_read(inode) > 0 &&
144 inode->i_blocks == 0) { 134 inode->i_blocks == 0) {
145 /* 135 /*
146 * LU-417: Add dirty pages block count 136 * LU-417: Add dirty pages block count
@@ -150,12 +140,11 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
150 */ 140 */
151 inode->i_blocks = dirty_cnt(inode); 141 inode->i_blocks = dirty_cnt(inode);
152 } 142 }
153 cl_unuse(env, lock);
154 } 143 }
155 cl_lock_release(env, lock, "glimpse", current); 144 cl_lock_release(env, lock);
156 } else { 145 } else {
157 CDEBUG(D_DLMTRACE, "No objects for inode\n"); 146 CDEBUG(D_DLMTRACE, "No objects for inode\n");
158 cl_merge_lvb(env, inode); 147 ll_merge_attr(env, inode);
159 } 148 }
160 } 149 }
161 150
@@ -167,22 +156,24 @@ static int cl_io_get(struct inode *inode, struct lu_env **envout,
167{ 156{
168 struct lu_env *env; 157 struct lu_env *env;
169 struct cl_io *io; 158 struct cl_io *io;
170 struct cl_inode_info *lli = cl_i2info(inode); 159 struct ll_inode_info *lli = ll_i2info(inode);
171 struct cl_object *clob = lli->lli_clob; 160 struct cl_object *clob = lli->lli_clob;
172 int result; 161 int result;
173 162
174 if (S_ISREG(cl_inode_mode(inode))) { 163 if (S_ISREG(inode->i_mode)) {
175 env = cl_env_get(refcheck); 164 env = cl_env_get(refcheck);
176 if (!IS_ERR(env)) { 165 if (!IS_ERR(env)) {
177 io = ccc_env_thread_io(env); 166 io = vvp_env_thread_io(env);
178 io->ci_obj = clob; 167 io->ci_obj = clob;
179 *envout = env; 168 *envout = env;
180 *ioout = io; 169 *ioout = io;
181 result = 1; 170 result = 1;
182 } else 171 } else {
183 result = PTR_ERR(env); 172 result = PTR_ERR(env);
184 } else 173 }
174 } else {
185 result = 0; 175 result = 0;
176 }
186 return result; 177 return result;
187} 178}
188 179
@@ -231,14 +222,11 @@ int cl_local_size(struct inode *inode)
231{ 222{
232 struct lu_env *env = NULL; 223 struct lu_env *env = NULL;
233 struct cl_io *io = NULL; 224 struct cl_io *io = NULL;
234 struct ccc_thread_info *cti;
235 struct cl_object *clob; 225 struct cl_object *clob;
236 struct cl_lock_descr *descr;
237 struct cl_lock *lock;
238 int result; 226 int result;
239 int refcheck; 227 int refcheck;
240 228
241 if (!cl_i2info(inode)->lli_has_smd) 229 if (!ll_i2info(inode)->lli_has_smd)
242 return 0; 230 return 0;
243 231
244 result = cl_io_get(inode, &env, &io, &refcheck); 232 result = cl_io_get(inode, &env, &io, &refcheck);
@@ -247,22 +235,19 @@ int cl_local_size(struct inode *inode)
247 235
248 clob = io->ci_obj; 236 clob = io->ci_obj;
249 result = cl_io_init(env, io, CIT_MISC, clob); 237 result = cl_io_init(env, io, CIT_MISC, clob);
250 if (result > 0) 238 if (result > 0) {
251 result = io->ci_result; 239 result = io->ci_result;
252 else if (result == 0) { 240 } else if (result == 0) {
253 cti = ccc_env_info(env); 241 struct cl_lock *lock = vvp_env_lock(env);
254 descr = &cti->cti_descr; 242
255 243 lock->cll_descr = whole_file;
256 *descr = whole_file; 244 lock->cll_descr.cld_enq_flags = CEF_PEEK;
257 descr->cld_obj = clob; 245 lock->cll_descr.cld_obj = clob;
258 lock = cl_lock_peek(env, io, descr, "localsize", current); 246 result = cl_lock_request(env, io, lock);
259 if (lock) { 247 if (result == 0) {
260 cl_merge_lvb(env, inode); 248 ll_merge_attr(env, inode);
261 cl_unuse(env, lock); 249 cl_lock_release(env, lock);
262 cl_lock_release(env, lock, "localsize", current); 250 }
263 result = 0;
264 } else
265 result = -ENODATA;
266 } 251 }
267 cl_io_fini(env, io); 252 cl_io_fini(env, io);
268 cl_env_put(env, &refcheck); 253 cl_env_put(env, &refcheck);
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
new file mode 100644
index 000000000000..6c00715b438f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -0,0 +1,327 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26/*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2015, Intel Corporation.
31 */
32/*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * cl code shared between vvp and liblustre (and other Lustre clients in the
37 * future).
38 *
39 * Author: Nikita Danilov <nikita.danilov@sun.com>
40 */
41
42#define DEBUG_SUBSYSTEM S_LLITE
43
44#include "../../include/linux/libcfs/libcfs.h"
45# include <linux/fs.h>
46# include <linux/sched.h>
47# include <linux/mm.h>
48# include <linux/quotaops.h>
49# include <linux/highmem.h>
50# include <linux/pagemap.h>
51# include <linux/rbtree.h>
52
53#include "../include/obd.h"
54#include "../include/obd_support.h"
55#include "../include/lustre_fid.h"
56#include "../include/lustre_lite.h"
57#include "../include/lustre_dlm.h"
58#include "../include/lustre_ver.h"
59#include "../include/lustre_mdc.h"
60#include "../include/cl_object.h"
61
62#include "../llite/llite_internal.h"
63
64/*
65 * ccc_ prefix stands for "Common Client Code".
66 */
67
68/*****************************************************************************
69 *
70 * Vvp device and device type functions.
71 *
72 */
73
74/**
75 * An `emergency' environment used by cl_inode_fini() when cl_env_get()
76 * fails. Access to this environment is serialized by cl_inode_fini_guard
77 * mutex.
78 */
79struct lu_env *cl_inode_fini_env;
80int cl_inode_fini_refcheck;
81
82/**
83 * A mutex serializing calls to slp_inode_fini() under extreme memory
84 * pressure, when environments cannot be allocated.
85 */
86static DEFINE_MUTEX(cl_inode_fini_guard);
87
88int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
89{
90 struct lu_env *env;
91 struct cl_io *io;
92 int result;
93 int refcheck;
94
95 env = cl_env_get(&refcheck);
96 if (IS_ERR(env))
97 return PTR_ERR(env);
98
99 io = vvp_env_thread_io(env);
100 io->ci_obj = ll_i2info(inode)->lli_clob;
101
102 io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
103 io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
104 io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
105 io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
106 io->u.ci_setattr.sa_valid = attr->ia_valid;
107
108again:
109 if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
110 struct vvp_io *vio = vvp_env_io(env);
111
112 if (attr->ia_valid & ATTR_FILE)
113 /* populate the file descriptor for ftruncate to honor
114 * group lock - see LU-787
115 */
116 vio->vui_fd = LUSTRE_FPRIVATE(attr->ia_file);
117
118 result = cl_io_loop(env, io);
119 } else {
120 result = io->ci_result;
121 }
122 cl_io_fini(env, io);
123 if (unlikely(io->ci_need_restart))
124 goto again;
125 /* HSM import case: file is released, cannot be restored
126 * no need to fail except if restore registration failed
127 * with -ENODATA
128 */
129 if (result == -ENODATA && io->ci_restore_needed &&
130 io->ci_result != -ENODATA)
131 result = 0;
132 cl_env_put(env, &refcheck);
133 return result;
134}
135
136/**
137 * Initialize or update CLIO structures for regular files when new
138 * meta-data arrives from the server.
139 *
140 * \param inode regular file inode
141 * \param md new file metadata from MDS
142 * - allocates cl_object if necessary,
143 * - updated layout, if object was already here.
144 */
145int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
146{
147 struct lu_env *env;
148 struct ll_inode_info *lli;
149 struct cl_object *clob;
150 struct lu_site *site;
151 struct lu_fid *fid;
152 struct cl_object_conf conf = {
153 .coc_inode = inode,
154 .u = {
155 .coc_md = md
156 }
157 };
158 int result = 0;
159 int refcheck;
160
161 LASSERT(md->body->valid & OBD_MD_FLID);
162 LASSERT(S_ISREG(inode->i_mode));
163
164 env = cl_env_get(&refcheck);
165 if (IS_ERR(env))
166 return PTR_ERR(env);
167
168 site = ll_i2sbi(inode)->ll_site;
169 lli = ll_i2info(inode);
170 fid = &lli->lli_fid;
171 LASSERT(fid_is_sane(fid));
172
173 if (!lli->lli_clob) {
174 /* clob is slave of inode, empty lli_clob means for new inode,
175 * there is no clob in cache with the given fid, so it is
176 * unnecessary to perform lookup-alloc-lookup-insert, just
177 * alloc and insert directly.
178 */
179 LASSERT(inode->i_state & I_NEW);
180 conf.coc_lu.loc_flags = LOC_F_NEW;
181 clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
182 fid, &conf);
183 if (!IS_ERR(clob)) {
184 /*
185 * No locking is necessary, as new inode is
186 * locked by I_NEW bit.
187 */
188 lli->lli_clob = clob;
189 lli->lli_has_smd = lsm_has_objects(md->lsm);
190 lu_object_ref_add(&clob->co_lu, "inode", inode);
191 } else {
192 result = PTR_ERR(clob);
193 }
194 } else {
195 result = cl_conf_set(env, lli->lli_clob, &conf);
196 }
197
198 cl_env_put(env, &refcheck);
199
200 if (result != 0)
201 CERROR("Failure to initialize cl object " DFID ": %d\n",
202 PFID(fid), result);
203 return result;
204}
205
206/**
207 * Wait for others drop their references of the object at first, then we drop
208 * the last one, which will lead to the object be destroyed immediately.
209 * Must be called after cl_object_kill() against this object.
210 *
211 * The reason we want to do this is: destroying top object will wait for sub
212 * objects being destroyed first, so we can't let bottom layer (e.g. from ASTs)
213 * to initiate top object destroying which may deadlock. See bz22520.
214 */
215static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
216{
217 struct lu_object_header *header = obj->co_lu.lo_header;
218 wait_queue_t waiter;
219
220 if (unlikely(atomic_read(&header->loh_ref) != 1)) {
221 struct lu_site *site = obj->co_lu.lo_dev->ld_site;
222 struct lu_site_bkt_data *bkt;
223
224 bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
225
226 init_waitqueue_entry(&waiter, current);
227 add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
228
229 while (1) {
230 set_current_state(TASK_UNINTERRUPTIBLE);
231 if (atomic_read(&header->loh_ref) == 1)
232 break;
233 schedule();
234 }
235
236 set_current_state(TASK_RUNNING);
237 remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
238 }
239
240 cl_object_put(env, obj);
241}
242
243void cl_inode_fini(struct inode *inode)
244{
245 struct lu_env *env;
246 struct ll_inode_info *lli = ll_i2info(inode);
247 struct cl_object *clob = lli->lli_clob;
248 int refcheck;
249 int emergency;
250
251 if (clob) {
252 void *cookie;
253
254 cookie = cl_env_reenter();
255 env = cl_env_get(&refcheck);
256 emergency = IS_ERR(env);
257 if (emergency) {
258 mutex_lock(&cl_inode_fini_guard);
259 LASSERT(cl_inode_fini_env);
260 cl_env_implant(cl_inode_fini_env, &refcheck);
261 env = cl_inode_fini_env;
262 }
263 /*
264 * cl_object cache is a slave to inode cache (which, in turn
265 * is a slave to dentry cache), don't keep cl_object in memory
266 * when its master is evicted.
267 */
268 cl_object_kill(env, clob);
269 lu_object_ref_del(&clob->co_lu, "inode", inode);
270 cl_object_put_last(env, clob);
271 lli->lli_clob = NULL;
272 if (emergency) {
273 cl_env_unplant(cl_inode_fini_env, &refcheck);
274 mutex_unlock(&cl_inode_fini_guard);
275 } else {
276 cl_env_put(env, &refcheck);
277 }
278 cl_env_reexit(cookie);
279 }
280}
281
282/**
283 * build inode number from passed @fid
284 */
285__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
286{
287 if (BITS_PER_LONG == 32 || api32)
288 return fid_flatten32(fid);
289 else
290 return fid_flatten(fid);
291}
292
293/**
294 * build inode generation from passed @fid. If our FID overflows the 32-bit
295 * inode number then return a non-zero generation to distinguish them.
296 */
297__u32 cl_fid_build_gen(const struct lu_fid *fid)
298{
299 __u32 gen;
300
301 if (fid_is_igif(fid)) {
302 gen = lu_igif_gen(fid);
303 return gen;
304 }
305
306 gen = fid_flatten(fid) >> 32;
307 return gen;
308}
309
310/* lsm is unreliable after hsm implementation as layout can be changed at
311 * any time. This is only to support old, non-clio-ized interfaces. It will
312 * cause deadlock if clio operations are called with this extra layout refcount
313 * because in case the layout changed during the IO, ll_layout_refresh() will
314 * have to wait for the refcount to become zero to destroy the older layout.
315 *
316 * Notice that the lsm returned by this function may not be valid unless called
317 * inside layout lock - MDS_INODELOCK_LAYOUT.
318 */
319struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
320{
321 return lov_lsm_get(ll_i2info(inode)->lli_clob);
322}
323
324inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
325{
326 lov_lsm_put(ll_i2info(inode)->lli_clob, lsm);
327}
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index d80bcedd78d1..12f3e71f48c2 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -41,9 +41,9 @@
41#include "../include/obd_support.h" 41#include "../include/obd_support.h"
42#include "../include/obd.h" 42#include "../include/obd.h"
43#include "../include/cl_object.h" 43#include "../include/cl_object.h"
44#include "../include/lclient.h"
45 44
46#include "../include/lustre_lite.h" 45#include "../include/lustre_lite.h"
46#include "llite_internal.h"
47 47
48/* Initialize the default and maximum LOV EA and cookie sizes. This allows 48/* Initialize the default and maximum LOV EA and cookie sizes. This allows
49 * us to make MDS RPCs with large enough reply buffers to hold the 49 * us to make MDS RPCs with large enough reply buffers to hold the
@@ -126,7 +126,7 @@ int cl_ocd_update(struct obd_device *host,
126#define GROUPLOCK_SCOPE "grouplock" 126#define GROUPLOCK_SCOPE "grouplock"
127 127
128int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock, 128int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
129 struct ccc_grouplock *cg) 129 struct ll_grouplock *cg)
130{ 130{
131 struct lu_env *env; 131 struct lu_env *env;
132 struct cl_io *io; 132 struct cl_io *io;
@@ -140,20 +140,22 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
140 if (IS_ERR(env)) 140 if (IS_ERR(env))
141 return PTR_ERR(env); 141 return PTR_ERR(env);
142 142
143 io = ccc_env_thread_io(env); 143 io = vvp_env_thread_io(env);
144 io->ci_obj = obj; 144 io->ci_obj = obj;
145 io->ci_ignore_layout = 1; 145 io->ci_ignore_layout = 1;
146 146
147 rc = cl_io_init(env, io, CIT_MISC, io->ci_obj); 147 rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
148 if (rc) { 148 if (rc != 0) {
149 cl_io_fini(env, io);
150 cl_env_put(env, &refcheck);
149 /* Does not make sense to take GL for released layout */ 151 /* Does not make sense to take GL for released layout */
150 if (rc > 0) 152 if (rc > 0)
151 rc = -ENOTSUPP; 153 rc = -ENOTSUPP;
152 cl_env_put(env, &refcheck);
153 return rc; 154 return rc;
154 } 155 }
155 156
156 descr = &ccc_env_info(env)->cti_descr; 157 lock = vvp_env_lock(env);
158 descr = &lock->cll_descr;
157 descr->cld_obj = obj; 159 descr->cld_obj = obj;
158 descr->cld_start = 0; 160 descr->cld_start = 0;
159 descr->cld_end = CL_PAGE_EOF; 161 descr->cld_end = CL_PAGE_EOF;
@@ -163,38 +165,37 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
163 enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0); 165 enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
164 descr->cld_enq_flags = enqflags; 166 descr->cld_enq_flags = enqflags;
165 167
166 lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current); 168 rc = cl_lock_request(env, io, lock);
167 if (IS_ERR(lock)) { 169 if (rc < 0) {
168 cl_io_fini(env, io); 170 cl_io_fini(env, io);
169 cl_env_put(env, &refcheck); 171 cl_env_put(env, &refcheck);
170 return PTR_ERR(lock); 172 return rc;
171 } 173 }
172 174
173 cg->cg_env = cl_env_get(&refcheck); 175 cg->lg_env = cl_env_get(&refcheck);
174 cg->cg_io = io; 176 cg->lg_io = io;
175 cg->cg_lock = lock; 177 cg->lg_lock = lock;
176 cg->cg_gid = gid; 178 cg->lg_gid = gid;
177 LASSERT(cg->cg_env == env); 179 LASSERT(cg->lg_env == env);
178 180
179 cl_env_unplant(env, &refcheck); 181 cl_env_unplant(env, &refcheck);
180 return 0; 182 return 0;
181} 183}
182 184
183void cl_put_grouplock(struct ccc_grouplock *cg) 185void cl_put_grouplock(struct ll_grouplock *cg)
184{ 186{
185 struct lu_env *env = cg->cg_env; 187 struct lu_env *env = cg->lg_env;
186 struct cl_io *io = cg->cg_io; 188 struct cl_io *io = cg->lg_io;
187 struct cl_lock *lock = cg->cg_lock; 189 struct cl_lock *lock = cg->lg_lock;
188 int refcheck; 190 int refcheck;
189 191
190 LASSERT(cg->cg_env); 192 LASSERT(cg->lg_env);
191 LASSERT(cg->cg_gid); 193 LASSERT(cg->lg_gid);
192 194
193 cl_env_implant(env, &refcheck); 195 cl_env_implant(env, &refcheck);
194 cl_env_put(env, &refcheck); 196 cl_env_put(env, &refcheck);
195 197
196 cl_unuse(env, lock); 198 cl_lock_release(env, lock);
197 cl_lock_release(env, lock, GROUPLOCK_SCOPE, current);
198 cl_io_fini(env, io); 199 cl_io_fini(env, io);
199 cl_env_put(env, NULL); 200 cl_env_put(env, NULL);
200} 201}
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index a55ac4dccd90..2df551d3ae6c 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -46,31 +46,31 @@
46#include "llite_internal.h" 46#include "llite_internal.h"
47 47
48/** records that a write is in flight */ 48/** records that a write is in flight */
49void vvp_write_pending(struct ccc_object *club, struct ccc_page *page) 49void vvp_write_pending(struct vvp_object *club, struct vvp_page *page)
50{ 50{
51 struct ll_inode_info *lli = ll_i2info(club->cob_inode); 51 struct ll_inode_info *lli = ll_i2info(club->vob_inode);
52 52
53 spin_lock(&lli->lli_lock); 53 spin_lock(&lli->lli_lock);
54 lli->lli_flags |= LLIF_SOM_DIRTY; 54 lli->lli_flags |= LLIF_SOM_DIRTY;
55 if (page && list_empty(&page->cpg_pending_linkage)) 55 if (page && list_empty(&page->vpg_pending_linkage))
56 list_add(&page->cpg_pending_linkage, &club->cob_pending_list); 56 list_add(&page->vpg_pending_linkage, &club->vob_pending_list);
57 spin_unlock(&lli->lli_lock); 57 spin_unlock(&lli->lli_lock);
58} 58}
59 59
60/** records that a write has completed */ 60/** records that a write has completed */
61void vvp_write_complete(struct ccc_object *club, struct ccc_page *page) 61void vvp_write_complete(struct vvp_object *club, struct vvp_page *page)
62{ 62{
63 struct ll_inode_info *lli = ll_i2info(club->cob_inode); 63 struct ll_inode_info *lli = ll_i2info(club->vob_inode);
64 int rc = 0; 64 int rc = 0;
65 65
66 spin_lock(&lli->lli_lock); 66 spin_lock(&lli->lli_lock);
67 if (page && !list_empty(&page->cpg_pending_linkage)) { 67 if (page && !list_empty(&page->vpg_pending_linkage)) {
68 list_del_init(&page->cpg_pending_linkage); 68 list_del_init(&page->vpg_pending_linkage);
69 rc = 1; 69 rc = 1;
70 } 70 }
71 spin_unlock(&lli->lli_lock); 71 spin_unlock(&lli->lli_lock);
72 if (rc) 72 if (rc)
73 ll_queue_done_writing(club->cob_inode, 0); 73 ll_queue_done_writing(club->vob_inode, 0);
74} 74}
75 75
76/** Queues DONE_WRITING if 76/** Queues DONE_WRITING if
@@ -80,25 +80,25 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
80void ll_queue_done_writing(struct inode *inode, unsigned long flags) 80void ll_queue_done_writing(struct inode *inode, unsigned long flags)
81{ 81{
82 struct ll_inode_info *lli = ll_i2info(inode); 82 struct ll_inode_info *lli = ll_i2info(inode);
83 struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob); 83 struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
84 84
85 spin_lock(&lli->lli_lock); 85 spin_lock(&lli->lli_lock);
86 lli->lli_flags |= flags; 86 lli->lli_flags |= flags;
87 87
88 if ((lli->lli_flags & LLIF_DONE_WRITING) && 88 if ((lli->lli_flags & LLIF_DONE_WRITING) &&
89 list_empty(&club->cob_pending_list)) { 89 list_empty(&club->vob_pending_list)) {
90 struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq; 90 struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
91 91
92 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK) 92 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
93 CWARN("ino %lu/%u(flags %u) som valid it just after recovery\n", 93 CWARN("%s: file "DFID"(flags %u) Size-on-MDS valid, done writing allowed and no diry pages\n",
94 inode->i_ino, inode->i_generation, 94 ll_get_fsname(inode->i_sb, NULL, 0),
95 lli->lli_flags); 95 PFID(ll_inode2fid(inode)), lli->lli_flags);
96 /* DONE_WRITING is allowed and inode has no dirty page. */ 96 /* DONE_WRITING is allowed and inode has no dirty page. */
97 spin_lock(&lcq->lcq_lock); 97 spin_lock(&lcq->lcq_lock);
98 98
99 LASSERT(list_empty(&lli->lli_close_list)); 99 LASSERT(list_empty(&lli->lli_close_list));
100 CDEBUG(D_INODE, "adding inode %lu/%u to close list\n", 100 CDEBUG(D_INODE, "adding inode "DFID" to close list\n",
101 inode->i_ino, inode->i_generation); 101 PFID(ll_inode2fid(inode)));
102 list_add_tail(&lli->lli_close_list, &lcq->lcq_head); 102 list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
103 103
104 /* Avoid a concurrent insertion into the close thread queue: 104 /* Avoid a concurrent insertion into the close thread queue:
@@ -124,9 +124,9 @@ void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
124 op_data->op_flags |= MF_SOM_CHANGE; 124 op_data->op_flags |= MF_SOM_CHANGE;
125 /* Check if Size-on-MDS attributes are valid. */ 125 /* Check if Size-on-MDS attributes are valid. */
126 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK) 126 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
127 CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n", 127 CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
128 inode->i_ino, inode->i_generation, 128 ll_get_fsname(inode->i_sb, NULL, 0),
129 lli->lli_flags); 129 PFID(ll_inode2fid(inode)), lli->lli_flags);
130 130
131 if (!cl_local_size(inode)) { 131 if (!cl_local_size(inode)) {
132 /* Send Size-on-MDS Attributes if valid. */ 132 /* Send Size-on-MDS Attributes if valid. */
@@ -140,10 +140,10 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
140 struct obd_client_handle **och, unsigned long flags) 140 struct obd_client_handle **och, unsigned long flags)
141{ 141{
142 struct ll_inode_info *lli = ll_i2info(inode); 142 struct ll_inode_info *lli = ll_i2info(inode);
143 struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob); 143 struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
144 144
145 spin_lock(&lli->lli_lock); 145 spin_lock(&lli->lli_lock);
146 if (!(list_empty(&club->cob_pending_list))) { 146 if (!(list_empty(&club->vob_pending_list))) {
147 if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) { 147 if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
148 LASSERT(*och); 148 LASSERT(*och);
149 LASSERT(!lli->lli_pending_och); 149 LASSERT(!lli->lli_pending_och);
@@ -198,7 +198,7 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
198 } 198 }
199 } 199 }
200 200
201 LASSERT(list_empty(&club->cob_pending_list)); 201 LASSERT(list_empty(&club->vob_pending_list));
202 lli->lli_flags &= ~LLIF_SOM_DIRTY; 202 lli->lli_flags &= ~LLIF_SOM_DIRTY;
203 spin_unlock(&lli->lli_lock); 203 spin_unlock(&lli->lli_lock);
204 ll_done_writing_attr(inode, op_data); 204 ll_done_writing_attr(inode, op_data);
@@ -221,9 +221,9 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
221 221
222 LASSERT(op_data); 222 LASSERT(op_data);
223 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK) 223 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
224 CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n", 224 CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
225 inode->i_ino, inode->i_generation, 225 ll_get_fsname(inode->i_sb, NULL, 0),
226 lli->lli_flags); 226 PFID(ll_inode2fid(inode)), lli->lli_flags);
227 227
228 oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); 228 oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
229 if (!oa) { 229 if (!oa) {
@@ -241,9 +241,9 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
241 if (rc) { 241 if (rc) {
242 oa->o_valid = 0; 242 oa->o_valid = 0;
243 if (rc != -ENOENT) 243 if (rc != -ENOENT)
244 CERROR("inode_getattr failed (%d): unable to send a Size-on-MDS attribute update for inode %lu/%u\n", 244 CERROR("%s: inode_getattr failed - unable to send a Size-on-MDS attribute update for inode "DFID": rc = %d\n",
245 rc, inode->i_ino, 245 ll_get_fsname(inode->i_sb, NULL, 0),
246 inode->i_generation); 246 PFID(ll_inode2fid(inode)), rc);
247 } else { 247 } else {
248 CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n", 248 CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
249 PFID(&lli->lli_fid)); 249 PFID(&lli->lli_fid));
@@ -302,9 +302,11 @@ static void ll_done_writing(struct inode *inode)
302 * OSTs and send setattr to back to MDS. 302 * OSTs and send setattr to back to MDS.
303 */ 303 */
304 rc = ll_som_update(inode, op_data); 304 rc = ll_som_update(inode, op_data);
305 else if (rc) 305 else if (rc) {
306 CERROR("inode %lu mdc done_writing failed: rc = %d\n", 306 CERROR("%s: inode "DFID" mdc done_writing failed: rc = %d\n",
307 inode->i_ino, rc); 307 ll_get_fsname(inode->i_sb, NULL, 0),
308 PFID(ll_inode2fid(inode)), rc);
309 }
308out: 310out:
309 ll_finish_md_op_data(op_data); 311 ll_finish_md_op_data(op_data);
310 if (och) { 312 if (och) {
@@ -323,8 +325,9 @@ static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
323 lli = list_entry(lcq->lcq_head.next, struct ll_inode_info, 325 lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
324 lli_close_list); 326 lli_close_list);
325 list_del_init(&lli->lli_close_list); 327 list_del_init(&lli->lli_close_list);
326 } else if (atomic_read(&lcq->lcq_stop)) 328 } else if (atomic_read(&lcq->lcq_stop)) {
327 lli = ERR_PTR(-EALREADY); 329 lli = ERR_PTR(-EALREADY);
330 }
328 331
329 spin_unlock(&lcq->lcq_lock); 332 spin_unlock(&lcq->lcq_lock);
330 return lli; 333 return lli;
@@ -348,8 +351,8 @@ static int ll_close_thread(void *arg)
348 break; 351 break;
349 352
350 inode = ll_info2i(lli); 353 inode = ll_info2i(lli);
351 CDEBUG(D_INFO, "done_writing for inode %lu/%u\n", 354 CDEBUG(D_INFO, "done_writing for inode "DFID"\n",
352 inode->i_ino, inode->i_generation); 355 PFID(ll_inode2fid(inode)));
353 ll_done_writing(inode); 356 ll_done_writing(inode);
354 iput(inode); 357 iput(inode);
355 } 358 }
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 65a6acec663b..ce1f949430f1 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -43,11 +43,11 @@
43 43
44/* for struct cl_lock_descr and struct cl_io */ 44/* for struct cl_lock_descr and struct cl_io */
45#include "../include/cl_object.h" 45#include "../include/cl_object.h"
46#include "../include/lclient.h"
47#include "../include/lustre_mdc.h" 46#include "../include/lustre_mdc.h"
48#include "../include/lustre_intent.h" 47#include "../include/lustre_intent.h"
49#include <linux/compat.h> 48#include <linux/compat.h>
50#include <linux/posix_acl_xattr.h> 49#include <linux/posix_acl_xattr.h>
50#include "vvp_internal.h"
51 51
52#ifndef FMODE_EXEC 52#ifndef FMODE_EXEC
53#define FMODE_EXEC 0 53#define FMODE_EXEC 0
@@ -99,6 +99,13 @@ struct ll_remote_perm {
99 */ 99 */
100}; 100};
101 101
102struct ll_grouplock {
103 struct lu_env *lg_env;
104 struct cl_io *lg_io;
105 struct cl_lock *lg_lock;
106 unsigned long lg_gid;
107};
108
102enum lli_flags { 109enum lli_flags {
103 /* MDS has an authority for the Size-on-MDS attributes. */ 110 /* MDS has an authority for the Size-on-MDS attributes. */
104 LLIF_MDS_SIZE_LOCK = (1 << 0), 111 LLIF_MDS_SIZE_LOCK = (1 << 0),
@@ -161,7 +168,9 @@ struct ll_inode_info {
161 struct inode lli_vfs_inode; 168 struct inode lli_vfs_inode;
162 169
163 /* the most recent timestamps obtained from mds */ 170 /* the most recent timestamps obtained from mds */
164 struct ost_lvb lli_lvb; 171 s64 lli_atime;
172 s64 lli_mtime;
173 s64 lli_ctime;
165 spinlock_t lli_agl_lock; 174 spinlock_t lli_agl_lock;
166 175
167 /* Try to make the d::member and f::member are aligned. Before using 176 /* Try to make the d::member and f::member are aligned. Before using
@@ -328,6 +337,7 @@ enum ra_stat {
328 RA_STAT_EOF, 337 RA_STAT_EOF,
329 RA_STAT_MAX_IN_FLIGHT, 338 RA_STAT_MAX_IN_FLIGHT,
330 RA_STAT_WRONG_GRAB_PAGE, 339 RA_STAT_WRONG_GRAB_PAGE,
340 RA_STAT_FAILED_REACH_END,
331 _NR_RA_STAT, 341 _NR_RA_STAT,
332}; 342};
333 343
@@ -481,6 +491,12 @@ struct ll_sb_info {
481 491
482 struct lprocfs_stats *ll_stats; /* lprocfs stats counter */ 492 struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
483 493
494 /*
495 * Used to track "unstable" pages on a client, and maintain a
496 * LRU list of clean pages. An "unstable" page is defined as
497 * any page which is sent to a server as part of a bulk request,
498 * but is uncommitted to stable storage.
499 */
484 struct cl_client_cache ll_cache; 500 struct cl_client_cache ll_cache;
485 501
486 struct lprocfs_stats *ll_ra_stats; 502 struct lprocfs_stats *ll_ra_stats;
@@ -525,13 +541,6 @@ struct ll_sb_info {
525 struct completion ll_kobj_unregister; 541 struct completion ll_kobj_unregister;
526}; 542};
527 543
528struct ll_ra_read {
529 pgoff_t lrr_start;
530 pgoff_t lrr_count;
531 struct task_struct *lrr_reader;
532 struct list_head lrr_linkage;
533};
534
535/* 544/*
536 * per file-descriptor read-ahead data. 545 * per file-descriptor read-ahead data.
537 */ 546 */
@@ -590,12 +599,6 @@ struct ll_readahead_state {
590 */ 599 */
591 unsigned long ras_request_index; 600 unsigned long ras_request_index;
592 /* 601 /*
593 * list of struct ll_ra_read's one per read(2) call current in
594 * progress against this file descriptor. Used by read-ahead code,
595 * protected by ->ras_lock.
596 */
597 struct list_head ras_read_beads;
598 /*
599 * The following 3 items are used for detecting the stride I/O 602 * The following 3 items are used for detecting the stride I/O
600 * mode. 603 * mode.
601 * In stride I/O mode, 604 * In stride I/O mode,
@@ -622,7 +625,7 @@ extern struct kmem_cache *ll_file_data_slab;
622struct lustre_handle; 625struct lustre_handle;
623struct ll_file_data { 626struct ll_file_data {
624 struct ll_readahead_state fd_ras; 627 struct ll_readahead_state fd_ras;
625 struct ccc_grouplock fd_grouplock; 628 struct ll_grouplock fd_grouplock;
626 __u64 lfd_pos; 629 __u64 lfd_pos;
627 __u32 fd_flags; 630 __u32 fd_flags;
628 fmode_t fd_omode; 631 fmode_t fd_omode;
@@ -663,8 +666,16 @@ static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
663#endif 666#endif
664} 667}
665 668
666void ll_ra_read_in(struct file *f, struct ll_ra_read *rar); 669void ll_ras_enter(struct file *f);
667void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar); 670
671/* llite/lcommon_misc.c */
672int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
673int cl_ocd_update(struct obd_device *host,
674 struct obd_device *watched,
675 enum obd_notify_event ev, void *owner, void *data);
676int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
677 struct ll_grouplock *cg);
678void cl_put_grouplock(struct ll_grouplock *cg);
668 679
669/* llite/lproc_llite.c */ 680/* llite/lproc_llite.c */
670int ldebugfs_register_mountpoint(struct dentry *parent, 681int ldebugfs_register_mountpoint(struct dentry *parent,
@@ -697,15 +708,15 @@ int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
697struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de); 708struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
698 709
699/* llite/rw.c */ 710/* llite/rw.c */
700int ll_prepare_write(struct file *, struct page *, unsigned from, unsigned to);
701int ll_commit_write(struct file *, struct page *, unsigned from, unsigned to);
702int ll_writepage(struct page *page, struct writeback_control *wbc); 711int ll_writepage(struct page *page, struct writeback_control *wbc);
703int ll_writepages(struct address_space *, struct writeback_control *wbc); 712int ll_writepages(struct address_space *, struct writeback_control *wbc);
704int ll_readpage(struct file *file, struct page *page); 713int ll_readpage(struct file *file, struct page *page);
705void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras); 714void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
706int ll_readahead(const struct lu_env *env, struct cl_io *io, 715int ll_readahead(const struct lu_env *env, struct cl_io *io,
707 struct ll_readahead_state *ras, struct address_space *mapping, 716 struct cl_page_list *queue, struct ll_readahead_state *ras,
708 struct cl_page_list *queue, int flags); 717 bool hit);
718struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage);
719void ll_cl_fini(struct ll_cl_context *lcc);
709 720
710extern const struct address_space_operations ll_aops; 721extern const struct address_space_operations ll_aops;
711 722
@@ -740,7 +751,7 @@ struct posix_acl *ll_get_acl(struct inode *inode, int type);
740int ll_inode_permission(struct inode *inode, int mask); 751int ll_inode_permission(struct inode *inode, int mask);
741 752
742int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, 753int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
743 int flags, struct lov_user_md *lum, 754 __u64 flags, struct lov_user_md *lum,
744 int lum_size); 755 int lum_size);
745int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, 756int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
746 struct lov_mds_md **lmm, int *lmm_size, 757 struct lov_mds_md **lmm, int *lmm_size,
@@ -750,9 +761,9 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
750int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp, 761int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
751 int *lmm_size, struct ptlrpc_request **request); 762 int *lmm_size, struct ptlrpc_request **request);
752int ll_fsync(struct file *file, loff_t start, loff_t end, int data); 763int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
753int ll_merge_lvb(const struct lu_env *env, struct inode *inode); 764int ll_merge_attr(const struct lu_env *env, struct inode *inode);
754int ll_fid2path(struct inode *inode, void __user *arg); 765int ll_fid2path(struct inode *inode, void __user *arg);
755int ll_data_version(struct inode *inode, __u64 *data_version, int extent_lock); 766int ll_data_version(struct inode *inode, __u64 *data_version, int flags);
756int ll_hsm_release(struct inode *inode); 767int ll_hsm_release(struct inode *inode);
757 768
758/* llite/dcache.c */ 769/* llite/dcache.c */
@@ -824,65 +835,8 @@ struct ll_close_queue {
824 atomic_t lcq_stop; 835 atomic_t lcq_stop;
825}; 836};
826 837
827struct ccc_object *cl_inode2ccc(struct inode *inode); 838void vvp_write_pending(struct vvp_object *club, struct vvp_page *page);
828 839void vvp_write_complete(struct vvp_object *club, struct vvp_page *page);
829void vvp_write_pending (struct ccc_object *club, struct ccc_page *page);
830void vvp_write_complete(struct ccc_object *club, struct ccc_page *page);
831
832/* specific architecture can implement only part of this list */
833enum vvp_io_subtype {
834 /** normal IO */
835 IO_NORMAL,
836 /** io started from splice_{read|write} */
837 IO_SPLICE
838};
839
840/* IO subtypes */
841struct vvp_io {
842 /** io subtype */
843 enum vvp_io_subtype cui_io_subtype;
844
845 union {
846 struct {
847 struct pipe_inode_info *cui_pipe;
848 unsigned int cui_flags;
849 } splice;
850 struct vvp_fault_io {
851 /**
852 * Inode modification time that is checked across DLM
853 * lock request.
854 */
855 time64_t ft_mtime;
856 struct vm_area_struct *ft_vma;
857 /**
858 * locked page returned from vvp_io
859 */
860 struct page *ft_vmpage;
861 struct vm_fault_api {
862 /**
863 * kernel fault info
864 */
865 struct vm_fault *ft_vmf;
866 /**
867 * fault API used bitflags for return code.
868 */
869 unsigned int ft_flags;
870 /**
871 * check that flags are from filemap_fault
872 */
873 bool ft_flags_valid;
874 } fault;
875 } fault;
876 } u;
877 /**
878 * Read-ahead state used by read and page-fault IO contexts.
879 */
880 struct ll_ra_read cui_bead;
881 /**
882 * Set when cui_bead has been initialized.
883 */
884 int cui_ra_window_set;
885};
886 840
887/** 841/**
888 * IO arguments for various VFS I/O interfaces. 842 * IO arguments for various VFS I/O interfaces.
@@ -911,54 +865,32 @@ struct ll_cl_context {
911 int lcc_refcheck; 865 int lcc_refcheck;
912}; 866};
913 867
914struct vvp_thread_info { 868struct ll_thread_info {
915 struct vvp_io_args vti_args; 869 struct vvp_io_args lti_args;
916 struct ra_io_arg vti_ria; 870 struct ra_io_arg lti_ria;
917 struct ll_cl_context vti_io_ctx; 871 struct ll_cl_context lti_io_ctx;
918}; 872};
919 873
920static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env) 874extern struct lu_context_key ll_thread_key;
921{ 875static inline struct ll_thread_info *ll_env_info(const struct lu_env *env)
922 extern struct lu_context_key vvp_key;
923 struct vvp_thread_info *info;
924
925 info = lu_context_key_get(&env->le_ctx, &vvp_key);
926 LASSERT(info);
927 return info;
928}
929
930static inline struct vvp_io_args *vvp_env_args(const struct lu_env *env,
931 enum vvp_io_subtype type)
932{ 876{
933 struct vvp_io_args *ret = &vvp_env_info(env)->vti_args; 877 struct ll_thread_info *lti;
934
935 ret->via_io_subtype = type;
936 878
937 return ret; 879 lti = lu_context_key_get(&env->le_ctx, &ll_thread_key);
880 LASSERT(lti);
881 return lti;
938} 882}
939 883
940struct vvp_session { 884static inline struct vvp_io_args *ll_env_args(const struct lu_env *env,
941 struct vvp_io vs_ios; 885 enum vvp_io_subtype type)
942};
943
944static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
945{ 886{
946 extern struct lu_context_key vvp_session_key; 887 struct vvp_io_args *via = &ll_env_info(env)->lti_args;
947 struct vvp_session *ses;
948 888
949 ses = lu_context_key_get(env->le_ses, &vvp_session_key); 889 via->via_io_subtype = type;
950 LASSERT(ses);
951 return ses;
952}
953 890
954static inline struct vvp_io *vvp_env_io(const struct lu_env *env) 891 return via;
955{
956 return &vvp_env_session(env)->vs_ios;
957} 892}
958 893
959int vvp_global_init(void);
960void vvp_global_fini(void);
961
962void ll_queue_done_writing(struct inode *inode, unsigned long flags); 894void ll_queue_done_writing(struct inode *inode, unsigned long flags);
963void ll_close_thread_shutdown(struct ll_close_queue *lcq); 895void ll_close_thread_shutdown(struct ll_close_queue *lcq);
964int ll_close_thread_start(struct ll_close_queue **lcq_ret); 896int ll_close_thread_start(struct ll_close_queue **lcq_ret);
@@ -981,6 +913,10 @@ static inline void ll_invalidate_page(struct page *vmpage)
981 if (!mapping) 913 if (!mapping)
982 return; 914 return;
983 915
916 /*
917 * truncate_complete_page() calls
918 * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
919 */
984 ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE); 920 ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
985 truncate_complete_page(mapping, vmpage); 921 truncate_complete_page(mapping, vmpage);
986} 922}
@@ -1055,9 +991,6 @@ void free_rmtperm_hash(struct hlist_head *hash);
1055int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm); 991int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
1056int lustre_check_remote_perm(struct inode *inode, int mask); 992int lustre_check_remote_perm(struct inode *inode, int mask);
1057 993
1058/* llite/llite_cl.c */
1059extern struct lu_device_type vvp_device_type;
1060
1061/** 994/**
1062 * Common IO arguments for various VFS I/O interfaces. 995 * Common IO arguments for various VFS I/O interfaces.
1063 */ 996 */
@@ -1069,7 +1002,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
1069 struct ll_readahead_state *ras, unsigned long index, 1002 struct ll_readahead_state *ras, unsigned long index,
1070 unsigned hit); 1003 unsigned hit);
1071void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len); 1004void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
1072void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which); 1005void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
1073 1006
1074/* llite/llite_rmtacl.c */ 1007/* llite/llite_rmtacl.c */
1075#ifdef CONFIG_FS_POSIX_ACL 1008#ifdef CONFIG_FS_POSIX_ACL
@@ -1163,6 +1096,22 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentry,
1163 int only_unplug); 1096 int only_unplug);
1164void ll_stop_statahead(struct inode *dir, void *key); 1097void ll_stop_statahead(struct inode *dir, void *key);
1165 1098
1099blkcnt_t dirty_cnt(struct inode *inode);
1100
1101int cl_glimpse_size0(struct inode *inode, int agl);
1102int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
1103 struct inode *inode, struct cl_object *clob, int agl);
1104
1105static inline int cl_glimpse_size(struct inode *inode)
1106{
1107 return cl_glimpse_size0(inode, 0);
1108}
1109
1110static inline int cl_agl(struct inode *inode)
1111{
1112 return cl_glimpse_size0(inode, 1);
1113}
1114
1166static inline int ll_glimpse_size(struct inode *inode) 1115static inline int ll_glimpse_size(struct inode *inode)
1167{ 1116{
1168 struct ll_inode_info *lli = ll_i2info(inode); 1117 struct ll_inode_info *lli = ll_i2info(inode);
@@ -1285,43 +1234,6 @@ typedef enum llioc_iter (*llioc_callback_t)(struct inode *inode,
1285void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd); 1234void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd);
1286void ll_iocontrol_unregister(void *magic); 1235void ll_iocontrol_unregister(void *magic);
1287 1236
1288/* lclient compat stuff */
1289#define cl_inode_info ll_inode_info
1290#define cl_i2info(info) ll_i2info(info)
1291#define cl_inode_mode(inode) ((inode)->i_mode)
1292#define cl_i2sbi ll_i2sbi
1293
1294static inline struct ll_file_data *cl_iattr2fd(struct inode *inode,
1295 const struct iattr *attr)
1296{
1297 LASSERT(attr->ia_valid & ATTR_FILE);
1298 return LUSTRE_FPRIVATE(attr->ia_file);
1299}
1300
1301static inline void cl_isize_write_nolock(struct inode *inode, loff_t kms)
1302{
1303 LASSERT(mutex_is_locked(&ll_i2info(inode)->lli_size_mutex));
1304 i_size_write(inode, kms);
1305}
1306
1307static inline void cl_isize_write(struct inode *inode, loff_t kms)
1308{
1309 ll_inode_size_lock(inode);
1310 i_size_write(inode, kms);
1311 ll_inode_size_unlock(inode);
1312}
1313
1314#define cl_isize_read(inode) i_size_read(inode)
1315
1316static inline int cl_merge_lvb(const struct lu_env *env, struct inode *inode)
1317{
1318 return ll_merge_lvb(env, inode);
1319}
1320
1321#define cl_inode_atime(inode) LTIME_S((inode)->i_atime)
1322#define cl_inode_ctime(inode) LTIME_S((inode)->i_ctime)
1323#define cl_inode_mtime(inode) LTIME_S((inode)->i_mtime)
1324
1325int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end, 1237int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
1326 enum cl_fsync_mode mode, int ignore_layout); 1238 enum cl_fsync_mode mode, int ignore_layout);
1327 1239
@@ -1350,7 +1262,7 @@ static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
1350 int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ : 1262 int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ :
1351 LPROC_LL_OSC_WRITE; 1263 LPROC_LL_OSC_WRITE;
1352 1264
1353 ll_stats_ops_tally(ll_s2sbi(cl2ccc_dev(dev)->cdv_sb), opc, rc); 1265 ll_stats_ops_tally(ll_s2sbi(cl2vvp_dev(dev)->vdv_sb), opc, rc);
1354} 1266}
1355 1267
1356ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, 1268ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
@@ -1382,18 +1294,16 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
1382 */ 1294 */
1383 if (it->d.lustre.it_remote_lock_mode) { 1295 if (it->d.lustre.it_remote_lock_mode) {
1384 handle.cookie = it->d.lustre.it_remote_lock_handle; 1296 handle.cookie = it->d.lustre.it_remote_lock_handle;
1385 CDEBUG(D_DLMTRACE, "setting l_data to inode %p(%lu/%u) for remote lock %#llx\n", 1297 CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for remote lock %#llx\n",
1386 inode, 1298 PFID(ll_inode2fid(inode)), inode,
1387 inode->i_ino, inode->i_generation,
1388 handle.cookie); 1299 handle.cookie);
1389 md_set_lock_data(exp, &handle.cookie, inode, NULL); 1300 md_set_lock_data(exp, &handle.cookie, inode, NULL);
1390 } 1301 }
1391 1302
1392 handle.cookie = it->d.lustre.it_lock_handle; 1303 handle.cookie = it->d.lustre.it_lock_handle;
1393 1304
1394 CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u) for lock %#llx\n", 1305 CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for lock %#llx\n",
1395 inode, inode->i_ino, 1306 PFID(ll_inode2fid(inode)), inode, handle.cookie);
1396 inode->i_generation, handle.cookie);
1397 1307
1398 md_set_lock_data(exp, &handle.cookie, inode, 1308 md_set_lock_data(exp, &handle.cookie, inode,
1399 &it->d.lustre.it_lock_bits); 1309 &it->d.lustre.it_lock_bits);
@@ -1471,9 +1381,25 @@ enum {
1471 1381
1472int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf); 1382int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
1473int ll_layout_refresh(struct inode *inode, __u32 *gen); 1383int ll_layout_refresh(struct inode *inode, __u32 *gen);
1474int ll_layout_restore(struct inode *inode); 1384int ll_layout_restore(struct inode *inode, loff_t start, __u64 length);
1475 1385
1476int ll_xattr_init(void); 1386int ll_xattr_init(void);
1477void ll_xattr_fini(void); 1387void ll_xattr_fini(void);
1478 1388
1389int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
1390 struct cl_page *page, enum cl_req_type crt);
1391
1392/* lcommon_cl.c */
1393int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
1394
1395extern struct lu_env *cl_inode_fini_env;
1396extern int cl_inode_fini_refcheck;
1397
1398int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
1399void cl_inode_fini(struct inode *inode);
1400int cl_local_size(struct inode *inode);
1401
1402__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
1403__u32 cl_fid_build_gen(const struct lu_fid *fid);
1404
1479#endif /* LLITE_INTERNAL_H */ 1405#endif /* LLITE_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index b57a992688a8..96c7e9fc6e5f 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -85,18 +85,18 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
85 85
86 si_meminfo(&si); 86 si_meminfo(&si);
87 pages = si.totalram - si.totalhigh; 87 pages = si.totalram - si.totalhigh;
88 if (pages >> (20 - PAGE_SHIFT) < 512) 88 lru_page_max = pages / 2;
89 lru_page_max = pages / 2;
90 else
91 lru_page_max = (pages / 4) * 3;
92 89
93 /* initialize lru data */ 90 /* initialize ll_cache data */
94 atomic_set(&sbi->ll_cache.ccc_users, 0); 91 atomic_set(&sbi->ll_cache.ccc_users, 0);
95 sbi->ll_cache.ccc_lru_max = lru_page_max; 92 sbi->ll_cache.ccc_lru_max = lru_page_max;
96 atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max); 93 atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
97 spin_lock_init(&sbi->ll_cache.ccc_lru_lock); 94 spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
98 INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru); 95 INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
99 96
97 atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
98 init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
99
100 sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32, 100 sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
101 SBI_DEFAULT_READAHEAD_MAX); 101 SBI_DEFAULT_READAHEAD_MAX);
102 sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file; 102 sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
@@ -169,12 +169,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
169 return -ENOMEM; 169 return -ENOMEM;
170 } 170 }
171 171
172 if (llite_root) {
173 err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
174 if (err < 0)
175 CERROR("could not register mount in <debugfs>/lustre/llite\n");
176 }
177
178 /* indicate the features supported by this client */ 172 /* indicate the features supported by this client */
179 data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH | 173 data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
180 OBD_CONNECT_ATTRFID | 174 OBD_CONNECT_ATTRFID |
@@ -337,10 +331,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
337 else 331 else
338 sbi->ll_md_brw_size = PAGE_SIZE; 332 sbi->ll_md_brw_size = PAGE_SIZE;
339 333
340 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) { 334 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
341 LCONSOLE_INFO("Layout lock feature supported.\n");
342 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK; 335 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
343 }
344 336
345 if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) { 337 if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
346 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) { 338 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
@@ -453,7 +445,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
453 /* make root inode 445 /* make root inode
454 * XXX: move this to after cbd setup? 446 * XXX: move this to after cbd setup?
455 */ 447 */
456 valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS; 448 valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
457 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) 449 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
458 valid |= OBD_MD_FLRMTPERM; 450 valid |= OBD_MD_FLRMTPERM;
459 else if (sbi->ll_flags & LL_SBI_ACL) 451 else if (sbi->ll_flags & LL_SBI_ACL)
@@ -555,6 +547,15 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
555 kfree(data); 547 kfree(data);
556 kfree(osfs); 548 kfree(osfs);
557 549
550 if (llite_root) {
551 err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
552 if (err < 0) {
553 CERROR("%s: could not register mount in debugfs: "
554 "rc = %d\n", ll_get_fsname(sb, NULL, 0), err);
555 err = 0;
556 }
557 }
558
558 return err; 559 return err;
559out_root: 560out_root:
560 iput(root); 561 iput(root);
@@ -573,7 +574,6 @@ out_md:
573out: 574out:
574 kfree(data); 575 kfree(data);
575 kfree(osfs); 576 kfree(osfs);
576 ldebugfs_unregister_mountpoint(sbi);
577 return err; 577 return err;
578} 578}
579 579
@@ -897,10 +897,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
897 cfg->cfg_callback = class_config_llog_handler; 897 cfg->cfg_callback = class_config_llog_handler;
898 /* set up client obds */ 898 /* set up client obds */
899 err = lustre_process_log(sb, profilenm, cfg); 899 err = lustre_process_log(sb, profilenm, cfg);
900 if (err < 0) { 900 if (err < 0)
901 CERROR("Unable to process log: %d\n", err);
902 goto out_free; 901 goto out_free;
903 }
904 902
905 /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */ 903 /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
906 lprof = class_get_profile(profilenm); 904 lprof = class_get_profile(profilenm);
@@ -947,7 +945,7 @@ void ll_put_super(struct super_block *sb)
947 struct lustre_sb_info *lsi = s2lsi(sb); 945 struct lustre_sb_info *lsi = s2lsi(sb);
948 struct ll_sb_info *sbi = ll_s2sbi(sb); 946 struct ll_sb_info *sbi = ll_s2sbi(sb);
949 char *profilenm = get_profile_name(sb); 947 char *profilenm = get_profile_name(sb);
950 int next, force = 1; 948 int ccc_count, next, force = 1, rc = 0;
951 949
952 CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm); 950 CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
953 951
@@ -963,6 +961,19 @@ void ll_put_super(struct super_block *sb)
963 force = obd->obd_force; 961 force = obd->obd_force;
964 } 962 }
965 963
964 /* Wait for unstable pages to be committed to stable storage */
965 if (!force) {
966 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
967
968 rc = l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
969 !atomic_read(&sbi->ll_cache.ccc_unstable_nr),
970 &lwi);
971 }
972
973 ccc_count = atomic_read(&sbi->ll_cache.ccc_unstable_nr);
974 if (!force && rc != -EINTR)
975 LASSERTF(!ccc_count, "count: %i\n", ccc_count);
976
966 /* We need to set force before the lov_disconnect in 977 /* We need to set force before the lov_disconnect in
967 * lustre_common_put_super, since l_d cleans up osc's as well. 978 * lustre_common_put_super, since l_d cleans up osc's as well.
968 */ 979 */
@@ -999,6 +1010,8 @@ void ll_put_super(struct super_block *sb)
999 1010
1000 lustre_common_put_super(sb); 1011 lustre_common_put_super(sb);
1001 1012
1013 cl_env_cache_purge(~0);
1014
1002 module_put(THIS_MODULE); 1015 module_put(THIS_MODULE);
1003} /* client_put_super */ 1016} /* client_put_super */
1004 1017
@@ -1032,8 +1045,8 @@ void ll_clear_inode(struct inode *inode)
1032 struct ll_inode_info *lli = ll_i2info(inode); 1045 struct ll_inode_info *lli = ll_i2info(inode);
1033 struct ll_sb_info *sbi = ll_i2sbi(inode); 1046 struct ll_sb_info *sbi = ll_i2sbi(inode);
1034 1047
1035 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino, 1048 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1036 inode->i_generation, inode); 1049 PFID(ll_inode2fid(inode)), inode);
1037 1050
1038 if (S_ISDIR(inode->i_mode)) { 1051 if (S_ISDIR(inode->i_mode)) {
1039 /* these should have been cleared in ll_file_release */ 1052 /* these should have been cleared in ll_file_release */
@@ -1180,9 +1193,11 @@ static int ll_setattr_done_writing(struct inode *inode,
1180 * from OSTs and send setattr to back to MDS. 1193 * from OSTs and send setattr to back to MDS.
1181 */ 1194 */
1182 rc = ll_som_update(inode, op_data); 1195 rc = ll_som_update(inode, op_data);
1183 else if (rc) 1196 else if (rc) {
1184 CERROR("inode %lu mdc truncate failed: rc = %d\n", 1197 CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n",
1185 inode->i_ino, rc); 1198 ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
1199 PFID(ll_inode2fid(inode)), rc);
1200 }
1186 return rc; 1201 return rc;
1187} 1202}
1188 1203
@@ -1210,12 +1225,9 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1210 bool file_is_released = false; 1225 bool file_is_released = false;
1211 int rc = 0, rc1 = 0; 1226 int rc = 0, rc1 = 0;
1212 1227
1213 CDEBUG(D_VFSTRACE, 1228 CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
1214 "%s: setattr inode %p/fid:" DFID 1229 ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
1215 " from %llu to %llu, valid %x, hsm_import %d\n", 1230 i_size_read(inode), attr->ia_size, attr->ia_valid, hsm_import);
1216 ll_get_fsname(inode->i_sb, NULL, 0), inode,
1217 PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
1218 attr->ia_valid, hsm_import);
1219 1231
1220 if (attr->ia_valid & ATTR_SIZE) { 1232 if (attr->ia_valid & ATTR_SIZE) {
1221 /* Check new size against VFS/VM file size limit and rlimit */ 1233 /* Check new size against VFS/VM file size limit and rlimit */
@@ -1265,14 +1277,6 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1265 LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime), 1277 LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
1266 (s64)ktime_get_real_seconds()); 1278 (s64)ktime_get_real_seconds());
1267 1279
1268 /* If we are changing file size, file content is modified, flag it. */
1269 if (attr->ia_valid & ATTR_SIZE) {
1270 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1271 spin_lock(&lli->lli_lock);
1272 lli->lli_flags |= LLIF_DATA_MODIFIED;
1273 spin_unlock(&lli->lli_lock);
1274 }
1275
1276 /* We always do an MDS RPC, even if we're only changing the size; 1280 /* We always do an MDS RPC, even if we're only changing the size;
1277 * only the MDS knows whether truncate() should fail with -ETXTBUSY 1281 * only the MDS knows whether truncate() should fail with -ETXTBUSY
1278 */ 1282 */
@@ -1284,13 +1288,6 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1284 if (!S_ISDIR(inode->i_mode)) 1288 if (!S_ISDIR(inode->i_mode))
1285 inode_unlock(inode); 1289 inode_unlock(inode);
1286 1290
1287 memcpy(&op_data->op_attr, attr, sizeof(*attr));
1288
1289 /* Open epoch for truncate. */
1290 if (exp_connect_som(ll_i2mdexp(inode)) &&
1291 (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
1292 op_data->op_flags = MF_EPOCH_OPEN;
1293
1294 /* truncate on a released file must failed with -ENODATA, 1291 /* truncate on a released file must failed with -ENODATA,
1295 * so size must not be set on MDS for released file 1292 * so size must not be set on MDS for released file
1296 * but other attributes must be set 1293 * but other attributes must be set
@@ -1304,29 +1301,40 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1304 if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED) 1301 if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
1305 file_is_released = true; 1302 file_is_released = true;
1306 ccc_inode_lsm_put(inode, lsm); 1303 ccc_inode_lsm_put(inode, lsm);
1304
1305 if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
1306 if (file_is_released) {
1307 rc = ll_layout_restore(inode, 0, attr->ia_size);
1308 if (rc < 0)
1309 goto out;
1310
1311 file_is_released = false;
1312 ll_layout_refresh(inode, &gen);
1313 }
1314
1315 /*
1316 * If we are changing file size, file content is
1317 * modified, flag it.
1318 */
1319 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1320 spin_lock(&lli->lli_lock);
1321 lli->lli_flags |= LLIF_DATA_MODIFIED;
1322 spin_unlock(&lli->lli_lock);
1323 op_data->op_bias |= MDS_DATA_MODIFIED;
1324 }
1307 } 1325 }
1308 1326
1309 /* if not in HSM import mode, clear size attr for released file 1327 memcpy(&op_data->op_attr, attr, sizeof(*attr));
1310 * we clear the attribute send to MDT in op_data, not the original 1328
1311 * received from caller in attr which is used later to 1329 /* Open epoch for truncate. */
1312 * decide return code 1330 if (exp_connect_som(ll_i2mdexp(inode)) && !hsm_import &&
1313 */ 1331 (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
1314 if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import) 1332 op_data->op_flags = MF_EPOCH_OPEN;
1315 op_data->op_attr.ia_valid &= ~ATTR_SIZE;
1316 1333
1317 rc = ll_md_setattr(dentry, op_data, &mod); 1334 rc = ll_md_setattr(dentry, op_data, &mod);
1318 if (rc) 1335 if (rc)
1319 goto out; 1336 goto out;
1320 1337
1321 /* truncate failed (only when non HSM import), others succeed */
1322 if (file_is_released) {
1323 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
1324 rc = -ENODATA;
1325 else
1326 rc = 0;
1327 goto out;
1328 }
1329
1330 /* RPC to MDT is sent, cancel data modification flag */ 1338 /* RPC to MDT is sent, cancel data modification flag */
1331 if (op_data->op_bias & MDS_DATA_MODIFIED) { 1339 if (op_data->op_bias & MDS_DATA_MODIFIED) {
1332 spin_lock(&lli->lli_lock); 1340 spin_lock(&lli->lli_lock);
@@ -1335,7 +1343,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1335 } 1343 }
1336 1344
1337 ll_ioepoch_open(lli, op_data->op_ioepoch); 1345 ll_ioepoch_open(lli, op_data->op_ioepoch);
1338 if (!S_ISREG(inode->i_mode)) { 1346 if (!S_ISREG(inode->i_mode) || file_is_released) {
1339 rc = 0; 1347 rc = 0;
1340 goto out; 1348 goto out;
1341 } 1349 }
@@ -1552,7 +1560,7 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
1552 if (body->valid & OBD_MD_FLATIME) { 1560 if (body->valid & OBD_MD_FLATIME) {
1553 if (body->atime > LTIME_S(inode->i_atime)) 1561 if (body->atime > LTIME_S(inode->i_atime))
1554 LTIME_S(inode->i_atime) = body->atime; 1562 LTIME_S(inode->i_atime) = body->atime;
1555 lli->lli_lvb.lvb_atime = body->atime; 1563 lli->lli_atime = body->atime;
1556 } 1564 }
1557 if (body->valid & OBD_MD_FLMTIME) { 1565 if (body->valid & OBD_MD_FLMTIME) {
1558 if (body->mtime > LTIME_S(inode->i_mtime)) { 1566 if (body->mtime > LTIME_S(inode->i_mtime)) {
@@ -1561,12 +1569,12 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
1561 body->mtime); 1569 body->mtime);
1562 LTIME_S(inode->i_mtime) = body->mtime; 1570 LTIME_S(inode->i_mtime) = body->mtime;
1563 } 1571 }
1564 lli->lli_lvb.lvb_mtime = body->mtime; 1572 lli->lli_mtime = body->mtime;
1565 } 1573 }
1566 if (body->valid & OBD_MD_FLCTIME) { 1574 if (body->valid & OBD_MD_FLCTIME) {
1567 if (body->ctime > LTIME_S(inode->i_ctime)) 1575 if (body->ctime > LTIME_S(inode->i_ctime))
1568 LTIME_S(inode->i_ctime) = body->ctime; 1576 LTIME_S(inode->i_ctime) = body->ctime;
1569 lli->lli_lvb.lvb_ctime = body->ctime; 1577 lli->lli_ctime = body->ctime;
1570 } 1578 }
1571 if (body->valid & OBD_MD_FLMODE) 1579 if (body->valid & OBD_MD_FLMODE)
1572 inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT); 1580 inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
@@ -1593,12 +1601,12 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
1593 /* FID shouldn't be changed! */ 1601 /* FID shouldn't be changed! */
1594 if (fid_is_sane(&lli->lli_fid)) { 1602 if (fid_is_sane(&lli->lli_fid)) {
1595 LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1), 1603 LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
1596 "Trying to change FID "DFID 1604 "Trying to change FID "DFID" to the "DFID", inode "DFID"(%p)\n",
1597 " to the "DFID", inode %lu/%u(%p)\n",
1598 PFID(&lli->lli_fid), PFID(&body->fid1), 1605 PFID(&lli->lli_fid), PFID(&body->fid1),
1599 inode->i_ino, inode->i_generation, inode); 1606 PFID(ll_inode2fid(inode)), inode);
1600 } else 1607 } else {
1601 lli->lli_fid = body->fid1; 1608 lli->lli_fid = body->fid1;
1609 }
1602 } 1610 }
1603 1611
1604 LASSERT(fid_seq(&lli->lli_fid) != 0); 1612 LASSERT(fid_seq(&lli->lli_fid) != 0);
@@ -1622,8 +1630,10 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
1622 if (lli->lli_flags & (LLIF_DONE_WRITING | 1630 if (lli->lli_flags & (LLIF_DONE_WRITING |
1623 LLIF_EPOCH_PENDING | 1631 LLIF_EPOCH_PENDING |
1624 LLIF_SOM_DIRTY)) { 1632 LLIF_SOM_DIRTY)) {
1625 CERROR("ino %lu flags %u still has size authority! do not trust the size got from MDS\n", 1633 CERROR("%s: inode "DFID" flags %u still has size authority! do not trust the size got from MDS\n",
1626 inode->i_ino, lli->lli_flags); 1634 sbi->ll_md_exp->exp_obd->obd_name,
1635 PFID(ll_inode2fid(inode)),
1636 lli->lli_flags);
1627 } else { 1637 } else {
1628 /* Use old size assignment to avoid 1638 /* Use old size assignment to avoid
1629 * deadlock bz14138 & bz14326 1639 * deadlock bz14138 & bz14326
@@ -1699,7 +1709,7 @@ void ll_read_inode2(struct inode *inode, void *opaque)
1699 1709
1700void ll_delete_inode(struct inode *inode) 1710void ll_delete_inode(struct inode *inode)
1701{ 1711{
1702 struct cl_inode_info *lli = cl_i2info(inode); 1712 struct ll_inode_info *lli = ll_i2info(inode);
1703 1713
1704 if (S_ISREG(inode->i_mode) && lli->lli_clob) 1714 if (S_ISREG(inode->i_mode) && lli->lli_clob)
1705 /* discard all dirty pages before truncating them, required by 1715 /* discard all dirty pages before truncating them, required by
@@ -1715,8 +1725,8 @@ void ll_delete_inode(struct inode *inode)
1715 spin_lock_irq(&inode->i_data.tree_lock); 1725 spin_lock_irq(&inode->i_data.tree_lock);
1716 spin_unlock_irq(&inode->i_data.tree_lock); 1726 spin_unlock_irq(&inode->i_data.tree_lock);
1717 LASSERTF(inode->i_data.nrpages == 0, 1727 LASSERTF(inode->i_data.nrpages == 0,
1718 "inode=%lu/%u(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n", 1728 "inode="DFID"(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
1719 inode->i_ino, inode->i_generation, inode, 1729 PFID(ll_inode2fid(inode)), inode,
1720 inode->i_data.nrpages); 1730 inode->i_data.nrpages);
1721 } 1731 }
1722 /* Workaround end */ 1732 /* Workaround end */
@@ -1747,7 +1757,9 @@ int ll_iocontrol(struct inode *inode, struct file *file,
1747 rc = md_getattr(sbi->ll_md_exp, op_data, &req); 1757 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
1748 ll_finish_md_op_data(op_data); 1758 ll_finish_md_op_data(op_data);
1749 if (rc) { 1759 if (rc) {
1750 CERROR("failure %d inode %lu\n", rc, inode->i_ino); 1760 CERROR("%s: failure inode "DFID": rc = %d\n",
1761 sbi->ll_md_exp->exp_obd->obd_name,
1762 PFID(ll_inode2fid(inode)), rc);
1751 return -abs(rc); 1763 return -abs(rc);
1752 } 1764 }
1753 1765
@@ -1772,7 +1784,7 @@ int ll_iocontrol(struct inode *inode, struct file *file,
1772 if (IS_ERR(op_data)) 1784 if (IS_ERR(op_data))
1773 return PTR_ERR(op_data); 1785 return PTR_ERR(op_data);
1774 1786
1775 ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags; 1787 op_data->op_attr_flags = flags;
1776 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG; 1788 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
1777 rc = md_setattr(sbi->ll_md_exp, op_data, 1789 rc = md_setattr(sbi->ll_md_exp, op_data,
1778 NULL, 0, NULL, 0, &req, NULL); 1790 NULL, 0, NULL, 0, &req, NULL);
@@ -2066,11 +2078,11 @@ int ll_obd_statfs(struct inode *inode, void __user *arg)
2066 } 2078 }
2067 2079
2068 memcpy(&type, data->ioc_inlbuf1, sizeof(__u32)); 2080 memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2069 if (type & LL_STATFS_LMV) 2081 if (type & LL_STATFS_LMV) {
2070 exp = sbi->ll_md_exp; 2082 exp = sbi->ll_md_exp;
2071 else if (type & LL_STATFS_LOV) 2083 } else if (type & LL_STATFS_LOV) {
2072 exp = sbi->ll_dt_exp; 2084 exp = sbi->ll_dt_exp;
2073 else { 2085 } else {
2074 rc = -ENODEV; 2086 rc = -ENODEV;
2075 goto out_statfs; 2087 goto out_statfs;
2076 } 2088 }
@@ -2271,7 +2283,7 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
2271{ 2283{
2272 char *buf, *path = NULL; 2284 char *buf, *path = NULL;
2273 struct dentry *dentry = NULL; 2285 struct dentry *dentry = NULL;
2274 struct ccc_object *obj = cl_inode2ccc(page->mapping->host); 2286 struct vvp_object *obj = cl_inode2vvp(page->mapping->host);
2275 2287
2276 /* this can be called inside spin lock so use GFP_ATOMIC. */ 2288 /* this can be called inside spin lock so use GFP_ATOMIC. */
2277 buf = (char *)__get_free_page(GFP_ATOMIC); 2289 buf = (char *)__get_free_page(GFP_ATOMIC);
@@ -2285,7 +2297,7 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
2285 "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n", 2297 "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
2286 ll_get_fsname(page->mapping->host->i_sb, NULL, 0), 2298 ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
2287 s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev, 2299 s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
2288 PFID(&obj->cob_header.coh_lu.loh_fid), 2300 PFID(&obj->vob_header.coh_lu.loh_fid),
2289 (path && !IS_ERR(path)) ? path : "", ioret); 2301 (path && !IS_ERR(path)) ? path : "", ioret);
2290 2302
2291 if (dentry) 2303 if (dentry)
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 5b484e62ffd0..88ef1cac9e0f 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -57,10 +57,10 @@ void policy_from_vma(ldlm_policy_data_t *policy,
57 struct vm_area_struct *vma, unsigned long addr, 57 struct vm_area_struct *vma, unsigned long addr,
58 size_t count) 58 size_t count)
59{ 59{
60 policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + 60 policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
61 (vma->vm_pgoff << PAGE_SHIFT); 61 (vma->vm_pgoff << PAGE_SHIFT);
62 policy->l_extent.end = (policy->l_extent.start + count - 1) | 62 policy->l_extent.end = (policy->l_extent.start + count - 1) |
63 ~CFS_PAGE_MASK; 63 ~PAGE_MASK;
64} 64}
65 65
66struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, 66struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
@@ -123,7 +123,8 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
123 123
124 *env_ret = env; 124 *env_ret = env;
125 125
126 io = ccc_env_thread_io(env); 126restart:
127 io = vvp_env_thread_io(env);
127 io->ci_obj = ll_i2info(inode)->lli_clob; 128 io->ci_obj = ll_i2info(inode)->lli_clob;
128 LASSERT(io->ci_obj); 129 LASSERT(io->ci_obj);
129 130
@@ -146,17 +147,20 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
146 147
147 rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj); 148 rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
148 if (rc == 0) { 149 if (rc == 0) {
149 struct ccc_io *cio = ccc_env_io(env); 150 struct vvp_io *vio = vvp_env_io(env);
150 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 151 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
151 152
152 LASSERT(cio->cui_cl.cis_io == io); 153 LASSERT(vio->vui_cl.cis_io == io);
153 154
154 /* mmap lock must be MANDATORY it has to cache pages. */ 155 /* mmap lock must be MANDATORY it has to cache pages. */
155 io->ci_lockreq = CILR_MANDATORY; 156 io->ci_lockreq = CILR_MANDATORY;
156 cio->cui_fd = fd; 157 vio->vui_fd = fd;
157 } else { 158 } else {
158 LASSERT(rc < 0); 159 LASSERT(rc < 0);
159 cl_io_fini(env, io); 160 cl_io_fini(env, io);
161 if (io->ci_need_restart)
162 goto restart;
163
160 cl_env_nested_put(nest, env); 164 cl_env_nested_put(nest, env);
161 io = ERR_PTR(rc); 165 io = ERR_PTR(rc);
162 } 166 }
@@ -200,7 +204,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
200 * Otherwise, we could add dirty pages into osc cache 204 * Otherwise, we could add dirty pages into osc cache
201 * while truncate is on-going. 205 * while truncate is on-going.
202 */ 206 */
203 inode = ccc_object_inode(io->ci_obj); 207 inode = vvp_object_inode(io->ci_obj);
204 lli = ll_i2info(inode); 208 lli = ll_i2info(inode);
205 down_read(&lli->lli_trunc_sem); 209 down_read(&lli->lli_trunc_sem);
206 210
@@ -307,17 +311,17 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
307 vio = vvp_env_io(env); 311 vio = vvp_env_io(env);
308 vio->u.fault.ft_vma = vma; 312 vio->u.fault.ft_vma = vma;
309 vio->u.fault.ft_vmpage = NULL; 313 vio->u.fault.ft_vmpage = NULL;
310 vio->u.fault.fault.ft_vmf = vmf; 314 vio->u.fault.ft_vmf = vmf;
311 vio->u.fault.fault.ft_flags = 0; 315 vio->u.fault.ft_flags = 0;
312 vio->u.fault.fault.ft_flags_valid = false; 316 vio->u.fault.ft_flags_valid = false;
313 317
314 result = cl_io_loop(env, io); 318 result = cl_io_loop(env, io);
315 319
316 /* ft_flags are only valid if we reached 320 /* ft_flags are only valid if we reached
317 * the call to filemap_fault 321 * the call to filemap_fault
318 */ 322 */
319 if (vio->u.fault.fault.ft_flags_valid) 323 if (vio->u.fault.ft_flags_valid)
320 fault_ret = vio->u.fault.fault.ft_flags; 324 fault_ret = vio->u.fault.ft_flags;
321 325
322 vmpage = vio->u.fault.ft_vmpage; 326 vmpage = vio->u.fault.ft_vmpage;
323 if (result != 0 && vmpage) { 327 if (result != 0 && vmpage) {
@@ -390,9 +394,11 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
390 result = ll_page_mkwrite0(vma, vmf->page, &retry); 394 result = ll_page_mkwrite0(vma, vmf->page, &retry);
391 395
392 if (!printed && ++count > 16) { 396 if (!printed && ++count > 16) {
393 CWARN("app(%s): the page %lu of file %lu is under heavy contention.\n", 397 const struct dentry *de = vma->vm_file->f_path.dentry;
398
399 CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
394 current->comm, vmf->pgoff, 400 current->comm, vmf->pgoff,
395 file_inode(vma->vm_file)->i_ino); 401 PFID(ll_inode2fid(de->d_inode)));
396 printed = true; 402 printed = true;
397 } 403 }
398 } while (retry); 404 } while (retry);
@@ -422,16 +428,16 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
422 428
423/** 429/**
424 * To avoid cancel the locks covering mmapped region for lock cache pressure, 430 * To avoid cancel the locks covering mmapped region for lock cache pressure,
425 * we track the mapped vma count in ccc_object::cob_mmap_cnt. 431 * we track the mapped vma count in vvp_object::vob_mmap_cnt.
426 */ 432 */
427static void ll_vm_open(struct vm_area_struct *vma) 433static void ll_vm_open(struct vm_area_struct *vma)
428{ 434{
429 struct inode *inode = file_inode(vma->vm_file); 435 struct inode *inode = file_inode(vma->vm_file);
430 struct ccc_object *vob = cl_inode2ccc(inode); 436 struct vvp_object *vob = cl_inode2vvp(inode);
431 437
432 LASSERT(vma->vm_file); 438 LASSERT(vma->vm_file);
433 LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0); 439 LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
434 atomic_inc(&vob->cob_mmap_cnt); 440 atomic_inc(&vob->vob_mmap_cnt);
435} 441}
436 442
437/** 443/**
@@ -440,11 +446,11 @@ static void ll_vm_open(struct vm_area_struct *vma)
440static void ll_vm_close(struct vm_area_struct *vma) 446static void ll_vm_close(struct vm_area_struct *vma)
441{ 447{
442 struct inode *inode = file_inode(vma->vm_file); 448 struct inode *inode = file_inode(vma->vm_file);
443 struct ccc_object *vob = cl_inode2ccc(inode); 449 struct vvp_object *vob = cl_inode2vvp(inode);
444 450
445 LASSERT(vma->vm_file); 451 LASSERT(vma->vm_file);
446 atomic_dec(&vob->cob_mmap_cnt); 452 atomic_dec(&vob->vob_mmap_cnt);
447 LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0); 453 LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
448} 454}
449 455
450/* XXX put nice comment here. talk about __free_pte -> dirty pages and 456/* XXX put nice comment here. talk about __free_pte -> dirty pages and
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 193aab879709..c1eef6198b25 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -119,7 +119,7 @@ struct inode *search_inode_for_lustre(struct super_block *sb,
119 rc = md_getattr(sbi->ll_md_exp, op_data, &req); 119 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
120 kfree(op_data); 120 kfree(op_data);
121 if (rc) { 121 if (rc) {
122 CERROR("can't get object attrs, fid "DFID", rc %d\n", 122 CDEBUG(D_INFO, "can't get object attrs, fid "DFID", rc %d\n",
123 PFID(fid), rc); 123 PFID(fid), rc);
124 return ERR_PTR(rc); 124 return ERR_PTR(rc);
125 } 125 }
@@ -191,8 +191,9 @@ static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
191 int fileid_len = sizeof(struct lustre_nfs_fid) / 4; 191 int fileid_len = sizeof(struct lustre_nfs_fid) / 4;
192 struct lustre_nfs_fid *nfs_fid = (void *)fh; 192 struct lustre_nfs_fid *nfs_fid = (void *)fh;
193 193
194 CDEBUG(D_INFO, "encoding for (%lu," DFID ") maxlen=%d minlen=%d\n", 194 CDEBUG(D_INFO, "%s: encoding for ("DFID") maxlen=%d minlen=%d\n",
195 inode->i_ino, PFID(ll_inode2fid(inode)), *plen, fileid_len); 195 ll_get_fsname(inode->i_sb, NULL, 0),
196 PFID(ll_inode2fid(inode)), *plen, fileid_len);
196 197
197 if (*plen < fileid_len) { 198 if (*plen < fileid_len) {
198 *plen = fileid_len; 199 *plen = fileid_len;
@@ -298,8 +299,9 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
298 299
299 sbi = ll_s2sbi(dir->i_sb); 300 sbi = ll_s2sbi(dir->i_sb);
300 301
301 CDEBUG(D_INFO, "getting parent for (%lu," DFID ")\n", 302 CDEBUG(D_INFO, "%s: getting parent for ("DFID")\n",
302 dir->i_ino, PFID(ll_inode2fid(dir))); 303 ll_get_fsname(dir->i_sb, NULL, 0),
304 PFID(ll_inode2fid(dir)));
303 305
304 rc = ll_get_default_mdsize(sbi, &lmmsize); 306 rc = ll_get_default_mdsize(sbi, &lmmsize);
305 if (rc != 0) 307 if (rc != 0)
@@ -314,15 +316,20 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
314 rc = md_getattr_name(sbi->ll_md_exp, op_data, &req); 316 rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
315 ll_finish_md_op_data(op_data); 317 ll_finish_md_op_data(op_data);
316 if (rc) { 318 if (rc) {
317 CERROR("failure %d inode %lu get parent\n", rc, dir->i_ino); 319 CERROR("%s: failure inode "DFID" get parent: rc = %d\n",
320 ll_get_fsname(dir->i_sb, NULL, 0),
321 PFID(ll_inode2fid(dir)), rc);
318 return ERR_PTR(rc); 322 return ERR_PTR(rc);
319 } 323 }
320 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); 324 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
321 LASSERT(body->valid & OBD_MD_FLID); 325 /*
322 326 * LU-3952: MDT may lost the FID of its parent, we should not crash
323 CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n", 327 * the NFS server, ll_iget_for_nfs() will handle the error.
324 PFID(ll_inode2fid(dir)), PFID(&body->fid1)); 328 */
325 329 if (body->valid & OBD_MD_FLID) {
330 CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n",
331 PFID(ll_inode2fid(dir)), PFID(&body->fid1));
332 }
326 result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL); 333 result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL);
327 334
328 ptlrpc_req_finished(req); 335 ptlrpc_req_finished(req);
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index f169c0db63b4..813a9a354e5f 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -274,8 +274,9 @@ static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
274 if (lo->lo_biotail) { 274 if (lo->lo_biotail) {
275 lo->lo_biotail->bi_next = bio; 275 lo->lo_biotail->bi_next = bio;
276 lo->lo_biotail = bio; 276 lo->lo_biotail = bio;
277 } else 277 } else {
278 lo->lo_bio = lo->lo_biotail = bio; 278 lo->lo_bio = lo->lo_biotail = bio;
279 }
279 spin_unlock_irqrestore(&lo->lo_lock, flags); 280 spin_unlock_irqrestore(&lo->lo_lock, flags);
280 281
281 atomic_inc(&lo->lo_pending); 282 atomic_inc(&lo->lo_pending);
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 27ab1261400e..55d62eb11957 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -254,7 +254,6 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
254 pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */ 254 pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
255 255
256 if (pages_number > totalram_pages / 2) { 256 if (pages_number > totalram_pages / 2) {
257
258 CERROR("can't set file readahead more than %lu MB\n", 257 CERROR("can't set file readahead more than %lu MB\n",
259 totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/ 258 totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
260 return -ERANGE; 259 return -ERANGE;
@@ -393,6 +392,8 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
393 struct super_block *sb = ((struct seq_file *)file->private_data)->private; 392 struct super_block *sb = ((struct seq_file *)file->private_data)->private;
394 struct ll_sb_info *sbi = ll_s2sbi(sb); 393 struct ll_sb_info *sbi = ll_s2sbi(sb);
395 struct cl_client_cache *cache = &sbi->ll_cache; 394 struct cl_client_cache *cache = &sbi->ll_cache;
395 struct lu_env *env;
396 int refcheck;
396 int mult, rc, pages_number; 397 int mult, rc, pages_number;
397 int diff = 0; 398 int diff = 0;
398 int nrpages = 0; 399 int nrpages = 0;
@@ -430,6 +431,10 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
430 goto out; 431 goto out;
431 } 432 }
432 433
434 env = cl_env_get(&refcheck);
435 if (IS_ERR(env))
436 return 0;
437
433 diff = -diff; 438 diff = -diff;
434 while (diff > 0) { 439 while (diff > 0) {
435 int tmp; 440 int tmp;
@@ -455,19 +460,20 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
455 break; 460 break;
456 461
457 if (!sbi->ll_dt_exp) { /* being initialized */ 462 if (!sbi->ll_dt_exp) { /* being initialized */
458 rc = -ENODEV; 463 rc = 0;
459 break; 464 goto out;
460 } 465 }
461 466
462 /* difficult - have to ask OSCs to drop LRU slots. */ 467 /* difficult - have to ask OSCs to drop LRU slots. */
463 tmp = diff << 1; 468 tmp = diff << 1;
464 rc = obd_set_info_async(NULL, sbi->ll_dt_exp, 469 rc = obd_set_info_async(env, sbi->ll_dt_exp,
465 sizeof(KEY_CACHE_LRU_SHRINK), 470 sizeof(KEY_CACHE_LRU_SHRINK),
466 KEY_CACHE_LRU_SHRINK, 471 KEY_CACHE_LRU_SHRINK,
467 sizeof(tmp), &tmp, NULL); 472 sizeof(tmp), &tmp, NULL);
468 if (rc < 0) 473 if (rc < 0)
469 break; 474 break;
470 } 475 }
476 cl_env_put(env, &refcheck);
471 477
472out: 478out:
473 if (rc >= 0) { 479 if (rc >= 0) {
@@ -818,6 +824,23 @@ static ssize_t xattr_cache_store(struct kobject *kobj,
818} 824}
819LUSTRE_RW_ATTR(xattr_cache); 825LUSTRE_RW_ATTR(xattr_cache);
820 826
827static ssize_t unstable_stats_show(struct kobject *kobj,
828 struct attribute *attr,
829 char *buf)
830{
831 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
832 ll_kobj);
833 struct cl_client_cache *cache = &sbi->ll_cache;
834 int pages, mb;
835
836 pages = atomic_read(&cache->ccc_unstable_nr);
837 mb = (pages * PAGE_SIZE) >> 20;
838
839 return sprintf(buf, "unstable_pages: %8d\n"
840 "unstable_mb: %8d\n", pages, mb);
841}
842LUSTRE_RO_ATTR(unstable_stats);
843
821static struct lprocfs_vars lprocfs_llite_obd_vars[] = { 844static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
822 /* { "mntpt_path", ll_rd_path, 0, 0 }, */ 845 /* { "mntpt_path", ll_rd_path, 0, 0 }, */
823 { "site", &ll_site_stats_fops, NULL, 0 }, 846 { "site", &ll_site_stats_fops, NULL, 0 },
@@ -853,6 +876,7 @@ static struct attribute *llite_attrs[] = {
853 &lustre_attr_max_easize.attr, 876 &lustre_attr_max_easize.attr,
854 &lustre_attr_default_easize.attr, 877 &lustre_attr_default_easize.attr,
855 &lustre_attr_xattr_cache.attr, 878 &lustre_attr_xattr_cache.attr,
879 &lustre_attr_unstable_stats.attr,
856 NULL, 880 NULL,
857}; 881};
858 882
@@ -953,6 +977,7 @@ static const char *ra_stat_string[] = {
953 [RA_STAT_EOF] = "read-ahead to EOF", 977 [RA_STAT_EOF] = "read-ahead to EOF",
954 [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue", 978 [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
955 [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page", 979 [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
980 [RA_STAT_FAILED_REACH_END] = "failed to reach end"
956}; 981};
957 982
958int ldebugfs_register_mountpoint(struct dentry *parent, 983int ldebugfs_register_mountpoint(struct dentry *parent,
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index f8f98e4e8258..5eba0ebae10f 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -128,12 +128,14 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
128 if (rc != 0) { 128 if (rc != 0) {
129 iget_failed(inode); 129 iget_failed(inode);
130 inode = NULL; 130 inode = NULL;
131 } else 131 } else {
132 unlock_new_inode(inode); 132 unlock_new_inode(inode);
133 } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) 133 }
134 } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
134 ll_update_inode(inode, md); 135 ll_update_inode(inode, md);
135 CDEBUG(D_VFSTRACE, "got inode: %p for "DFID"\n", 136 CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p)\n",
136 inode, PFID(&md->body->fid1)); 137 PFID(&md->body->fid1), inode);
138 }
137 } 139 }
138 return inode; 140 return inode;
139} 141}
@@ -188,7 +190,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
188 break; 190 break;
189 191
190 /* Invalidate all dentries associated with this inode */ 192 /* Invalidate all dentries associated with this inode */
191 LASSERT(lock->l_flags & LDLM_FL_CANCELING); 193 LASSERT(ldlm_is_canceling(lock));
192 194
193 if (!fid_res_name_eq(ll_inode2fid(inode), 195 if (!fid_res_name_eq(ll_inode2fid(inode),
194 &lock->l_resource->lr_name)) { 196 &lock->l_resource->lr_name)) {
@@ -255,8 +257,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
255 } 257 }
256 258
257 if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) { 259 if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) {
258 CDEBUG(D_INODE, "invalidating inode %lu\n", 260 CDEBUG(D_INODE, "invalidating inode "DFID"\n",
259 inode->i_ino); 261 PFID(ll_inode2fid(inode)));
260 truncate_inode_pages(inode->i_mapping, 0); 262 truncate_inode_pages(inode->i_mapping, 0);
261 ll_invalidate_negative_children(inode); 263 ll_invalidate_negative_children(inode);
262 } 264 }
@@ -476,9 +478,8 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
476 if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen) 478 if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen)
477 return ERR_PTR(-ENAMETOOLONG); 479 return ERR_PTR(-ENAMETOOLONG);
478 480
479 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),intent=%s\n", 481 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),intent=%s\n",
480 dentry, parent->i_ino, 482 dentry, PFID(ll_inode2fid(parent)), parent, LL_IT2STR(it));
481 parent->i_generation, parent, LL_IT2STR(it));
482 483
483 if (d_mountpoint(dentry)) 484 if (d_mountpoint(dentry))
484 CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it)); 485 CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it));
@@ -553,9 +554,8 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
553 struct lookup_intent *itp, it = { .it_op = IT_GETATTR }; 554 struct lookup_intent *itp, it = { .it_op = IT_GETATTR };
554 struct dentry *de; 555 struct dentry *de;
555 556
556 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),flags=%u\n", 557 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),flags=%u\n",
557 dentry, parent->i_ino, 558 dentry, PFID(ll_inode2fid(parent)), parent, flags);
558 parent->i_generation, parent, flags);
559 559
560 /* Optimize away (CREATE && !OPEN). Let .create handle the race. */ 560 /* Optimize away (CREATE && !OPEN). Let .create handle the race. */
561 if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN)) 561 if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN))
@@ -586,10 +586,9 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
586 long long lookup_flags = LOOKUP_OPEN; 586 long long lookup_flags = LOOKUP_OPEN;
587 int rc = 0; 587 int rc = 0;
588 588
589 CDEBUG(D_VFSTRACE, 589 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),file %p,open_flags %x,mode %x opened %d\n",
590 "VFS Op:name=%pd,dir=%lu/%u(%p),file %p,open_flags %x,mode %x opened %d\n", 590 dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode,
591 dentry, dir->i_ino, 591 *opened);
592 dir->i_generation, dir, file, open_flags, mode, *opened);
593 592
594 it = kzalloc(sizeof(*it), GFP_NOFS); 593 it = kzalloc(sizeof(*it), GFP_NOFS);
595 if (!it) 594 if (!it)
@@ -680,8 +679,8 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
680 * lock on the inode. Since we finally have an inode pointer, 679 * lock on the inode. Since we finally have an inode pointer,
681 * stuff it in the lock. 680 * stuff it in the lock.
682 */ 681 */
683 CDEBUG(D_DLMTRACE, "setting l_ast_data to inode %p (%lu/%u)\n", 682 CDEBUG(D_DLMTRACE, "setting l_ast_data to inode "DFID"(%p)\n",
684 inode, inode->i_ino, inode->i_generation); 683 PFID(ll_inode2fid(dir)), inode);
685 ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL); 684 ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
686 out: 685 out:
687 ptlrpc_req_finished(request); 686 ptlrpc_req_finished(request);
@@ -708,9 +707,8 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry, int mode,
708 struct inode *inode; 707 struct inode *inode;
709 int rc = 0; 708 int rc = 0;
710 709
711 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),intent=%s\n", 710 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p), intent=%s\n",
712 dentry, dir->i_ino, 711 dentry, PFID(ll_inode2fid(dir)), dir, LL_IT2STR(it));
713 dir->i_generation, dir, LL_IT2STR(it));
714 712
715 rc = it_open_error(DISP_OPEN_CREATE, it); 713 rc = it_open_error(DISP_OPEN_CREATE, it);
716 if (rc) 714 if (rc)
@@ -733,8 +731,9 @@ static void ll_update_times(struct ptlrpc_request *request,
733 LASSERT(body); 731 LASSERT(body);
734 if (body->valid & OBD_MD_FLMTIME && 732 if (body->valid & OBD_MD_FLMTIME &&
735 body->mtime > LTIME_S(inode->i_mtime)) { 733 body->mtime > LTIME_S(inode->i_mtime)) {
736 CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n", 734 CDEBUG(D_INODE, "setting fid "DFID" mtime from %lu to %llu\n",
737 inode->i_ino, LTIME_S(inode->i_mtime), body->mtime); 735 PFID(ll_inode2fid(inode)), LTIME_S(inode->i_mtime),
736 body->mtime);
738 LTIME_S(inode->i_mtime) = body->mtime; 737 LTIME_S(inode->i_mtime) = body->mtime;
739 } 738 }
740 if (body->valid & OBD_MD_FLCTIME && 739 if (body->valid & OBD_MD_FLCTIME &&
@@ -791,9 +790,9 @@ static int ll_mknod(struct inode *dir, struct dentry *dchild,
791{ 790{
792 int err; 791 int err;
793 792
794 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p) mode %o dev %x\n", 793 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p) mode %o dev %x\n",
795 dchild, dir->i_ino, dir->i_generation, dir, 794 dchild, PFID(ll_inode2fid(dir)), dir, mode,
796 mode, old_encode_dev(rdev)); 795 old_encode_dev(rdev));
797 796
798 if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir))) 797 if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
799 mode &= ~current_umask(); 798 mode &= ~current_umask();
@@ -831,9 +830,8 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
831{ 830{
832 int rc; 831 int rc;
833 832
834 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),flags=%u, excl=%d\n", 833 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p), flags=%u, excl=%d\n",
835 dentry, dir->i_ino, 834 dentry, PFID(ll_inode2fid(dir)), dir, mode, want_excl);
836 dir->i_generation, dir, mode, want_excl);
837 835
838 rc = ll_mknod(dir, dentry, mode, 0); 836 rc = ll_mknod(dir, dentry, mode, 0);
839 837
@@ -845,12 +843,6 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
845 return rc; 843 return rc;
846} 844}
847 845
848static inline void ll_get_child_fid(struct dentry *child, struct lu_fid *fid)
849{
850 if (d_really_is_positive(child))
851 *fid = *ll_inode2fid(d_inode(child));
852}
853
854int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir) 846int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir)
855{ 847{
856 struct mdt_body *body; 848 struct mdt_body *body;
@@ -927,23 +919,25 @@ out:
927 * is any lock existing. They will recycle dentries and inodes based upon locks 919 * is any lock existing. They will recycle dentries and inodes based upon locks
928 * too. b=20433 920 * too. b=20433
929 */ 921 */
930static int ll_unlink(struct inode *dir, struct dentry *dentry) 922static int ll_unlink(struct inode *dir, struct dentry *dchild)
931{ 923{
932 struct ptlrpc_request *request = NULL; 924 struct ptlrpc_request *request = NULL;
933 struct md_op_data *op_data; 925 struct md_op_data *op_data;
934 int rc; 926 int rc;
935 927
936 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n", 928 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
937 dentry, dir->i_ino, dir->i_generation, dir); 929 dchild, dir->i_ino, dir->i_generation, dir);
938 930
939 op_data = ll_prep_md_op_data(NULL, dir, NULL, 931 op_data = ll_prep_md_op_data(NULL, dir, NULL,
940 dentry->d_name.name, 932 dchild->d_name.name,
941 dentry->d_name.len, 933 dchild->d_name.len,
942 0, LUSTRE_OPC_ANY, NULL); 934 0, LUSTRE_OPC_ANY, NULL);
943 if (IS_ERR(op_data)) 935 if (IS_ERR(op_data))
944 return PTR_ERR(op_data); 936 return PTR_ERR(op_data);
945 937
946 ll_get_child_fid(dentry, &op_data->op_fid3); 938 if (dchild && dchild->d_inode)
939 op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
940
947 op_data->op_fid2 = op_data->op_fid3; 941 op_data->op_fid2 = op_data->op_fid3;
948 rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request); 942 rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
949 ll_finish_md_op_data(op_data); 943 ll_finish_md_op_data(op_data);
@@ -963,8 +957,8 @@ static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
963{ 957{
964 int err; 958 int err;
965 959
966 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n", 960 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir"DFID"(%p)\n",
967 dentry, dir->i_ino, dir->i_generation, dir); 961 dentry, PFID(ll_inode2fid(dir)), dir);
968 962
969 if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir))) 963 if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
970 mode &= ~current_umask(); 964 mode &= ~current_umask();
@@ -977,23 +971,25 @@ static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
977 return err; 971 return err;
978} 972}
979 973
980static int ll_rmdir(struct inode *dir, struct dentry *dentry) 974static int ll_rmdir(struct inode *dir, struct dentry *dchild)
981{ 975{
982 struct ptlrpc_request *request = NULL; 976 struct ptlrpc_request *request = NULL;
983 struct md_op_data *op_data; 977 struct md_op_data *op_data;
984 int rc; 978 int rc;
985 979
986 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n", 980 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p)\n",
987 dentry, dir->i_ino, dir->i_generation, dir); 981 dchild, PFID(ll_inode2fid(dir)), dir);
988 982
989 op_data = ll_prep_md_op_data(NULL, dir, NULL, 983 op_data = ll_prep_md_op_data(NULL, dir, NULL,
990 dentry->d_name.name, 984 dchild->d_name.name,
991 dentry->d_name.len, 985 dchild->d_name.len,
992 S_IFDIR, LUSTRE_OPC_ANY, NULL); 986 S_IFDIR, LUSTRE_OPC_ANY, NULL);
993 if (IS_ERR(op_data)) 987 if (IS_ERR(op_data))
994 return PTR_ERR(op_data); 988 return PTR_ERR(op_data);
995 989
996 ll_get_child_fid(dentry, &op_data->op_fid3); 990 if (dchild && dchild->d_inode)
991 op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
992
997 op_data->op_fid2 = op_data->op_fid3; 993 op_data->op_fid2 = op_data->op_fid3;
998 rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request); 994 rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
999 ll_finish_md_op_data(op_data); 995 ll_finish_md_op_data(op_data);
@@ -1011,9 +1007,8 @@ static int ll_symlink(struct inode *dir, struct dentry *dentry,
1011{ 1007{
1012 int err; 1008 int err;
1013 1009
1014 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),target=%.*s\n", 1010 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),target=%.*s\n",
1015 dentry, dir->i_ino, dir->i_generation, 1011 dentry, PFID(ll_inode2fid(dir)), dir, 3000, oldname);
1016 dir, 3000, oldname);
1017 1012
1018 err = ll_new_node(dir, dentry, oldname, S_IFLNK | S_IRWXUGO, 1013 err = ll_new_node(dir, dentry, oldname, S_IFLNK | S_IRWXUGO,
1019 0, LUSTRE_OPC_SYMLINK); 1014 0, LUSTRE_OPC_SYMLINK);
@@ -1033,10 +1028,9 @@ static int ll_link(struct dentry *old_dentry, struct inode *dir,
1033 struct md_op_data *op_data; 1028 struct md_op_data *op_data;
1034 int err; 1029 int err;
1035 1030
1036 CDEBUG(D_VFSTRACE, 1031 CDEBUG(D_VFSTRACE, "VFS Op: inode="DFID"(%p), dir="DFID"(%p), target=%pd\n",
1037 "VFS Op: inode=%lu/%u(%p), dir=%lu/%u(%p), target=%pd\n", 1032 PFID(ll_inode2fid(src)), src, PFID(ll_inode2fid(dir)), dir,
1038 src->i_ino, src->i_generation, src, dir->i_ino, 1033 new_dentry);
1039 dir->i_generation, dir, new_dentry);
1040 1034
1041 op_data = ll_prep_md_op_data(NULL, src, dir, new_dentry->d_name.name, 1035 op_data = ll_prep_md_op_data(NULL, src, dir, new_dentry->d_name.name,
1042 new_dentry->d_name.len, 1036 new_dentry->d_name.len,
@@ -1056,42 +1050,45 @@ out:
1056 return err; 1050 return err;
1057} 1051}
1058 1052
1059static int ll_rename(struct inode *old_dir, struct dentry *old_dentry, 1053static int ll_rename(struct inode *src, struct dentry *src_dchild,
1060 struct inode *new_dir, struct dentry *new_dentry) 1054 struct inode *tgt, struct dentry *tgt_dchild)
1061{ 1055{
1062 struct ptlrpc_request *request = NULL; 1056 struct ptlrpc_request *request = NULL;
1063 struct ll_sb_info *sbi = ll_i2sbi(old_dir); 1057 struct ll_sb_info *sbi = ll_i2sbi(src);
1064 struct md_op_data *op_data; 1058 struct md_op_data *op_data;
1065 int err; 1059 int err;
1066 1060
1067 CDEBUG(D_VFSTRACE, 1061 CDEBUG(D_VFSTRACE,
1068 "VFS Op:oldname=%pd,src_dir=%lu/%u(%p),newname=%pd,tgt_dir=%lu/%u(%p)\n", 1062 "VFS Op:oldname=%pd, src_dir="DFID"(%p), newname=%pd, tgt_dir="DFID"(%p)\n",
1069 old_dentry, old_dir->i_ino, old_dir->i_generation, old_dir, 1063 src_dchild, PFID(ll_inode2fid(src)), src,
1070 new_dentry, new_dir->i_ino, new_dir->i_generation, new_dir); 1064 tgt_dchild, PFID(ll_inode2fid(tgt)), tgt);
1071 1065
1072 op_data = ll_prep_md_op_data(NULL, old_dir, new_dir, NULL, 0, 0, 1066 op_data = ll_prep_md_op_data(NULL, src, tgt, NULL, 0, 0,
1073 LUSTRE_OPC_ANY, NULL); 1067 LUSTRE_OPC_ANY, NULL);
1074 if (IS_ERR(op_data)) 1068 if (IS_ERR(op_data))
1075 return PTR_ERR(op_data); 1069 return PTR_ERR(op_data);
1076 1070
1077 ll_get_child_fid(old_dentry, &op_data->op_fid3); 1071 if (src_dchild && src_dchild->d_inode)
1078 ll_get_child_fid(new_dentry, &op_data->op_fid4); 1072 op_data->op_fid3 = *ll_inode2fid(src_dchild->d_inode);
1073 if (tgt_dchild && tgt_dchild->d_inode)
1074 op_data->op_fid4 = *ll_inode2fid(tgt_dchild->d_inode);
1075
1079 err = md_rename(sbi->ll_md_exp, op_data, 1076 err = md_rename(sbi->ll_md_exp, op_data,
1080 old_dentry->d_name.name, 1077 src_dchild->d_name.name,
1081 old_dentry->d_name.len, 1078 src_dchild->d_name.len,
1082 new_dentry->d_name.name, 1079 tgt_dchild->d_name.name,
1083 new_dentry->d_name.len, &request); 1080 tgt_dchild->d_name.len, &request);
1084 ll_finish_md_op_data(op_data); 1081 ll_finish_md_op_data(op_data);
1085 if (!err) { 1082 if (!err) {
1086 ll_update_times(request, old_dir); 1083 ll_update_times(request, src);
1087 ll_update_times(request, new_dir); 1084 ll_update_times(request, tgt);
1088 ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1); 1085 ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1);
1089 err = ll_objects_destroy(request, old_dir); 1086 err = ll_objects_destroy(request, src);
1090 } 1087 }
1091 1088
1092 ptlrpc_req_finished(request); 1089 ptlrpc_req_finished(request);
1093 if (!err) 1090 if (!err)
1094 d_move(old_dentry, new_dentry); 1091 d_move(src_dchild, tgt_dchild);
1095 return err; 1092 return err;
1096} 1093}
1097 1094
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index edab6c5b7e50..336397773fbb 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -63,7 +63,7 @@
63 * Finalizes cl-data before exiting typical address_space operation. Dual to 63 * Finalizes cl-data before exiting typical address_space operation. Dual to
64 * ll_cl_init(). 64 * ll_cl_init().
65 */ 65 */
66static void ll_cl_fini(struct ll_cl_context *lcc) 66void ll_cl_fini(struct ll_cl_context *lcc)
67{ 67{
68 struct lu_env *env = lcc->lcc_env; 68 struct lu_env *env = lcc->lcc_env;
69 struct cl_io *io = lcc->lcc_io; 69 struct cl_io *io = lcc->lcc_io;
@@ -84,200 +84,59 @@ static void ll_cl_fini(struct ll_cl_context *lcc)
84 * Initializes common cl-data at the typical address_space operation entry 84 * Initializes common cl-data at the typical address_space operation entry
85 * point. 85 * point.
86 */ 86 */
87static struct ll_cl_context *ll_cl_init(struct file *file, 87struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
88 struct page *vmpage, int create)
89{ 88{
90 struct ll_cl_context *lcc; 89 struct ll_cl_context *lcc;
91 struct lu_env *env; 90 struct lu_env *env;
92 struct cl_io *io; 91 struct cl_io *io;
93 struct cl_object *clob; 92 struct cl_object *clob;
94 struct ccc_io *cio; 93 struct vvp_io *vio;
95 94
96 int refcheck; 95 int refcheck;
97 int result = 0; 96 int result = 0;
98 97
99 clob = ll_i2info(vmpage->mapping->host)->lli_clob; 98 clob = ll_i2info(file_inode(file))->lli_clob;
100 LASSERT(clob); 99 LASSERT(clob);
101 100
102 env = cl_env_get(&refcheck); 101 env = cl_env_get(&refcheck);
103 if (IS_ERR(env)) 102 if (IS_ERR(env))
104 return ERR_CAST(env); 103 return ERR_CAST(env);
105 104
106 lcc = &vvp_env_info(env)->vti_io_ctx; 105 lcc = &ll_env_info(env)->lti_io_ctx;
107 memset(lcc, 0, sizeof(*lcc)); 106 memset(lcc, 0, sizeof(*lcc));
108 lcc->lcc_env = env; 107 lcc->lcc_env = env;
109 lcc->lcc_refcheck = refcheck; 108 lcc->lcc_refcheck = refcheck;
110 lcc->lcc_cookie = current; 109 lcc->lcc_cookie = current;
111 110
112 cio = ccc_env_io(env); 111 vio = vvp_env_io(env);
113 io = cio->cui_cl.cis_io; 112 io = vio->vui_cl.cis_io;
114 if (!io && create) {
115 struct inode *inode = vmpage->mapping->host;
116 loff_t pos;
117
118 if (inode_trylock(inode)) {
119 inode_unlock((inode));
120
121 /* this is too bad. Someone is trying to write the
122 * page w/o holding inode mutex. This means we can
123 * add dirty pages into cache during truncate
124 */
125 CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n",
126 current->comm);
127 dump_stack();
128 LBUG();
129 return ERR_PTR(-EIO);
130 }
131
132 /*
133 * Loop-back driver calls ->prepare_write().
134 * methods directly, bypassing file system ->write() operation,
135 * so cl_io has to be created here.
136 */
137 io = ccc_env_thread_io(env);
138 ll_io_init(io, file, 1);
139
140 /* No lock at all for this kind of IO - we can't do it because
141 * we have held page lock, it would cause deadlock.
142 * XXX: This causes poor performance to loop device - One page
143 * per RPC.
144 * In order to get better performance, users should use
145 * lloop driver instead.
146 */
147 io->ci_lockreq = CILR_NEVER;
148
149 pos = vmpage->index << PAGE_SHIFT;
150
151 /* Create a temp IO to serve write. */
152 result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
153 if (result == 0) {
154 cio->cui_fd = LUSTRE_FPRIVATE(file);
155 cio->cui_iter = NULL;
156 result = cl_io_iter_init(env, io);
157 if (result == 0) {
158 result = cl_io_lock(env, io);
159 if (result == 0)
160 result = cl_io_start(env, io);
161 }
162 } else
163 result = io->ci_result;
164 }
165
166 lcc->lcc_io = io; 113 lcc->lcc_io = io;
167 if (!io) 114 if (!io)
168 result = -EIO; 115 result = -EIO;
169 if (result == 0) { 116
117 if (result == 0 && vmpage) {
170 struct cl_page *page; 118 struct cl_page *page;
171 119
172 LASSERT(io->ci_state == CIS_IO_GOING); 120 LASSERT(io->ci_state == CIS_IO_GOING);
173 LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file)); 121 LASSERT(vio->vui_fd == LUSTRE_FPRIVATE(file));
174 page = cl_page_find(env, clob, vmpage->index, vmpage, 122 page = cl_page_find(env, clob, vmpage->index, vmpage,
175 CPT_CACHEABLE); 123 CPT_CACHEABLE);
176 if (!IS_ERR(page)) { 124 if (!IS_ERR(page)) {
177 lcc->lcc_page = page; 125 lcc->lcc_page = page;
178 lu_ref_add(&page->cp_reference, "cl_io", io); 126 lu_ref_add(&page->cp_reference, "cl_io", io);
179 result = 0; 127 result = 0;
180 } else 128 } else {
181 result = PTR_ERR(page); 129 result = PTR_ERR(page);
130 }
182 } 131 }
183 if (result) { 132 if (result) {
184 ll_cl_fini(lcc); 133 ll_cl_fini(lcc);
185 lcc = ERR_PTR(result); 134 lcc = ERR_PTR(result);
186 } 135 }
187 136
188 CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
189 vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
190 env, io);
191 return lcc;
192}
193
194static struct ll_cl_context *ll_cl_get(void)
195{
196 struct ll_cl_context *lcc;
197 struct lu_env *env;
198 int refcheck;
199
200 env = cl_env_get(&refcheck);
201 LASSERT(!IS_ERR(env));
202 lcc = &vvp_env_info(env)->vti_io_ctx;
203 LASSERT(env == lcc->lcc_env);
204 LASSERT(current == lcc->lcc_cookie);
205 cl_env_put(env, &refcheck);
206
207 /* env has got in ll_cl_init, so it is still usable. */
208 return lcc; 137 return lcc;
209} 138}
210 139
211/**
212 * ->prepare_write() address space operation called by generic_file_write()
213 * for every page during write.
214 */
215int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
216 unsigned to)
217{
218 struct ll_cl_context *lcc;
219 int result;
220
221 lcc = ll_cl_init(file, vmpage, 1);
222 if (!IS_ERR(lcc)) {
223 struct lu_env *env = lcc->lcc_env;
224 struct cl_io *io = lcc->lcc_io;
225 struct cl_page *page = lcc->lcc_page;
226
227 cl_page_assume(env, io, page);
228
229 result = cl_io_prepare_write(env, io, page, from, to);
230 if (result == 0) {
231 /*
232 * Add a reference, so that page is not evicted from
233 * the cache until ->commit_write() is called.
234 */
235 cl_page_get(page);
236 lu_ref_add(&page->cp_reference, "prepare_write",
237 current);
238 } else {
239 cl_page_unassume(env, io, page);
240 ll_cl_fini(lcc);
241 }
242 /* returning 0 in prepare assumes commit must be called
243 * afterwards
244 */
245 } else {
246 result = PTR_ERR(lcc);
247 }
248 return result;
249}
250
251int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
252 unsigned to)
253{
254 struct ll_cl_context *lcc;
255 struct lu_env *env;
256 struct cl_io *io;
257 struct cl_page *page;
258 int result = 0;
259
260 lcc = ll_cl_get();
261 env = lcc->lcc_env;
262 page = lcc->lcc_page;
263 io = lcc->lcc_io;
264
265 LASSERT(cl_page_is_owned(page, io));
266 LASSERT(from <= to);
267 if (from != to) /* handle short write case. */
268 result = cl_io_commit_write(env, io, page, from, to);
269 if (cl_page_is_owned(page, io))
270 cl_page_unassume(env, io, page);
271
272 /*
273 * Release reference acquired by ll_prepare_write().
274 */
275 lu_ref_del(&page->cp_reference, "prepare_write", current);
276 cl_page_put(env, page);
277 ll_cl_fini(lcc);
278 return result;
279}
280
281static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which); 140static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
282 141
283/** 142/**
@@ -301,7 +160,7 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
301 */ 160 */
302static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, 161static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
303 struct ra_io_arg *ria, 162 struct ra_io_arg *ria,
304 unsigned long pages) 163 unsigned long pages, unsigned long min)
305{ 164{
306 struct ll_ra_info *ra = &sbi->ll_ra_info; 165 struct ll_ra_info *ra = &sbi->ll_ra_info;
307 long ret; 166 long ret;
@@ -341,6 +200,11 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
341 } 200 }
342 201
343out: 202out:
203 if (ret < min) {
204 /* override ra limit for maximum performance */
205 atomic_add(min - ret, &ra->ra_cur_pages);
206 ret = min;
207 }
344 return ret; 208 return ret;
345} 209}
346 210
@@ -357,9 +221,9 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
357 lprocfs_counter_incr(sbi->ll_ra_stats, which); 221 lprocfs_counter_incr(sbi->ll_ra_stats, which);
358} 222}
359 223
360void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which) 224void ll_ra_stats_inc(struct inode *inode, enum ra_stat which)
361{ 225{
362 struct ll_sb_info *sbi = ll_i2sbi(mapping->host); 226 struct ll_sb_info *sbi = ll_i2sbi(inode);
363 227
364 ll_ra_stats_inc_sbi(sbi, which); 228 ll_ra_stats_inc_sbi(sbi, which);
365} 229}
@@ -388,61 +252,42 @@ static int index_in_window(unsigned long index, unsigned long point,
388 return start <= index && index <= end; 252 return start <= index && index <= end;
389} 253}
390 254
391static struct ll_readahead_state *ll_ras_get(struct file *f) 255void ll_ras_enter(struct file *f)
392{ 256{
393 struct ll_file_data *fd; 257 struct ll_file_data *fd = LUSTRE_FPRIVATE(f);
394 258 struct ll_readahead_state *ras = &fd->fd_ras;
395 fd = LUSTRE_FPRIVATE(f);
396 return &fd->fd_ras;
397}
398
399void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
400{
401 struct ll_readahead_state *ras;
402
403 ras = ll_ras_get(f);
404 259
405 spin_lock(&ras->ras_lock); 260 spin_lock(&ras->ras_lock);
406 ras->ras_requests++; 261 ras->ras_requests++;
407 ras->ras_request_index = 0; 262 ras->ras_request_index = 0;
408 ras->ras_consecutive_requests++; 263 ras->ras_consecutive_requests++;
409 rar->lrr_reader = current;
410
411 list_add(&rar->lrr_linkage, &ras->ras_read_beads);
412 spin_unlock(&ras->ras_lock);
413}
414
415void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
416{
417 struct ll_readahead_state *ras;
418
419 ras = ll_ras_get(f);
420
421 spin_lock(&ras->ras_lock);
422 list_del_init(&rar->lrr_linkage);
423 spin_unlock(&ras->ras_lock); 264 spin_unlock(&ras->ras_lock);
424} 265}
425 266
426static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io, 267static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
427 struct cl_page_list *queue, struct cl_page *page, 268 struct cl_page_list *queue, struct cl_page *page,
428 struct page *vmpage) 269 struct cl_object *clob, pgoff_t *max_index)
429{ 270{
430 struct ccc_page *cp; 271 struct page *vmpage = page->cp_vmpage;
272 struct vvp_page *vpg;
431 int rc; 273 int rc;
432 274
433 rc = 0; 275 rc = 0;
434 cl_page_assume(env, io, page); 276 cl_page_assume(env, io, page);
435 lu_ref_add(&page->cp_reference, "ra", current); 277 lu_ref_add(&page->cp_reference, "ra", current);
436 cp = cl2ccc_page(cl_page_at(page, &vvp_device_type)); 278 vpg = cl2vvp_page(cl_object_page_slice(clob, page));
437 if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) { 279 if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
438 rc = cl_page_is_under_lock(env, io, page); 280 CDEBUG(D_READA, "page index %lu, max_index: %lu\n",
439 if (rc == -EBUSY) { 281 vvp_index(vpg), *max_index);
440 cp->cpg_defer_uptodate = 1; 282 if (*max_index == 0 || vvp_index(vpg) > *max_index)
441 cp->cpg_ra_used = 0; 283 rc = cl_page_is_under_lock(env, io, page, max_index);
284 if (rc == 0) {
285 vpg->vpg_defer_uptodate = 1;
286 vpg->vpg_ra_used = 0;
442 cl_page_list_add(queue, page); 287 cl_page_list_add(queue, page);
443 rc = 1; 288 rc = 1;
444 } else { 289 } else {
445 cl_page_delete(env, page); 290 cl_page_discard(env, io, page);
446 rc = -ENOLCK; 291 rc = -ENOLCK;
447 } 292 }
448 } else { 293 } else {
@@ -466,24 +311,25 @@ static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
466 */ 311 */
467static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, 312static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
468 struct cl_page_list *queue, 313 struct cl_page_list *queue,
469 pgoff_t index, struct address_space *mapping) 314 pgoff_t index, pgoff_t *max_index)
470{ 315{
316 struct cl_object *clob = io->ci_obj;
317 struct inode *inode = vvp_object_inode(clob);
471 struct page *vmpage; 318 struct page *vmpage;
472 struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
473 struct cl_page *page; 319 struct cl_page *page;
474 enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */ 320 enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
475 int rc = 0; 321 int rc = 0;
476 const char *msg = NULL; 322 const char *msg = NULL;
477 323
478 vmpage = grab_cache_page_nowait(mapping, index); 324 vmpage = grab_cache_page_nowait(inode->i_mapping, index);
479 if (vmpage) { 325 if (vmpage) {
480 /* Check if vmpage was truncated or reclaimed */ 326 /* Check if vmpage was truncated or reclaimed */
481 if (vmpage->mapping == mapping) { 327 if (vmpage->mapping == inode->i_mapping) {
482 page = cl_page_find(env, clob, vmpage->index, 328 page = cl_page_find(env, clob, vmpage->index,
483 vmpage, CPT_CACHEABLE); 329 vmpage, CPT_CACHEABLE);
484 if (!IS_ERR(page)) { 330 if (!IS_ERR(page)) {
485 rc = cl_read_ahead_page(env, io, queue, 331 rc = cl_read_ahead_page(env, io, queue,
486 page, vmpage); 332 page, clob, max_index);
487 if (rc == -ENOLCK) { 333 if (rc == -ENOLCK) {
488 which = RA_STAT_FAILED_MATCH; 334 which = RA_STAT_FAILED_MATCH;
489 msg = "lock match failed"; 335 msg = "lock match failed";
@@ -504,7 +350,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
504 msg = "g_c_p_n failed"; 350 msg = "g_c_p_n failed";
505 } 351 }
506 if (msg) { 352 if (msg) {
507 ll_ra_stats_inc(mapping, which); 353 ll_ra_stats_inc(inode, which);
508 CDEBUG(D_READA, "%s\n", msg); 354 CDEBUG(D_READA, "%s\n", msg);
509 } 355 }
510 return rc; 356 return rc;
@@ -616,11 +462,12 @@ static int ll_read_ahead_pages(const struct lu_env *env,
616 struct cl_io *io, struct cl_page_list *queue, 462 struct cl_io *io, struct cl_page_list *queue,
617 struct ra_io_arg *ria, 463 struct ra_io_arg *ria,
618 unsigned long *reserved_pages, 464 unsigned long *reserved_pages,
619 struct address_space *mapping,
620 unsigned long *ra_end) 465 unsigned long *ra_end)
621{ 466{
622 int rc, count = 0, stride_ria; 467 int rc, count = 0;
623 unsigned long page_idx; 468 bool stride_ria;
469 pgoff_t page_idx;
470 pgoff_t max_index = 0;
624 471
625 LASSERT(ria); 472 LASSERT(ria);
626 RIA_DEBUG(ria); 473 RIA_DEBUG(ria);
@@ -631,12 +478,13 @@ static int ll_read_ahead_pages(const struct lu_env *env,
631 if (ras_inside_ra_window(page_idx, ria)) { 478 if (ras_inside_ra_window(page_idx, ria)) {
632 /* If the page is inside the read-ahead window*/ 479 /* If the page is inside the read-ahead window*/
633 rc = ll_read_ahead_page(env, io, queue, 480 rc = ll_read_ahead_page(env, io, queue,
634 page_idx, mapping); 481 page_idx, &max_index);
635 if (rc == 1) { 482 if (rc == 1) {
636 (*reserved_pages)--; 483 (*reserved_pages)--;
637 count++; 484 count++;
638 } else if (rc == -ENOLCK) 485 } else if (rc == -ENOLCK) {
639 break; 486 break;
487 }
640 } else if (stride_ria) { 488 } else if (stride_ria) {
641 /* If it is not in the read-ahead window, and it is 489 /* If it is not in the read-ahead window, and it is
642 * read-ahead mode, then check whether it should skip 490 * read-ahead mode, then check whether it should skip
@@ -666,25 +514,22 @@ static int ll_read_ahead_pages(const struct lu_env *env,
666} 514}
667 515
668int ll_readahead(const struct lu_env *env, struct cl_io *io, 516int ll_readahead(const struct lu_env *env, struct cl_io *io,
669 struct ll_readahead_state *ras, struct address_space *mapping, 517 struct cl_page_list *queue, struct ll_readahead_state *ras,
670 struct cl_page_list *queue, int flags) 518 bool hit)
671{ 519{
672 struct vvp_io *vio = vvp_env_io(env); 520 struct vvp_io *vio = vvp_env_io(env);
673 struct vvp_thread_info *vti = vvp_env_info(env); 521 struct ll_thread_info *lti = ll_env_info(env);
674 struct cl_attr *attr = ccc_env_thread_attr(env); 522 struct cl_attr *attr = vvp_env_thread_attr(env);
675 unsigned long start = 0, end = 0, reserved; 523 unsigned long start = 0, end = 0, reserved;
676 unsigned long ra_end, len; 524 unsigned long ra_end, len, mlen = 0;
677 struct inode *inode; 525 struct inode *inode;
678 struct ll_ra_read *bead; 526 struct ra_io_arg *ria = &lti->lti_ria;
679 struct ra_io_arg *ria = &vti->vti_ria;
680 struct ll_inode_info *lli;
681 struct cl_object *clob; 527 struct cl_object *clob;
682 int ret = 0; 528 int ret = 0;
683 __u64 kms; 529 __u64 kms;
684 530
685 inode = mapping->host; 531 clob = io->ci_obj;
686 lli = ll_i2info(inode); 532 inode = vvp_object_inode(clob);
687 clob = lli->lli_clob;
688 533
689 memset(ria, 0, sizeof(*ria)); 534 memset(ria, 0, sizeof(*ria));
690 535
@@ -696,22 +541,20 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
696 return ret; 541 return ret;
697 kms = attr->cat_kms; 542 kms = attr->cat_kms;
698 if (kms == 0) { 543 if (kms == 0) {
699 ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN); 544 ll_ra_stats_inc(inode, RA_STAT_ZERO_LEN);
700 return 0; 545 return 0;
701 } 546 }
702 547
703 spin_lock(&ras->ras_lock); 548 spin_lock(&ras->ras_lock);
704 if (vio->cui_ra_window_set)
705 bead = &vio->cui_bead;
706 else
707 bead = NULL;
708 549
709 /* Enlarge the RA window to encompass the full read */ 550 /* Enlarge the RA window to encompass the full read */
710 if (bead && ras->ras_window_start + ras->ras_window_len < 551 if (vio->vui_ra_valid &&
711 bead->lrr_start + bead->lrr_count) { 552 ras->ras_window_start + ras->ras_window_len <
712 ras->ras_window_len = bead->lrr_start + bead->lrr_count - 553 vio->vui_ra_start + vio->vui_ra_count) {
554 ras->ras_window_len = vio->vui_ra_start + vio->vui_ra_count -
713 ras->ras_window_start; 555 ras->ras_window_start;
714 } 556 }
557
715 /* Reserve a part of the read-ahead window that we'll be issuing */ 558 /* Reserve a part of the read-ahead window that we'll be issuing */
716 if (ras->ras_window_len) { 559 if (ras->ras_window_len) {
717 start = ras->ras_next_readahead; 560 start = ras->ras_next_readahead;
@@ -755,29 +598,48 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
755 spin_unlock(&ras->ras_lock); 598 spin_unlock(&ras->ras_lock);
756 599
757 if (end == 0) { 600 if (end == 0) {
758 ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW); 601 ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
759 return 0; 602 return 0;
760 } 603 }
761 len = ria_page_count(ria); 604 len = ria_page_count(ria);
762 if (len == 0) 605 if (len == 0) {
606 ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
763 return 0; 607 return 0;
608 }
609
610 CDEBUG(D_READA, DFID ": ria: %lu/%lu, bead: %lu/%lu, hit: %d\n",
611 PFID(lu_object_fid(&clob->co_lu)),
612 ria->ria_start, ria->ria_end,
613 vio->vui_ra_valid ? vio->vui_ra_start : 0,
614 vio->vui_ra_valid ? vio->vui_ra_count : 0,
615 hit);
616
617 /* at least to extend the readahead window to cover current read */
618 if (!hit && vio->vui_ra_valid &&
619 vio->vui_ra_start + vio->vui_ra_count > ria->ria_start) {
620 /* to the end of current read window. */
621 mlen = vio->vui_ra_start + vio->vui_ra_count - ria->ria_start;
622 /* trim to RPC boundary */
623 start = ria->ria_start & (PTLRPC_MAX_BRW_PAGES - 1);
624 mlen = min(mlen, PTLRPC_MAX_BRW_PAGES - start);
625 }
764 626
765 reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len); 627 reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len, mlen);
766 if (reserved < len) 628 if (reserved < len)
767 ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT); 629 ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT);
768 630
769 CDEBUG(D_READA, "reserved page %lu ra_cur %d ra_max %lu\n", reserved, 631 CDEBUG(D_READA, "reserved pages %lu/%lu/%lu, ra_cur %d, ra_max %lu\n",
632 reserved, len, mlen,
770 atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages), 633 atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
771 ll_i2sbi(inode)->ll_ra_info.ra_max_pages); 634 ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
772 635
773 ret = ll_read_ahead_pages(env, io, queue, 636 ret = ll_read_ahead_pages(env, io, queue, ria, &reserved, &ra_end);
774 ria, &reserved, mapping, &ra_end);
775 637
776 if (reserved != 0) 638 if (reserved != 0)
777 ll_ra_count_put(ll_i2sbi(inode), reserved); 639 ll_ra_count_put(ll_i2sbi(inode), reserved);
778 640
779 if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT)) 641 if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
780 ll_ra_stats_inc(mapping, RA_STAT_EOF); 642 ll_ra_stats_inc(inode, RA_STAT_EOF);
781 643
782 /* if we didn't get to the end of the region we reserved from 644 /* if we didn't get to the end of the region we reserved from
783 * the ras we need to go back and update the ras so that the 645 * the ras we need to go back and update the ras so that the
@@ -789,6 +651,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
789 ra_end, end, ria->ria_end); 651 ra_end, end, ria->ria_end);
790 652
791 if (ra_end != end + 1) { 653 if (ra_end != end + 1) {
654 ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
792 spin_lock(&ras->ras_lock); 655 spin_lock(&ras->ras_lock);
793 if (ra_end < ras->ras_next_readahead && 656 if (ra_end < ras->ras_next_readahead &&
794 index_in_window(ra_end, ras->ras_window_start, 0, 657 index_in_window(ra_end, ras->ras_window_start, 0,
@@ -836,7 +699,6 @@ void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
836 spin_lock_init(&ras->ras_lock); 699 spin_lock_init(&ras->ras_lock);
837 ras_reset(inode, ras, 0); 700 ras_reset(inode, ras, 0);
838 ras->ras_requests = 0; 701 ras->ras_requests = 0;
839 INIT_LIST_HEAD(&ras->ras_read_beads);
840} 702}
841 703
842/* 704/*
@@ -1059,15 +921,18 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
1059 ras->ras_last_readpage = index; 921 ras->ras_last_readpage = index;
1060 ras_set_start(inode, ras, index); 922 ras_set_start(inode, ras, index);
1061 923
1062 if (stride_io_mode(ras)) 924 if (stride_io_mode(ras)) {
1063 /* Since stride readahead is sensitive to the offset 925 /* Since stride readahead is sensitive to the offset
1064 * of read-ahead, so we use original offset here, 926 * of read-ahead, so we use original offset here,
1065 * instead of ras_window_start, which is RPC aligned 927 * instead of ras_window_start, which is RPC aligned
1066 */ 928 */
1067 ras->ras_next_readahead = max(index, ras->ras_next_readahead); 929 ras->ras_next_readahead = max(index, ras->ras_next_readahead);
1068 else 930 } else {
1069 ras->ras_next_readahead = max(ras->ras_window_start, 931 if (ras->ras_next_readahead < ras->ras_window_start)
1070 ras->ras_next_readahead); 932 ras->ras_next_readahead = ras->ras_window_start;
933 if (!hit)
934 ras->ras_next_readahead = index + 1;
935 }
1071 RAS_CDEBUG(ras); 936 RAS_CDEBUG(ras);
1072 937
1073 /* Trigger RA in the mmap case where ras_consecutive_requests 938 /* Trigger RA in the mmap case where ras_consecutive_requests
@@ -1129,7 +994,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
1129 clob = ll_i2info(inode)->lli_clob; 994 clob = ll_i2info(inode)->lli_clob;
1130 LASSERT(clob); 995 LASSERT(clob);
1131 996
1132 io = ccc_env_thread_io(env); 997 io = vvp_env_thread_io(env);
1133 io->ci_obj = clob; 998 io->ci_obj = clob;
1134 io->ci_ignore_layout = 1; 999 io->ci_ignore_layout = 1;
1135 result = cl_io_init(env, io, CIT_MISC, clob); 1000 result = cl_io_init(env, io, CIT_MISC, clob);
@@ -1240,8 +1105,9 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
1240 1105
1241 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) { 1106 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
1242 if (end == OBD_OBJECT_EOF) 1107 if (end == OBD_OBJECT_EOF)
1243 end = i_size_read(inode); 1108 mapping->writeback_index = 0;
1244 mapping->writeback_index = (end >> PAGE_SHIFT) + 1; 1109 else
1110 mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
1245 } 1111 }
1246 return result; 1112 return result;
1247} 1113}
@@ -1251,7 +1117,7 @@ int ll_readpage(struct file *file, struct page *vmpage)
1251 struct ll_cl_context *lcc; 1117 struct ll_cl_context *lcc;
1252 int result; 1118 int result;
1253 1119
1254 lcc = ll_cl_init(file, vmpage, 0); 1120 lcc = ll_cl_init(file, vmpage);
1255 if (!IS_ERR(lcc)) { 1121 if (!IS_ERR(lcc)) {
1256 struct lu_env *env = lcc->lcc_env; 1122 struct lu_env *env = lcc->lcc_env;
1257 struct cl_io *io = lcc->lcc_io; 1123 struct cl_io *io = lcc->lcc_io;
@@ -1273,3 +1139,28 @@ int ll_readpage(struct file *file, struct page *vmpage)
1273 } 1139 }
1274 return result; 1140 return result;
1275} 1141}
1142
1143int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
1144 struct cl_page *page, enum cl_req_type crt)
1145{
1146 struct cl_2queue *queue;
1147 int result;
1148
1149 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
1150
1151 queue = &io->ci_queue;
1152 cl_2queue_init_page(queue, page);
1153
1154 result = cl_io_submit_sync(env, io, crt, queue, 0);
1155 LASSERT(cl_page_is_owned(page, io));
1156
1157 if (crt == CRT_READ)
1158 /*
1159 * in CRT_WRITE case page is left locked even in case of
1160 * error.
1161 */
1162 cl_page_list_disown(env, io, &queue->c2_qin);
1163 cl_2queue_fini(env, queue);
1164
1165 return result;
1166}
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 0c3459c1a518..c12a048fce59 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -95,15 +95,12 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
95 if (obj) { 95 if (obj) {
96 page = cl_vmpage_page(vmpage, obj); 96 page = cl_vmpage_page(vmpage, obj);
97 if (page) { 97 if (page) {
98 lu_ref_add(&page->cp_reference,
99 "delete", vmpage);
100 cl_page_delete(env, page); 98 cl_page_delete(env, page);
101 lu_ref_del(&page->cp_reference,
102 "delete", vmpage);
103 cl_page_put(env, page); 99 cl_page_put(env, page);
104 } 100 }
105 } else 101 } else {
106 LASSERT(vmpage->private == 0); 102 LASSERT(vmpage->private == 0);
103 }
107 cl_env_put(env, &refcheck); 104 cl_env_put(env, &refcheck);
108 } 105 }
109 } 106 }
@@ -111,12 +108,12 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
111 108
112static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask) 109static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
113{ 110{
114 struct cl_env_nest nest;
115 struct lu_env *env; 111 struct lu_env *env;
112 void *cookie;
116 struct cl_object *obj; 113 struct cl_object *obj;
117 struct cl_page *page; 114 struct cl_page *page;
118 struct address_space *mapping; 115 struct address_space *mapping;
119 int result; 116 int result = 0;
120 117
121 LASSERT(PageLocked(vmpage)); 118 LASSERT(PageLocked(vmpage));
122 if (PageWriteback(vmpage) || PageDirty(vmpage)) 119 if (PageWriteback(vmpage) || PageDirty(vmpage))
@@ -130,53 +127,42 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
130 if (!obj) 127 if (!obj)
131 return 1; 128 return 1;
132 129
133 /* 1 for page allocator, 1 for cl_page and 1 for page cache */ 130 /* 1 for caller, 1 for cl_page and 1 for page cache */
134 if (page_count(vmpage) > 3) 131 if (page_count(vmpage) > 3)
135 return 0; 132 return 0;
136 133
137 /* TODO: determine what gfp should be used by @gfp_mask. */
138 env = cl_env_nested_get(&nest);
139 if (IS_ERR(env))
140 /* If we can't allocate an env we won't call cl_page_put()
141 * later on which further means it's impossible to drop
142 * page refcount by cl_page, so ask kernel to not free
143 * this page.
144 */
145 return 0;
146
147 page = cl_vmpage_page(vmpage, obj); 134 page = cl_vmpage_page(vmpage, obj);
148 result = !page; 135 if (!page)
149 if (page) { 136 return 1;
150 if (!cl_page_in_use(page)) {
151 result = 1;
152 cl_page_delete(env, page);
153 }
154 cl_page_put(env, page);
155 }
156 cl_env_nested_put(&nest, env);
157 return result;
158}
159 137
160static int ll_set_page_dirty(struct page *vmpage) 138 cookie = cl_env_reenter();
161{ 139 env = cl_env_percpu_get();
162#if 0 140 LASSERT(!IS_ERR(env));
163 struct cl_page *page = vvp_vmpage_page_transient(vmpage);
164 struct vvp_object *obj = cl_inode2vvp(vmpage->mapping->host);
165 struct vvp_page *cpg;
166 141
167 /* 142 if (!cl_page_in_use(page)) {
168 * XXX should page method be called here? 143 result = 1;
169 */ 144 cl_page_delete(env, page);
170 LASSERT(&obj->co_cl == page->cp_obj); 145 }
171 cpg = cl2vvp_page(cl_page_at(page, &vvp_device_type)); 146
172 /* 147 /* To use percpu env array, the call path can not be rescheduled;
173 * XXX cannot do much here, because page is possibly not locked: 148 * otherwise percpu array will be messed if ll_releaspage() called
174 * sys_munmap()->... 149 * again on the same CPU.
175 * ->unmap_page_range()->zap_pte_range()->set_page_dirty(). 150 *
151 * If this page holds the last refc of cl_object, the following
152 * call path may cause reschedule:
153 * cl_page_put -> cl_page_free -> cl_object_put ->
154 * lu_object_put -> lu_object_free -> lov_delete_raid0.
155 *
156 * However, the kernel can't get rid of this inode until all pages have
157 * been cleaned up. Now that we hold page lock here, it's pretty safe
158 * that we won't get into object delete path.
176 */ 159 */
177 vvp_write_pending(obj, cpg); 160 LASSERT(cl_object_refc(obj) > 1);
178#endif 161 cl_page_put(env, page);
179 return __set_page_dirty_nobuffers(vmpage); 162
163 cl_env_percpu_put(env);
164 cl_env_reexit(cookie);
165 return result;
180} 166}
181 167
182#define MAX_DIRECTIO_SIZE (2*1024*1024*1024UL) 168#define MAX_DIRECTIO_SIZE (2*1024*1024*1024UL)
@@ -266,7 +252,7 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
266 * write directly 252 * write directly
267 */ 253 */
268 if (clp->cp_type == CPT_CACHEABLE) { 254 if (clp->cp_type == CPT_CACHEABLE) {
269 struct page *vmpage = cl_page_vmpage(env, clp); 255 struct page *vmpage = cl_page_vmpage(clp);
270 struct page *src_page; 256 struct page *src_page;
271 struct page *dst_page; 257 struct page *dst_page;
272 void *src; 258 void *src;
@@ -364,7 +350,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
364 struct cl_io *io; 350 struct cl_io *io;
365 struct file *file = iocb->ki_filp; 351 struct file *file = iocb->ki_filp;
366 struct inode *inode = file->f_mapping->host; 352 struct inode *inode = file->f_mapping->host;
367 struct ccc_object *obj = cl_inode2ccc(inode); 353 struct vvp_object *obj = cl_inode2vvp(inode);
368 loff_t file_offset = iocb->ki_pos; 354 loff_t file_offset = iocb->ki_pos;
369 ssize_t count = iov_iter_count(iter); 355 ssize_t count = iov_iter_count(iter);
370 ssize_t tot_bytes = 0, result = 0; 356 ssize_t tot_bytes = 0, result = 0;
@@ -376,22 +362,21 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
376 return -EBADF; 362 return -EBADF;
377 363
378 /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */ 364 /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
379 if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK)) 365 if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
380 return -EINVAL; 366 return -EINVAL;
381 367
382 CDEBUG(D_VFSTRACE, 368 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
383 "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n", 369 PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE,
384 inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
385 file_offset, file_offset, count >> PAGE_SHIFT, 370 file_offset, file_offset, count >> PAGE_SHIFT,
386 MAX_DIO_SIZE >> PAGE_SHIFT); 371 MAX_DIO_SIZE >> PAGE_SHIFT);
387 372
388 /* Check that all user buffers are aligned as well */ 373 /* Check that all user buffers are aligned as well */
389 if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK) 374 if (iov_iter_alignment(iter) & ~PAGE_MASK)
390 return -EINVAL; 375 return -EINVAL;
391 376
392 env = cl_env_get(&refcheck); 377 env = cl_env_get(&refcheck);
393 LASSERT(!IS_ERR(env)); 378 LASSERT(!IS_ERR(env));
394 io = ccc_env_io(env)->cui_cl.cis_io; 379 io = vvp_env_io(env)->vui_cl.cis_io;
395 LASSERT(io); 380 LASSERT(io);
396 381
397 /* 0. Need locking between buffered and direct access. and race with 382 /* 0. Need locking between buffered and direct access. and race with
@@ -401,7 +386,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
401 if (iov_iter_rw(iter) == READ) 386 if (iov_iter_rw(iter) == READ)
402 inode_lock(inode); 387 inode_lock(inode);
403 388
404 LASSERT(obj->cob_transient_pages == 0); 389 LASSERT(obj->vob_transient_pages == 0);
405 while (iov_iter_count(iter)) { 390 while (iov_iter_count(iter)) {
406 struct page **pages; 391 struct page **pages;
407 size_t offs; 392 size_t offs;
@@ -435,8 +420,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
435 size > (PAGE_SIZE / sizeof(*pages)) * 420 size > (PAGE_SIZE / sizeof(*pages)) *
436 PAGE_SIZE) { 421 PAGE_SIZE) {
437 size = ((((size / 2) - 1) | 422 size = ((((size / 2) - 1) |
438 ~CFS_PAGE_MASK) + 1) & 423 ~PAGE_MASK) + 1) &
439 CFS_PAGE_MASK; 424 PAGE_MASK;
440 CDEBUG(D_VFSTRACE, "DIO size now %lu\n", 425 CDEBUG(D_VFSTRACE, "DIO size now %lu\n",
441 size); 426 size);
442 continue; 427 continue;
@@ -449,62 +434,213 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
449 file_offset += result; 434 file_offset += result;
450 } 435 }
451out: 436out:
452 LASSERT(obj->cob_transient_pages == 0); 437 LASSERT(obj->vob_transient_pages == 0);
453 if (iov_iter_rw(iter) == READ) 438 if (iov_iter_rw(iter) == READ)
454 inode_unlock(inode); 439 inode_unlock(inode);
455 440
456 if (tot_bytes > 0) { 441 if (tot_bytes > 0) {
457 if (iov_iter_rw(iter) == WRITE) { 442 struct vvp_io *vio = vvp_env_io(env);
458 struct lov_stripe_md *lsm; 443
459 444 /* no commit async for direct IO */
460 lsm = ccc_inode_lsm_get(inode); 445 vio->u.write.vui_written += tot_bytes;
461 LASSERT(lsm);
462 lov_stripe_lock(lsm);
463 obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0);
464 lov_stripe_unlock(lsm);
465 ccc_inode_lsm_put(inode, lsm);
466 }
467 } 446 }
468 447
469 cl_env_put(env, &refcheck); 448 cl_env_put(env, &refcheck);
470 return tot_bytes ? : result; 449 return tot_bytes ? tot_bytes : result;
450}
451
452/**
453 * Prepare partially written-to page for a write.
454 */
455static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
456 struct cl_page *pg)
457{
458 struct cl_attr *attr = vvp_env_thread_attr(env);
459 struct cl_object *obj = io->ci_obj;
460 struct vvp_page *vpg = cl_object_page_slice(obj, pg);
461 loff_t offset = cl_offset(obj, vvp_index(vpg));
462 int result;
463
464 cl_object_attr_lock(obj);
465 result = cl_object_attr_get(env, obj, attr);
466 cl_object_attr_unlock(obj);
467 if (result == 0) {
468 /*
469 * If are writing to a new page, no need to read old data.
470 * The extent locking will have updated the KMS, and for our
471 * purposes here we can treat it like i_size.
472 */
473 if (attr->cat_kms <= offset) {
474 char *kaddr = kmap_atomic(vpg->vpg_page);
475
476 memset(kaddr, 0, cl_page_size(obj));
477 kunmap_atomic(kaddr);
478 } else if (vpg->vpg_defer_uptodate) {
479 vpg->vpg_ra_used = 1;
480 } else {
481 result = ll_page_sync_io(env, io, pg, CRT_READ);
482 }
483 }
484 return result;
471} 485}
472 486
473static int ll_write_begin(struct file *file, struct address_space *mapping, 487static int ll_write_begin(struct file *file, struct address_space *mapping,
474 loff_t pos, unsigned len, unsigned flags, 488 loff_t pos, unsigned len, unsigned flags,
475 struct page **pagep, void **fsdata) 489 struct page **pagep, void **fsdata)
476{ 490{
491 struct ll_cl_context *lcc;
492 struct lu_env *env;
493 struct cl_io *io;
494 struct cl_page *page;
495 struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
477 pgoff_t index = pos >> PAGE_SHIFT; 496 pgoff_t index = pos >> PAGE_SHIFT;
478 struct page *page; 497 struct page *vmpage = NULL;
479 int rc; 498 unsigned int from = pos & (PAGE_SIZE - 1);
480 unsigned from = pos & (PAGE_SIZE - 1); 499 unsigned int to = from + len;
500 int result = 0;
481 501
482 page = grab_cache_page_write_begin(mapping, index, flags); 502 CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len);
483 if (!page)
484 return -ENOMEM;
485 503
486 *pagep = page; 504 lcc = ll_cl_init(file, NULL);
505 if (IS_ERR(lcc)) {
506 result = PTR_ERR(lcc);
507 goto out;
508 }
487 509
488 rc = ll_prepare_write(file, page, from, from + len); 510 env = lcc->lcc_env;
489 if (rc) { 511 io = lcc->lcc_io;
490 unlock_page(page); 512
491 put_page(page); 513 /* To avoid deadlock, try to lock page first. */
514 vmpage = grab_cache_page_nowait(mapping, index);
515 if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) {
516 struct vvp_io *vio = vvp_env_io(env);
517 struct cl_page_list *plist = &vio->u.write.vui_queue;
518
519 /* if the page is already in dirty cache, we have to commit
520 * the pages right now; otherwise, it may cause deadlock
521 * because it holds page lock of a dirty page and request for
522 * more grants. It's okay for the dirty page to be the first
523 * one in commit page list, though.
524 */
525 if (vmpage && plist->pl_nr > 0) {
526 unlock_page(vmpage);
527 put_page(vmpage);
528 vmpage = NULL;
529 }
530
531 /* commit pages and then wait for page lock */
532 result = vvp_io_write_commit(env, io);
533 if (result < 0)
534 goto out;
535
536 if (!vmpage) {
537 vmpage = grab_cache_page_write_begin(mapping, index,
538 flags);
539 if (!vmpage) {
540 result = -ENOMEM;
541 goto out;
542 }
543 }
492 } 544 }
493 return rc; 545
546 page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
547 if (IS_ERR(page)) {
548 result = PTR_ERR(page);
549 goto out;
550 }
551
552 lcc->lcc_page = page;
553 lu_ref_add(&page->cp_reference, "cl_io", io);
554
555 cl_page_assume(env, io, page);
556 if (!PageUptodate(vmpage)) {
557 /*
558 * We're completely overwriting an existing page,
559 * so _don't_ set it up to date until commit_write
560 */
561 if (from == 0 && to == PAGE_SIZE) {
562 CL_PAGE_HEADER(D_PAGE, env, page, "full page write\n");
563 POISON_PAGE(vmpage, 0x11);
564 } else {
565 /* TODO: can be optimized at OSC layer to check if it
566 * is a lockless IO. In that case, it's not necessary
567 * to read the data.
568 */
569 result = ll_prepare_partial_page(env, io, page);
570 if (result == 0)
571 SetPageUptodate(vmpage);
572 }
573 }
574 if (result < 0)
575 cl_page_unassume(env, io, page);
576out:
577 if (result < 0) {
578 if (vmpage) {
579 unlock_page(vmpage);
580 put_page(vmpage);
581 }
582 if (!IS_ERR(lcc))
583 ll_cl_fini(lcc);
584 } else {
585 *pagep = vmpage;
586 *fsdata = lcc;
587 }
588 return result;
494} 589}
495 590
496static int ll_write_end(struct file *file, struct address_space *mapping, 591static int ll_write_end(struct file *file, struct address_space *mapping,
497 loff_t pos, unsigned len, unsigned copied, 592 loff_t pos, unsigned len, unsigned copied,
498 struct page *page, void *fsdata) 593 struct page *vmpage, void *fsdata)
499{ 594{
595 struct ll_cl_context *lcc = fsdata;
596 struct lu_env *env;
597 struct cl_io *io;
598 struct vvp_io *vio;
599 struct cl_page *page;
500 unsigned from = pos & (PAGE_SIZE - 1); 600 unsigned from = pos & (PAGE_SIZE - 1);
501 int rc; 601 bool unplug = false;
602 int result = 0;
603
604 put_page(vmpage);
605
606 env = lcc->lcc_env;
607 page = lcc->lcc_page;
608 io = lcc->lcc_io;
609 vio = vvp_env_io(env);
610
611 LASSERT(cl_page_is_owned(page, io));
612 if (copied > 0) {
613 struct cl_page_list *plist = &vio->u.write.vui_queue;
614
615 lcc->lcc_page = NULL; /* page will be queued */
616
617 /* Add it into write queue */
618 cl_page_list_add(plist, page);
619 if (plist->pl_nr == 1) /* first page */
620 vio->u.write.vui_from = from;
621 else
622 LASSERT(from == 0);
623 vio->u.write.vui_to = from + copied;
624
625 /* We may have one full RPC, commit it soon */
626 if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
627 unplug = true;
628
629 CL_PAGE_DEBUG(D_VFSTRACE, env, page,
630 "queued page: %d.\n", plist->pl_nr);
631 } else {
632 cl_page_disown(env, io, page);
633
634 /* page list is not contiguous now, commit it now */
635 unplug = true;
636 }
502 637
503 rc = ll_commit_write(file, page, from, from + copied); 638 if (unplug ||
504 unlock_page(page); 639 file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
505 put_page(page); 640 result = vvp_io_write_commit(env, io);
506 641
507 return rc ?: copied; 642 ll_cl_fini(lcc);
643 return result >= 0 ? copied : result;
508} 644}
509 645
510#ifdef CONFIG_MIGRATION 646#ifdef CONFIG_MIGRATION
@@ -523,7 +659,7 @@ const struct address_space_operations ll_aops = {
523 .direct_IO = ll_direct_IO_26, 659 .direct_IO = ll_direct_IO_26,
524 .writepage = ll_writepage, 660 .writepage = ll_writepage,
525 .writepages = ll_writepages, 661 .writepages = ll_writepages,
526 .set_page_dirty = ll_set_page_dirty, 662 .set_page_dirty = __set_page_dirty_nobuffers,
527 .write_begin = ll_write_begin, 663 .write_begin = ll_write_begin,
528 .write_end = ll_write_end, 664 .write_end = ll_write_end,
529 .invalidatepage = ll_invalidatepage, 665 .invalidatepage = ll_invalidatepage,
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 99ffd1589df8..6322f88661e8 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -661,8 +661,9 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
661 if (rc) 661 if (rc)
662 goto out; 662 goto out;
663 663
664 CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n", 664 CDEBUG(D_DLMTRACE, "%s: setting l_data to inode "DFID"%p\n",
665 child, child->i_ino, child->i_generation); 665 ll_get_fsname(child->i_sb, NULL, 0),
666 PFID(ll_inode2fid(child)), child);
666 ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL); 667 ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
667 668
668 entry->se_inode = child; 669 entry->se_inode = child;
@@ -1591,13 +1592,11 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1591 *dentryp = alias; 1592 *dentryp = alias;
1592 } else if (d_inode(*dentryp) != inode) { 1593 } else if (d_inode(*dentryp) != inode) {
1593 /* revalidate, but inode is recreated */ 1594 /* revalidate, but inode is recreated */
1594 CDEBUG(D_READA, 1595 CDEBUG(D_READA, "%s: stale dentry %pd inode "DFID", statahead inode "DFID"\n",
1595 "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n", 1596 ll_get_fsname(d_inode(*dentryp)->i_sb, NULL, 0),
1596 *dentryp, 1597 *dentryp,
1597 d_inode(*dentryp)->i_ino, 1598 PFID(ll_inode2fid(d_inode(*dentryp))),
1598 d_inode(*dentryp)->i_generation, 1599 PFID(ll_inode2fid(inode)));
1599 inode->i_ino,
1600 inode->i_generation);
1601 ll_sai_unplug(sai, entry); 1600 ll_sai_unplug(sai, entry);
1602 return -ESTALE; 1601 return -ESTALE;
1603 } else { 1602 } else {
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 61856d37afc5..415750b0bff4 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -164,9 +164,18 @@ static int __init lustre_init(void)
164 if (rc != 0) 164 if (rc != 0)
165 goto out_sysfs; 165 goto out_sysfs;
166 166
167 cl_inode_fini_env = cl_env_alloc(&cl_inode_fini_refcheck,
168 LCT_REMEMBER | LCT_NOREF);
169 if (IS_ERR(cl_inode_fini_env)) {
170 rc = PTR_ERR(cl_inode_fini_env);
171 goto out_vvp;
172 }
173
174 cl_inode_fini_env->le_ctx.lc_cookie = 0x4;
175
167 rc = ll_xattr_init(); 176 rc = ll_xattr_init();
168 if (rc != 0) 177 if (rc != 0)
169 goto out_vvp; 178 goto out_inode_fini_env;
170 179
171 lustre_register_client_fill_super(ll_fill_super); 180 lustre_register_client_fill_super(ll_fill_super);
172 lustre_register_kill_super_cb(ll_kill_super); 181 lustre_register_kill_super_cb(ll_kill_super);
@@ -174,6 +183,8 @@ static int __init lustre_init(void)
174 183
175 return 0; 184 return 0;
176 185
186out_inode_fini_env:
187 cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck);
177out_vvp: 188out_vvp:
178 vvp_global_fini(); 189 vvp_global_fini();
179out_sysfs: 190out_sysfs:
@@ -198,6 +209,7 @@ static void __exit lustre_exit(void)
198 kset_unregister(llite_kset); 209 kset_unregister(llite_kset);
199 210
200 ll_xattr_fini(); 211 ll_xattr_fini();
212 cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck);
201 vvp_global_fini(); 213 vvp_global_fini();
202 214
203 kmem_cache_destroy(ll_inode_cachep); 215 kmem_cache_destroy(ll_inode_cachep);
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 46d03ea48352..3fc736ccf85e 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -77,7 +77,9 @@ static int ll_readlink_internal(struct inode *inode,
77 ll_finish_md_op_data(op_data); 77 ll_finish_md_op_data(op_data);
78 if (rc) { 78 if (rc) {
79 if (rc != -ENOENT) 79 if (rc != -ENOENT)
80 CERROR("inode %lu: rc = %d\n", inode->i_ino, rc); 80 CERROR("%s: inode "DFID": rc = %d\n",
81 ll_get_fsname(inode->i_sb, NULL, 0),
82 PFID(ll_inode2fid(inode)), rc);
81 goto failed; 83 goto failed;
82 } 84 }
83 85
@@ -90,8 +92,10 @@ static int ll_readlink_internal(struct inode *inode,
90 92
91 LASSERT(symlen != 0); 93 LASSERT(symlen != 0);
92 if (body->eadatasize != symlen) { 94 if (body->eadatasize != symlen) {
93 CERROR("inode %lu: symlink length %d not expected %d\n", 95 CERROR("%s: inode "DFID": symlink length %d not expected %d\n",
94 inode->i_ino, body->eadatasize - 1, symlen - 1); 96 ll_get_fsname(inode->i_sb, NULL, 0),
97 PFID(ll_inode2fid(inode)), body->eadatasize - 1,
98 symlen - 1);
95 rc = -EPROTO; 99 rc = -EPROTO;
96 goto failed; 100 goto failed;
97 } 101 }
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 282b70b776da..47101de1c020 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -36,6 +36,7 @@
36 * cl_device and cl_device_type implementation for VVP layer. 36 * cl_device and cl_device_type implementation for VVP layer.
37 * 37 *
38 * Author: Nikita Danilov <nikita.danilov@sun.com> 38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
39 */ 40 */
40 41
41#define DEBUG_SUBSYSTEM S_LLITE 42#define DEBUG_SUBSYSTEM S_LLITE
@@ -56,13 +57,33 @@
56 * "llite_" (var. "ll_") prefix. 57 * "llite_" (var. "ll_") prefix.
57 */ 58 */
58 59
59static struct kmem_cache *vvp_thread_kmem; 60static struct kmem_cache *ll_thread_kmem;
61struct kmem_cache *vvp_lock_kmem;
62struct kmem_cache *vvp_object_kmem;
63struct kmem_cache *vvp_req_kmem;
60static struct kmem_cache *vvp_session_kmem; 64static struct kmem_cache *vvp_session_kmem;
65static struct kmem_cache *vvp_thread_kmem;
66
61static struct lu_kmem_descr vvp_caches[] = { 67static struct lu_kmem_descr vvp_caches[] = {
62 { 68 {
63 .ckd_cache = &vvp_thread_kmem, 69 .ckd_cache = &ll_thread_kmem,
64 .ckd_name = "vvp_thread_kmem", 70 .ckd_name = "ll_thread_kmem",
65 .ckd_size = sizeof(struct vvp_thread_info), 71 .ckd_size = sizeof(struct ll_thread_info),
72 },
73 {
74 .ckd_cache = &vvp_lock_kmem,
75 .ckd_name = "vvp_lock_kmem",
76 .ckd_size = sizeof(struct vvp_lock),
77 },
78 {
79 .ckd_cache = &vvp_object_kmem,
80 .ckd_name = "vvp_object_kmem",
81 .ckd_size = sizeof(struct vvp_object),
82 },
83 {
84 .ckd_cache = &vvp_req_kmem,
85 .ckd_name = "vvp_req_kmem",
86 .ckd_size = sizeof(struct vvp_req),
66 }, 87 },
67 { 88 {
68 .ckd_cache = &vvp_session_kmem, 89 .ckd_cache = &vvp_session_kmem,
@@ -70,29 +91,40 @@ static struct lu_kmem_descr vvp_caches[] = {
70 .ckd_size = sizeof(struct vvp_session) 91 .ckd_size = sizeof(struct vvp_session)
71 }, 92 },
72 { 93 {
94 .ckd_cache = &vvp_thread_kmem,
95 .ckd_name = "vvp_thread_kmem",
96 .ckd_size = sizeof(struct vvp_thread_info),
97 },
98 {
73 .ckd_cache = NULL 99 .ckd_cache = NULL
74 } 100 }
75}; 101};
76 102
77static void *vvp_key_init(const struct lu_context *ctx, 103static void *ll_thread_key_init(const struct lu_context *ctx,
78 struct lu_context_key *key) 104 struct lu_context_key *key)
79{ 105{
80 struct vvp_thread_info *info; 106 struct vvp_thread_info *info;
81 107
82 info = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS); 108 info = kmem_cache_zalloc(ll_thread_kmem, GFP_NOFS);
83 if (!info) 109 if (!info)
84 info = ERR_PTR(-ENOMEM); 110 info = ERR_PTR(-ENOMEM);
85 return info; 111 return info;
86} 112}
87 113
88static void vvp_key_fini(const struct lu_context *ctx, 114static void ll_thread_key_fini(const struct lu_context *ctx,
89 struct lu_context_key *key, void *data) 115 struct lu_context_key *key, void *data)
90{ 116{
91 struct vvp_thread_info *info = data; 117 struct vvp_thread_info *info = data;
92 118
93 kmem_cache_free(vvp_thread_kmem, info); 119 kmem_cache_free(ll_thread_kmem, info);
94} 120}
95 121
122struct lu_context_key ll_thread_key = {
123 .lct_tags = LCT_CL_THREAD,
124 .lct_init = ll_thread_key_init,
125 .lct_fini = ll_thread_key_fini
126};
127
96static void *vvp_session_key_init(const struct lu_context *ctx, 128static void *vvp_session_key_init(const struct lu_context *ctx,
97 struct lu_context_key *key) 129 struct lu_context_key *key)
98{ 130{
@@ -112,34 +144,127 @@ static void vvp_session_key_fini(const struct lu_context *ctx,
112 kmem_cache_free(vvp_session_kmem, session); 144 kmem_cache_free(vvp_session_kmem, session);
113} 145}
114 146
115struct lu_context_key vvp_key = {
116 .lct_tags = LCT_CL_THREAD,
117 .lct_init = vvp_key_init,
118 .lct_fini = vvp_key_fini
119};
120
121struct lu_context_key vvp_session_key = { 147struct lu_context_key vvp_session_key = {
122 .lct_tags = LCT_SESSION, 148 .lct_tags = LCT_SESSION,
123 .lct_init = vvp_session_key_init, 149 .lct_init = vvp_session_key_init,
124 .lct_fini = vvp_session_key_fini 150 .lct_fini = vvp_session_key_fini
125}; 151};
126 152
153void *vvp_thread_key_init(const struct lu_context *ctx,
154 struct lu_context_key *key)
155{
156 struct vvp_thread_info *vti;
157
158 vti = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS);
159 if (!vti)
160 vti = ERR_PTR(-ENOMEM);
161 return vti;
162}
163
164void vvp_thread_key_fini(const struct lu_context *ctx,
165 struct lu_context_key *key, void *data)
166{
167 struct vvp_thread_info *vti = data;
168
169 kmem_cache_free(vvp_thread_kmem, vti);
170}
171
172struct lu_context_key vvp_thread_key = {
173 .lct_tags = LCT_CL_THREAD,
174 .lct_init = vvp_thread_key_init,
175 .lct_fini = vvp_thread_key_fini
176};
177
127/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */ 178/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
128LU_TYPE_INIT_FINI(vvp, &ccc_key, &ccc_session_key, &vvp_key, &vvp_session_key); 179LU_TYPE_INIT_FINI(vvp, &vvp_thread_key, &ll_thread_key, &vvp_session_key);
129 180
130static const struct lu_device_operations vvp_lu_ops = { 181static const struct lu_device_operations vvp_lu_ops = {
131 .ldo_object_alloc = vvp_object_alloc 182 .ldo_object_alloc = vvp_object_alloc
132}; 183};
133 184
134static const struct cl_device_operations vvp_cl_ops = { 185static const struct cl_device_operations vvp_cl_ops = {
135 .cdo_req_init = ccc_req_init 186 .cdo_req_init = vvp_req_init
136}; 187};
137 188
189static struct lu_device *vvp_device_free(const struct lu_env *env,
190 struct lu_device *d)
191{
192 struct vvp_device *vdv = lu2vvp_dev(d);
193 struct cl_site *site = lu2cl_site(d->ld_site);
194 struct lu_device *next = cl2lu_dev(vdv->vdv_next);
195
196 if (d->ld_site) {
197 cl_site_fini(site);
198 kfree(site);
199 }
200 cl_device_fini(lu2cl_dev(d));
201 kfree(vdv);
202 return next;
203}
204
138static struct lu_device *vvp_device_alloc(const struct lu_env *env, 205static struct lu_device *vvp_device_alloc(const struct lu_env *env,
139 struct lu_device_type *t, 206 struct lu_device_type *t,
140 struct lustre_cfg *cfg) 207 struct lustre_cfg *cfg)
141{ 208{
142 return ccc_device_alloc(env, t, cfg, &vvp_lu_ops, &vvp_cl_ops); 209 struct vvp_device *vdv;
210 struct lu_device *lud;
211 struct cl_site *site;
212 int rc;
213
214 vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
215 if (!vdv)
216 return ERR_PTR(-ENOMEM);
217
218 lud = &vdv->vdv_cl.cd_lu_dev;
219 cl_device_init(&vdv->vdv_cl, t);
220 vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;
221 vdv->vdv_cl.cd_ops = &vvp_cl_ops;
222
223 site = kzalloc(sizeof(*site), GFP_NOFS);
224 if (site) {
225 rc = cl_site_init(site, &vdv->vdv_cl);
226 if (rc == 0) {
227 rc = lu_site_init_finish(&site->cs_lu);
228 } else {
229 LASSERT(!lud->ld_site);
230 CERROR("Cannot init lu_site, rc %d.\n", rc);
231 kfree(site);
232 }
233 } else {
234 rc = -ENOMEM;
235 }
236 if (rc != 0) {
237 vvp_device_free(env, lud);
238 lud = ERR_PTR(rc);
239 }
240 return lud;
241}
242
243static int vvp_device_init(const struct lu_env *env, struct lu_device *d,
244 const char *name, struct lu_device *next)
245{
246 struct vvp_device *vdv;
247 int rc;
248
249 vdv = lu2vvp_dev(d);
250 vdv->vdv_next = lu2cl_dev(next);
251
252 LASSERT(d->ld_site && next->ld_type);
253 next->ld_site = d->ld_site;
254 rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
255 next->ld_type->ldt_name,
256 NULL);
257 if (rc == 0) {
258 lu_device_get(next);
259 lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
260 }
261 return rc;
262}
263
264static struct lu_device *vvp_device_fini(const struct lu_env *env,
265 struct lu_device *d)
266{
267 return cl2lu_dev(lu2vvp_dev(d)->vdv_next);
143} 268}
144 269
145static const struct lu_device_type_operations vvp_device_type_ops = { 270static const struct lu_device_type_operations vvp_device_type_ops = {
@@ -150,9 +275,9 @@ static const struct lu_device_type_operations vvp_device_type_ops = {
150 .ldto_stop = vvp_type_stop, 275 .ldto_stop = vvp_type_stop,
151 276
152 .ldto_device_alloc = vvp_device_alloc, 277 .ldto_device_alloc = vvp_device_alloc,
153 .ldto_device_free = ccc_device_free, 278 .ldto_device_free = vvp_device_free,
154 .ldto_device_init = ccc_device_init, 279 .ldto_device_init = vvp_device_init,
155 .ldto_device_fini = ccc_device_fini 280 .ldto_device_fini = vvp_device_fini,
156}; 281};
157 282
158struct lu_device_type vvp_device_type = { 283struct lu_device_type vvp_device_type = {
@@ -168,20 +293,27 @@ struct lu_device_type vvp_device_type = {
168 */ 293 */
169int vvp_global_init(void) 294int vvp_global_init(void)
170{ 295{
171 int result; 296 int rc;
172 297
173 result = lu_kmem_init(vvp_caches); 298 rc = lu_kmem_init(vvp_caches);
174 if (result == 0) { 299 if (rc != 0)
175 result = ccc_global_init(&vvp_device_type); 300 return rc;
176 if (result != 0) 301
177 lu_kmem_fini(vvp_caches); 302 rc = lu_device_type_init(&vvp_device_type);
178 } 303 if (rc != 0)
179 return result; 304 goto out_kmem;
305
306 return 0;
307
308out_kmem:
309 lu_kmem_fini(vvp_caches);
310
311 return rc;
180} 312}
181 313
182void vvp_global_fini(void) 314void vvp_global_fini(void)
183{ 315{
184 ccc_global_fini(&vvp_device_type); 316 lu_device_type_fini(&vvp_device_type);
185 lu_kmem_fini(vvp_caches); 317 lu_kmem_fini(vvp_caches);
186} 318}
187 319
@@ -205,13 +337,14 @@ int cl_sb_init(struct super_block *sb)
205 cl = cl_type_setup(env, NULL, &vvp_device_type, 337 cl = cl_type_setup(env, NULL, &vvp_device_type,
206 sbi->ll_dt_exp->exp_obd->obd_lu_dev); 338 sbi->ll_dt_exp->exp_obd->obd_lu_dev);
207 if (!IS_ERR(cl)) { 339 if (!IS_ERR(cl)) {
208 cl2ccc_dev(cl)->cdv_sb = sb; 340 cl2vvp_dev(cl)->vdv_sb = sb;
209 sbi->ll_cl = cl; 341 sbi->ll_cl = cl;
210 sbi->ll_site = cl2lu_dev(cl)->ld_site; 342 sbi->ll_site = cl2lu_dev(cl)->ld_site;
211 } 343 }
212 cl_env_put(env, &refcheck); 344 cl_env_put(env, &refcheck);
213 } else 345 } else {
214 rc = PTR_ERR(env); 346 rc = PTR_ERR(env);
347 }
215 return rc; 348 return rc;
216} 349}
217 350
@@ -356,23 +489,18 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
356 return ~0ULL; 489 return ~0ULL;
357 clob = vvp_pgcache_obj(env, dev, &id); 490 clob = vvp_pgcache_obj(env, dev, &id);
358 if (clob) { 491 if (clob) {
359 struct cl_object_header *hdr; 492 struct inode *inode = vvp_object_inode(clob);
360 int nr; 493 struct page *vmpage;
361 struct cl_page *pg; 494 int nr;
362
363 /* got an object. Find next page. */
364 hdr = cl_object_header(clob);
365 495
366 spin_lock(&hdr->coh_page_guard); 496 nr = find_get_pages_contig(inode->i_mapping,
367 nr = radix_tree_gang_lookup(&hdr->coh_tree, 497 id.vpi_index, 1, &vmpage);
368 (void **)&pg,
369 id.vpi_index, 1);
370 if (nr > 0) { 498 if (nr > 0) {
371 id.vpi_index = pg->cp_index; 499 id.vpi_index = vmpage->index;
372 /* Cant support over 16T file */ 500 /* Cant support over 16T file */
373 nr = !(pg->cp_index > 0xffffffff); 501 nr = !(vmpage->index > 0xffffffff);
502 put_page(vmpage);
374 } 503 }
375 spin_unlock(&hdr->coh_page_guard);
376 504
377 lu_object_ref_del(&clob->co_lu, "dump", current); 505 lu_object_ref_del(&clob->co_lu, "dump", current);
378 cl_object_put(env, clob); 506 cl_object_put(env, clob);
@@ -398,21 +526,20 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
398static void vvp_pgcache_page_show(const struct lu_env *env, 526static void vvp_pgcache_page_show(const struct lu_env *env,
399 struct seq_file *seq, struct cl_page *page) 527 struct seq_file *seq, struct cl_page *page)
400{ 528{
401 struct ccc_page *cpg; 529 struct vvp_page *vpg;
402 struct page *vmpage; 530 struct page *vmpage;
403 int has_flags; 531 int has_flags;
404 532
405 cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type)); 533 vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
406 vmpage = cpg->cpg_page; 534 vmpage = vpg->vpg_page;
407 seq_printf(seq, " %5i | %p %p %s %s %s %s | %p %lu/%u(%p) %lu %u [", 535 seq_printf(seq, " %5i | %p %p %s %s %s %s | %p "DFID"(%p) %lu %u [",
408 0 /* gen */, 536 0 /* gen */,
409 cpg, page, 537 vpg, page,
410 "none", 538 "none",
411 cpg->cpg_write_queued ? "wq" : "- ", 539 vpg->vpg_write_queued ? "wq" : "- ",
412 cpg->cpg_defer_uptodate ? "du" : "- ", 540 vpg->vpg_defer_uptodate ? "du" : "- ",
413 PageWriteback(vmpage) ? "wb" : "-", 541 PageWriteback(vmpage) ? "wb" : "-",
414 vmpage, vmpage->mapping->host->i_ino, 542 vmpage, PFID(ll_inode2fid(vmpage->mapping->host)),
415 vmpage->mapping->host->i_generation,
416 vmpage->mapping->host, vmpage->index, 543 vmpage->mapping->host, vmpage->index,
417 page_count(vmpage)); 544 page_count(vmpage));
418 has_flags = 0; 545 has_flags = 0;
@@ -431,8 +558,6 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
431 struct ll_sb_info *sbi; 558 struct ll_sb_info *sbi;
432 struct cl_object *clob; 559 struct cl_object *clob;
433 struct lu_env *env; 560 struct lu_env *env;
434 struct cl_page *page;
435 struct cl_object_header *hdr;
436 struct vvp_pgcache_id id; 561 struct vvp_pgcache_id id;
437 int refcheck; 562 int refcheck;
438 int result; 563 int result;
@@ -444,27 +569,38 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
444 sbi = f->private; 569 sbi = f->private;
445 clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id); 570 clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
446 if (clob) { 571 if (clob) {
447 hdr = cl_object_header(clob); 572 struct inode *inode = vvp_object_inode(clob);
448 573 struct cl_page *page = NULL;
449 spin_lock(&hdr->coh_page_guard); 574 struct page *vmpage;
450 page = cl_page_lookup(hdr, id.vpi_index); 575
451 spin_unlock(&hdr->coh_page_guard); 576 result = find_get_pages_contig(inode->i_mapping,
577 id.vpi_index, 1,
578 &vmpage);
579 if (result > 0) {
580 lock_page(vmpage);
581 page = cl_vmpage_page(vmpage, clob);
582 unlock_page(vmpage);
583 put_page(vmpage);
584 }
452 585
453 seq_printf(f, "%8x@"DFID": ", 586 seq_printf(f, "%8x@" DFID ": ", id.vpi_index,
454 id.vpi_index, PFID(&hdr->coh_lu.loh_fid)); 587 PFID(lu_object_fid(&clob->co_lu)));
455 if (page) { 588 if (page) {
456 vvp_pgcache_page_show(env, f, page); 589 vvp_pgcache_page_show(env, f, page);
457 cl_page_put(env, page); 590 cl_page_put(env, page);
458 } else 591 } else {
459 seq_puts(f, "missing\n"); 592 seq_puts(f, "missing\n");
593 }
460 lu_object_ref_del(&clob->co_lu, "dump", current); 594 lu_object_ref_del(&clob->co_lu, "dump", current);
461 cl_object_put(env, clob); 595 cl_object_put(env, clob);
462 } else 596 } else {
463 seq_printf(f, "%llx missing\n", pos); 597 seq_printf(f, "%llx missing\n", pos);
598 }
464 cl_env_put(env, &refcheck); 599 cl_env_put(env, &refcheck);
465 result = 0; 600 result = 0;
466 } else 601 } else {
467 result = PTR_ERR(env); 602 result = PTR_ERR(env);
603 }
468 return result; 604 return result;
469} 605}
470 606
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index bb393378c9bb..27b9b0a01f32 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -41,21 +41,337 @@
41#ifndef VVP_INTERNAL_H 41#ifndef VVP_INTERNAL_H
42#define VVP_INTERNAL_H 42#define VVP_INTERNAL_H
43 43
44#include "../include/lustre/lustre_idl.h"
44#include "../include/cl_object.h" 45#include "../include/cl_object.h"
45#include "llite_internal.h"
46 46
47int vvp_io_init(const struct lu_env *env, 47enum obd_notify_event;
48 struct cl_object *obj, struct cl_io *io); 48struct inode;
49int vvp_lock_init(const struct lu_env *env, 49struct lov_stripe_md;
50 struct cl_object *obj, struct cl_lock *lock, 50struct lustre_md;
51 const struct cl_io *io); 51struct obd_capa;
52struct obd_device;
53struct obd_export;
54struct page;
55
56/* specific architecture can implement only part of this list */
57enum vvp_io_subtype {
58 /** normal IO */
59 IO_NORMAL,
60 /** io started from splice_{read|write} */
61 IO_SPLICE
62};
63
64/**
65 * IO state private to IO state private to VVP layer.
66 */
67struct vvp_io {
68 /** super class */
69 struct cl_io_slice vui_cl;
70 struct cl_io_lock_link vui_link;
71 /**
72 * I/O vector information to or from which read/write is going.
73 */
74 struct iov_iter *vui_iter;
75 /**
76 * Total size for the left IO.
77 */
78 size_t vui_tot_count;
79
80 union {
81 struct vvp_fault_io {
82 /**
83 * Inode modification time that is checked across DLM
84 * lock request.
85 */
86 time64_t ft_mtime;
87 struct vm_area_struct *ft_vma;
88 /**
89 * locked page returned from vvp_io
90 */
91 struct page *ft_vmpage;
92 /**
93 * kernel fault info
94 */
95 struct vm_fault *ft_vmf;
96 /**
97 * fault API used bitflags for return code.
98 */
99 unsigned int ft_flags;
100 /**
101 * check that flags are from filemap_fault
102 */
103 bool ft_flags_valid;
104 } fault;
105 struct {
106 struct pipe_inode_info *vui_pipe;
107 unsigned int vui_flags;
108 } splice;
109 struct {
110 struct cl_page_list vui_queue;
111 unsigned long vui_written;
112 int vui_from;
113 int vui_to;
114 } write;
115 } u;
116
117 enum vvp_io_subtype vui_io_subtype;
118
119 /**
120 * Layout version when this IO is initialized
121 */
122 __u32 vui_layout_gen;
123 /**
124 * File descriptor against which IO is done.
125 */
126 struct ll_file_data *vui_fd;
127 struct kiocb *vui_iocb;
128
129 /* Readahead state. */
130 pgoff_t vui_ra_start;
131 pgoff_t vui_ra_count;
132 /* Set when vui_ra_{start,count} have been initialized. */
133 bool vui_ra_valid;
134};
135
136extern struct lu_device_type vvp_device_type;
137
138extern struct lu_context_key vvp_session_key;
139extern struct lu_context_key vvp_thread_key;
140
141extern struct kmem_cache *vvp_lock_kmem;
142extern struct kmem_cache *vvp_object_kmem;
143extern struct kmem_cache *vvp_req_kmem;
144
145struct vvp_thread_info {
146 struct cl_lock vti_lock;
147 struct cl_lock_descr vti_descr;
148 struct cl_io vti_io;
149 struct cl_attr vti_attr;
150};
151
152static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
153{
154 struct vvp_thread_info *vti;
155
156 vti = lu_context_key_get(&env->le_ctx, &vvp_thread_key);
157 LASSERT(vti);
158
159 return vti;
160}
161
162static inline struct cl_lock *vvp_env_lock(const struct lu_env *env)
163{
164 struct cl_lock *lock = &vvp_env_info(env)->vti_lock;
165
166 memset(lock, 0, sizeof(*lock));
167 return lock;
168}
169
170static inline struct cl_attr *vvp_env_thread_attr(const struct lu_env *env)
171{
172 struct cl_attr *attr = &vvp_env_info(env)->vti_attr;
173
174 memset(attr, 0, sizeof(*attr));
175
176 return attr;
177}
178
179static inline struct cl_io *vvp_env_thread_io(const struct lu_env *env)
180{
181 struct cl_io *io = &vvp_env_info(env)->vti_io;
182
183 memset(io, 0, sizeof(*io));
184
185 return io;
186}
187
188struct vvp_session {
189 struct vvp_io cs_ios;
190};
191
192static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
193{
194 struct vvp_session *ses;
195
196 ses = lu_context_key_get(env->le_ses, &vvp_session_key);
197 LASSERT(ses);
198
199 return ses;
200}
201
202static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
203{
204 return &vvp_env_session(env)->cs_ios;
205}
206
207/**
208 * ccc-private object state.
209 */
210struct vvp_object {
211 struct cl_object_header vob_header;
212 struct cl_object vob_cl;
213 struct inode *vob_inode;
214
215 /**
216 * A list of dirty pages pending IO in the cache. Used by
217 * SOM. Protected by ll_inode_info::lli_lock.
218 *
219 * \see vvp_page::vpg_pending_linkage
220 */
221 struct list_head vob_pending_list;
222
223 /**
224 * Access this counter is protected by inode->i_sem. Now that
225 * the lifetime of transient pages must be covered by inode sem,
226 * we don't need to hold any lock..
227 */
228 int vob_transient_pages;
229 /**
230 * Number of outstanding mmaps on this file.
231 *
232 * \see ll_vm_open(), ll_vm_close().
233 */
234 atomic_t vob_mmap_cnt;
235
236 /**
237 * various flags
238 * vob_discard_page_warned
239 * if pages belonging to this object are discarded when a client
240 * is evicted, some debug info will be printed, this flag will be set
241 * during processing the first discarded page, then avoid flooding
242 * debug message for lots of discarded pages.
243 *
244 * \see ll_dirty_page_discard_warn.
245 */
246 unsigned int vob_discard_page_warned:1;
247};
248
249/**
250 * VVP-private page state.
251 */
252struct vvp_page {
253 struct cl_page_slice vpg_cl;
254 int vpg_defer_uptodate;
255 int vpg_ra_used;
256 int vpg_write_queued;
257 /**
258 * Non-empty iff this page is already counted in
259 * vvp_object::vob_pending_list. This list is only used as a flag,
260 * that is, never iterated through, only checked for list_empty(), but
261 * having a list is useful for debugging.
262 */
263 struct list_head vpg_pending_linkage;
264 /** VM page */
265 struct page *vpg_page;
266};
267
268static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
269{
270 return container_of(slice, struct vvp_page, vpg_cl);
271}
272
273static inline pgoff_t vvp_index(struct vvp_page *vvp)
274{
275 return vvp->vpg_cl.cpl_index;
276}
277
278struct vvp_device {
279 struct cl_device vdv_cl;
280 struct super_block *vdv_sb;
281 struct cl_device *vdv_next;
282};
283
284struct vvp_lock {
285 struct cl_lock_slice vlk_cl;
286};
287
288struct vvp_req {
289 struct cl_req_slice vrq_cl;
290};
291
292void *ccc_key_init(const struct lu_context *ctx,
293 struct lu_context_key *key);
294void ccc_key_fini(const struct lu_context *ctx,
295 struct lu_context_key *key, void *data);
296
297void ccc_umount(const struct lu_env *env, struct cl_device *dev);
298
299static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv)
300{
301 return &vdv->vdv_cl.cd_lu_dev;
302}
303
304static inline struct vvp_device *lu2vvp_dev(const struct lu_device *d)
305{
306 return container_of0(d, struct vvp_device, vdv_cl.cd_lu_dev);
307}
308
309static inline struct vvp_device *cl2vvp_dev(const struct cl_device *d)
310{
311 return container_of0(d, struct vvp_device, vdv_cl);
312}
313
314static inline struct vvp_object *cl2vvp(const struct cl_object *obj)
315{
316 return container_of0(obj, struct vvp_object, vob_cl);
317}
318
319static inline struct vvp_object *lu2vvp(const struct lu_object *obj)
320{
321 return container_of0(obj, struct vvp_object, vob_cl.co_lu);
322}
323
324static inline struct inode *vvp_object_inode(const struct cl_object *obj)
325{
326 return cl2vvp(obj)->vob_inode;
327}
328
329int vvp_object_invariant(const struct cl_object *obj);
330struct vvp_object *cl_inode2vvp(struct inode *inode);
331
332static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
333{
334 return cl2vvp_page(slice)->vpg_page;
335}
336
337static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
338{
339 return container_of(slice, struct vvp_lock, vlk_cl);
340}
341
342# define CLOBINVRNT(env, clob, expr) \
343 ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
344
345/**
346 * New interfaces to get and put lov_stripe_md from lov layer. This violates
347 * layering because lov_stripe_md is supposed to be a private data in lov.
348 *
349 * NB: If you find you have to use these interfaces for your new code, please
350 * think about it again. These interfaces may be removed in the future for
351 * better layering.
352 */
353struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
354void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
355int lov_read_and_clear_async_rc(struct cl_object *clob);
356
357struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
358void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
359
360int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
361 struct cl_io *io);
362int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
363int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
364 struct cl_lock *lock, const struct cl_io *io);
52int vvp_page_init(const struct lu_env *env, struct cl_object *obj, 365int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
53 struct cl_page *page, struct page *vmpage); 366 struct cl_page *page, pgoff_t index);
367int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
368 struct cl_req *req);
54struct lu_object *vvp_object_alloc(const struct lu_env *env, 369struct lu_object *vvp_object_alloc(const struct lu_env *env,
55 const struct lu_object_header *hdr, 370 const struct lu_object_header *hdr,
56 struct lu_device *dev); 371 struct lu_device *dev);
57 372
58struct ccc_object *cl_inode2ccc(struct inode *inode); 373int vvp_global_init(void);
374void vvp_global_fini(void);
59 375
60extern const struct file_operations vvp_dump_pgcache_file_ops; 376extern const struct file_operations vvp_dump_pgcache_file_ops;
61 377
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 85a835976174..5bf9592ae5d2 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -44,21 +44,30 @@
44#include "../include/obd.h" 44#include "../include/obd.h"
45#include "../include/lustre_lite.h" 45#include "../include/lustre_lite.h"
46 46
47#include "llite_internal.h"
47#include "vvp_internal.h" 48#include "vvp_internal.h"
48 49
49static struct vvp_io *cl2vvp_io(const struct lu_env *env, 50struct vvp_io *cl2vvp_io(const struct lu_env *env,
50 const struct cl_io_slice *slice); 51 const struct cl_io_slice *slice)
52{
53 struct vvp_io *vio;
54
55 vio = container_of(slice, struct vvp_io, vui_cl);
56 LASSERT(vio == vvp_env_io(env));
57
58 return vio;
59}
51 60
52/** 61/**
53 * True, if \a io is a normal io, False for splice_{read,write} 62 * True, if \a io is a normal io, False for splice_{read,write}
54 */ 63 */
55int cl_is_normalio(const struct lu_env *env, const struct cl_io *io) 64static int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
56{ 65{
57 struct vvp_io *vio = vvp_env_io(env); 66 struct vvp_io *vio = vvp_env_io(env);
58 67
59 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); 68 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
60 69
61 return vio->cui_io_subtype == IO_NORMAL; 70 return vio->vui_io_subtype == IO_NORMAL;
62} 71}
63 72
64/** 73/**
@@ -71,7 +80,7 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
71 struct inode *inode) 80 struct inode *inode)
72{ 81{
73 struct ll_inode_info *lli = ll_i2info(inode); 82 struct ll_inode_info *lli = ll_i2info(inode);
74 struct ccc_io *cio = ccc_env_io(env); 83 struct vvp_io *vio = vvp_env_io(env);
75 bool rc = true; 84 bool rc = true;
76 85
77 switch (io->ci_type) { 86 switch (io->ci_type) {
@@ -80,7 +89,7 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
80 /* don't need lock here to check lli_layout_gen as we have held 89 /* don't need lock here to check lli_layout_gen as we have held
81 * extent lock and GROUP lock has to hold to swap layout 90 * extent lock and GROUP lock has to hold to swap layout
82 */ 91 */
83 if (ll_layout_version_get(lli) != cio->cui_layout_gen) { 92 if (ll_layout_version_get(lli) != vio->vui_layout_gen) {
84 io->ci_need_restart = 1; 93 io->ci_need_restart = 1;
85 /* this will return application a short read/write */ 94 /* this will return application a short read/write */
86 io->ci_continue = 0; 95 io->ci_continue = 0;
@@ -95,20 +104,187 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
95 return rc; 104 return rc;
96} 105}
97 106
107static void vvp_object_size_lock(struct cl_object *obj)
108{
109 struct inode *inode = vvp_object_inode(obj);
110
111 ll_inode_size_lock(inode);
112 cl_object_attr_lock(obj);
113}
114
115static void vvp_object_size_unlock(struct cl_object *obj)
116{
117 struct inode *inode = vvp_object_inode(obj);
118
119 cl_object_attr_unlock(obj);
120 ll_inode_size_unlock(inode);
121}
122
123/**
124 * Helper function that if necessary adjusts file size (inode->i_size), when
125 * position at the offset \a pos is accessed. File size can be arbitrary stale
126 * on a Lustre client, but client at least knows KMS. If accessed area is
127 * inside [0, KMS], set file size to KMS, otherwise glimpse file size.
128 *
129 * Locking: cl_isize_lock is used to serialize changes to inode size and to
130 * protect consistency between inode size and cl_object
131 * attributes. cl_object_size_lock() protects consistency between cl_attr's of
132 * top-object and sub-objects.
133 */
134static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
135 struct cl_io *io, loff_t start, size_t count,
136 int *exceed)
137{
138 struct cl_attr *attr = vvp_env_thread_attr(env);
139 struct inode *inode = vvp_object_inode(obj);
140 loff_t pos = start + count - 1;
141 loff_t kms;
142 int result;
143
144 /*
145 * Consistency guarantees: following possibilities exist for the
146 * relation between region being accessed and real file size at this
147 * moment:
148 *
149 * (A): the region is completely inside of the file;
150 *
151 * (B-x): x bytes of region are inside of the file, the rest is
152 * outside;
153 *
154 * (C): the region is completely outside of the file.
155 *
156 * This classification is stable under DLM lock already acquired by
157 * the caller, because to change the class, other client has to take
158 * DLM lock conflicting with our lock. Also, any updates to ->i_size
159 * by other threads on this client are serialized by
160 * ll_inode_size_lock(). This guarantees that short reads are handled
161 * correctly in the face of concurrent writes and truncates.
162 */
163 vvp_object_size_lock(obj);
164 result = cl_object_attr_get(env, obj, attr);
165 if (result == 0) {
166 kms = attr->cat_kms;
167 if (pos > kms) {
168 /*
169 * A glimpse is necessary to determine whether we
170 * return a short read (B) or some zeroes at the end
171 * of the buffer (C)
172 */
173 vvp_object_size_unlock(obj);
174 result = cl_glimpse_lock(env, io, inode, obj, 0);
175 if (result == 0 && exceed) {
176 /* If objective page index exceed end-of-file
177 * page index, return directly. Do not expect
178 * kernel will check such case correctly.
179 * linux-2.6.18-128.1.1 miss to do that.
180 * --bug 17336
181 */
182 loff_t size = i_size_read(inode);
183 loff_t cur_index = start >> PAGE_SHIFT;
184 loff_t size_index = (size - 1) >> PAGE_SHIFT;
185
186 if ((size == 0 && cur_index != 0) ||
187 size_index < cur_index)
188 *exceed = 1;
189 }
190 return result;
191 }
192 /*
193 * region is within kms and, hence, within real file
194 * size (A). We need to increase i_size to cover the
195 * read region so that generic_file_read() will do its
196 * job, but that doesn't mean the kms size is
197 * _correct_, it is only the _minimum_ size. If
198 * someone does a stat they will get the correct size
199 * which will always be >= the kms value here.
200 * b=11081
201 */
202 if (i_size_read(inode) < kms) {
203 i_size_write(inode, kms);
204 CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
205 PFID(lu_object_fid(&obj->co_lu)),
206 (__u64)i_size_read(inode));
207 }
208 }
209
210 vvp_object_size_unlock(obj);
211
212 return result;
213}
214
98/***************************************************************************** 215/*****************************************************************************
99 * 216 *
100 * io operations. 217 * io operations.
101 * 218 *
102 */ 219 */
103 220
221static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
222 __u32 enqflags, enum cl_lock_mode mode,
223 pgoff_t start, pgoff_t end)
224{
225 struct vvp_io *vio = vvp_env_io(env);
226 struct cl_lock_descr *descr = &vio->vui_link.cill_descr;
227 struct cl_object *obj = io->ci_obj;
228
229 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
230
231 CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
232
233 memset(&vio->vui_link, 0, sizeof(vio->vui_link));
234
235 if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
236 descr->cld_mode = CLM_GROUP;
237 descr->cld_gid = vio->vui_fd->fd_grouplock.lg_gid;
238 } else {
239 descr->cld_mode = mode;
240 }
241 descr->cld_obj = obj;
242 descr->cld_start = start;
243 descr->cld_end = end;
244 descr->cld_enq_flags = enqflags;
245
246 cl_io_lock_add(env, io, &vio->vui_link);
247 return 0;
248}
249
250static int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
251 __u32 enqflags, enum cl_lock_mode mode,
252 loff_t start, loff_t end)
253{
254 struct cl_object *obj = io->ci_obj;
255
256 return vvp_io_one_lock_index(env, io, enqflags, mode,
257 cl_index(obj, start), cl_index(obj, end));
258}
259
260static int vvp_io_write_iter_init(const struct lu_env *env,
261 const struct cl_io_slice *ios)
262{
263 struct vvp_io *vio = cl2vvp_io(env, ios);
264
265 cl_page_list_init(&vio->u.write.vui_queue);
266 vio->u.write.vui_written = 0;
267 vio->u.write.vui_from = 0;
268 vio->u.write.vui_to = PAGE_SIZE;
269
270 return 0;
271}
272
273static void vvp_io_write_iter_fini(const struct lu_env *env,
274 const struct cl_io_slice *ios)
275{
276 struct vvp_io *vio = cl2vvp_io(env, ios);
277
278 LASSERT(vio->u.write.vui_queue.pl_nr == 0);
279}
280
104static int vvp_io_fault_iter_init(const struct lu_env *env, 281static int vvp_io_fault_iter_init(const struct lu_env *env,
105 const struct cl_io_slice *ios) 282 const struct cl_io_slice *ios)
106{ 283{
107 struct vvp_io *vio = cl2vvp_io(env, ios); 284 struct vvp_io *vio = cl2vvp_io(env, ios);
108 struct inode *inode = ccc_object_inode(ios->cis_obj); 285 struct inode *inode = vvp_object_inode(ios->cis_obj);
109 286
110 LASSERT(inode == 287 LASSERT(inode == file_inode(vio->vui_fd->fd_file));
111 file_inode(cl2ccc_io(env, ios)->cui_fd->fd_file));
112 vio->u.fault.ft_mtime = inode->i_mtime.tv_sec; 288 vio->u.fault.ft_mtime = inode->i_mtime.tv_sec;
113 return 0; 289 return 0;
114} 290}
@@ -117,15 +293,16 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
117{ 293{
118 struct cl_io *io = ios->cis_io; 294 struct cl_io *io = ios->cis_io;
119 struct cl_object *obj = io->ci_obj; 295 struct cl_object *obj = io->ci_obj;
120 struct ccc_io *cio = cl2ccc_io(env, ios); 296 struct vvp_io *vio = cl2vvp_io(env, ios);
297 struct inode *inode = vvp_object_inode(obj);
121 298
122 CLOBINVRNT(env, obj, ccc_object_invariant(obj)); 299 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
123 300
124 CDEBUG(D_VFSTRACE, DFID 301 CDEBUG(D_VFSTRACE, DFID
125 " ignore/verify layout %d/%d, layout version %d restore needed %d\n", 302 " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
126 PFID(lu_object_fid(&obj->co_lu)), 303 PFID(lu_object_fid(&obj->co_lu)),
127 io->ci_ignore_layout, io->ci_verify_layout, 304 io->ci_ignore_layout, io->ci_verify_layout,
128 cio->cui_layout_gen, io->ci_restore_needed); 305 vio->vui_layout_gen, io->ci_restore_needed);
129 306
130 if (io->ci_restore_needed == 1) { 307 if (io->ci_restore_needed == 1) {
131 int rc; 308 int rc;
@@ -133,7 +310,7 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
133 /* file was detected release, we need to restore it 310 /* file was detected release, we need to restore it
134 * before finishing the io 311 * before finishing the io
135 */ 312 */
136 rc = ll_layout_restore(ccc_object_inode(obj)); 313 rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
137 /* if restore registration failed, no restart, 314 /* if restore registration failed, no restart,
138 * we will return -ENODATA 315 * we will return -ENODATA
139 */ 316 */
@@ -159,16 +336,16 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
159 __u32 gen = 0; 336 __u32 gen = 0;
160 337
161 /* check layout version */ 338 /* check layout version */
162 ll_layout_refresh(ccc_object_inode(obj), &gen); 339 ll_layout_refresh(inode, &gen);
163 io->ci_need_restart = cio->cui_layout_gen != gen; 340 io->ci_need_restart = vio->vui_layout_gen != gen;
164 if (io->ci_need_restart) { 341 if (io->ci_need_restart) {
165 CDEBUG(D_VFSTRACE, 342 CDEBUG(D_VFSTRACE,
166 DFID" layout changed from %d to %d.\n", 343 DFID" layout changed from %d to %d.\n",
167 PFID(lu_object_fid(&obj->co_lu)), 344 PFID(lu_object_fid(&obj->co_lu)),
168 cio->cui_layout_gen, gen); 345 vio->vui_layout_gen, gen);
169 /* today successful restore is the only possible case */ 346 /* today successful restore is the only possible case */
170 /* restore was done, clear restoring state */ 347 /* restore was done, clear restoring state */
171 ll_i2info(ccc_object_inode(obj))->lli_flags &= 348 ll_i2info(vvp_object_inode(obj))->lli_flags &=
172 ~LLIF_FILE_RESTORING; 349 ~LLIF_FILE_RESTORING;
173 } 350 }
174 } 351 }
@@ -180,7 +357,7 @@ static void vvp_io_fault_fini(const struct lu_env *env,
180 struct cl_io *io = ios->cis_io; 357 struct cl_io *io = ios->cis_io;
181 struct cl_page *page = io->u.ci_fault.ft_page; 358 struct cl_page *page = io->u.ci_fault.ft_page;
182 359
183 CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj)); 360 CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj));
184 361
185 if (page) { 362 if (page) {
186 lu_ref_del(&page->cp_reference, "fault", io); 363 lu_ref_del(&page->cp_reference, "fault", io);
@@ -203,16 +380,16 @@ static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
203} 380}
204 381
205static int vvp_mmap_locks(const struct lu_env *env, 382static int vvp_mmap_locks(const struct lu_env *env,
206 struct ccc_io *vio, struct cl_io *io) 383 struct vvp_io *vio, struct cl_io *io)
207{ 384{
208 struct ccc_thread_info *cti = ccc_env_info(env); 385 struct vvp_thread_info *cti = vvp_env_info(env);
209 struct mm_struct *mm = current->mm; 386 struct mm_struct *mm = current->mm;
210 struct vm_area_struct *vma; 387 struct vm_area_struct *vma;
211 struct cl_lock_descr *descr = &cti->cti_descr; 388 struct cl_lock_descr *descr = &cti->vti_descr;
212 ldlm_policy_data_t policy; 389 ldlm_policy_data_t policy;
213 unsigned long addr; 390 unsigned long addr;
214 ssize_t count; 391 ssize_t count;
215 int result; 392 int result = 0;
216 struct iov_iter i; 393 struct iov_iter i;
217 struct iovec iov; 394 struct iovec iov;
218 395
@@ -221,21 +398,21 @@ static int vvp_mmap_locks(const struct lu_env *env,
221 if (!cl_is_normalio(env, io)) 398 if (!cl_is_normalio(env, io))
222 return 0; 399 return 0;
223 400
224 if (!vio->cui_iter) /* nfs or loop back device write */ 401 if (!vio->vui_iter) /* nfs or loop back device write */
225 return 0; 402 return 0;
226 403
227 /* No MM (e.g. NFS)? No vmas too. */ 404 /* No MM (e.g. NFS)? No vmas too. */
228 if (!mm) 405 if (!mm)
229 return 0; 406 return 0;
230 407
231 iov_for_each(iov, i, *(vio->cui_iter)) { 408 iov_for_each(iov, i, *vio->vui_iter) {
232 addr = (unsigned long)iov.iov_base; 409 addr = (unsigned long)iov.iov_base;
233 count = iov.iov_len; 410 count = iov.iov_len;
234 if (count == 0) 411 if (count == 0)
235 continue; 412 continue;
236 413
237 count += addr & (~CFS_PAGE_MASK); 414 count += addr & (~PAGE_MASK);
238 addr &= CFS_PAGE_MASK; 415 addr &= PAGE_MASK;
239 416
240 down_read(&mm->mmap_sem); 417 down_read(&mm->mmap_sem);
241 while ((vma = our_vma(mm, addr, count)) != NULL) { 418 while ((vma = our_vma(mm, addr, count)) != NULL) {
@@ -244,10 +421,10 @@ static int vvp_mmap_locks(const struct lu_env *env,
244 421
245 if (ll_file_nolock(vma->vm_file)) { 422 if (ll_file_nolock(vma->vm_file)) {
246 /* 423 /*
247 * For no lock case, a lockless lock will be 424 * For no lock case is not allowed for mmap
248 * generated.
249 */ 425 */
250 flags = CEF_NEVER; 426 result = -EINVAL;
427 break;
251 } 428 }
252 429
253 /* 430 /*
@@ -269,10 +446,8 @@ static int vvp_mmap_locks(const struct lu_env *env,
269 descr->cld_mode, descr->cld_start, 446 descr->cld_mode, descr->cld_start,
270 descr->cld_end); 447 descr->cld_end);
271 448
272 if (result < 0) { 449 if (result < 0)
273 up_read(&mm->mmap_sem); 450 break;
274 return result;
275 }
276 451
277 if (vma->vm_end - addr >= count) 452 if (vma->vm_end - addr >= count)
278 break; 453 break;
@@ -281,26 +456,55 @@ static int vvp_mmap_locks(const struct lu_env *env,
281 addr = vma->vm_end; 456 addr = vma->vm_end;
282 } 457 }
283 up_read(&mm->mmap_sem); 458 up_read(&mm->mmap_sem);
459 if (result < 0)
460 break;
284 } 461 }
285 return 0; 462 return result;
463}
464
465static void vvp_io_advance(const struct lu_env *env,
466 const struct cl_io_slice *ios,
467 size_t nob)
468{
469 struct vvp_io *vio = cl2vvp_io(env, ios);
470 struct cl_io *io = ios->cis_io;
471 struct cl_object *obj = ios->cis_io->ci_obj;
472
473 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
474
475 if (!cl_is_normalio(env, io))
476 return;
477
478 iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob);
479}
480
481static void vvp_io_update_iov(const struct lu_env *env,
482 struct vvp_io *vio, struct cl_io *io)
483{
484 size_t size = io->u.ci_rw.crw_count;
485
486 if (!cl_is_normalio(env, io) || !vio->vui_iter)
487 return;
488
489 iov_iter_truncate(vio->vui_iter, size);
286} 490}
287 491
288static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io, 492static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
289 enum cl_lock_mode mode, loff_t start, loff_t end) 493 enum cl_lock_mode mode, loff_t start, loff_t end)
290{ 494{
291 struct ccc_io *cio = ccc_env_io(env); 495 struct vvp_io *vio = vvp_env_io(env);
292 int result; 496 int result;
293 int ast_flags = 0; 497 int ast_flags = 0;
294 498
295 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); 499 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
296 500
297 ccc_io_update_iov(env, cio, io); 501 vvp_io_update_iov(env, vio, io);
298 502
299 if (io->u.ci_rw.crw_nonblock) 503 if (io->u.ci_rw.crw_nonblock)
300 ast_flags |= CEF_NONBLOCK; 504 ast_flags |= CEF_NONBLOCK;
301 result = vvp_mmap_locks(env, cio, io); 505 result = vvp_mmap_locks(env, vio, io);
302 if (result == 0) 506 if (result == 0)
303 result = ccc_io_one_lock(env, io, ast_flags, mode, start, end); 507 result = vvp_io_one_lock(env, io, ast_flags, mode, start, end);
304 return result; 508 return result;
305} 509}
306 510
@@ -325,9 +529,11 @@ static int vvp_io_fault_lock(const struct lu_env *env,
325 /* 529 /*
326 * XXX LDLM_FL_CBPENDING 530 * XXX LDLM_FL_CBPENDING
327 */ 531 */
328 return ccc_io_one_lock_index 532 return vvp_io_one_lock_index(env,
329 (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma), 533 io, 0,
330 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index); 534 vvp_mode_from_vma(vio->u.fault.ft_vma),
535 io->u.ci_fault.ft_index,
536 io->u.ci_fault.ft_index);
331} 537}
332 538
333static int vvp_io_write_lock(const struct lu_env *env, 539static int vvp_io_write_lock(const struct lu_env *env,
@@ -354,14 +560,13 @@ static int vvp_io_setattr_iter_init(const struct lu_env *env,
354} 560}
355 561
356/** 562/**
357 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io. 563 * Implementation of cl_io_operations::vio_lock() method for CIT_SETATTR io.
358 * 564 *
359 * Handles "lockless io" mode when extent locking is done by server. 565 * Handles "lockless io" mode when extent locking is done by server.
360 */ 566 */
361static int vvp_io_setattr_lock(const struct lu_env *env, 567static int vvp_io_setattr_lock(const struct lu_env *env,
362 const struct cl_io_slice *ios) 568 const struct cl_io_slice *ios)
363{ 569{
364 struct ccc_io *cio = ccc_env_io(env);
365 struct cl_io *io = ios->cis_io; 570 struct cl_io *io = ios->cis_io;
366 __u64 new_size; 571 __u64 new_size;
367 __u32 enqflags = 0; 572 __u32 enqflags = 0;
@@ -378,8 +583,8 @@ static int vvp_io_setattr_lock(const struct lu_env *env,
378 return 0; 583 return 0;
379 new_size = 0; 584 new_size = 0;
380 } 585 }
381 cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK; 586
382 return ccc_io_one_lock(env, io, enqflags, CLM_WRITE, 587 return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
383 new_size, OBD_OBJECT_EOF); 588 new_size, OBD_OBJECT_EOF);
384} 589}
385 590
@@ -413,7 +618,7 @@ static int vvp_io_setattr_time(const struct lu_env *env,
413{ 618{
414 struct cl_io *io = ios->cis_io; 619 struct cl_io *io = ios->cis_io;
415 struct cl_object *obj = io->ci_obj; 620 struct cl_object *obj = io->ci_obj;
416 struct cl_attr *attr = ccc_env_thread_attr(env); 621 struct cl_attr *attr = vvp_env_thread_attr(env);
417 int result; 622 int result;
418 unsigned valid = CAT_CTIME; 623 unsigned valid = CAT_CTIME;
419 624
@@ -437,7 +642,7 @@ static int vvp_io_setattr_start(const struct lu_env *env,
437 const struct cl_io_slice *ios) 642 const struct cl_io_slice *ios)
438{ 643{
439 struct cl_io *io = ios->cis_io; 644 struct cl_io *io = ios->cis_io;
440 struct inode *inode = ccc_object_inode(io->ci_obj); 645 struct inode *inode = vvp_object_inode(io->ci_obj);
441 int result = 0; 646 int result = 0;
442 647
443 inode_lock(inode); 648 inode_lock(inode);
@@ -453,7 +658,7 @@ static void vvp_io_setattr_end(const struct lu_env *env,
453 const struct cl_io_slice *ios) 658 const struct cl_io_slice *ios)
454{ 659{
455 struct cl_io *io = ios->cis_io; 660 struct cl_io *io = ios->cis_io;
456 struct inode *inode = ccc_object_inode(io->ci_obj); 661 struct inode *inode = vvp_object_inode(io->ci_obj);
457 662
458 if (cl_io_is_trunc(io)) 663 if (cl_io_is_trunc(io))
459 /* Truncate in memory pages - they must be clean pages 664 /* Truncate in memory pages - they must be clean pages
@@ -474,27 +679,25 @@ static int vvp_io_read_start(const struct lu_env *env,
474 const struct cl_io_slice *ios) 679 const struct cl_io_slice *ios)
475{ 680{
476 struct vvp_io *vio = cl2vvp_io(env, ios); 681 struct vvp_io *vio = cl2vvp_io(env, ios);
477 struct ccc_io *cio = cl2ccc_io(env, ios);
478 struct cl_io *io = ios->cis_io; 682 struct cl_io *io = ios->cis_io;
479 struct cl_object *obj = io->ci_obj; 683 struct cl_object *obj = io->ci_obj;
480 struct inode *inode = ccc_object_inode(obj); 684 struct inode *inode = vvp_object_inode(obj);
481 struct ll_ra_read *bead = &vio->cui_bead; 685 struct file *file = vio->vui_fd->fd_file;
482 struct file *file = cio->cui_fd->fd_file;
483 686
484 int result; 687 int result;
485 loff_t pos = io->u.ci_rd.rd.crw_pos; 688 loff_t pos = io->u.ci_rd.rd.crw_pos;
486 long cnt = io->u.ci_rd.rd.crw_count; 689 long cnt = io->u.ci_rd.rd.crw_count;
487 long tot = cio->cui_tot_count; 690 long tot = vio->vui_tot_count;
488 int exceed = 0; 691 int exceed = 0;
489 692
490 CLOBINVRNT(env, obj, ccc_object_invariant(obj)); 693 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
491 694
492 CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt); 695 CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
493 696
494 if (!can_populate_pages(env, io, inode)) 697 if (!can_populate_pages(env, io, inode))
495 return 0; 698 return 0;
496 699
497 result = ccc_prep_size(env, obj, io, pos, tot, &exceed); 700 result = vvp_prep_size(env, obj, io, pos, tot, &exceed);
498 if (result != 0) 701 if (result != 0)
499 return result; 702 return result;
500 else if (exceed != 0) 703 else if (exceed != 0)
@@ -505,30 +708,27 @@ static int vvp_io_read_start(const struct lu_env *env,
505 inode->i_ino, cnt, pos, i_size_read(inode)); 708 inode->i_ino, cnt, pos, i_size_read(inode));
506 709
507 /* turn off the kernel's read-ahead */ 710 /* turn off the kernel's read-ahead */
508 cio->cui_fd->fd_file->f_ra.ra_pages = 0; 711 vio->vui_fd->fd_file->f_ra.ra_pages = 0;
509 712
510 /* initialize read-ahead window once per syscall */ 713 /* initialize read-ahead window once per syscall */
511 if (!vio->cui_ra_window_set) { 714 if (!vio->vui_ra_valid) {
512 vio->cui_ra_window_set = 1; 715 vio->vui_ra_valid = true;
513 bead->lrr_start = cl_index(obj, pos); 716 vio->vui_ra_start = cl_index(obj, pos);
514 /* 717 vio->vui_ra_count = cl_index(obj, tot + PAGE_SIZE - 1);
515 * XXX: explicit PAGE_SIZE 718 ll_ras_enter(file);
516 */
517 bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
518 ll_ra_read_in(file, bead);
519 } 719 }
520 720
521 /* BUG: 5972 */ 721 /* BUG: 5972 */
522 file_accessed(file); 722 file_accessed(file);
523 switch (vio->cui_io_subtype) { 723 switch (vio->vui_io_subtype) {
524 case IO_NORMAL: 724 case IO_NORMAL:
525 LASSERT(cio->cui_iocb->ki_pos == pos); 725 LASSERT(vio->vui_iocb->ki_pos == pos);
526 result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter); 726 result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
527 break; 727 break;
528 case IO_SPLICE: 728 case IO_SPLICE:
529 result = generic_file_splice_read(file, &pos, 729 result = generic_file_splice_read(file, &pos,
530 vio->u.splice.cui_pipe, cnt, 730 vio->u.splice.vui_pipe, cnt,
531 vio->u.splice.cui_flags); 731 vio->u.splice.vui_flags);
532 /* LU-1109: do splice read stripe by stripe otherwise if it 732 /* LU-1109: do splice read stripe by stripe otherwise if it
533 * may make nfsd stuck if this read occupied all internal pipe 733 * may make nfsd stuck if this read occupied all internal pipe
534 * buffers. 734 * buffers.
@@ -536,7 +736,7 @@ static int vvp_io_read_start(const struct lu_env *env,
536 io->ci_continue = 0; 736 io->ci_continue = 0;
537 break; 737 break;
538 default: 738 default:
539 CERROR("Wrong IO type %u\n", vio->cui_io_subtype); 739 CERROR("Wrong IO type %u\n", vio->vui_io_subtype);
540 LBUG(); 740 LBUG();
541 } 741 }
542 742
@@ -546,30 +746,201 @@ out:
546 io->ci_continue = 0; 746 io->ci_continue = 0;
547 io->ci_nob += result; 747 io->ci_nob += result;
548 ll_rw_stats_tally(ll_i2sbi(inode), current->pid, 748 ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
549 cio->cui_fd, pos, result, READ); 749 vio->vui_fd, pos, result, READ);
550 result = 0; 750 result = 0;
551 } 751 }
552 return result; 752 return result;
553} 753}
554 754
555static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios) 755static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
756 struct cl_page_list *plist, int from, int to)
556{ 757{
557 struct vvp_io *vio = cl2vvp_io(env, ios); 758 struct cl_2queue *queue = &io->ci_queue;
558 struct ccc_io *cio = cl2ccc_io(env, ios); 759 struct cl_page *page;
760 unsigned int bytes = 0;
761 int rc = 0;
559 762
560 if (vio->cui_ra_window_set) 763 if (plist->pl_nr == 0)
561 ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead); 764 return 0;
562 765
563 vvp_io_fini(env, ios); 766 if (from > 0 || to != PAGE_SIZE) {
767 page = cl_page_list_first(plist);
768 if (plist->pl_nr == 1) {
769 cl_page_clip(env, page, from, to);
770 } else {
771 if (from > 0)
772 cl_page_clip(env, page, from, PAGE_SIZE);
773 if (to != PAGE_SIZE) {
774 page = cl_page_list_last(plist);
775 cl_page_clip(env, page, 0, to);
776 }
777 }
778 }
779
780 cl_2queue_init(queue);
781 cl_page_list_splice(plist, &queue->c2_qin);
782 rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);
783
784 /* plist is not sorted any more */
785 cl_page_list_splice(&queue->c2_qin, plist);
786 cl_page_list_splice(&queue->c2_qout, plist);
787 cl_2queue_fini(env, queue);
788
789 if (rc == 0) {
790 /* calculate bytes */
791 bytes = plist->pl_nr << PAGE_SHIFT;
792 bytes -= from + PAGE_SIZE - to;
793
794 while (plist->pl_nr > 0) {
795 page = cl_page_list_first(plist);
796 cl_page_list_del(env, plist, page);
797
798 cl_page_clip(env, page, 0, PAGE_SIZE);
799
800 SetPageUptodate(cl_page_vmpage(page));
801 cl_page_disown(env, io, page);
802
803 /* held in ll_cl_init() */
804 lu_ref_del(&page->cp_reference, "cl_io", io);
805 cl_page_put(env, page);
806 }
807 }
808
809 return bytes > 0 ? bytes : rc;
810}
811
812static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
813 struct cl_page *page)
814{
815 struct vvp_page *vpg;
816 struct page *vmpage = page->cp_vmpage;
817 struct cl_object *clob = cl_io_top(io)->ci_obj;
818
819 SetPageUptodate(vmpage);
820 set_page_dirty(vmpage);
821
822 vpg = cl2vvp_page(cl_object_page_slice(clob, page));
823 vvp_write_pending(cl2vvp(clob), vpg);
824
825 cl_page_disown(env, io, page);
826
827 /* held in ll_cl_init() */
828 lu_ref_del(&page->cp_reference, "cl_io", io);
829 cl_page_put(env, page);
830}
831
832/* make sure the page list is contiguous */
833static bool page_list_sanity_check(struct cl_object *obj,
834 struct cl_page_list *plist)
835{
836 struct cl_page *page;
837 pgoff_t index = CL_PAGE_EOF;
838
839 cl_page_list_for_each(page, plist) {
840 struct vvp_page *vpg = cl_object_page_slice(obj, page);
841
842 if (index == CL_PAGE_EOF) {
843 index = vvp_index(vpg);
844 continue;
845 }
846
847 ++index;
848 if (index == vvp_index(vpg))
849 continue;
850
851 return false;
852 }
853 return true;
854}
855
856/* Return how many bytes have queued or written */
857int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
858{
859 struct cl_object *obj = io->ci_obj;
860 struct inode *inode = vvp_object_inode(obj);
861 struct vvp_io *vio = vvp_env_io(env);
862 struct cl_page_list *queue = &vio->u.write.vui_queue;
863 struct cl_page *page;
864 int rc = 0;
865 int bytes = 0;
866 unsigned int npages = vio->u.write.vui_queue.pl_nr;
867
868 if (npages == 0)
869 return 0;
870
871 CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
872 npages, vio->u.write.vui_from, vio->u.write.vui_to);
873
874 LASSERT(page_list_sanity_check(obj, queue));
875
876 /* submit IO with async write */
877 rc = cl_io_commit_async(env, io, queue,
878 vio->u.write.vui_from, vio->u.write.vui_to,
879 write_commit_callback);
880 npages -= queue->pl_nr; /* already committed pages */
881 if (npages > 0) {
882 /* calculate how many bytes were written */
883 bytes = npages << PAGE_SHIFT;
884
885 /* first page */
886 bytes -= vio->u.write.vui_from;
887 if (queue->pl_nr == 0) /* last page */
888 bytes -= PAGE_SIZE - vio->u.write.vui_to;
889 LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
890
891 vio->u.write.vui_written += bytes;
892
893 CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
894 npages, bytes, vio->u.write.vui_written);
895
896 /* the first page must have been written. */
897 vio->u.write.vui_from = 0;
898 }
899 LASSERT(page_list_sanity_check(obj, queue));
900 LASSERT(ergo(rc == 0, queue->pl_nr == 0));
901
902 /* out of quota, try sync write */
903 if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
904 rc = vvp_io_commit_sync(env, io, queue,
905 vio->u.write.vui_from,
906 vio->u.write.vui_to);
907 if (rc > 0) {
908 vio->u.write.vui_written += rc;
909 rc = 0;
910 }
911 }
912
913 /* update inode size */
914 ll_merge_attr(env, inode);
915
916 /* Now the pages in queue were failed to commit, discard them
917 * unless they were dirtied before.
918 */
919 while (queue->pl_nr > 0) {
920 page = cl_page_list_first(queue);
921 cl_page_list_del(env, queue, page);
922
923 if (!PageDirty(cl_page_vmpage(page)))
924 cl_page_discard(env, io, page);
925
926 cl_page_disown(env, io, page);
927
928 /* held in ll_cl_init() */
929 lu_ref_del(&page->cp_reference, "cl_io", io);
930 cl_page_put(env, page);
931 }
932 cl_page_list_fini(env, queue);
933
934 return rc;
564} 935}
565 936
566static int vvp_io_write_start(const struct lu_env *env, 937static int vvp_io_write_start(const struct lu_env *env,
567 const struct cl_io_slice *ios) 938 const struct cl_io_slice *ios)
568{ 939{
569 struct ccc_io *cio = cl2ccc_io(env, ios); 940 struct vvp_io *vio = cl2vvp_io(env, ios);
570 struct cl_io *io = ios->cis_io; 941 struct cl_io *io = ios->cis_io;
571 struct cl_object *obj = io->ci_obj; 942 struct cl_object *obj = io->ci_obj;
572 struct inode *inode = ccc_object_inode(obj); 943 struct inode *inode = vvp_object_inode(obj);
573 ssize_t result = 0; 944 ssize_t result = 0;
574 loff_t pos = io->u.ci_wr.wr.crw_pos; 945 loff_t pos = io->u.ci_wr.wr.crw_pos;
575 size_t cnt = io->u.ci_wr.wr.crw_count; 946 size_t cnt = io->u.ci_wr.wr.crw_count;
@@ -582,25 +953,41 @@ static int vvp_io_write_start(const struct lu_env *env,
582 * PARALLEL IO This has to be changed for parallel IO doing 953 * PARALLEL IO This has to be changed for parallel IO doing
583 * out-of-order writes. 954 * out-of-order writes.
584 */ 955 */
956 ll_merge_attr(env, inode);
585 pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode); 957 pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
586 cio->cui_iocb->ki_pos = pos; 958 vio->vui_iocb->ki_pos = pos;
587 } else { 959 } else {
588 LASSERT(cio->cui_iocb->ki_pos == pos); 960 LASSERT(vio->vui_iocb->ki_pos == pos);
589 } 961 }
590 962
591 CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt); 963 CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
592 964
593 if (!cio->cui_iter) /* from a temp io in ll_cl_init(). */ 965 if (!vio->vui_iter) /* from a temp io in ll_cl_init(). */
594 result = 0; 966 result = 0;
595 else 967 else
596 result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter); 968 result = generic_file_write_iter(vio->vui_iocb, vio->vui_iter);
969
970 if (result > 0) {
971 result = vvp_io_write_commit(env, io);
972 if (vio->u.write.vui_written > 0) {
973 result = vio->u.write.vui_written;
974 io->ci_nob += result;
597 975
976 CDEBUG(D_VFSTRACE, "write: nob %zd, result: %zd\n",
977 io->ci_nob, result);
978 }
979 }
598 if (result > 0) { 980 if (result > 0) {
981 struct ll_inode_info *lli = ll_i2info(inode);
982
983 spin_lock(&lli->lli_lock);
984 lli->lli_flags |= LLIF_DATA_MODIFIED;
985 spin_unlock(&lli->lli_lock);
986
599 if (result < cnt) 987 if (result < cnt)
600 io->ci_continue = 0; 988 io->ci_continue = 0;
601 io->ci_nob += result;
602 ll_rw_stats_tally(ll_i2sbi(inode), current->pid, 989 ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
603 cio->cui_fd, pos, result, WRITE); 990 vio->vui_fd, pos, result, WRITE);
604 result = 0; 991 result = 0;
605 } 992 }
606 return result; 993 return result;
@@ -608,10 +995,10 @@ static int vvp_io_write_start(const struct lu_env *env,
608 995
609static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) 996static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
610{ 997{
611 struct vm_fault *vmf = cfio->fault.ft_vmf; 998 struct vm_fault *vmf = cfio->ft_vmf;
612 999
613 cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf); 1000 cfio->ft_flags = filemap_fault(cfio->ft_vma, vmf);
614 cfio->fault.ft_flags_valid = 1; 1001 cfio->ft_flags_valid = 1;
615 1002
616 if (vmf->page) { 1003 if (vmf->page) {
617 CDEBUG(D_PAGE, 1004 CDEBUG(D_PAGE,
@@ -619,39 +1006,51 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
619 vmf->page, vmf->page->mapping, vmf->page->index, 1006 vmf->page, vmf->page->mapping, vmf->page->index,
620 (long)vmf->page->flags, page_count(vmf->page), 1007 (long)vmf->page->flags, page_count(vmf->page),
621 page_private(vmf->page), vmf->virtual_address); 1008 page_private(vmf->page), vmf->virtual_address);
622 if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) { 1009 if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
623 lock_page(vmf->page); 1010 lock_page(vmf->page);
624 cfio->fault.ft_flags |= VM_FAULT_LOCKED; 1011 cfio->ft_flags |= VM_FAULT_LOCKED;
625 } 1012 }
626 1013
627 cfio->ft_vmpage = vmf->page; 1014 cfio->ft_vmpage = vmf->page;
628 return 0; 1015 return 0;
629 } 1016 }
630 1017
631 if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { 1018 if (cfio->ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
632 CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); 1019 CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
633 return -EFAULT; 1020 return -EFAULT;
634 } 1021 }
635 1022
636 if (cfio->fault.ft_flags & VM_FAULT_OOM) { 1023 if (cfio->ft_flags & VM_FAULT_OOM) {
637 CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address); 1024 CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
638 return -ENOMEM; 1025 return -ENOMEM;
639 } 1026 }
640 1027
641 if (cfio->fault.ft_flags & VM_FAULT_RETRY) 1028 if (cfio->ft_flags & VM_FAULT_RETRY)
642 return -EAGAIN; 1029 return -EAGAIN;
643 1030
644 CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags); 1031 CERROR("Unknown error in page fault %d!\n", cfio->ft_flags);
645 return -EINVAL; 1032 return -EINVAL;
646} 1033}
647 1034
1035static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
1036 struct cl_page *page)
1037{
1038 struct vvp_page *vpg;
1039 struct cl_object *clob = cl_io_top(io)->ci_obj;
1040
1041 set_page_dirty(page->cp_vmpage);
1042
1043 vpg = cl2vvp_page(cl_object_page_slice(clob, page));
1044 vvp_write_pending(cl2vvp(clob), vpg);
1045}
1046
648static int vvp_io_fault_start(const struct lu_env *env, 1047static int vvp_io_fault_start(const struct lu_env *env,
649 const struct cl_io_slice *ios) 1048 const struct cl_io_slice *ios)
650{ 1049{
651 struct vvp_io *vio = cl2vvp_io(env, ios); 1050 struct vvp_io *vio = cl2vvp_io(env, ios);
652 struct cl_io *io = ios->cis_io; 1051 struct cl_io *io = ios->cis_io;
653 struct cl_object *obj = io->ci_obj; 1052 struct cl_object *obj = io->ci_obj;
654 struct inode *inode = ccc_object_inode(obj); 1053 struct inode *inode = vvp_object_inode(obj);
655 struct cl_fault_io *fio = &io->u.ci_fault; 1054 struct cl_fault_io *fio = &io->u.ci_fault;
656 struct vvp_fault_io *cfio = &vio->u.fault; 1055 struct vvp_fault_io *cfio = &vio->u.fault;
657 loff_t offset; 1056 loff_t offset;
@@ -659,7 +1058,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
659 struct page *vmpage = NULL; 1058 struct page *vmpage = NULL;
660 struct cl_page *page; 1059 struct cl_page *page;
661 loff_t size; 1060 loff_t size;
662 pgoff_t last; /* last page in a file data region */ 1061 pgoff_t last_index;
663 1062
664 if (fio->ft_executable && 1063 if (fio->ft_executable &&
665 inode->i_mtime.tv_sec != vio->u.fault.ft_mtime) 1064 inode->i_mtime.tv_sec != vio->u.fault.ft_mtime)
@@ -670,7 +1069,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
670 /* offset of the last byte on the page */ 1069 /* offset of the last byte on the page */
671 offset = cl_offset(obj, fio->ft_index + 1) - 1; 1070 offset = cl_offset(obj, fio->ft_index + 1) - 1;
672 LASSERT(cl_index(obj, offset) == fio->ft_index); 1071 LASSERT(cl_index(obj, offset) == fio->ft_index);
673 result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL); 1072 result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL);
674 if (result != 0) 1073 if (result != 0)
675 return result; 1074 return result;
676 1075
@@ -705,15 +1104,15 @@ static int vvp_io_fault_start(const struct lu_env *env,
705 goto out; 1104 goto out;
706 } 1105 }
707 1106
1107 last_index = cl_index(obj, size - 1);
1108
708 if (fio->ft_mkwrite) { 1109 if (fio->ft_mkwrite) {
709 pgoff_t last_index;
710 /* 1110 /*
711 * Capture the size while holding the lli_trunc_sem from above 1111 * Capture the size while holding the lli_trunc_sem from above
712 * we want to make sure that we complete the mkwrite action 1112 * we want to make sure that we complete the mkwrite action
713 * while holding this lock. We need to make sure that we are 1113 * while holding this lock. We need to make sure that we are
714 * not past the end of the file. 1114 * not past the end of the file.
715 */ 1115 */
716 last_index = cl_index(obj, size - 1);
717 if (last_index < fio->ft_index) { 1116 if (last_index < fio->ft_index) {
718 CDEBUG(D_PAGE, 1117 CDEBUG(D_PAGE,
719 "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n", 1118 "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
@@ -745,25 +1144,32 @@ static int vvp_io_fault_start(const struct lu_env *env,
745 */ 1144 */
746 if (fio->ft_mkwrite) { 1145 if (fio->ft_mkwrite) {
747 wait_on_page_writeback(vmpage); 1146 wait_on_page_writeback(vmpage);
748 if (set_page_dirty(vmpage)) { 1147 if (!PageDirty(vmpage)) {
749 struct ccc_page *cp; 1148 struct cl_page_list *plist = &io->ci_queue.c2_qin;
1149 struct vvp_page *vpg = cl_object_page_slice(obj, page);
1150 int to = PAGE_SIZE;
750 1151
751 /* vvp_page_assume() calls wait_on_page_writeback(). */ 1152 /* vvp_page_assume() calls wait_on_page_writeback(). */
752 cl_page_assume(env, io, page); 1153 cl_page_assume(env, io, page);
753 1154
754 cp = cl2ccc_page(cl_page_at(page, &vvp_device_type)); 1155 cl_page_list_init(plist);
755 vvp_write_pending(cl2ccc(obj), cp); 1156 cl_page_list_add(plist, page);
1157
1158 /* size fixup */
1159 if (last_index == vvp_index(vpg))
1160 to = size & ~PAGE_MASK;
756 1161
757 /* Do not set Dirty bit here so that in case IO is 1162 /* Do not set Dirty bit here so that in case IO is
758 * started before the page is really made dirty, we 1163 * started before the page is really made dirty, we
759 * still have chance to detect it. 1164 * still have chance to detect it.
760 */ 1165 */
761 result = cl_page_cache_add(env, io, page, CRT_WRITE); 1166 result = cl_io_commit_async(env, io, plist, 0, to,
1167 mkwrite_commit_callback);
762 LASSERT(cl_page_is_owned(page, io)); 1168 LASSERT(cl_page_is_owned(page, io));
1169 cl_page_list_fini(env, plist);
763 1170
764 vmpage = NULL; 1171 vmpage = NULL;
765 if (result < 0) { 1172 if (result < 0) {
766 cl_page_unmap(env, io, page);
767 cl_page_discard(env, io, page); 1173 cl_page_discard(env, io, page);
768 cl_page_disown(env, io, page); 1174 cl_page_disown(env, io, page);
769 1175
@@ -773,20 +1179,20 @@ static int vvp_io_fault_start(const struct lu_env *env,
773 if (result == -EDQUOT) 1179 if (result == -EDQUOT)
774 result = -ENOSPC; 1180 result = -ENOSPC;
775 goto out; 1181 goto out;
776 } else 1182 } else {
777 cl_page_disown(env, io, page); 1183 cl_page_disown(env, io, page);
1184 }
778 } 1185 }
779 } 1186 }
780 1187
781 last = cl_index(obj, size - 1);
782 /* 1188 /*
783 * The ft_index is only used in the case of 1189 * The ft_index is only used in the case of
784 * a mkwrite action. We need to check 1190 * a mkwrite action. We need to check
785 * our assertions are correct, since 1191 * our assertions are correct, since
786 * we should have caught this above 1192 * we should have caught this above
787 */ 1193 */
788 LASSERT(!fio->ft_mkwrite || fio->ft_index <= last); 1194 LASSERT(!fio->ft_mkwrite || fio->ft_index <= last_index);
789 if (fio->ft_index == last) 1195 if (fio->ft_index == last_index)
790 /* 1196 /*
791 * Last page is mapped partially. 1197 * Last page is mapped partially.
792 */ 1198 */
@@ -801,7 +1207,9 @@ out:
801 /* return unlocked vmpage to avoid deadlocking */ 1207 /* return unlocked vmpage to avoid deadlocking */
802 if (vmpage) 1208 if (vmpage)
803 unlock_page(vmpage); 1209 unlock_page(vmpage);
804 cfio->fault.ft_flags &= ~VM_FAULT_LOCKED; 1210
1211 cfio->ft_flags &= ~VM_FAULT_LOCKED;
1212
805 return result; 1213 return result;
806} 1214}
807 1215
@@ -820,293 +1228,58 @@ static int vvp_io_read_page(const struct lu_env *env,
820 const struct cl_page_slice *slice) 1228 const struct cl_page_slice *slice)
821{ 1229{
822 struct cl_io *io = ios->cis_io; 1230 struct cl_io *io = ios->cis_io;
823 struct cl_object *obj = slice->cpl_obj; 1231 struct vvp_page *vpg = cl2vvp_page(slice);
824 struct ccc_page *cp = cl2ccc_page(slice);
825 struct cl_page *page = slice->cpl_page; 1232 struct cl_page *page = slice->cpl_page;
826 struct inode *inode = ccc_object_inode(obj); 1233 struct inode *inode = vvp_object_inode(slice->cpl_obj);
827 struct ll_sb_info *sbi = ll_i2sbi(inode); 1234 struct ll_sb_info *sbi = ll_i2sbi(inode);
828 struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd; 1235 struct ll_file_data *fd = cl2vvp_io(env, ios)->vui_fd;
829 struct ll_readahead_state *ras = &fd->fd_ras; 1236 struct ll_readahead_state *ras = &fd->fd_ras;
830 struct page *vmpage = cp->cpg_page;
831 struct cl_2queue *queue = &io->ci_queue; 1237 struct cl_2queue *queue = &io->ci_queue;
832 int rc;
833
834 CLOBINVRNT(env, obj, ccc_object_invariant(obj));
835 LASSERT(slice->cpl_obj == obj);
836 1238
837 if (sbi->ll_ra_info.ra_max_pages_per_file && 1239 if (sbi->ll_ra_info.ra_max_pages_per_file &&
838 sbi->ll_ra_info.ra_max_pages) 1240 sbi->ll_ra_info.ra_max_pages)
839 ras_update(sbi, inode, ras, page->cp_index, 1241 ras_update(sbi, inode, ras, vvp_index(vpg),
840 cp->cpg_defer_uptodate); 1242 vpg->vpg_defer_uptodate);
841
842 /* Sanity check whether the page is protected by a lock. */
843 rc = cl_page_is_under_lock(env, io, page);
844 if (rc != -EBUSY) {
845 CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
846 rc == -ENODATA ? "without a lock" :
847 "match failed", rc);
848 if (rc != -ENODATA)
849 return rc;
850 }
851 1243
852 if (cp->cpg_defer_uptodate) { 1244 if (vpg->vpg_defer_uptodate) {
853 cp->cpg_ra_used = 1; 1245 vpg->vpg_ra_used = 1;
854 cl_page_export(env, page, 1); 1246 cl_page_export(env, page, 1);
855 } 1247 }
856 /* 1248 /*
857 * Add page into the queue even when it is marked uptodate above. 1249 * Add page into the queue even when it is marked uptodate above.
858 * this will unlock it automatically as part of cl_page_list_disown(). 1250 * this will unlock it automatically as part of cl_page_list_disown().
859 */ 1251 */
1252
860 cl_page_list_add(&queue->c2_qin, page); 1253 cl_page_list_add(&queue->c2_qin, page);
861 if (sbi->ll_ra_info.ra_max_pages_per_file && 1254 if (sbi->ll_ra_info.ra_max_pages_per_file &&
862 sbi->ll_ra_info.ra_max_pages) 1255 sbi->ll_ra_info.ra_max_pages)
863 ll_readahead(env, io, ras, 1256 ll_readahead(env, io, &queue->c2_qin, ras,
864 vmpage->mapping, &queue->c2_qin, fd->fd_flags); 1257 vpg->vpg_defer_uptodate);
865 1258
866 return 0; 1259 return 0;
867} 1260}
868 1261
869static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io, 1262void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
870 struct cl_page *page, struct ccc_page *cp,
871 enum cl_req_type crt)
872{ 1263{
873 struct cl_2queue *queue; 1264 CLOBINVRNT(env, ios->cis_io->ci_obj,
874 int result; 1265 vvp_object_invariant(ios->cis_io->ci_obj));
875
876 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
877
878 queue = &io->ci_queue;
879 cl_2queue_init_page(queue, page);
880
881 result = cl_io_submit_sync(env, io, crt, queue, 0);
882 LASSERT(cl_page_is_owned(page, io));
883
884 if (crt == CRT_READ)
885 /*
886 * in CRT_WRITE case page is left locked even in case of
887 * error.
888 */
889 cl_page_list_disown(env, io, &queue->c2_qin);
890 cl_2queue_fini(env, queue);
891
892 return result;
893}
894
895/**
896 * Prepare partially written-to page for a write.
897 */
898static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
899 struct cl_object *obj, struct cl_page *pg,
900 struct ccc_page *cp,
901 unsigned from, unsigned to)
902{
903 struct cl_attr *attr = ccc_env_thread_attr(env);
904 loff_t offset = cl_offset(obj, pg->cp_index);
905 int result;
906
907 cl_object_attr_lock(obj);
908 result = cl_object_attr_get(env, obj, attr);
909 cl_object_attr_unlock(obj);
910 if (result == 0) {
911 /*
912 * If are writing to a new page, no need to read old data.
913 * The extent locking will have updated the KMS, and for our
914 * purposes here we can treat it like i_size.
915 */
916 if (attr->cat_kms <= offset) {
917 char *kaddr = kmap_atomic(cp->cpg_page);
918
919 memset(kaddr, 0, cl_page_size(obj));
920 kunmap_atomic(kaddr);
921 } else if (cp->cpg_defer_uptodate)
922 cp->cpg_ra_used = 1;
923 else
924 result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
925 /*
926 * In older implementations, obdo_refresh_inode is called here
927 * to update the inode because the write might modify the
928 * object info at OST. However, this has been proven useless,
929 * since LVB functions will be called when user space program
930 * tries to retrieve inode attribute. Also, see bug 15909 for
931 * details. -jay
932 */
933 if (result == 0)
934 cl_page_export(env, pg, 1);
935 }
936 return result;
937}
938
939static int vvp_io_prepare_write(const struct lu_env *env,
940 const struct cl_io_slice *ios,
941 const struct cl_page_slice *slice,
942 unsigned from, unsigned to)
943{
944 struct cl_object *obj = slice->cpl_obj;
945 struct ccc_page *cp = cl2ccc_page(slice);
946 struct cl_page *pg = slice->cpl_page;
947 struct page *vmpage = cp->cpg_page;
948
949 int result;
950
951 LINVRNT(cl_page_is_vmlocked(env, pg));
952 LASSERT(vmpage->mapping->host == ccc_object_inode(obj));
953
954 result = 0;
955
956 CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
957 if (!PageUptodate(vmpage)) {
958 /*
959 * We're completely overwriting an existing page, so _don't_
960 * set it up to date until commit_write
961 */
962 if (from == 0 && to == PAGE_SIZE) {
963 CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
964 POISON_PAGE(page, 0x11);
965 } else
966 result = vvp_io_prepare_partial(env, ios->cis_io, obj,
967 pg, cp, from, to);
968 } else
969 CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
970 return result;
971}
972
973static int vvp_io_commit_write(const struct lu_env *env,
974 const struct cl_io_slice *ios,
975 const struct cl_page_slice *slice,
976 unsigned from, unsigned to)
977{
978 struct cl_object *obj = slice->cpl_obj;
979 struct cl_io *io = ios->cis_io;
980 struct ccc_page *cp = cl2ccc_page(slice);
981 struct cl_page *pg = slice->cpl_page;
982 struct inode *inode = ccc_object_inode(obj);
983 struct ll_sb_info *sbi = ll_i2sbi(inode);
984 struct ll_inode_info *lli = ll_i2info(inode);
985 struct page *vmpage = cp->cpg_page;
986
987 int result;
988 int tallyop;
989 loff_t size;
990
991 LINVRNT(cl_page_is_vmlocked(env, pg));
992 LASSERT(vmpage->mapping->host == inode);
993
994 LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
995 CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);
996
997 /*
998 * queue a write for some time in the future the first time we
999 * dirty the page.
1000 *
1001 * This is different from what other file systems do: they usually
1002 * just mark page (and some of its buffers) dirty and rely on
1003 * balance_dirty_pages() to start a write-back. Lustre wants write-back
1004 * to be started earlier for the following reasons:
1005 *
1006 * (1) with a large number of clients we need to limit the amount
1007 * of cached data on the clients a lot;
1008 *
1009 * (2) large compute jobs generally want compute-only then io-only
1010 * and the IO should complete as quickly as possible;
1011 *
1012 * (3) IO is batched up to the RPC size and is async until the
1013 * client max cache is hit
1014 * (/sys/fs/lustre/osc/OSC.../max_dirty_mb)
1015 *
1016 */
1017 if (!PageDirty(vmpage)) {
1018 tallyop = LPROC_LL_DIRTY_MISSES;
1019 result = cl_page_cache_add(env, io, pg, CRT_WRITE);
1020 if (result == 0) {
1021 /* page was added into cache successfully. */
1022 set_page_dirty(vmpage);
1023 vvp_write_pending(cl2ccc(obj), cp);
1024 } else if (result == -EDQUOT) {
1025 pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT;
1026 bool need_clip = true;
1027
1028 /*
1029 * Client ran out of disk space grant. Possible
1030 * strategies are:
1031 *
1032 * (a) do a sync write, renewing grant;
1033 *
1034 * (b) stop writing on this stripe, switch to the
1035 * next one.
1036 *
1037 * (b) is a part of "parallel io" design that is the
1038 * ultimate goal. (a) is what "old" client did, and
1039 * what the new code continues to do for the time
1040 * being.
1041 */
1042 if (last_index > pg->cp_index) {
1043 to = PAGE_SIZE;
1044 need_clip = false;
1045 } else if (last_index == pg->cp_index) {
1046 int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
1047
1048 if (to < size_to)
1049 to = size_to;
1050 }
1051 if (need_clip)
1052 cl_page_clip(env, pg, 0, to);
1053 result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
1054 if (result)
1055 CERROR("Write page %lu of inode %p failed %d\n",
1056 pg->cp_index, inode, result);
1057 }
1058 } else {
1059 tallyop = LPROC_LL_DIRTY_HITS;
1060 result = 0;
1061 }
1062 ll_stats_ops_tally(sbi, tallyop, 1);
1063
1064 /* Inode should be marked DIRTY even if no new page was marked DIRTY
1065 * because page could have been not flushed between 2 modifications.
1066 * It is important the file is marked DIRTY as soon as the I/O is done
1067 * Indeed, when cache is flushed, file could be already closed and it
1068 * is too late to warn the MDT.
1069 * It is acceptable that file is marked DIRTY even if I/O is dropped
1070 * for some reasons before being flushed to OST.
1071 */
1072 if (result == 0) {
1073 spin_lock(&lli->lli_lock);
1074 lli->lli_flags |= LLIF_DATA_MODIFIED;
1075 spin_unlock(&lli->lli_lock);
1076 }
1077
1078 size = cl_offset(obj, pg->cp_index) + to;
1079
1080 ll_inode_size_lock(inode);
1081 if (result == 0) {
1082 if (size > i_size_read(inode)) {
1083 cl_isize_write_nolock(inode, size);
1084 CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
1085 PFID(lu_object_fid(&obj->co_lu)),
1086 (unsigned long)size);
1087 }
1088 cl_page_export(env, pg, 1);
1089 } else {
1090 if (size > i_size_read(inode))
1091 cl_page_discard(env, io, pg);
1092 }
1093 ll_inode_size_unlock(inode);
1094 return result;
1095} 1266}
1096 1267
1097static const struct cl_io_operations vvp_io_ops = { 1268static const struct cl_io_operations vvp_io_ops = {
1098 .op = { 1269 .op = {
1099 [CIT_READ] = { 1270 [CIT_READ] = {
1100 .cio_fini = vvp_io_read_fini, 1271 .cio_fini = vvp_io_fini,
1101 .cio_lock = vvp_io_read_lock, 1272 .cio_lock = vvp_io_read_lock,
1102 .cio_start = vvp_io_read_start, 1273 .cio_start = vvp_io_read_start,
1103 .cio_advance = ccc_io_advance 1274 .cio_advance = vvp_io_advance,
1104 }, 1275 },
1105 [CIT_WRITE] = { 1276 [CIT_WRITE] = {
1106 .cio_fini = vvp_io_fini, 1277 .cio_fini = vvp_io_fini,
1278 .cio_iter_init = vvp_io_write_iter_init,
1279 .cio_iter_fini = vvp_io_write_iter_fini,
1107 .cio_lock = vvp_io_write_lock, 1280 .cio_lock = vvp_io_write_lock,
1108 .cio_start = vvp_io_write_start, 1281 .cio_start = vvp_io_write_start,
1109 .cio_advance = ccc_io_advance 1282 .cio_advance = vvp_io_advance,
1110 }, 1283 },
1111 [CIT_SETATTR] = { 1284 [CIT_SETATTR] = {
1112 .cio_fini = vvp_io_setattr_fini, 1285 .cio_fini = vvp_io_setattr_fini,
@@ -1120,7 +1293,7 @@ static const struct cl_io_operations vvp_io_ops = {
1120 .cio_iter_init = vvp_io_fault_iter_init, 1293 .cio_iter_init = vvp_io_fault_iter_init,
1121 .cio_lock = vvp_io_fault_lock, 1294 .cio_lock = vvp_io_fault_lock,
1122 .cio_start = vvp_io_fault_start, 1295 .cio_start = vvp_io_fault_start,
1123 .cio_end = ccc_io_end 1296 .cio_end = vvp_io_end,
1124 }, 1297 },
1125 [CIT_FSYNC] = { 1298 [CIT_FSYNC] = {
1126 .cio_start = vvp_io_fsync_start, 1299 .cio_start = vvp_io_fsync_start,
@@ -1131,29 +1304,26 @@ static const struct cl_io_operations vvp_io_ops = {
1131 } 1304 }
1132 }, 1305 },
1133 .cio_read_page = vvp_io_read_page, 1306 .cio_read_page = vvp_io_read_page,
1134 .cio_prepare_write = vvp_io_prepare_write,
1135 .cio_commit_write = vvp_io_commit_write
1136}; 1307};
1137 1308
1138int vvp_io_init(const struct lu_env *env, struct cl_object *obj, 1309int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
1139 struct cl_io *io) 1310 struct cl_io *io)
1140{ 1311{
1141 struct vvp_io *vio = vvp_env_io(env); 1312 struct vvp_io *vio = vvp_env_io(env);
1142 struct ccc_io *cio = ccc_env_io(env); 1313 struct inode *inode = vvp_object_inode(obj);
1143 struct inode *inode = ccc_object_inode(obj);
1144 int result; 1314 int result;
1145 1315
1146 CLOBINVRNT(env, obj, ccc_object_invariant(obj)); 1316 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
1147 1317
1148 CDEBUG(D_VFSTRACE, DFID 1318 CDEBUG(D_VFSTRACE, DFID
1149 " ignore/verify layout %d/%d, layout version %d restore needed %d\n", 1319 " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
1150 PFID(lu_object_fid(&obj->co_lu)), 1320 PFID(lu_object_fid(&obj->co_lu)),
1151 io->ci_ignore_layout, io->ci_verify_layout, 1321 io->ci_ignore_layout, io->ci_verify_layout,
1152 cio->cui_layout_gen, io->ci_restore_needed); 1322 vio->vui_layout_gen, io->ci_restore_needed);
1153 1323
1154 CL_IO_SLICE_CLEAN(cio, cui_cl); 1324 CL_IO_SLICE_CLEAN(vio, vui_cl);
1155 cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops); 1325 cl_io_slice_add(io, &vio->vui_cl, obj, &vvp_io_ops);
1156 vio->cui_ra_window_set = 0; 1326 vio->vui_ra_valid = false;
1157 result = 0; 1327 result = 0;
1158 if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) { 1328 if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
1159 size_t count; 1329 size_t count;
@@ -1166,7 +1336,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
1166 if (count == 0) 1336 if (count == 0)
1167 result = 1; 1337 result = 1;
1168 else 1338 else
1169 cio->cui_tot_count = count; 1339 vio->vui_tot_count = count;
1170 1340
1171 /* for read/write, we store the jobid in the inode, and 1341 /* for read/write, we store the jobid in the inode, and
1172 * it'll be fetched by osc when building RPC. 1342 * it'll be fetched by osc when building RPC.
@@ -1192,7 +1362,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
1192 * because it might not grant layout lock in IT_OPEN. 1362 * because it might not grant layout lock in IT_OPEN.
1193 */ 1363 */
1194 if (result == 0 && !io->ci_ignore_layout) { 1364 if (result == 0 && !io->ci_ignore_layout) {
1195 result = ll_layout_refresh(inode, &cio->cui_layout_gen); 1365 result = ll_layout_refresh(inode, &vio->vui_layout_gen);
1196 if (result == -ENOENT) 1366 if (result == -ENOENT)
1197 /* If the inode on MDS has been removed, but the objects 1367 /* If the inode on MDS has been removed, but the objects
1198 * on OSTs haven't been destroyed (async unlink), layout 1368 * on OSTs haven't been destroyed (async unlink), layout
@@ -1208,11 +1378,3 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
1208 1378
1209 return result; 1379 return result;
1210} 1380}
1211
1212static struct vvp_io *cl2vvp_io(const struct lu_env *env,
1213 const struct cl_io_slice *slice)
1214{
1215 /* Calling just for assertion */
1216 cl2ccc_io(env, slice);
1217 return vvp_env_io(env);
1218}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
index ff0948043c7a..f5bd6c22e112 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_lock.c
@@ -40,7 +40,7 @@
40 40
41#define DEBUG_SUBSYSTEM S_LLITE 41#define DEBUG_SUBSYSTEM S_LLITE
42 42
43#include "../include/obd.h" 43#include "../include/obd_support.h"
44#include "../include/lustre_lite.h" 44#include "../include/lustre_lite.h"
45 45
46#include "vvp_internal.h" 46#include "vvp_internal.h"
@@ -51,36 +51,41 @@
51 * 51 *
52 */ 52 */
53 53
54/** 54static void vvp_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
55 * Estimates lock value for the purpose of managing the lock cache during 55{
56 * memory shortages. 56 struct vvp_lock *vlk = cl2vvp_lock(slice);
57 * 57
58 * Locks for memory mapped files are almost infinitely precious, others are 58 kmem_cache_free(vvp_lock_kmem, vlk);
59 * junk. "Mapped locks" are heavy, but not infinitely heavy, so that they are 59}
60 * ordered within themselves by weights assigned from other layers. 60
61 */ 61static int vvp_lock_enqueue(const struct lu_env *env,
62static unsigned long vvp_lock_weigh(const struct lu_env *env, 62 const struct cl_lock_slice *slice,
63 const struct cl_lock_slice *slice) 63 struct cl_io *unused, struct cl_sync_io *anchor)
64{ 64{
65 struct ccc_object *cob = cl2ccc(slice->cls_obj); 65 CLOBINVRNT(env, slice->cls_obj, vvp_object_invariant(slice->cls_obj));
66 66
67 return atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0; 67 return 0;
68} 68}
69 69
70static const struct cl_lock_operations vvp_lock_ops = { 70static const struct cl_lock_operations vvp_lock_ops = {
71 .clo_delete = ccc_lock_delete, 71 .clo_fini = vvp_lock_fini,
72 .clo_fini = ccc_lock_fini, 72 .clo_enqueue = vvp_lock_enqueue,
73 .clo_enqueue = ccc_lock_enqueue,
74 .clo_wait = ccc_lock_wait,
75 .clo_use = ccc_lock_use,
76 .clo_unuse = ccc_lock_unuse,
77 .clo_fits_into = ccc_lock_fits_into,
78 .clo_state = ccc_lock_state,
79 .clo_weigh = vvp_lock_weigh
80}; 73};
81 74
82int vvp_lock_init(const struct lu_env *env, struct cl_object *obj, 75int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
83 struct cl_lock *lock, const struct cl_io *io) 76 struct cl_lock *lock, const struct cl_io *unused)
84{ 77{
85 return ccc_lock_init(env, obj, lock, io, &vvp_lock_ops); 78 struct vvp_lock *vlk;
79 int result;
80
81 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
82
83 vlk = kmem_cache_zalloc(vvp_lock_kmem, GFP_NOFS);
84 if (vlk) {
85 cl_lock_slice_add(lock, &vlk->vlk_cl, obj, &vvp_lock_ops);
86 result = 0;
87 } else {
88 result = -ENOMEM;
89 }
90 return result;
86} 91}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index 03c887d8ed83..18c9df7ebdda 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -45,6 +45,7 @@
45#include "../include/obd.h" 45#include "../include/obd.h"
46#include "../include/lustre_lite.h" 46#include "../include/lustre_lite.h"
47 47
48#include "llite_internal.h"
48#include "vvp_internal.h" 49#include "vvp_internal.h"
49 50
50/***************************************************************************** 51/*****************************************************************************
@@ -53,16 +54,25 @@
53 * 54 *
54 */ 55 */
55 56
57int vvp_object_invariant(const struct cl_object *obj)
58{
59 struct inode *inode = vvp_object_inode(obj);
60 struct ll_inode_info *lli = ll_i2info(inode);
61
62 return (S_ISREG(inode->i_mode) || inode->i_mode == 0) &&
63 lli->lli_clob == obj;
64}
65
56static int vvp_object_print(const struct lu_env *env, void *cookie, 66static int vvp_object_print(const struct lu_env *env, void *cookie,
57 lu_printer_t p, const struct lu_object *o) 67 lu_printer_t p, const struct lu_object *o)
58{ 68{
59 struct ccc_object *obj = lu2ccc(o); 69 struct vvp_object *obj = lu2vvp(o);
60 struct inode *inode = obj->cob_inode; 70 struct inode *inode = obj->vob_inode;
61 struct ll_inode_info *lli; 71 struct ll_inode_info *lli;
62 72
63 (*p)(env, cookie, "(%s %d %d) inode: %p ", 73 (*p)(env, cookie, "(%s %d %d) inode: %p ",
64 list_empty(&obj->cob_pending_list) ? "-" : "+", 74 list_empty(&obj->vob_pending_list) ? "-" : "+",
65 obj->cob_transient_pages, atomic_read(&obj->cob_mmap_cnt), 75 obj->vob_transient_pages, atomic_read(&obj->vob_mmap_cnt),
66 inode); 76 inode);
67 if (inode) { 77 if (inode) {
68 lli = ll_i2info(inode); 78 lli = ll_i2info(inode);
@@ -77,7 +87,7 @@ static int vvp_object_print(const struct lu_env *env, void *cookie,
77static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj, 87static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
78 struct cl_attr *attr) 88 struct cl_attr *attr)
79{ 89{
80 struct inode *inode = ccc_object_inode(obj); 90 struct inode *inode = vvp_object_inode(obj);
81 91
82 /* 92 /*
83 * lov overwrites most of these fields in 93 * lov overwrites most of these fields in
@@ -99,7 +109,7 @@ static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
99static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj, 109static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
100 const struct cl_attr *attr, unsigned valid) 110 const struct cl_attr *attr, unsigned valid)
101{ 111{
102 struct inode *inode = ccc_object_inode(obj); 112 struct inode *inode = vvp_object_inode(obj);
103 113
104 if (valid & CAT_UID) 114 if (valid & CAT_UID)
105 inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid); 115 inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
@@ -112,7 +122,7 @@ static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
112 if (valid & CAT_CTIME) 122 if (valid & CAT_CTIME)
113 inode->i_ctime.tv_sec = attr->cat_ctime; 123 inode->i_ctime.tv_sec = attr->cat_ctime;
114 if (0 && valid & CAT_SIZE) 124 if (0 && valid & CAT_SIZE)
115 cl_isize_write_nolock(inode, attr->cat_size); 125 i_size_write(inode, attr->cat_size);
116 /* not currently necessary */ 126 /* not currently necessary */
117 if (0 && valid & (CAT_UID|CAT_GID|CAT_SIZE)) 127 if (0 && valid & (CAT_UID|CAT_GID|CAT_SIZE))
118 mark_inode_dirty(inode); 128 mark_inode_dirty(inode);
@@ -165,6 +175,40 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
165 return 0; 175 return 0;
166} 176}
167 177
178static int vvp_prune(const struct lu_env *env, struct cl_object *obj)
179{
180 struct inode *inode = vvp_object_inode(obj);
181 int rc;
182
183 rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 1);
184 if (rc < 0) {
185 CDEBUG(D_VFSTRACE, DFID ": writeback failed: %d\n",
186 PFID(lu_object_fid(&obj->co_lu)), rc);
187 return rc;
188 }
189
190 truncate_inode_pages(inode->i_mapping, 0);
191 return 0;
192}
193
194static int vvp_object_glimpse(const struct lu_env *env,
195 const struct cl_object *obj, struct ost_lvb *lvb)
196{
197 struct inode *inode = vvp_object_inode(obj);
198
199 lvb->lvb_mtime = LTIME_S(inode->i_mtime);
200 lvb->lvb_atime = LTIME_S(inode->i_atime);
201 lvb->lvb_ctime = LTIME_S(inode->i_ctime);
202 /*
203 * LU-417: Add dirty pages block count lest i_blocks reports 0, some
204 * "cp" or "tar" on remote node may think it's a completely sparse file
205 * and skip it.
206 */
207 if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
208 lvb->lvb_blocks = dirty_cnt(inode);
209 return 0;
210}
211
168static const struct cl_object_operations vvp_ops = { 212static const struct cl_object_operations vvp_ops = {
169 .coo_page_init = vvp_page_init, 213 .coo_page_init = vvp_page_init,
170 .coo_lock_init = vvp_lock_init, 214 .coo_lock_init = vvp_lock_init,
@@ -172,29 +216,94 @@ static const struct cl_object_operations vvp_ops = {
172 .coo_attr_get = vvp_attr_get, 216 .coo_attr_get = vvp_attr_get,
173 .coo_attr_set = vvp_attr_set, 217 .coo_attr_set = vvp_attr_set,
174 .coo_conf_set = vvp_conf_set, 218 .coo_conf_set = vvp_conf_set,
175 .coo_glimpse = ccc_object_glimpse 219 .coo_prune = vvp_prune,
220 .coo_glimpse = vvp_object_glimpse
176}; 221};
177 222
223static int vvp_object_init0(const struct lu_env *env,
224 struct vvp_object *vob,
225 const struct cl_object_conf *conf)
226{
227 vob->vob_inode = conf->coc_inode;
228 vob->vob_transient_pages = 0;
229 cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page));
230 return 0;
231}
232
233static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
234 const struct lu_object_conf *conf)
235{
236 struct vvp_device *dev = lu2vvp_dev(obj->lo_dev);
237 struct vvp_object *vob = lu2vvp(obj);
238 struct lu_object *below;
239 struct lu_device *under;
240 int result;
241
242 under = &dev->vdv_next->cd_lu_dev;
243 below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
244 if (below) {
245 const struct cl_object_conf *cconf;
246
247 cconf = lu2cl_conf(conf);
248 INIT_LIST_HEAD(&vob->vob_pending_list);
249 lu_object_add(obj, below);
250 result = vvp_object_init0(env, vob, cconf);
251 } else {
252 result = -ENOMEM;
253 }
254
255 return result;
256}
257
258static void vvp_object_free(const struct lu_env *env, struct lu_object *obj)
259{
260 struct vvp_object *vob = lu2vvp(obj);
261
262 lu_object_fini(obj);
263 lu_object_header_fini(obj->lo_header);
264 kmem_cache_free(vvp_object_kmem, vob);
265}
266
178static const struct lu_object_operations vvp_lu_obj_ops = { 267static const struct lu_object_operations vvp_lu_obj_ops = {
179 .loo_object_init = ccc_object_init, 268 .loo_object_init = vvp_object_init,
180 .loo_object_free = ccc_object_free, 269 .loo_object_free = vvp_object_free,
181 .loo_object_print = vvp_object_print 270 .loo_object_print = vvp_object_print,
182}; 271};
183 272
184struct ccc_object *cl_inode2ccc(struct inode *inode) 273struct vvp_object *cl_inode2vvp(struct inode *inode)
185{ 274{
186 struct cl_inode_info *lli = cl_i2info(inode); 275 struct ll_inode_info *lli = ll_i2info(inode);
187 struct cl_object *obj = lli->lli_clob; 276 struct cl_object *obj = lli->lli_clob;
188 struct lu_object *lu; 277 struct lu_object *lu;
189 278
190 lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type); 279 lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
191 LASSERT(lu); 280 LASSERT(lu);
192 return lu2ccc(lu); 281 return lu2vvp(lu);
193} 282}
194 283
195struct lu_object *vvp_object_alloc(const struct lu_env *env, 284struct lu_object *vvp_object_alloc(const struct lu_env *env,
196 const struct lu_object_header *hdr, 285 const struct lu_object_header *unused,
197 struct lu_device *dev) 286 struct lu_device *dev)
198{ 287{
199 return ccc_object_alloc(env, hdr, dev, &vvp_ops, &vvp_lu_obj_ops); 288 struct vvp_object *vob;
289 struct lu_object *obj;
290
291 vob = kmem_cache_zalloc(vvp_object_kmem, GFP_NOFS);
292 if (vob) {
293 struct cl_object_header *hdr;
294
295 obj = &vob->vob_cl.co_lu;
296 hdr = &vob->vob_header;
297 cl_object_header_init(hdr);
298 hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
299
300 lu_object_init(obj, &hdr->coh_lu, dev);
301 lu_object_add_top(&hdr->coh_lu, obj);
302
303 vob->vob_cl.co_ops = &vvp_ops;
304 obj->lo_ops = &vvp_lu_obj_ops;
305 } else {
306 obj = NULL;
307 }
308 return obj;
200} 309}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 33ca3eb34965..6cd2af7a958f 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -41,9 +41,16 @@
41 41
42#define DEBUG_SUBSYSTEM S_LLITE 42#define DEBUG_SUBSYSTEM S_LLITE
43 43
44#include "../include/obd.h" 44#include <linux/atomic.h>
45#include <linux/bitops.h>
46#include <linux/mm.h>
47#include <linux/mutex.h>
48#include <linux/page-flags.h>
49#include <linux/pagemap.h>
50
45#include "../include/lustre_lite.h" 51#include "../include/lustre_lite.h"
46 52
53#include "llite_internal.h"
47#include "vvp_internal.h" 54#include "vvp_internal.h"
48 55
49/***************************************************************************** 56/*****************************************************************************
@@ -52,9 +59,9 @@
52 * 59 *
53 */ 60 */
54 61
55static void vvp_page_fini_common(struct ccc_page *cp) 62static void vvp_page_fini_common(struct vvp_page *vpg)
56{ 63{
57 struct page *vmpage = cp->cpg_page; 64 struct page *vmpage = vpg->vpg_page;
58 65
59 LASSERT(vmpage); 66 LASSERT(vmpage);
60 put_page(vmpage); 67 put_page(vmpage);
@@ -63,23 +70,23 @@ static void vvp_page_fini_common(struct ccc_page *cp)
63static void vvp_page_fini(const struct lu_env *env, 70static void vvp_page_fini(const struct lu_env *env,
64 struct cl_page_slice *slice) 71 struct cl_page_slice *slice)
65{ 72{
66 struct ccc_page *cp = cl2ccc_page(slice); 73 struct vvp_page *vpg = cl2vvp_page(slice);
67 struct page *vmpage = cp->cpg_page; 74 struct page *vmpage = vpg->vpg_page;
68 75
69 /* 76 /*
70 * vmpage->private was already cleared when page was moved into 77 * vmpage->private was already cleared when page was moved into
71 * VPG_FREEING state. 78 * VPG_FREEING state.
72 */ 79 */
73 LASSERT((struct cl_page *)vmpage->private != slice->cpl_page); 80 LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
74 vvp_page_fini_common(cp); 81 vvp_page_fini_common(vpg);
75} 82}
76 83
77static int vvp_page_own(const struct lu_env *env, 84static int vvp_page_own(const struct lu_env *env,
78 const struct cl_page_slice *slice, struct cl_io *io, 85 const struct cl_page_slice *slice, struct cl_io *io,
79 int nonblock) 86 int nonblock)
80{ 87{
81 struct ccc_page *vpg = cl2ccc_page(slice); 88 struct vvp_page *vpg = cl2vvp_page(slice);
82 struct page *vmpage = vpg->cpg_page; 89 struct page *vmpage = vpg->vpg_page;
83 90
84 LASSERT(vmpage); 91 LASSERT(vmpage);
85 if (nonblock) { 92 if (nonblock) {
@@ -96,6 +103,7 @@ static int vvp_page_own(const struct lu_env *env,
96 103
97 lock_page(vmpage); 104 lock_page(vmpage);
98 wait_on_page_writeback(vmpage); 105 wait_on_page_writeback(vmpage);
106
99 return 0; 107 return 0;
100} 108}
101 109
@@ -136,41 +144,15 @@ static void vvp_page_discard(const struct lu_env *env,
136 struct cl_io *unused) 144 struct cl_io *unused)
137{ 145{
138 struct page *vmpage = cl2vm_page(slice); 146 struct page *vmpage = cl2vm_page(slice);
139 struct address_space *mapping; 147 struct vvp_page *vpg = cl2vvp_page(slice);
140 struct ccc_page *cpg = cl2ccc_page(slice);
141 148
142 LASSERT(vmpage); 149 LASSERT(vmpage);
143 LASSERT(PageLocked(vmpage)); 150 LASSERT(PageLocked(vmpage));
144 151
145 mapping = vmpage->mapping; 152 if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
146 153 ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
147 if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
148 ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
149 154
150 /* 155 ll_invalidate_page(vmpage);
151 * truncate_complete_page() calls
152 * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
153 */
154 truncate_complete_page(mapping, vmpage);
155}
156
157static int vvp_page_unmap(const struct lu_env *env,
158 const struct cl_page_slice *slice,
159 struct cl_io *unused)
160{
161 struct page *vmpage = cl2vm_page(slice);
162 __u64 offset;
163
164 LASSERT(vmpage);
165 LASSERT(PageLocked(vmpage));
166
167 offset = vmpage->index << PAGE_SHIFT;
168
169 /*
170 * XXX is it safe to call this with the page lock held?
171 */
172 ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
173 return 0;
174} 156}
175 157
176static void vvp_page_delete(const struct lu_env *env, 158static void vvp_page_delete(const struct lu_env *env,
@@ -179,12 +161,20 @@ static void vvp_page_delete(const struct lu_env *env,
179 struct page *vmpage = cl2vm_page(slice); 161 struct page *vmpage = cl2vm_page(slice);
180 struct inode *inode = vmpage->mapping->host; 162 struct inode *inode = vmpage->mapping->host;
181 struct cl_object *obj = slice->cpl_obj; 163 struct cl_object *obj = slice->cpl_obj;
164 struct cl_page *page = slice->cpl_page;
165 int refc;
182 166
183 LASSERT(PageLocked(vmpage)); 167 LASSERT(PageLocked(vmpage));
184 LASSERT((struct cl_page *)vmpage->private == slice->cpl_page); 168 LASSERT((struct cl_page *)vmpage->private == page);
185 LASSERT(inode == ccc_object_inode(obj)); 169 LASSERT(inode == vvp_object_inode(obj));
186 170
187 vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice)); 171 vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));
172
173 /* Drop the reference count held in vvp_page_init */
174 refc = atomic_dec_return(&page->cp_ref);
175 LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
176
177 ClearPageUptodate(vmpage);
188 ClearPagePrivate(vmpage); 178 ClearPagePrivate(vmpage);
189 vmpage->private = 0; 179 vmpage->private = 0;
190 /* 180 /*
@@ -237,7 +227,7 @@ static int vvp_page_prep_write(const struct lu_env *env,
237 if (!pg->cp_sync_io) 227 if (!pg->cp_sync_io)
238 set_page_writeback(vmpage); 228 set_page_writeback(vmpage);
239 229
240 vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice)); 230 vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
241 231
242 return 0; 232 return 0;
243} 233}
@@ -250,11 +240,11 @@ static int vvp_page_prep_write(const struct lu_env *env,
250 */ 240 */
251static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret) 241static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
252{ 242{
253 struct ccc_object *obj = cl_inode2ccc(inode); 243 struct vvp_object *obj = cl_inode2vvp(inode);
254 244
255 if (ioret == 0) { 245 if (ioret == 0) {
256 ClearPageError(vmpage); 246 ClearPageError(vmpage);
257 obj->cob_discard_page_warned = 0; 247 obj->vob_discard_page_warned = 0;
258 } else { 248 } else {
259 SetPageError(vmpage); 249 SetPageError(vmpage);
260 if (ioret == -ENOSPC) 250 if (ioret == -ENOSPC)
@@ -263,8 +253,8 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret
263 set_bit(AS_EIO, &inode->i_mapping->flags); 253 set_bit(AS_EIO, &inode->i_mapping->flags);
264 254
265 if ((ioret == -ESHUTDOWN || ioret == -EINTR) && 255 if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
266 obj->cob_discard_page_warned == 0) { 256 obj->vob_discard_page_warned == 0) {
267 obj->cob_discard_page_warned = 1; 257 obj->vob_discard_page_warned = 1;
268 ll_dirty_page_discard_warn(vmpage, ioret); 258 ll_dirty_page_discard_warn(vmpage, ioret);
269 } 259 }
270 } 260 }
@@ -274,22 +264,23 @@ static void vvp_page_completion_read(const struct lu_env *env,
274 const struct cl_page_slice *slice, 264 const struct cl_page_slice *slice,
275 int ioret) 265 int ioret)
276{ 266{
277 struct ccc_page *cp = cl2ccc_page(slice); 267 struct vvp_page *vpg = cl2vvp_page(slice);
278 struct page *vmpage = cp->cpg_page; 268 struct page *vmpage = vpg->vpg_page;
279 struct cl_page *page = cl_page_top(slice->cpl_page); 269 struct cl_page *page = slice->cpl_page;
280 struct inode *inode = ccc_object_inode(page->cp_obj); 270 struct inode *inode = vvp_object_inode(page->cp_obj);
281 271
282 LASSERT(PageLocked(vmpage)); 272 LASSERT(PageLocked(vmpage));
283 CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret); 273 CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
284 274
285 if (cp->cpg_defer_uptodate) 275 if (vpg->vpg_defer_uptodate)
286 ll_ra_count_put(ll_i2sbi(inode), 1); 276 ll_ra_count_put(ll_i2sbi(inode), 1);
287 277
288 if (ioret == 0) { 278 if (ioret == 0) {
289 if (!cp->cpg_defer_uptodate) 279 if (!vpg->vpg_defer_uptodate)
290 cl_page_export(env, page, 1); 280 cl_page_export(env, page, 1);
291 } else 281 } else {
292 cp->cpg_defer_uptodate = 0; 282 vpg->vpg_defer_uptodate = 0;
283 }
293 284
294 if (!page->cp_sync_io) 285 if (!page->cp_sync_io)
295 unlock_page(vmpage); 286 unlock_page(vmpage);
@@ -299,9 +290,9 @@ static void vvp_page_completion_write(const struct lu_env *env,
299 const struct cl_page_slice *slice, 290 const struct cl_page_slice *slice,
300 int ioret) 291 int ioret)
301{ 292{
302 struct ccc_page *cp = cl2ccc_page(slice); 293 struct vvp_page *vpg = cl2vvp_page(slice);
303 struct cl_page *pg = slice->cpl_page; 294 struct cl_page *pg = slice->cpl_page;
304 struct page *vmpage = cp->cpg_page; 295 struct page *vmpage = vpg->vpg_page;
305 296
306 CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret); 297 CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
307 298
@@ -315,8 +306,8 @@ static void vvp_page_completion_write(const struct lu_env *env,
315 * and then re-add the page into pending transfer queue. -jay 306 * and then re-add the page into pending transfer queue. -jay
316 */ 307 */
317 308
318 cp->cpg_write_queued = 0; 309 vpg->vpg_write_queued = 0;
319 vvp_write_complete(cl2ccc(slice->cpl_obj), cp); 310 vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);
320 311
321 if (pg->cp_sync_io) { 312 if (pg->cp_sync_io) {
322 LASSERT(PageLocked(vmpage)); 313 LASSERT(PageLocked(vmpage));
@@ -327,7 +318,7 @@ static void vvp_page_completion_write(const struct lu_env *env,
327 * Only mark the page error only when it's an async write 318 * Only mark the page error only when it's an async write
328 * because applications won't wait for IO to finish. 319 * because applications won't wait for IO to finish.
329 */ 320 */
330 vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret); 321 vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);
331 322
332 end_page_writeback(vmpage); 323 end_page_writeback(vmpage);
333 } 324 }
@@ -359,7 +350,7 @@ static int vvp_page_make_ready(const struct lu_env *env,
359 LASSERT(pg->cp_state == CPS_CACHED); 350 LASSERT(pg->cp_state == CPS_CACHED);
360 /* This actually clears the dirty bit in the radix tree. */ 351 /* This actually clears the dirty bit in the radix tree. */
361 set_page_writeback(vmpage); 352 set_page_writeback(vmpage);
362 vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice)); 353 vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
363 CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n"); 354 CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
364 } else if (pg->cp_state == CPS_PAGEOUT) { 355 } else if (pg->cp_state == CPS_PAGEOUT) {
365 /* is it possible for osc_flush_async_page() to already 356 /* is it possible for osc_flush_async_page() to already
@@ -375,24 +366,51 @@ static int vvp_page_make_ready(const struct lu_env *env,
375 return result; 366 return result;
376} 367}
377 368
369static int vvp_page_is_under_lock(const struct lu_env *env,
370 const struct cl_page_slice *slice,
371 struct cl_io *io, pgoff_t *max_index)
372{
373 if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
374 io->ci_type == CIT_FAULT) {
375 struct vvp_io *vio = vvp_env_io(env);
376
377 if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
378 *max_index = CL_PAGE_EOF;
379 }
380 return 0;
381}
382
378static int vvp_page_print(const struct lu_env *env, 383static int vvp_page_print(const struct lu_env *env,
379 const struct cl_page_slice *slice, 384 const struct cl_page_slice *slice,
380 void *cookie, lu_printer_t printer) 385 void *cookie, lu_printer_t printer)
381{ 386{
382 struct ccc_page *vp = cl2ccc_page(slice); 387 struct vvp_page *vpg = cl2vvp_page(slice);
383 struct page *vmpage = vp->cpg_page; 388 struct page *vmpage = vpg->vpg_page;
384 389
385 (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ", 390 (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
386 vp, vp->cpg_defer_uptodate, vp->cpg_ra_used, 391 vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used,
387 vp->cpg_write_queued, vmpage); 392 vpg->vpg_write_queued, vmpage);
388 if (vmpage) { 393 if (vmpage) {
389 (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru", 394 (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
390 (long)vmpage->flags, page_count(vmpage), 395 (long)vmpage->flags, page_count(vmpage),
391 page_mapcount(vmpage), vmpage->private, 396 page_mapcount(vmpage), vmpage->private,
392 page_index(vmpage), 397 vmpage->index,
393 list_empty(&vmpage->lru) ? "not-" : ""); 398 list_empty(&vmpage->lru) ? "not-" : "");
394 } 399 }
400
395 (*printer)(env, cookie, "\n"); 401 (*printer)(env, cookie, "\n");
402
403 return 0;
404}
405
406static int vvp_page_fail(const struct lu_env *env,
407 const struct cl_page_slice *slice)
408{
409 /*
410 * Cached read?
411 */
412 LBUG();
413
396 return 0; 414 return 0;
397} 415}
398 416
@@ -401,32 +419,38 @@ static const struct cl_page_operations vvp_page_ops = {
401 .cpo_assume = vvp_page_assume, 419 .cpo_assume = vvp_page_assume,
402 .cpo_unassume = vvp_page_unassume, 420 .cpo_unassume = vvp_page_unassume,
403 .cpo_disown = vvp_page_disown, 421 .cpo_disown = vvp_page_disown,
404 .cpo_vmpage = ccc_page_vmpage,
405 .cpo_discard = vvp_page_discard, 422 .cpo_discard = vvp_page_discard,
406 .cpo_delete = vvp_page_delete, 423 .cpo_delete = vvp_page_delete,
407 .cpo_unmap = vvp_page_unmap,
408 .cpo_export = vvp_page_export, 424 .cpo_export = vvp_page_export,
409 .cpo_is_vmlocked = vvp_page_is_vmlocked, 425 .cpo_is_vmlocked = vvp_page_is_vmlocked,
410 .cpo_fini = vvp_page_fini, 426 .cpo_fini = vvp_page_fini,
411 .cpo_print = vvp_page_print, 427 .cpo_print = vvp_page_print,
412 .cpo_is_under_lock = ccc_page_is_under_lock, 428 .cpo_is_under_lock = vvp_page_is_under_lock,
413 .io = { 429 .io = {
414 [CRT_READ] = { 430 [CRT_READ] = {
415 .cpo_prep = vvp_page_prep_read, 431 .cpo_prep = vvp_page_prep_read,
416 .cpo_completion = vvp_page_completion_read, 432 .cpo_completion = vvp_page_completion_read,
417 .cpo_make_ready = ccc_fail, 433 .cpo_make_ready = vvp_page_fail,
418 }, 434 },
419 [CRT_WRITE] = { 435 [CRT_WRITE] = {
420 .cpo_prep = vvp_page_prep_write, 436 .cpo_prep = vvp_page_prep_write,
421 .cpo_completion = vvp_page_completion_write, 437 .cpo_completion = vvp_page_completion_write,
422 .cpo_make_ready = vvp_page_make_ready, 438 .cpo_make_ready = vvp_page_make_ready,
423 } 439 },
424 } 440 },
425}; 441};
426 442
443static int vvp_transient_page_prep(const struct lu_env *env,
444 const struct cl_page_slice *slice,
445 struct cl_io *unused)
446{
447 /* transient page should always be sent. */
448 return 0;
449}
450
427static void vvp_transient_page_verify(const struct cl_page *page) 451static void vvp_transient_page_verify(const struct cl_page *page)
428{ 452{
429 struct inode *inode = ccc_object_inode(page->cp_obj); 453 struct inode *inode = vvp_object_inode(page->cp_obj);
430 454
431 LASSERT(!inode_trylock(inode)); 455 LASSERT(!inode_trylock(inode));
432} 456}
@@ -477,7 +501,7 @@ static void vvp_transient_page_discard(const struct lu_env *env,
477static int vvp_transient_page_is_vmlocked(const struct lu_env *env, 501static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
478 const struct cl_page_slice *slice) 502 const struct cl_page_slice *slice)
479{ 503{
480 struct inode *inode = ccc_object_inode(slice->cpl_obj); 504 struct inode *inode = vvp_object_inode(slice->cpl_obj);
481 int locked; 505 int locked;
482 506
483 locked = !inode_trylock(inode); 507 locked = !inode_trylock(inode);
@@ -497,13 +521,13 @@ vvp_transient_page_completion(const struct lu_env *env,
497static void vvp_transient_page_fini(const struct lu_env *env, 521static void vvp_transient_page_fini(const struct lu_env *env,
498 struct cl_page_slice *slice) 522 struct cl_page_slice *slice)
499{ 523{
500 struct ccc_page *cp = cl2ccc_page(slice); 524 struct vvp_page *vpg = cl2vvp_page(slice);
501 struct cl_page *clp = slice->cpl_page; 525 struct cl_page *clp = slice->cpl_page;
502 struct ccc_object *clobj = cl2ccc(clp->cp_obj); 526 struct vvp_object *clobj = cl2vvp(clp->cp_obj);
503 527
504 vvp_page_fini_common(cp); 528 vvp_page_fini_common(vpg);
505 LASSERT(!inode_trylock(clobj->cob_inode)); 529 LASSERT(!inode_trylock(clobj->vob_inode));
506 clobj->cob_transient_pages--; 530 clobj->vob_transient_pages--;
507} 531}
508 532
509static const struct cl_page_operations vvp_transient_page_ops = { 533static const struct cl_page_operations vvp_transient_page_ops = {
@@ -512,45 +536,48 @@ static const struct cl_page_operations vvp_transient_page_ops = {
512 .cpo_unassume = vvp_transient_page_unassume, 536 .cpo_unassume = vvp_transient_page_unassume,
513 .cpo_disown = vvp_transient_page_disown, 537 .cpo_disown = vvp_transient_page_disown,
514 .cpo_discard = vvp_transient_page_discard, 538 .cpo_discard = vvp_transient_page_discard,
515 .cpo_vmpage = ccc_page_vmpage,
516 .cpo_fini = vvp_transient_page_fini, 539 .cpo_fini = vvp_transient_page_fini,
517 .cpo_is_vmlocked = vvp_transient_page_is_vmlocked, 540 .cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
518 .cpo_print = vvp_page_print, 541 .cpo_print = vvp_page_print,
519 .cpo_is_under_lock = ccc_page_is_under_lock, 542 .cpo_is_under_lock = vvp_page_is_under_lock,
520 .io = { 543 .io = {
521 [CRT_READ] = { 544 [CRT_READ] = {
522 .cpo_prep = ccc_transient_page_prep, 545 .cpo_prep = vvp_transient_page_prep,
523 .cpo_completion = vvp_transient_page_completion, 546 .cpo_completion = vvp_transient_page_completion,
524 }, 547 },
525 [CRT_WRITE] = { 548 [CRT_WRITE] = {
526 .cpo_prep = ccc_transient_page_prep, 549 .cpo_prep = vvp_transient_page_prep,
527 .cpo_completion = vvp_transient_page_completion, 550 .cpo_completion = vvp_transient_page_completion,
528 } 551 }
529 } 552 }
530}; 553};
531 554
532int vvp_page_init(const struct lu_env *env, struct cl_object *obj, 555int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
533 struct cl_page *page, struct page *vmpage) 556 struct cl_page *page, pgoff_t index)
534{ 557{
535 struct ccc_page *cpg = cl_object_page_slice(obj, page); 558 struct vvp_page *vpg = cl_object_page_slice(obj, page);
559 struct page *vmpage = page->cp_vmpage;
536 560
537 CLOBINVRNT(env, obj, ccc_object_invariant(obj)); 561 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
538 562
539 cpg->cpg_page = vmpage; 563 vpg->vpg_page = vmpage;
540 get_page(vmpage); 564 get_page(vmpage);
541 565
542 INIT_LIST_HEAD(&cpg->cpg_pending_linkage); 566 INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
543 if (page->cp_type == CPT_CACHEABLE) { 567 if (page->cp_type == CPT_CACHEABLE) {
568 /* in cache, decref in vvp_page_delete */
569 atomic_inc(&page->cp_ref);
544 SetPagePrivate(vmpage); 570 SetPagePrivate(vmpage);
545 vmpage->private = (unsigned long)page; 571 vmpage->private = (unsigned long)page;
546 cl_page_slice_add(page, &cpg->cpg_cl, obj, &vvp_page_ops); 572 cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
573 &vvp_page_ops);
547 } else { 574 } else {
548 struct ccc_object *clobj = cl2ccc(obj); 575 struct vvp_object *clobj = cl2vvp(obj);
549 576
550 LASSERT(!inode_trylock(clobj->cob_inode)); 577 LASSERT(!inode_trylock(clobj->vob_inode));
551 cl_page_slice_add(page, &cpg->cpg_cl, obj, 578 cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
552 &vvp_transient_page_ops); 579 &vvp_transient_page_ops);
553 clobj->cob_transient_pages++; 580 clobj->vob_transient_pages++;
554 } 581 }
555 return 0; 582 return 0;
556} 583}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_req.c b/drivers/staging/lustre/lustre/llite/vvp_req.c
new file mode 100644
index 000000000000..fb886291a4e2
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/vvp_req.c
@@ -0,0 +1,121 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22/*
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2014, Intel Corporation.
27 */
28
29#define DEBUG_SUBSYSTEM S_LLITE
30
31#include "../include/lustre/lustre_idl.h"
32#include "../include/cl_object.h"
33#include "../include/obd.h"
34#include "../include/obd_support.h"
35#include "../include/lustre_lite.h"
36#include "llite_internal.h"
37#include "vvp_internal.h"
38
/* Downcast from the embedded cl_req_slice to its enclosing vvp_req
 * (container_of-style conversion).
 */
static inline struct vvp_req *cl2vvp_req(const struct cl_req_slice *slice)
{
	return container_of0(slice, struct vvp_req, vrq_cl);
}
43
44/**
45 * Implementation of struct cl_req_operations::cro_attr_set() for VVP
46 * layer. VVP is responsible for
47 *
48 * - o_[mac]time
49 *
50 * - o_mode
51 *
52 * - o_parent_seq
53 *
54 * - o_[ug]id
55 *
56 * - o_parent_oid
57 *
58 * - o_parent_ver
59 *
60 * - o_ioepoch,
61 *
62 */
63void vvp_req_attr_set(const struct lu_env *env,
64 const struct cl_req_slice *slice,
65 const struct cl_object *obj,
66 struct cl_req_attr *attr, u64 flags)
67{
68 struct inode *inode;
69 struct obdo *oa;
70 u32 valid_flags;
71
72 oa = attr->cra_oa;
73 inode = vvp_object_inode(obj);
74 valid_flags = OBD_MD_FLTYPE;
75
76 if (slice->crs_req->crq_type == CRT_WRITE) {
77 if (flags & OBD_MD_FLEPOCH) {
78 oa->o_valid |= OBD_MD_FLEPOCH;
79 oa->o_ioepoch = ll_i2info(inode)->lli_ioepoch;
80 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
81 OBD_MD_FLUID | OBD_MD_FLGID;
82 }
83 }
84 obdo_from_inode(oa, inode, valid_flags & flags);
85 obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
86 memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid,
87 JOBSTATS_JOBID_SIZE);
88}
89
90void vvp_req_completion(const struct lu_env *env,
91 const struct cl_req_slice *slice, int ioret)
92{
93 struct vvp_req *vrq;
94
95 if (ioret > 0)
96 cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
97
98 vrq = cl2vvp_req(slice);
99 kmem_cache_free(vvp_req_kmem, vrq);
100}
101
/* cl_req operation vector for the VVP layer; attached to every request by
 * vvp_req_init().
 */
static const struct cl_req_operations vvp_req_ops = {
	.cro_attr_set = vvp_req_attr_set,
	.cro_completion = vvp_req_completion
};
106
107int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
108 struct cl_req *req)
109{
110 struct vvp_req *vrq;
111 int result;
112
113 vrq = kmem_cache_zalloc(vvp_req_kmem, GFP_NOFS);
114 if (vrq) {
115 cl_req_slice_add(req, &vrq->vrq_cl, dev, &vvp_req_ops);
116 result = 0;
117 } else {
118 result = -ENOMEM;
119 }
120 return result;
121}
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index c671f221c28c..ed4de04381c3 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -181,8 +181,9 @@ int ll_setxattr_common(struct inode *inode, const char *name,
181 size = rc; 181 size = rc;
182 182
183 pv = (const char *)new_value; 183 pv = (const char *)new_value;
184 } else 184 } else {
185 return -EOPNOTSUPP; 185 return -EOPNOTSUPP;
186 }
186 187
187 valid |= rce_ops2valid(rce->rce_ops); 188 valid |= rce_ops2valid(rce->rce_ops);
188 } 189 }
@@ -218,8 +219,8 @@ int ll_setxattr(struct dentry *dentry, const char *name,
218 LASSERT(inode); 219 LASSERT(inode);
219 LASSERT(name); 220 LASSERT(name);
220 221
221 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n", 222 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
222 inode->i_ino, inode->i_generation, inode, name); 223 PFID(ll_inode2fid(inode)), inode, name);
223 224
224 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1); 225 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
225 226
@@ -243,12 +244,12 @@ int ll_setxattr(struct dentry *dentry, const char *name,
243 lump->lmm_stripe_offset = -1; 244 lump->lmm_stripe_offset = -1;
244 245
245 if (lump && S_ISREG(inode->i_mode)) { 246 if (lump && S_ISREG(inode->i_mode)) {
246 int flags = FMODE_WRITE; 247 __u64 it_flags = FMODE_WRITE;
247 int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ? 248 int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ?
248 sizeof(*lump) : sizeof(struct lov_user_md_v3); 249 sizeof(*lump) : sizeof(struct lov_user_md_v3);
249 250
250 rc = ll_lov_setstripe_ea_info(inode, dentry, flags, lump, 251 rc = ll_lov_setstripe_ea_info(inode, dentry, it_flags,
251 lum_size); 252 lump, lum_size);
252 /* b10667: rc always be 0 here for now */ 253 /* b10667: rc always be 0 here for now */
253 rc = 0; 254 rc = 0;
254 } else if (S_ISDIR(inode->i_mode)) { 255 } else if (S_ISDIR(inode->i_mode)) {
@@ -272,8 +273,8 @@ int ll_removexattr(struct dentry *dentry, const char *name)
272 LASSERT(inode); 273 LASSERT(inode);
273 LASSERT(name); 274 LASSERT(name);
274 275
275 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n", 276 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
276 inode->i_ino, inode->i_generation, inode, name); 277 PFID(ll_inode2fid(inode)), inode, name);
277 278
278 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1); 279 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1);
279 return ll_setxattr_common(inode, name, NULL, 0, 0, 280 return ll_setxattr_common(inode, name, NULL, 0, 0,
@@ -292,8 +293,8 @@ int ll_getxattr_common(struct inode *inode, const char *name,
292 struct rmtacl_ctl_entry *rce = NULL; 293 struct rmtacl_ctl_entry *rce = NULL;
293 struct ll_inode_info *lli = ll_i2info(inode); 294 struct ll_inode_info *lli = ll_i2info(inode);
294 295
295 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", 296 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
296 inode->i_ino, inode->i_generation, inode); 297 PFID(ll_inode2fid(inode)), inode);
297 298
298 /* listxattr have slightly different behavior from of ext3: 299 /* listxattr have slightly different behavior from of ext3:
299 * without 'user_xattr' ext3 will list all xattr names but 300 * without 'user_xattr' ext3 will list all xattr names but
@@ -338,7 +339,6 @@ int ll_getxattr_common(struct inode *inode, const char *name,
338 */ 339 */
339 if (xattr_type == XATTR_ACL_ACCESS_T && 340 if (xattr_type == XATTR_ACL_ACCESS_T &&
340 !(sbi->ll_flags & LL_SBI_RMT_CLIENT)) { 341 !(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
341
342 struct posix_acl *acl; 342 struct posix_acl *acl;
343 343
344 spin_lock(&lli->lli_lock); 344 spin_lock(&lli->lli_lock);
@@ -423,8 +423,7 @@ getxattr_nocache:
423 if (rce && rce->rce_ops == RMT_LSETFACL) { 423 if (rce && rce->rce_ops == RMT_LSETFACL) {
424 ext_acl_xattr_header *acl; 424 ext_acl_xattr_header *acl;
425 425
426 acl = lustre_posix_acl_xattr_2ext( 426 acl = lustre_posix_acl_xattr_2ext(buffer, rc);
427 (posix_acl_xattr_header *)buffer, rc);
428 if (IS_ERR(acl)) { 427 if (IS_ERR(acl)) {
429 rc = PTR_ERR(acl); 428 rc = PTR_ERR(acl);
430 goto out; 429 goto out;
@@ -457,8 +456,8 @@ ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
457 LASSERT(inode); 456 LASSERT(inode);
458 LASSERT(name); 457 LASSERT(name);
459 458
460 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n", 459 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
461 inode->i_ino, inode->i_generation, inode, name); 460 PFID(ll_inode2fid(inode)), inode, name);
462 461
463 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1); 462 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
464 463
@@ -552,8 +551,8 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
552 551
553 LASSERT(inode); 552 LASSERT(inode);
554 553
555 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", 554 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
556 inode->i_ino, inode->i_generation, inode); 555 PFID(ll_inode2fid(inode)), inode);
557 556
558 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1); 557 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1);
559 558
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 3480ce2bb3cc..d7e17abbe361 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -229,7 +229,6 @@ static int ll_xattr_cache_valid(struct ll_inode_info *lli)
229 */ 229 */
230static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli) 230static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
231{ 231{
232
233 if (!ll_xattr_cache_valid(lli)) 232 if (!ll_xattr_cache_valid(lli))
234 return 0; 233 return 0;
235 234
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index 8a0087190e23..7007e4c48035 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -42,9 +42,6 @@
42 42
43#define LMV_MAX_TGT_COUNT 128 43#define LMV_MAX_TGT_COUNT 128
44 44
45#define lmv_init_lock(lmv) mutex_lock(&lmv->init_mutex)
46#define lmv_init_unlock(lmv) mutex_unlock(&lmv->init_mutex)
47
48#define LL_IT2STR(it) \ 45#define LL_IT2STR(it) \
49 ((it) ? ldlm_it2str((it)->it_op) : "0") 46 ((it) ? ldlm_it2str((it)->it_op) : "0")
50 47
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 9abb7c2b9231..9e31f6b03f9e 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -132,8 +132,9 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
132static struct obd_uuid *lmv_get_uuid(struct obd_export *exp) 132static struct obd_uuid *lmv_get_uuid(struct obd_export *exp)
133{ 133{
134 struct lmv_obd *lmv = &exp->exp_obd->u.lmv; 134 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
135 struct lmv_tgt_desc *tgt = lmv->tgts[0];
135 136
136 return obd_get_uuid(lmv->tgts[0]->ltd_exp); 137 return tgt ? obd_get_uuid(tgt->ltd_exp) : NULL;
137} 138}
138 139
139static int lmv_notify(struct obd_device *obd, struct obd_device *watched, 140static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
@@ -249,7 +250,6 @@ static int lmv_connect(const struct lu_env *env,
249 250
250static void lmv_set_timeouts(struct obd_device *obd) 251static void lmv_set_timeouts(struct obd_device *obd)
251{ 252{
252 struct lmv_tgt_desc *tgt;
253 struct lmv_obd *lmv; 253 struct lmv_obd *lmv;
254 int i; 254 int i;
255 255
@@ -261,8 +261,10 @@ static void lmv_set_timeouts(struct obd_device *obd)
261 return; 261 return;
262 262
263 for (i = 0; i < lmv->desc.ld_tgt_count; i++) { 263 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
264 struct lmv_tgt_desc *tgt = lmv->tgts[i];
265
264 tgt = lmv->tgts[i]; 266 tgt = lmv->tgts[i];
265 if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0) 267 if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
266 continue; 268 continue;
267 269
268 obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS), 270 obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS),
@@ -302,13 +304,14 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize,
302 return 0; 304 return 0;
303 305
304 for (i = 0; i < lmv->desc.ld_tgt_count; i++) { 306 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
305 if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp || 307 struct lmv_tgt_desc *tgt = lmv->tgts[i];
306 lmv->tgts[i]->ltd_active == 0) { 308
309 if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) {
307 CWARN("%s: NULL export for %d\n", obd->obd_name, i); 310 CWARN("%s: NULL export for %d\n", obd->obd_name, i);
308 continue; 311 continue;
309 } 312 }
310 313
311 rc = md_init_ea_size(lmv->tgts[i]->ltd_exp, easize, def_easize, 314 rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize,
312 cookiesize, def_cookiesize); 315 cookiesize, def_cookiesize);
313 if (rc) { 316 if (rc) {
314 CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n", 317 CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n",
@@ -425,7 +428,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
425 428
426 CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index); 429 CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
427 430
428 lmv_init_lock(lmv); 431 mutex_lock(&lmv->lmv_init_mutex);
429 432
430 if (lmv->desc.ld_tgt_count == 0) { 433 if (lmv->desc.ld_tgt_count == 0) {
431 struct obd_device *mdc_obd; 434 struct obd_device *mdc_obd;
@@ -433,7 +436,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
433 mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME, 436 mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
434 &obd->obd_uuid); 437 &obd->obd_uuid);
435 if (!mdc_obd) { 438 if (!mdc_obd) {
436 lmv_init_unlock(lmv); 439 mutex_unlock(&lmv->lmv_init_mutex);
437 CERROR("%s: Target %s not attached: rc = %d\n", 440 CERROR("%s: Target %s not attached: rc = %d\n",
438 obd->obd_name, uuidp->uuid, -EINVAL); 441 obd->obd_name, uuidp->uuid, -EINVAL);
439 return -EINVAL; 442 return -EINVAL;
@@ -445,7 +448,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
445 CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n", 448 CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
446 obd->obd_name, 449 obd->obd_name,
447 obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST); 450 obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST);
448 lmv_init_unlock(lmv); 451 mutex_unlock(&lmv->lmv_init_mutex);
449 return -EEXIST; 452 return -EEXIST;
450 } 453 }
451 454
@@ -459,7 +462,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
459 newsize <<= 1; 462 newsize <<= 1;
460 newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS); 463 newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS);
461 if (!newtgts) { 464 if (!newtgts) {
462 lmv_init_unlock(lmv); 465 mutex_unlock(&lmv->lmv_init_mutex);
463 return -ENOMEM; 466 return -ENOMEM;
464 } 467 }
465 468
@@ -481,7 +484,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
481 484
482 tgt = kzalloc(sizeof(*tgt), GFP_NOFS); 485 tgt = kzalloc(sizeof(*tgt), GFP_NOFS);
483 if (!tgt) { 486 if (!tgt) {
484 lmv_init_unlock(lmv); 487 mutex_unlock(&lmv->lmv_init_mutex);
485 return -ENOMEM; 488 return -ENOMEM;
486 } 489 }
487 490
@@ -507,7 +510,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
507 } 510 }
508 } 511 }
509 512
510 lmv_init_unlock(lmv); 513 mutex_unlock(&lmv->lmv_init_mutex);
511 return rc; 514 return rc;
512} 515}
513 516
@@ -522,18 +525,27 @@ int lmv_check_connect(struct obd_device *obd)
522 if (lmv->connected) 525 if (lmv->connected)
523 return 0; 526 return 0;
524 527
525 lmv_init_lock(lmv); 528 mutex_lock(&lmv->lmv_init_mutex);
526 if (lmv->connected) { 529 if (lmv->connected) {
527 lmv_init_unlock(lmv); 530 mutex_unlock(&lmv->lmv_init_mutex);
528 return 0; 531 return 0;
529 } 532 }
530 533
531 if (lmv->desc.ld_tgt_count == 0) { 534 if (lmv->desc.ld_tgt_count == 0) {
532 lmv_init_unlock(lmv); 535 mutex_unlock(&lmv->lmv_init_mutex);
533 CERROR("%s: no targets configured.\n", obd->obd_name); 536 CERROR("%s: no targets configured.\n", obd->obd_name);
534 return -EINVAL; 537 return -EINVAL;
535 } 538 }
536 539
540 LASSERT(lmv->tgts);
541
542 if (!lmv->tgts[0]) {
543 mutex_unlock(&lmv->lmv_init_mutex);
544 CERROR("%s: no target configured for index 0.\n",
545 obd->obd_name);
546 return -EINVAL;
547 }
548
537 CDEBUG(D_CONFIG, "Time to connect %s to %s\n", 549 CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
538 lmv->cluuid.uuid, obd->obd_name); 550 lmv->cluuid.uuid, obd->obd_name);
539 551
@@ -551,7 +563,7 @@ int lmv_check_connect(struct obd_device *obd)
551 lmv->connected = 1; 563 lmv->connected = 1;
552 easize = lmv_get_easize(lmv); 564 easize = lmv_get_easize(lmv);
553 lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0); 565 lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
554 lmv_init_unlock(lmv); 566 mutex_unlock(&lmv->lmv_init_mutex);
555 return 0; 567 return 0;
556 568
557 out_disc: 569 out_disc:
@@ -572,7 +584,7 @@ int lmv_check_connect(struct obd_device *obd)
572 } 584 }
573 } 585 }
574 class_disconnect(lmv->exp); 586 class_disconnect(lmv->exp);
575 lmv_init_unlock(lmv); 587 mutex_unlock(&lmv->lmv_init_mutex);
576 return rc; 588 return rc;
577} 589}
578 590
@@ -796,6 +808,11 @@ static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
796 808
797 /* unregister request (call from llapi_hsm_copytool_fini) */ 809 /* unregister request (call from llapi_hsm_copytool_fini) */
798 for (i = 0; i < lmv->desc.ld_tgt_count; i++) { 810 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
811 struct lmv_tgt_desc *tgt = lmv->tgts[i];
812
813 if (!tgt || !tgt->ltd_exp)
814 continue;
815
799 /* best effort: try to clean as much as possible 816 /* best effort: try to clean as much as possible
800 * (continue on error) 817 * (continue on error)
801 */ 818 */
@@ -825,20 +842,28 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
825 * except if it because of inactive target. 842 * except if it because of inactive target.
826 */ 843 */
827 for (i = 0; i < lmv->desc.ld_tgt_count; i++) { 844 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
828 err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg); 845 struct lmv_tgt_desc *tgt = lmv->tgts[i];
846
847 if (!tgt || !tgt->ltd_exp)
848 continue;
849
850 err = obd_iocontrol(cmd, tgt->ltd_exp, len, lk, uarg);
829 if (err) { 851 if (err) {
830 if (lmv->tgts[i]->ltd_active) { 852 if (tgt->ltd_active) {
831 /* permanent error */ 853 /* permanent error */
832 CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n", 854 CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
833 lmv->tgts[i]->ltd_uuid.uuid, 855 tgt->ltd_uuid.uuid, i, cmd, err);
834 i, cmd, err);
835 rc = err; 856 rc = err;
836 lk->lk_flags |= LK_FLG_STOP; 857 lk->lk_flags |= LK_FLG_STOP;
837 /* unregister from previous MDS */ 858 /* unregister from previous MDS */
838 for (j = 0; j < i; j++) 859 for (j = 0; j < i; j++) {
839 obd_iocontrol(cmd, 860 tgt = lmv->tgts[j];
840 lmv->tgts[j]->ltd_exp, 861
841 len, lk, uarg); 862 if (!tgt || !tgt->ltd_exp)
863 continue;
864 obd_iocontrol(cmd, tgt->ltd_exp, len,
865 lk, uarg);
866 }
842 return rc; 867 return rc;
843 } 868 }
844 /* else: transient error. 869 /* else: transient error.
@@ -877,6 +902,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
877{ 902{
878 struct obd_device *obddev = class_exp2obd(exp); 903 struct obd_device *obddev = class_exp2obd(exp);
879 struct lmv_obd *lmv = &obddev->u.lmv; 904 struct lmv_obd *lmv = &obddev->u.lmv;
905 struct lmv_tgt_desc *tgt = NULL;
880 int i = 0; 906 int i = 0;
881 int rc = 0; 907 int rc = 0;
882 int set = 0; 908 int set = 0;
@@ -896,10 +922,11 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
896 if (index >= count) 922 if (index >= count)
897 return -ENODEV; 923 return -ENODEV;
898 924
899 if (!lmv->tgts[index] || lmv->tgts[index]->ltd_active == 0) 925 tgt = lmv->tgts[index];
926 if (!tgt || !tgt->ltd_active)
900 return -ENODATA; 927 return -ENODATA;
901 928
902 mdc_obd = class_exp2obd(lmv->tgts[index]->ltd_exp); 929 mdc_obd = class_exp2obd(tgt->ltd_exp);
903 if (!mdc_obd) 930 if (!mdc_obd)
904 return -EINVAL; 931 return -EINVAL;
905 932
@@ -909,7 +936,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
909 (int)sizeof(struct obd_uuid)))) 936 (int)sizeof(struct obd_uuid))))
910 return -EFAULT; 937 return -EFAULT;
911 938
912 rc = obd_statfs(NULL, lmv->tgts[index]->ltd_exp, &stat_buf, 939 rc = obd_statfs(NULL, tgt->ltd_exp, &stat_buf,
913 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), 940 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
914 0); 941 0);
915 if (rc) 942 if (rc)
@@ -922,11 +949,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
922 } 949 }
923 case OBD_IOC_QUOTACTL: { 950 case OBD_IOC_QUOTACTL: {
924 struct if_quotactl *qctl = karg; 951 struct if_quotactl *qctl = karg;
925 struct lmv_tgt_desc *tgt = NULL;
926 struct obd_quotactl *oqctl; 952 struct obd_quotactl *oqctl;
927 953
928 if (qctl->qc_valid == QC_MDTIDX) { 954 if (qctl->qc_valid == QC_MDTIDX) {
929 if (qctl->qc_idx < 0 || count <= qctl->qc_idx) 955 if (count <= qctl->qc_idx)
930 return -EINVAL; 956 return -EINVAL;
931 957
932 tgt = lmv->tgts[qctl->qc_idx]; 958 tgt = lmv->tgts[qctl->qc_idx];
@@ -975,18 +1001,18 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
975 if (icc->icc_mdtindex >= count) 1001 if (icc->icc_mdtindex >= count)
976 return -ENODEV; 1002 return -ENODEV;
977 1003
978 if (!lmv->tgts[icc->icc_mdtindex] || 1004 tgt = lmv->tgts[icc->icc_mdtindex];
979 !lmv->tgts[icc->icc_mdtindex]->ltd_exp || 1005 if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
980 lmv->tgts[icc->icc_mdtindex]->ltd_active == 0)
981 return -ENODEV; 1006 return -ENODEV;
982 rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex]->ltd_exp, 1007 rc = obd_iocontrol(cmd, tgt->ltd_exp, sizeof(*icc), icc, NULL);
983 sizeof(*icc), icc, NULL);
984 break; 1008 break;
985 } 1009 }
986 case LL_IOC_GET_CONNECT_FLAGS: { 1010 case LL_IOC_GET_CONNECT_FLAGS: {
987 if (!lmv->tgts[0]) 1011 tgt = lmv->tgts[0];
1012
1013 if (!tgt || !tgt->ltd_exp)
988 return -ENODATA; 1014 return -ENODATA;
989 rc = obd_iocontrol(cmd, lmv->tgts[0]->ltd_exp, len, karg, uarg); 1015 rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
990 break; 1016 break;
991 } 1017 }
992 case OBD_IOC_FID2PATH: { 1018 case OBD_IOC_FID2PATH: {
@@ -997,7 +1023,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
997 case LL_IOC_HSM_STATE_SET: 1023 case LL_IOC_HSM_STATE_SET:
998 case LL_IOC_HSM_ACTION: { 1024 case LL_IOC_HSM_ACTION: {
999 struct md_op_data *op_data = karg; 1025 struct md_op_data *op_data = karg;
1000 struct lmv_tgt_desc *tgt;
1001 1026
1002 tgt = lmv_find_target(lmv, &op_data->op_fid1); 1027 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1003 if (IS_ERR(tgt)) 1028 if (IS_ERR(tgt))
@@ -1011,7 +1036,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
1011 } 1036 }
1012 case LL_IOC_HSM_PROGRESS: { 1037 case LL_IOC_HSM_PROGRESS: {
1013 const struct hsm_progress_kernel *hpk = karg; 1038 const struct hsm_progress_kernel *hpk = karg;
1014 struct lmv_tgt_desc *tgt;
1015 1039
1016 tgt = lmv_find_target(lmv, &hpk->hpk_fid); 1040 tgt = lmv_find_target(lmv, &hpk->hpk_fid);
1017 if (IS_ERR(tgt)) 1041 if (IS_ERR(tgt))
@@ -1021,7 +1045,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
1021 } 1045 }
1022 case LL_IOC_HSM_REQUEST: { 1046 case LL_IOC_HSM_REQUEST: {
1023 struct hsm_user_request *hur = karg; 1047 struct hsm_user_request *hur = karg;
1024 struct lmv_tgt_desc *tgt;
1025 unsigned int reqcount = hur->hur_request.hr_itemcount; 1048 unsigned int reqcount = hur->hur_request.hr_itemcount;
1026 1049
1027 if (reqcount == 0) 1050 if (reqcount == 0)
@@ -1044,7 +1067,11 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
1044 int rc1; 1067 int rc1;
1045 struct hsm_user_request *req; 1068 struct hsm_user_request *req;
1046 1069
1047 nr = lmv_hsm_req_count(lmv, hur, lmv->tgts[i]); 1070 tgt = lmv->tgts[i];
1071 if (!tgt || !tgt->ltd_exp)
1072 continue;
1073
1074 nr = lmv_hsm_req_count(lmv, hur, tgt);
1048 if (nr == 0) /* nothing for this MDS */ 1075 if (nr == 0) /* nothing for this MDS */
1049 continue; 1076 continue;
1050 1077
@@ -1056,10 +1083,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
1056 if (!req) 1083 if (!req)
1057 return -ENOMEM; 1084 return -ENOMEM;
1058 1085
1059 lmv_hsm_req_build(lmv, hur, lmv->tgts[i], req); 1086 lmv_hsm_req_build(lmv, hur, tgt, req);
1060 1087
1061 rc1 = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, 1088 rc1 = obd_iocontrol(cmd, tgt->ltd_exp, reqlen,
1062 reqlen, req, uarg); 1089 req, uarg);
1063 if (rc1 != 0 && rc == 0) 1090 if (rc1 != 0 && rc == 0)
1064 rc = rc1; 1091 rc = rc1;
1065 kvfree(req); 1092 kvfree(req);
@@ -1103,27 +1130,27 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
1103 struct obd_device *mdc_obd; 1130 struct obd_device *mdc_obd;
1104 int err; 1131 int err;
1105 1132
1106 if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) 1133 tgt = lmv->tgts[i];
1134 if (!tgt || !tgt->ltd_exp)
1107 continue; 1135 continue;
1108 /* ll_umount_begin() sets force flag but for lmv, not 1136 /* ll_umount_begin() sets force flag but for lmv, not
1109 * mdc. Let's pass it through 1137 * mdc. Let's pass it through
1110 */ 1138 */
1111 mdc_obd = class_exp2obd(lmv->tgts[i]->ltd_exp); 1139 mdc_obd = class_exp2obd(tgt->ltd_exp);
1112 mdc_obd->obd_force = obddev->obd_force; 1140 mdc_obd->obd_force = obddev->obd_force;
1113 err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, 1141 err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
1114 karg, uarg);
1115 if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) { 1142 if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
1116 return err; 1143 return err;
1117 } else if (err) { 1144 } else if (err) {
1118 if (lmv->tgts[i]->ltd_active) { 1145 if (tgt->ltd_active) {
1119 CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n", 1146 CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
1120 lmv->tgts[i]->ltd_uuid.uuid, 1147 tgt->ltd_uuid.uuid, i, cmd, err);
1121 i, cmd, err);
1122 if (!rc) 1148 if (!rc)
1123 rc = err; 1149 rc = err;
1124 } 1150 }
1125 } else 1151 } else {
1126 set = 1; 1152 set = 1;
1153 }
1127 } 1154 }
1128 if (!set && !rc) 1155 if (!set && !rc)
1129 rc = -EIO; 1156 rc = -EIO;
@@ -1269,7 +1296,7 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
1269 lmv->lmv_placement = PLACEMENT_CHAR_POLICY; 1296 lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
1270 1297
1271 spin_lock_init(&lmv->lmv_lock); 1298 spin_lock_init(&lmv->lmv_lock);
1272 mutex_init(&lmv->init_mutex); 1299 mutex_init(&lmv->lmv_init_mutex);
1273 1300
1274 lprocfs_lmv_init_vars(&lvars); 1301 lprocfs_lmv_init_vars(&lvars);
1275 1302
@@ -2071,7 +2098,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
2071 dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE); 2098 dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
2072 2099
2073 /* Check if we've reached the end of the CFS_PAGE. */ 2100 /* Check if we've reached the end of the CFS_PAGE. */
2074 if (!((unsigned long)dp & ~CFS_PAGE_MASK)) 2101 if (!((unsigned long)dp & ~PAGE_MASK))
2075 break; 2102 break;
2076 2103
2077 /* Save the hash and flags of this lu_dirpage. */ 2104 /* Save the hash and flags of this lu_dirpage. */
@@ -2268,7 +2295,6 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
2268 2295
2269 lmv = &obd->u.lmv; 2296 lmv = &obd->u.lmv;
2270 if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) { 2297 if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) {
2271 struct lmv_tgt_desc *tgt;
2272 int i; 2298 int i;
2273 2299
2274 rc = lmv_check_connect(obd); 2300 rc = lmv_check_connect(obd);
@@ -2277,7 +2303,8 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
2277 2303
2278 LASSERT(*vallen == sizeof(__u32)); 2304 LASSERT(*vallen == sizeof(__u32));
2279 for (i = 0; i < lmv->desc.ld_tgt_count; i++) { 2305 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2280 tgt = lmv->tgts[i]; 2306 struct lmv_tgt_desc *tgt = lmv->tgts[i];
2307
2281 /* 2308 /*
2282 * All tgts should be connected when this gets called. 2309 * All tgts should be connected when this gets called.
2283 */ 2310 */
@@ -2466,12 +2493,13 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
2466 LASSERT(fid); 2493 LASSERT(fid);
2467 2494
2468 for (i = 0; i < lmv->desc.ld_tgt_count; i++) { 2495 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2469 if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp || 2496 struct lmv_tgt_desc *tgt = lmv->tgts[i];
2470 lmv->tgts[i]->ltd_active == 0) 2497
2498 if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
2471 continue; 2499 continue;
2472 2500
2473 err = md_cancel_unused(lmv->tgts[i]->ltd_exp, fid, 2501 err = md_cancel_unused(tgt->ltd_exp, fid, policy, mode, flags,
2474 policy, mode, flags, opaque); 2502 opaque);
2475 if (!rc) 2503 if (!rc)
2476 rc = err; 2504 rc = err;
2477 } 2505 }
@@ -2482,9 +2510,13 @@ static int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
2482 __u64 *bits) 2510 __u64 *bits)
2483{ 2511{
2484 struct lmv_obd *lmv = &exp->exp_obd->u.lmv; 2512 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
2513 struct lmv_tgt_desc *tgt = lmv->tgts[0];
2485 int rc; 2514 int rc;
2486 2515
2487 rc = md_set_lock_data(lmv->tgts[0]->ltd_exp, lockh, data, bits); 2516 if (!tgt || !tgt->ltd_exp)
2517 return -EINVAL;
2518
2519 rc = md_set_lock_data(tgt->ltd_exp, lockh, data, bits);
2488 return rc; 2520 return rc;
2489} 2521}
2490 2522
@@ -2509,12 +2541,13 @@ static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
2509 * one fid was created in. 2541 * one fid was created in.
2510 */ 2542 */
2511 for (i = 0; i < lmv->desc.ld_tgt_count; i++) { 2543 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2512 if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp || 2544 struct lmv_tgt_desc *tgt = lmv->tgts[i];
2513 lmv->tgts[i]->ltd_active == 0) 2545
2546 if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
2514 continue; 2547 continue;
2515 2548
2516 rc = md_lock_match(lmv->tgts[i]->ltd_exp, flags, fid, 2549 rc = md_lock_match(tgt->ltd_exp, flags, fid, type, policy, mode,
2517 type, policy, mode, lockh); 2550 lockh);
2518 if (rc) 2551 if (rc)
2519 return rc; 2552 return rc;
2520 } 2553 }
@@ -2529,18 +2562,24 @@ static int lmv_get_lustre_md(struct obd_export *exp,
2529 struct lustre_md *md) 2562 struct lustre_md *md)
2530{ 2563{
2531 struct lmv_obd *lmv = &exp->exp_obd->u.lmv; 2564 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
2565 struct lmv_tgt_desc *tgt = lmv->tgts[0];
2532 2566
2533 return md_get_lustre_md(lmv->tgts[0]->ltd_exp, req, dt_exp, md_exp, md); 2567 if (!tgt || !tgt->ltd_exp)
2568 return -EINVAL;
2569 return md_get_lustre_md(tgt->ltd_exp, req, dt_exp, md_exp, md);
2534} 2570}
2535 2571
2536static int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md) 2572static int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
2537{ 2573{
2538 struct obd_device *obd = exp->exp_obd; 2574 struct obd_device *obd = exp->exp_obd;
2539 struct lmv_obd *lmv = &obd->u.lmv; 2575 struct lmv_obd *lmv = &obd->u.lmv;
2576 struct lmv_tgt_desc *tgt = lmv->tgts[0];
2540 2577
2541 if (md->mea) 2578 if (md->mea)
2542 obd_free_memmd(exp, (void *)&md->mea); 2579 obd_free_memmd(exp, (void *)&md->mea);
2543 return md_free_lustre_md(lmv->tgts[0]->ltd_exp, md); 2580 if (!tgt || !tgt->ltd_exp)
2581 return -EINVAL;
2582 return md_free_lustre_md(tgt->ltd_exp, md);
2544} 2583}
2545 2584
2546static int lmv_set_open_replay_data(struct obd_export *exp, 2585static int lmv_set_open_replay_data(struct obd_export *exp,
@@ -2649,7 +2688,8 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
2649 int rc = 0, i; 2688 int rc = 0, i;
2650 __u64 curspace, curinodes; 2689 __u64 curspace, curinodes;
2651 2690
2652 if (!lmv->desc.ld_tgt_count || !tgt->ltd_active) { 2691 if (!tgt || !tgt->ltd_exp || !tgt->ltd_active ||
2692 !lmv->desc.ld_tgt_count) {
2653 CERROR("master lmv inactive\n"); 2693 CERROR("master lmv inactive\n");
2654 return -EIO; 2694 return -EIO;
2655 } 2695 }
@@ -2665,12 +2705,8 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
2665 2705
2666 tgt = lmv->tgts[i]; 2706 tgt = lmv->tgts[i];
2667 2707
2668 if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0) 2708 if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
2669 continue; 2709 continue;
2670 if (!tgt->ltd_active) {
2671 CDEBUG(D_HA, "mdt %d is inactive.\n", i);
2672 continue;
2673 }
2674 2710
2675 err = obd_quotactl(tgt->ltd_exp, oqctl); 2711 err = obd_quotactl(tgt->ltd_exp, oqctl);
2676 if (err) { 2712 if (err) {
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 7dd3162b51e9..ac9744e887ae 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -73,19 +73,6 @@
73 * - top-page keeps a reference to its sub-page, and destroys it when it 73 * - top-page keeps a reference to its sub-page, and destroys it when it
74 * is destroyed. 74 * is destroyed.
75 * 75 *
76 * - sub-lock keep a reference to its top-locks. Top-lock keeps a
77 * reference (and a hold, see cl_lock_hold()) on its sub-locks when it
78 * actively using them (that is, in cl_lock_state::CLS_QUEUING,
79 * cl_lock_state::CLS_ENQUEUED, cl_lock_state::CLS_HELD states). When
80 * moving into cl_lock_state::CLS_CACHED state, top-lock releases a
81 * hold. From this moment top-lock has only a 'weak' reference to its
82 * sub-locks. This reference is protected by top-lock
83 * cl_lock::cll_guard, and will be automatically cleared by the sub-lock
84 * when the latter is destroyed. When a sub-lock is canceled, a
85 * reference to it is removed from the top-lock array, and top-lock is
86 * moved into CLS_NEW state. It is guaranteed that all sub-locks exist
87 * while their top-lock is in CLS_HELD or CLS_CACHED states.
88 *
89 * - IO's are not reference counted. 76 * - IO's are not reference counted.
90 * 77 *
91 * To implement a connection between top and sub entities, lov layer is split 78 * To implement a connection between top and sub entities, lov layer is split
@@ -281,24 +268,17 @@ struct lov_object {
281}; 268};
282 269
283/** 270/**
284 * Flags that top-lock can set on each of its sub-locks.
285 */
286enum lov_sub_flags {
287 /** Top-lock acquired a hold (cl_lock_hold()) on a sub-lock. */
288 LSF_HELD = 1 << 0
289};
290
291/**
292 * State lov_lock keeps for each sub-lock. 271 * State lov_lock keeps for each sub-lock.
293 */ 272 */
294struct lov_lock_sub { 273struct lov_lock_sub {
295 /** sub-lock itself */ 274 /** sub-lock itself */
296 struct lovsub_lock *sub_lock; 275 struct cl_lock sub_lock;
297 /** An array of per-sub-lock flags, taken from enum lov_sub_flags */ 276 /** Set if the sublock has ever been enqueued, meaning it may
298 unsigned sub_flags; 277 * hold resources of underlying layers
278 */
279 unsigned int sub_is_enqueued:1,
280 sub_initialized:1;
299 int sub_stripe; 281 int sub_stripe;
300 struct cl_lock_descr sub_descr;
301 struct cl_lock_descr sub_got;
302}; 282};
303 283
304/** 284/**
@@ -308,59 +288,8 @@ struct lov_lock {
308 struct cl_lock_slice lls_cl; 288 struct cl_lock_slice lls_cl;
309 /** Number of sub-locks in this lock */ 289 /** Number of sub-locks in this lock */
310 int lls_nr; 290 int lls_nr;
311 /** 291 /** sublock array */
312 * Number of existing sub-locks. 292 struct lov_lock_sub lls_sub[0];
313 */
314 unsigned lls_nr_filled;
315 /**
316 * Set when sub-lock was canceled, while top-lock was being
317 * used, or unused.
318 */
319 unsigned int lls_cancel_race:1;
320 /**
321 * An array of sub-locks
322 *
323 * There are two issues with managing sub-locks:
324 *
325 * - sub-locks are concurrently canceled, and
326 *
327 * - sub-locks are shared with other top-locks.
328 *
329 * To manage cancellation, top-lock acquires a hold on a sublock
330 * (lov_sublock_adopt()) when the latter is inserted into
331 * lov_lock::lls_sub[]. This hold is released (lov_sublock_release())
332 * when top-lock is going into CLS_CACHED state or destroyed. Hold
333 * prevents sub-lock from cancellation.
334 *
335 * Sub-lock sharing means, among other things, that top-lock that is
336 * in the process of creation (i.e., not yet inserted into lock list)
337 * is already accessible to other threads once at least one of its
338 * sub-locks is created, see lov_lock_sub_init().
339 *
340 * Sub-lock can be in one of the following states:
341 *
342 * - doesn't exist, lov_lock::lls_sub[]::sub_lock == NULL. Such
343 * sub-lock was either never created (top-lock is in CLS_NEW
344 * state), or it was created, then canceled, then destroyed
345 * (lov_lock_unlink() cleared sub-lock pointer in the top-lock).
346 *
347 * - sub-lock exists and is on
348 * hold. (lov_lock::lls_sub[]::sub_flags & LSF_HELD). This is a
349 * normal state of a sub-lock in CLS_HELD and CLS_CACHED states
350 * of a top-lock.
351 *
352 * - sub-lock exists, but is not held by the top-lock. This
353 * happens after top-lock released a hold on sub-locks before
354 * going into cache (lov_lock_unuse()).
355 *
356 * \todo To support wide-striping, array has to be replaced with a set
357 * of queues to avoid scanning.
358 */
359 struct lov_lock_sub *lls_sub;
360 /**
361 * Original description with which lock was enqueued.
362 */
363 struct cl_lock_descr lls_orig;
364}; 293};
365 294
366struct lov_page { 295struct lov_page {
@@ -444,8 +373,9 @@ struct lov_thread_info {
444 struct cl_lock_descr lti_ldescr; 373 struct cl_lock_descr lti_ldescr;
445 struct ost_lvb lti_lvb; 374 struct ost_lvb lti_lvb;
446 struct cl_2queue lti_cl2q; 375 struct cl_2queue lti_cl2q;
447 struct cl_lock_closure lti_closure; 376 struct cl_page_list lti_plist;
448 wait_queue_t lti_waiter; 377 wait_queue_t lti_waiter;
378 struct cl_attr lti_attr;
449}; 379};
450 380
451/** 381/**
@@ -611,14 +541,13 @@ int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
611 const struct cl_lock_descr *d, int idx); 541 const struct cl_lock_descr *d, int idx);
612 542
613int lov_page_init(const struct lu_env *env, struct cl_object *ob, 543int lov_page_init(const struct lu_env *env, struct cl_object *ob,
614 struct cl_page *page, struct page *vmpage); 544 struct cl_page *page, pgoff_t index);
615int lovsub_page_init(const struct lu_env *env, struct cl_object *ob, 545int lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
616 struct cl_page *page, struct page *vmpage); 546 struct cl_page *page, pgoff_t index);
617
618int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj, 547int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
619 struct cl_page *page, struct page *vmpage); 548 struct cl_page *page, pgoff_t index);
620int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, 549int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
621 struct cl_page *page, struct page *vmpage); 550 struct cl_page *page, pgoff_t index);
622struct lu_object *lov_object_alloc(const struct lu_env *env, 551struct lu_object *lov_object_alloc(const struct lu_env *env,
623 const struct lu_object_header *hdr, 552 const struct lu_object_header *hdr,
624 struct lu_device *dev); 553 struct lu_device *dev);
@@ -631,6 +560,7 @@ struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
631 struct lovsub_lock *sub); 560 struct lovsub_lock *sub);
632struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio, 561struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
633 const struct cl_page_slice *slice); 562 const struct cl_page_slice *slice);
563int lov_page_stripe(const struct cl_page *page);
634 564
635#define lov_foreach_target(lov, var) \ 565#define lov_foreach_target(lov, var) \
636 for (var = 0; var < lov_targets_nr(lov); ++var) 566 for (var = 0; var < lov_targets_nr(lov); ++var)
@@ -789,11 +719,6 @@ static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
789 return container_of0(slice, struct lovsub_req, lsrq_cl); 719 return container_of0(slice, struct lovsub_req, lsrq_cl);
790} 720}
791 721
792static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice)
793{
794 return slice->cpl_page->cp_child;
795}
796
797static inline struct lov_io *cl2lov_io(const struct lu_env *env, 722static inline struct lov_io *cl2lov_io(const struct lu_env *env,
798 const struct cl_io_slice *ios) 723 const struct cl_io_slice *ios)
799{ 724{
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index 532ef87dfb44..dae8e89bcf6d 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -143,9 +143,7 @@ static void *lov_key_init(const struct lu_context *ctx,
143 struct lov_thread_info *info; 143 struct lov_thread_info *info;
144 144
145 info = kmem_cache_zalloc(lov_thread_kmem, GFP_NOFS); 145 info = kmem_cache_zalloc(lov_thread_kmem, GFP_NOFS);
146 if (info) 146 if (!info)
147 INIT_LIST_HEAD(&info->lti_closure.clc_list);
148 else
149 info = ERR_PTR(-ENOMEM); 147 info = ERR_PTR(-ENOMEM);
150 return info; 148 return info;
151} 149}
@@ -155,7 +153,6 @@ static void lov_key_fini(const struct lu_context *ctx,
155{ 153{
156 struct lov_thread_info *info = data; 154 struct lov_thread_info *info = data;
157 155
158 LINVRNT(list_empty(&info->lti_closure.clc_list));
159 kmem_cache_free(lov_thread_kmem, info); 156 kmem_cache_free(lov_thread_kmem, info);
160} 157}
161 158
@@ -265,8 +262,9 @@ static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
265 if (lr) { 262 if (lr) {
266 cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops); 263 cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
267 result = 0; 264 result = 0;
268 } else 265 } else {
269 result = -ENOMEM; 266 result = -ENOMEM;
267 }
270 return result; 268 return result;
271} 269}
272 270
@@ -335,14 +333,15 @@ static struct lov_device_emerg **lov_emerg_alloc(int nr)
335 cl_page_list_init(&em->emrg_page_list); 333 cl_page_list_init(&em->emrg_page_list);
336 em->emrg_env = cl_env_alloc(&em->emrg_refcheck, 334 em->emrg_env = cl_env_alloc(&em->emrg_refcheck,
337 LCT_REMEMBER | LCT_NOREF); 335 LCT_REMEMBER | LCT_NOREF);
338 if (!IS_ERR(em->emrg_env)) 336 if (!IS_ERR(em->emrg_env)) {
339 em->emrg_env->le_ctx.lc_cookie = 0x2; 337 em->emrg_env->le_ctx.lc_cookie = 0x2;
340 else { 338 } else {
341 result = PTR_ERR(em->emrg_env); 339 result = PTR_ERR(em->emrg_env);
342 em->emrg_env = NULL; 340 em->emrg_env = NULL;
343 } 341 }
344 } else 342 } else {
345 result = -ENOMEM; 343 result = -ENOMEM;
344 }
346 } 345 }
347 if (result != 0) { 346 if (result != 0) {
348 lov_emerg_free(emerg, nr); 347 lov_emerg_free(emerg, nr);
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index b6529401c713..460f0fa5e6b1 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -48,11 +48,6 @@
48 48
49#include "lov_internal.h" 49#include "lov_internal.h"
50 50
51struct lovea_unpack_args {
52 struct lov_stripe_md *lsm;
53 int cursor;
54};
55
56static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes, 51static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
57 __u16 stripe_count) 52 __u16 stripe_count)
58{ 53{
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 590f9326af37..eef9afac8467 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -72,6 +72,21 @@
72}) 72})
73#endif 73#endif
74 74
75#define pool_tgt_size(p) ((p)->pool_obds.op_size)
76#define pool_tgt_count(p) ((p)->pool_obds.op_count)
77#define pool_tgt_array(p) ((p)->pool_obds.op_array)
78#define pool_tgt_rw_sem(p) ((p)->pool_obds.op_rw_sem)
79
80struct pool_desc {
81 char pool_name[LOV_MAXPOOLNAME + 1];
82 struct ost_pool pool_obds;
83 atomic_t pool_refcount;
84 struct hlist_node pool_hash; /* access by poolname */
85 struct list_head pool_list; /* serial access */
86 struct dentry *pool_debugfs_entry; /* file in debugfs */
87 struct obd_device *pool_lobd; /* owner */
88};
89
75struct lov_request { 90struct lov_request {
76 struct obd_info rq_oi; 91 struct obd_info rq_oi;
77 struct lov_request_set *rq_rqset; 92 struct lov_request_set *rq_rqset;
@@ -88,7 +103,6 @@ struct lov_request {
88}; 103};
89 104
90struct lov_request_set { 105struct lov_request_set {
91 struct ldlm_enqueue_info *set_ei;
92 struct obd_info *set_oi; 106 struct obd_info *set_oi;
93 atomic_t set_refcount; 107 atomic_t set_refcount;
94 struct obd_export *set_exp; 108 struct obd_export *set_exp;
@@ -102,10 +116,8 @@ struct lov_request_set {
102 atomic_t set_finish_checked; 116 atomic_t set_finish_checked;
103 struct llog_cookie *set_cookies; 117 struct llog_cookie *set_cookies;
104 int set_cookie_sent; 118 int set_cookie_sent;
105 struct obd_trans_info *set_oti;
106 struct list_head set_list; 119 struct list_head set_list;
107 wait_queue_head_t set_waitq; 120 wait_queue_head_t set_waitq;
108 spinlock_t set_lock;
109}; 121};
110 122
111extern struct kmem_cache *lov_oinfo_slab; 123extern struct kmem_cache *lov_oinfo_slab;
@@ -114,12 +126,6 @@ extern struct lu_kmem_descr lov_caches[];
114 126
115void lov_finish_set(struct lov_request_set *set); 127void lov_finish_set(struct lov_request_set *set);
116 128
117static inline void lov_get_reqset(struct lov_request_set *set)
118{
119 LASSERT(atomic_read(&set->set_refcount) > 0);
120 atomic_inc(&set->set_refcount);
121}
122
123static inline void lov_put_reqset(struct lov_request_set *set) 129static inline void lov_put_reqset(struct lov_request_set *set)
124{ 130{
125 if (atomic_dec_and_test(&set->set_refcount)) 131 if (atomic_dec_and_test(&set->set_refcount))
@@ -146,10 +152,8 @@ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
146 u64 start, u64 end, 152 u64 start, u64 end,
147 u64 *obd_start, u64 *obd_end); 153 u64 *obd_start, u64 *obd_end);
148int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off); 154int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off);
149 155pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
150/* lov_qos.c */ 156 int stripe);
151#define LOV_USES_ASSIGNED_STRIPE 0
152#define LOV_USES_DEFAULT_STRIPE 1
153 157
154/* lov_request.c */ 158/* lov_request.c */
155int lov_update_common_set(struct lov_request_set *set, 159int lov_update_common_set(struct lov_request_set *set,
@@ -176,6 +180,8 @@ int lov_fini_statfs_set(struct lov_request_set *set);
176int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc); 180int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc);
177 181
178/* lov_obd.c */ 182/* lov_obd.c */
183void lov_stripe_lock(struct lov_stripe_md *md);
184void lov_stripe_unlock(struct lov_stripe_md *md);
179void lov_fix_desc(struct lov_desc *desc); 185void lov_fix_desc(struct lov_desc *desc);
180void lov_fix_desc_stripe_size(__u64 *val); 186void lov_fix_desc_stripe_size(__u64 *val);
181void lov_fix_desc_stripe_count(__u32 *val); 187void lov_fix_desc_stripe_count(__u32 *val);
@@ -231,8 +237,6 @@ int lov_pool_new(struct obd_device *obd, char *poolname);
231int lov_pool_del(struct obd_device *obd, char *poolname); 237int lov_pool_del(struct obd_device *obd, char *poolname);
232int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname); 238int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname);
233int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname); 239int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname);
234struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname);
235int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool);
236void lov_pool_putref(struct pool_desc *pool); 240void lov_pool_putref(struct pool_desc *pool);
237 241
238static inline struct lov_stripe_md *lsm_addref(struct lov_stripe_md *lsm) 242static inline struct lov_stripe_md *lsm_addref(struct lov_stripe_md *lsm)
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 4296aacd84fc..86cb3f8f9246 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -225,8 +225,9 @@ struct lov_io_sub *lov_sub_get(const struct lu_env *env,
225 if (!sub->sub_io_initialized) { 225 if (!sub->sub_io_initialized) {
226 sub->sub_stripe = stripe; 226 sub->sub_stripe = stripe;
227 rc = lov_io_sub_init(env, lio, sub); 227 rc = lov_io_sub_init(env, lio, sub);
228 } else 228 } else {
229 rc = 0; 229 rc = 0;
230 }
230 if (rc == 0) 231 if (rc == 0)
231 lov_sub_enter(sub); 232 lov_sub_enter(sub);
232 else 233 else
@@ -245,13 +246,15 @@ void lov_sub_put(struct lov_io_sub *sub)
245 * 246 *
246 */ 247 */
247 248
248static int lov_page_stripe(const struct cl_page *page) 249int lov_page_stripe(const struct cl_page *page)
249{ 250{
250 struct lovsub_object *subobj; 251 struct lovsub_object *subobj;
252 const struct cl_page_slice *slice;
253
254 slice = cl_page_at(page, &lovsub_device_type);
255 LASSERT(slice->cpl_obj);
251 256
252 subobj = lu2lovsub( 257 subobj = cl2lovsub(slice->cpl_obj);
253 lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header,
254 &lovsub_device_type));
255 return subobj->lso_index; 258 return subobj->lso_index;
256} 259}
257 260
@@ -274,10 +277,11 @@ struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
274static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio, 277static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
275 struct cl_io *io) 278 struct cl_io *io)
276{ 279{
277 struct lov_stripe_md *lsm = lio->lis_object->lo_lsm; 280 struct lov_stripe_md *lsm;
278 int result; 281 int result;
279 282
280 LASSERT(lio->lis_object); 283 LASSERT(lio->lis_object);
284 lsm = lio->lis_object->lo_lsm;
281 285
282 /* 286 /*
283 * Need to be optimized, we can't afford to allocate a piece of memory 287 * Need to be optimized, we can't afford to allocate a piece of memory
@@ -292,8 +296,9 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
292 lio->lis_single_subio_index = -1; 296 lio->lis_single_subio_index = -1;
293 lio->lis_active_subios = 0; 297 lio->lis_active_subios = 0;
294 result = 0; 298 result = 0;
295 } else 299 } else {
296 result = -ENOMEM; 300 result = -ENOMEM;
301 }
297 return result; 302 return result;
298} 303}
299 304
@@ -411,8 +416,9 @@ static int lov_io_iter_init(const struct lu_env *env,
411 lov_sub_put(sub); 416 lov_sub_put(sub);
412 CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n", 417 CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n",
413 stripe, start, end); 418 stripe, start, end);
414 } else 419 } else {
415 rc = PTR_ERR(sub); 420 rc = PTR_ERR(sub);
421 }
416 422
417 if (!rc) 423 if (!rc)
418 list_add_tail(&sub->sub_linkage, &lio->lis_active); 424 list_add_tail(&sub->sub_linkage, &lio->lis_active);
@@ -436,7 +442,6 @@ static int lov_io_rw_iter_init(const struct lu_env *env,
436 442
437 /* fast path for common case. */ 443 /* fast path for common case. */
438 if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) { 444 if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) {
439
440 lov_do_div64(start, ssize); 445 lov_do_div64(start, ssize);
441 next = (start + 1) * ssize; 446 next = (start + 1) * ssize;
442 if (next <= start * ssize) 447 if (next <= start * ssize)
@@ -543,13 +548,6 @@ static void lov_io_unlock(const struct lu_env *env,
543 LASSERT(rc == 0); 548 LASSERT(rc == 0);
544} 549}
545 550
546static struct cl_page_list *lov_io_submit_qin(struct lov_device *ld,
547 struct cl_page_list *qin,
548 int idx, int alloc)
549{
550 return alloc ? &qin[idx] : &ld->ld_emrg[idx]->emrg_page_list;
551}
552
553/** 551/**
554 * lov implementation of cl_operations::cio_submit() method. It takes a list 552 * lov implementation of cl_operations::cio_submit() method. It takes a list
555 * of pages in \a queue, splits it into per-stripe sub-lists, invokes 553 * of pages in \a queue, splits it into per-stripe sub-lists, invokes
@@ -569,25 +567,17 @@ static int lov_io_submit(const struct lu_env *env,
569 const struct cl_io_slice *ios, 567 const struct cl_io_slice *ios,
570 enum cl_req_type crt, struct cl_2queue *queue) 568 enum cl_req_type crt, struct cl_2queue *queue)
571{ 569{
572 struct lov_io *lio = cl2lov_io(env, ios); 570 struct cl_page_list *qin = &queue->c2_qin;
573 struct lov_object *obj = lio->lis_object; 571 struct lov_io *lio = cl2lov_io(env, ios);
574 struct lov_device *ld = lu2lov_dev(lov2cl(obj)->co_lu.lo_dev); 572 struct lov_io_sub *sub;
575 struct cl_page_list *qin = &queue->c2_qin; 573 struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
576 struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;
577 struct cl_page_list *stripes_qin = NULL;
578 struct cl_page *page; 574 struct cl_page *page;
579 struct cl_page *tmp;
580 int stripe; 575 int stripe;
581 576
582#define QIN(stripe) lov_io_submit_qin(ld, stripes_qin, stripe, alloc)
583
584 int rc = 0; 577 int rc = 0;
585 int alloc =
586 !(current->flags & PF_MEMALLOC);
587 578
588 if (lio->lis_active_subios == 1) { 579 if (lio->lis_active_subios == 1) {
589 int idx = lio->lis_single_subio_index; 580 int idx = lio->lis_single_subio_index;
590 struct lov_io_sub *sub;
591 581
592 LASSERT(idx < lio->lis_nr_subios); 582 LASSERT(idx < lio->lis_nr_subios);
593 sub = lov_sub_get(env, lio, idx); 583 sub = lov_sub_get(env, lio, idx);
@@ -600,119 +590,120 @@ static int lov_io_submit(const struct lu_env *env,
600 } 590 }
601 591
602 LASSERT(lio->lis_subs); 592 LASSERT(lio->lis_subs);
603 if (alloc) {
604 stripes_qin =
605 libcfs_kvzalloc(sizeof(*stripes_qin) *
606 lio->lis_nr_subios,
607 GFP_NOFS);
608 if (!stripes_qin)
609 return -ENOMEM;
610
611 for (stripe = 0; stripe < lio->lis_nr_subios; stripe++)
612 cl_page_list_init(&stripes_qin[stripe]);
613 } else {
614 /*
615 * If we get here, it means pageout & swap doesn't help.
616 * In order to not make things worse, even don't try to
617 * allocate the memory with __GFP_NOWARN. -jay
618 */
619 mutex_lock(&ld->ld_mutex);
620 lio->lis_mem_frozen = 1;
621 }
622 593
623 cl_2queue_init(cl2q); 594 cl_page_list_init(plist);
624 cl_page_list_for_each_safe(page, tmp, qin) { 595 while (qin->pl_nr > 0) {
625 stripe = lov_page_stripe(page); 596 struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;
626 cl_page_list_move(QIN(stripe), qin, page);
627 }
628 597
629 for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) { 598 cl_2queue_init(cl2q);
630 struct lov_io_sub *sub;
631 struct cl_page_list *sub_qin = QIN(stripe);
632 599
633 if (list_empty(&sub_qin->pl_pages)) 600 page = cl_page_list_first(qin);
634 continue; 601 cl_page_list_move(&cl2q->c2_qin, qin, page);
602
603 stripe = lov_page_stripe(page);
604 while (qin->pl_nr > 0) {
605 page = cl_page_list_first(qin);
606 if (stripe != lov_page_stripe(page))
607 break;
608
609 cl_page_list_move(&cl2q->c2_qin, qin, page);
610 }
635 611
636 cl_page_list_splice(sub_qin, &cl2q->c2_qin);
637 sub = lov_sub_get(env, lio, stripe); 612 sub = lov_sub_get(env, lio, stripe);
638 if (!IS_ERR(sub)) { 613 if (!IS_ERR(sub)) {
639 rc = cl_io_submit_rw(sub->sub_env, sub->sub_io, 614 rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
640 crt, cl2q); 615 crt, cl2q);
641 lov_sub_put(sub); 616 lov_sub_put(sub);
642 } else 617 } else {
643 rc = PTR_ERR(sub); 618 rc = PTR_ERR(sub);
644 cl_page_list_splice(&cl2q->c2_qin, &queue->c2_qin); 619 }
620
621 cl_page_list_splice(&cl2q->c2_qin, plist);
645 cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout); 622 cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout);
623 cl_2queue_fini(env, cl2q);
624
646 if (rc != 0) 625 if (rc != 0)
647 break; 626 break;
648 } 627 }
649 628
650 for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) { 629 cl_page_list_splice(plist, qin);
651 struct cl_page_list *sub_qin = QIN(stripe); 630 cl_page_list_fini(env, plist);
652 631
653 if (list_empty(&sub_qin->pl_pages)) 632 return rc;
654 continue; 633}
634
635static int lov_io_commit_async(const struct lu_env *env,
636 const struct cl_io_slice *ios,
637 struct cl_page_list *queue, int from, int to,
638 cl_commit_cbt cb)
639{
640 struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
641 struct lov_io *lio = cl2lov_io(env, ios);
642 struct lov_io_sub *sub;
643 struct cl_page *page;
644 int rc = 0;
645
646 if (lio->lis_active_subios == 1) {
647 int idx = lio->lis_single_subio_index;
655 648
656 cl_page_list_splice(sub_qin, qin); 649 LASSERT(idx < lio->lis_nr_subios);
650 sub = lov_sub_get(env, lio, idx);
651 LASSERT(!IS_ERR(sub));
652 LASSERT(sub->sub_io == &lio->lis_single_subio);
653 rc = cl_io_commit_async(sub->sub_env, sub->sub_io, queue,
654 from, to, cb);
655 lov_sub_put(sub);
656 return rc;
657 } 657 }
658 658
659 if (alloc) { 659 LASSERT(lio->lis_subs);
660 kvfree(stripes_qin);
661 } else {
662 int i;
663 660
664 for (i = 0; i < lio->lis_nr_subios; i++) { 661 cl_page_list_init(plist);
665 struct cl_io *cio = lio->lis_subs[i].sub_io; 662 while (queue->pl_nr > 0) {
663 int stripe_to = to;
664 int stripe;
666 665
667 if (cio && cio == &ld->ld_emrg[i]->emrg_subio) 666 LASSERT(plist->pl_nr == 0);
668 lov_io_sub_fini(env, lio, &lio->lis_subs[i]); 667 page = cl_page_list_first(queue);
668 cl_page_list_move(plist, queue, page);
669
670 stripe = lov_page_stripe(page);
671 while (queue->pl_nr > 0) {
672 page = cl_page_list_first(queue);
673 if (stripe != lov_page_stripe(page))
674 break;
675
676 cl_page_list_move(plist, queue, page);
669 } 677 }
670 lio->lis_mem_frozen = 0;
671 mutex_unlock(&ld->ld_mutex);
672 }
673 678
674 return rc; 679 if (queue->pl_nr > 0) /* still has more pages */
675#undef QIN 680 stripe_to = PAGE_SIZE;
676}
677 681
678static int lov_io_prepare_write(const struct lu_env *env, 682 sub = lov_sub_get(env, lio, stripe);
679 const struct cl_io_slice *ios, 683 if (!IS_ERR(sub)) {
680 const struct cl_page_slice *slice, 684 rc = cl_io_commit_async(sub->sub_env, sub->sub_io,
681 unsigned from, unsigned to) 685 plist, from, stripe_to, cb);
682{ 686 lov_sub_put(sub);
683 struct lov_io *lio = cl2lov_io(env, ios); 687 } else {
684 struct cl_page *sub_page = lov_sub_page(slice); 688 rc = PTR_ERR(sub);
685 struct lov_io_sub *sub; 689 break;
686 int result; 690 }
687 691
688 sub = lov_page_subio(env, lio, slice); 692 if (plist->pl_nr > 0) /* short write */
689 if (!IS_ERR(sub)) { 693 break;
690 result = cl_io_prepare_write(sub->sub_env, sub->sub_io,
691 sub_page, from, to);
692 lov_sub_put(sub);
693 } else
694 result = PTR_ERR(sub);
695 return result;
696}
697 694
698static int lov_io_commit_write(const struct lu_env *env, 695 from = 0;
699 const struct cl_io_slice *ios, 696 }
700 const struct cl_page_slice *slice,
701 unsigned from, unsigned to)
702{
703 struct lov_io *lio = cl2lov_io(env, ios);
704 struct cl_page *sub_page = lov_sub_page(slice);
705 struct lov_io_sub *sub;
706 int result;
707 697
708 sub = lov_page_subio(env, lio, slice); 698 /* for error case, add the page back into the qin list */
709 if (!IS_ERR(sub)) { 699 LASSERT(ergo(rc == 0, plist->pl_nr == 0));
710 result = cl_io_commit_write(sub->sub_env, sub->sub_io, 700 while (plist->pl_nr > 0) {
711 sub_page, from, to); 701 /* error occurred, add the uncommitted pages back into queue */
712 lov_sub_put(sub); 702 page = cl_page_list_last(plist);
713 } else 703 cl_page_list_move_head(queue, plist, page);
714 result = PTR_ERR(sub); 704 }
715 return result; 705
706 return rc;
716} 707}
717 708
718static int lov_io_fault_start(const struct lu_env *env, 709static int lov_io_fault_start(const struct lu_env *env,
@@ -803,16 +794,8 @@ static const struct cl_io_operations lov_io_ops = {
803 .cio_fini = lov_io_fini 794 .cio_fini = lov_io_fini
804 } 795 }
805 }, 796 },
806 .req_op = { 797 .cio_submit = lov_io_submit,
807 [CRT_READ] = { 798 .cio_commit_async = lov_io_commit_async,
808 .cio_submit = lov_io_submit
809 },
810 [CRT_WRITE] = {
811 .cio_submit = lov_io_submit
812 }
813 },
814 .cio_prepare_write = lov_io_prepare_write,
815 .cio_commit_write = lov_io_commit_write
816}; 799};
817 800
818/***************************************************************************** 801/*****************************************************************************
@@ -880,15 +863,8 @@ static const struct cl_io_operations lov_empty_io_ops = {
880 .cio_fini = lov_empty_io_fini 863 .cio_fini = lov_empty_io_fini
881 } 864 }
882 }, 865 },
883 .req_op = { 866 .cio_submit = LOV_EMPTY_IMPOSSIBLE,
884 [CRT_READ] = { 867 .cio_commit_async = LOV_EMPTY_IMPOSSIBLE
885 .cio_submit = LOV_EMPTY_IMPOSSIBLE
886 },
887 [CRT_WRITE] = {
888 .cio_submit = LOV_EMPTY_IMPOSSIBLE
889 }
890 },
891 .cio_commit_write = LOV_EMPTY_IMPOSSIBLE
892}; 868};
893 869
894int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj, 870int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
@@ -943,7 +919,7 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
943 } 919 }
944 920
945 io->ci_result = result < 0 ? result : 0; 921 io->ci_result = result < 0 ? result : 0;
946 return result != 0; 922 return result;
947} 923}
948 924
949int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, 925int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
@@ -986,7 +962,7 @@ int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
986 } 962 }
987 963
988 io->ci_result = result < 0 ? result : 0; 964 io->ci_result = result < 0 ? result : 0;
989 return result != 0; 965 return result;
990} 966}
991 967
992/** @} lov */ 968/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index ae854bc25dbe..1b203d18c6e9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -46,11 +46,6 @@
46 * @{ 46 * @{
47 */ 47 */
48 48
49static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
50 struct cl_lock *parent);
51
52static int lov_lock_unuse(const struct lu_env *env,
53 const struct cl_lock_slice *slice);
54/***************************************************************************** 49/*****************************************************************************
55 * 50 *
56 * Lov lock operations. 51 * Lov lock operations.
@@ -58,7 +53,7 @@ static int lov_lock_unuse(const struct lu_env *env,
58 */ 53 */
59 54
60static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env, 55static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
61 struct cl_lock *parent, 56 const struct cl_lock *parent,
62 struct lov_lock_sub *lls) 57 struct lov_lock_sub *lls)
63{ 58{
64 struct lov_sublock_env *subenv; 59 struct lov_sublock_env *subenv;
@@ -100,185 +95,26 @@ static void lov_sublock_env_put(struct lov_sublock_env *subenv)
100 lov_sub_put(subenv->lse_sub); 95 lov_sub_put(subenv->lse_sub);
101} 96}
102 97
103static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck, 98static int lov_sublock_init(const struct lu_env *env,
104 struct cl_lock *sublock, int idx, 99 const struct cl_lock *parent,
105 struct lov_lock_link *link) 100 struct lov_lock_sub *lls)
106{ 101{
107 struct lovsub_lock *lsl; 102 struct lov_sublock_env *subenv;
108 struct cl_lock *parent = lck->lls_cl.cls_lock; 103 int result;
109 int rc;
110
111 LASSERT(cl_lock_is_mutexed(parent));
112 LASSERT(cl_lock_is_mutexed(sublock));
113
114 lsl = cl2sub_lock(sublock);
115 /*
116 * check that sub-lock doesn't have lock link to this top-lock.
117 */
118 LASSERT(!lov_lock_link_find(env, lck, lsl));
119 LASSERT(idx < lck->lls_nr);
120
121 lck->lls_sub[idx].sub_lock = lsl;
122 lck->lls_nr_filled++;
123 LASSERT(lck->lls_nr_filled <= lck->lls_nr);
124 list_add_tail(&link->lll_list, &lsl->lss_parents);
125 link->lll_idx = idx;
126 link->lll_super = lck;
127 cl_lock_get(parent);
128 lu_ref_add(&parent->cll_reference, "lov-child", sublock);
129 lck->lls_sub[idx].sub_flags |= LSF_HELD;
130 cl_lock_user_add(env, sublock);
131
132 rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
133 LASSERT(rc == 0); /* there is no way this can fail, currently */
134}
135
136static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
137 const struct cl_io *io,
138 struct lov_lock *lck,
139 int idx, struct lov_lock_link **out)
140{
141 struct cl_lock *sublock;
142 struct cl_lock *parent;
143 struct lov_lock_link *link;
144
145 LASSERT(idx < lck->lls_nr);
146
147 link = kmem_cache_zalloc(lov_lock_link_kmem, GFP_NOFS);
148 if (link) {
149 struct lov_sublock_env *subenv;
150 struct lov_lock_sub *lls;
151 struct cl_lock_descr *descr;
152
153 parent = lck->lls_cl.cls_lock;
154 lls = &lck->lls_sub[idx];
155 descr = &lls->sub_got;
156
157 subenv = lov_sublock_env_get(env, parent, lls);
158 if (!IS_ERR(subenv)) {
159 /* CAVEAT: Don't try to add a field in lov_lock_sub
160 * to remember the subio. This is because lock is able
161 * to be cached, but this is not true for IO. This
162 * further means a sublock might be referenced in
163 * different io context. -jay
164 */
165
166 sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
167 descr, "lov-parent", parent);
168 lov_sublock_env_put(subenv);
169 } else {
170 /* error occurs. */
171 sublock = (void *)subenv;
172 }
173
174 if (!IS_ERR(sublock))
175 *out = link;
176 else
177 kmem_cache_free(lov_lock_link_kmem, link);
178 } else
179 sublock = ERR_PTR(-ENOMEM);
180 return sublock;
181}
182
183static void lov_sublock_unlock(const struct lu_env *env,
184 struct lovsub_lock *lsl,
185 struct cl_lock_closure *closure,
186 struct lov_sublock_env *subenv)
187{
188 lov_sublock_env_put(subenv);
189 lsl->lss_active = NULL;
190 cl_lock_disclosure(env, closure);
191}
192
193static int lov_sublock_lock(const struct lu_env *env,
194 struct lov_lock *lck,
195 struct lov_lock_sub *lls,
196 struct cl_lock_closure *closure,
197 struct lov_sublock_env **lsep)
198{
199 struct lovsub_lock *sublock;
200 struct cl_lock *child;
201 int result = 0;
202
203 LASSERT(list_empty(&closure->clc_list));
204
205 sublock = lls->sub_lock;
206 child = sublock->lss_cl.cls_lock;
207 result = cl_lock_closure_build(env, child, closure);
208 if (result == 0) {
209 struct cl_lock *parent = closure->clc_origin;
210
211 LASSERT(cl_lock_is_mutexed(child));
212 sublock->lss_active = parent;
213
214 if (unlikely((child->cll_state == CLS_FREEING) ||
215 (child->cll_flags & CLF_CANCELLED))) {
216 struct lov_lock_link *link;
217 /*
218 * we could race with lock deletion which temporarily
219 * put the lock in freeing state, bug 19080.
220 */
221 LASSERT(!(lls->sub_flags & LSF_HELD));
222
223 link = lov_lock_link_find(env, lck, sublock);
224 LASSERT(link);
225 lov_lock_unlink(env, link, sublock);
226 lov_sublock_unlock(env, sublock, closure, NULL);
227 lck->lls_cancel_race = 1;
228 result = CLO_REPEAT;
229 } else if (lsep) {
230 struct lov_sublock_env *subenv;
231 104
232 subenv = lov_sublock_env_get(env, parent, lls); 105 subenv = lov_sublock_env_get(env, parent, lls);
233 if (IS_ERR(subenv)) { 106 if (!IS_ERR(subenv)) {
234 lov_sublock_unlock(env, sublock, 107 result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
235 closure, NULL); 108 subenv->lse_io);
236 result = PTR_ERR(subenv); 109 lov_sublock_env_put(subenv);
237 } else { 110 } else {
238 *lsep = subenv; 111 /* error occurs. */
239 } 112 result = PTR_ERR(subenv);
240 }
241 } 113 }
242 return result; 114 return result;
243} 115}
244 116
245/** 117/**
246 * Updates the result of a top-lock operation from a result of sub-lock
247 * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
248 * over sub-locks and lov_subresult() is used to calculate return value of a
249 * top-operation. To this end, possible return values of sub-operations are
250 * ordered as
251 *
252 * - 0 success
253 * - CLO_WAIT wait for event
254 * - CLO_REPEAT repeat top-operation
255 * - -ne fundamental error
256 *
257 * Top-level return code can only go down through this list. CLO_REPEAT
258 * overwrites CLO_WAIT, because lock mutex was released and sleeping condition
259 * has to be rechecked by the upper layer.
260 */
261static int lov_subresult(int result, int rc)
262{
263 int result_rank;
264 int rc_rank;
265
266 LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT,
267 "result = %d\n", result);
268 LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT,
269 "rc = %d\n", rc);
270 CLASSERT(CLO_WAIT < CLO_REPEAT);
271
272 /* calculate ranks in the ordering above */
273 result_rank = result < 0 ? 1 + CLO_REPEAT : result;
274 rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;
275
276 if (result_rank < rc_rank)
277 result = rc;
278 return result;
279}
280
281/**
282 * Creates sub-locks for a given lov_lock for the first time. 118 * Creates sub-locks for a given lov_lock for the first time.
283 * 119 *
284 * Goes through all sub-objects of top-object, and creates sub-locks on every 120 * Goes through all sub-objects of top-object, and creates sub-locks on every
@@ -286,8 +122,9 @@ static int lov_subresult(int result, int rc)
286 * fact that top-lock (that is being created) can be accessed concurrently 122 * fact that top-lock (that is being created) can be accessed concurrently
287 * through already created sub-locks (possibly shared with other top-locks). 123 * through already created sub-locks (possibly shared with other top-locks).
288 */ 124 */
289static int lov_lock_sub_init(const struct lu_env *env, 125static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
290 struct lov_lock *lck, const struct cl_io *io) 126 const struct cl_object *obj,
127 struct cl_lock *lock)
291{ 128{
292 int result = 0; 129 int result = 0;
293 int i; 130 int i;
@@ -297,241 +134,86 @@ static int lov_lock_sub_init(const struct lu_env *env,
297 u64 file_start; 134 u64 file_start;
298 u64 file_end; 135 u64 file_end;
299 136
300 struct lov_object *loo = cl2lov(lck->lls_cl.cls_obj); 137 struct lov_object *loo = cl2lov(obj);
301 struct lov_layout_raid0 *r0 = lov_r0(loo); 138 struct lov_layout_raid0 *r0 = lov_r0(loo);
302 struct cl_lock *parent = lck->lls_cl.cls_lock; 139 struct lov_lock *lovlck;
303 140
304 lck->lls_orig = parent->cll_descr; 141 file_start = cl_offset(lov2cl(loo), lock->cll_descr.cld_start);
305 file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start); 142 file_end = cl_offset(lov2cl(loo), lock->cll_descr.cld_end + 1) - 1;
306 file_end = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
307 143
308 for (i = 0, nr = 0; i < r0->lo_nr; i++) { 144 for (i = 0, nr = 0; i < r0->lo_nr; i++) {
309 /* 145 /*
310 * XXX for wide striping smarter algorithm is desirable, 146 * XXX for wide striping smarter algorithm is desirable,
311 * breaking out of the loop, early. 147 * breaking out of the loop, early.
312 */ 148 */
313 if (likely(r0->lo_sub[i]) && 149 if (likely(r0->lo_sub[i]) && /* spare layout */
314 lov_stripe_intersects(loo->lo_lsm, i, 150 lov_stripe_intersects(loo->lo_lsm, i,
315 file_start, file_end, &start, &end)) 151 file_start, file_end, &start, &end))
316 nr++; 152 nr++;
317 } 153 }
318 LASSERT(nr > 0); 154 LASSERT(nr > 0);
319 lck->lls_sub = libcfs_kvzalloc(nr * sizeof(lck->lls_sub[0]), GFP_NOFS); 155 lovlck = libcfs_kvzalloc(offsetof(struct lov_lock, lls_sub[nr]),
320 if (!lck->lls_sub) 156 GFP_NOFS);
321 return -ENOMEM; 157 if (!lovlck)
158 return ERR_PTR(-ENOMEM);
322 159
323 lck->lls_nr = nr; 160 lovlck->lls_nr = nr;
324 /*
325 * First, fill in sub-lock descriptions in
326 * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
327 * (called below in this function, and by lov_lock_enqueue()) to
328 * create sub-locks. At this moment, no other thread can access
329 * top-lock.
330 */
331 for (i = 0, nr = 0; i < r0->lo_nr; ++i) { 161 for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
332 if (likely(r0->lo_sub[i]) && 162 if (likely(r0->lo_sub[i]) &&
333 lov_stripe_intersects(loo->lo_lsm, i, 163 lov_stripe_intersects(loo->lo_lsm, i,
334 file_start, file_end, &start, &end)) { 164 file_start, file_end, &start, &end)) {
165 struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
335 struct cl_lock_descr *descr; 166 struct cl_lock_descr *descr;
336 167
337 descr = &lck->lls_sub[nr].sub_descr; 168 descr = &lls->sub_lock.cll_descr;
338 169
339 LASSERT(!descr->cld_obj); 170 LASSERT(!descr->cld_obj);
340 descr->cld_obj = lovsub2cl(r0->lo_sub[i]); 171 descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
341 descr->cld_start = cl_index(descr->cld_obj, start); 172 descr->cld_start = cl_index(descr->cld_obj, start);
342 descr->cld_end = cl_index(descr->cld_obj, end); 173 descr->cld_end = cl_index(descr->cld_obj, end);
343 descr->cld_mode = parent->cll_descr.cld_mode; 174 descr->cld_mode = lock->cll_descr.cld_mode;
344 descr->cld_gid = parent->cll_descr.cld_gid; 175 descr->cld_gid = lock->cll_descr.cld_gid;
345 descr->cld_enq_flags = parent->cll_descr.cld_enq_flags; 176 descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
346 /* XXX has no effect */ 177 lls->sub_stripe = i;
347 lck->lls_sub[nr].sub_got = *descr; 178
348 lck->lls_sub[nr].sub_stripe = i; 179 /* initialize sub lock */
180 result = lov_sublock_init(env, lock, lls);
181 if (result < 0)
182 break;
183
184 lls->sub_initialized = 1;
349 nr++; 185 nr++;
350 } 186 }
351 } 187 }
352 LASSERT(nr == lck->lls_nr); 188 LASSERT(ergo(result == 0, nr == lovlck->lls_nr));
353
354 /*
355 * Some sub-locks can be missing at this point. This is not a problem,
356 * because enqueue will create them anyway. Main duty of this function
357 * is to fill in sub-lock descriptions in a race free manner.
358 */
359 return result;
360}
361 189
362static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck, 190 if (result != 0) {
363 int i, int deluser, int rc) 191 for (i = 0; i < nr; ++i) {
364{ 192 if (!lovlck->lls_sub[i].sub_initialized)
365 struct cl_lock *parent = lck->lls_cl.cls_lock; 193 break;
366
367 LASSERT(cl_lock_is_mutexed(parent));
368
369 if (lck->lls_sub[i].sub_flags & LSF_HELD) {
370 struct cl_lock *sublock;
371 int dying;
372
373 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
374 LASSERT(cl_lock_is_mutexed(sublock));
375 194
376 lck->lls_sub[i].sub_flags &= ~LSF_HELD; 195 cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
377 if (deluser)
378 cl_lock_user_del(env, sublock);
379 /*
380 * If the last hold is released, and cancellation is pending
381 * for a sub-lock, release parent mutex, to avoid keeping it
382 * while sub-lock is being paged out.
383 */
384 dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
385 sublock->cll_descr.cld_mode == CLM_GROUP ||
386 (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
387 sublock->cll_holds == 1;
388 if (dying)
389 cl_lock_mutex_put(env, parent);
390 cl_lock_unhold(env, sublock, "lov-parent", parent);
391 if (dying) {
392 cl_lock_mutex_get(env, parent);
393 rc = lov_subresult(rc, CLO_REPEAT);
394 } 196 }
395 /* 197 kvfree(lovlck);
396 * From now on lck->lls_sub[i].sub_lock is a "weak" pointer, 198 lovlck = ERR_PTR(result);
397 * not backed by a reference on a
398 * sub-lock. lovsub_lock_delete() will clear
399 * lck->lls_sub[i].sub_lock under semaphores, just before
400 * sub-lock is destroyed.
401 */
402 } 199 }
403 return rc;
404}
405
406static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
407 int i)
408{
409 struct cl_lock *parent = lck->lls_cl.cls_lock;
410
411 LASSERT(cl_lock_is_mutexed(parent));
412
413 if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
414 struct cl_lock *sublock;
415
416 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
417 LASSERT(cl_lock_is_mutexed(sublock));
418 LASSERT(sublock->cll_state != CLS_FREEING);
419 200
420 lck->lls_sub[i].sub_flags |= LSF_HELD; 201 return lovlck;
421
422 cl_lock_get_trust(sublock);
423 cl_lock_hold_add(env, sublock, "lov-parent", parent);
424 cl_lock_user_add(env, sublock);
425 cl_lock_put(env, sublock);
426 }
427} 202}
428 203
429static void lov_lock_fini(const struct lu_env *env, 204static void lov_lock_fini(const struct lu_env *env,
430 struct cl_lock_slice *slice) 205 struct cl_lock_slice *slice)
431{ 206{
432 struct lov_lock *lck; 207 struct lov_lock *lovlck;
433 int i; 208 int i;
434 209
435 lck = cl2lov_lock(slice); 210 lovlck = cl2lov_lock(slice);
436 LASSERT(lck->lls_nr_filled == 0); 211 for (i = 0; i < lovlck->lls_nr; ++i) {
437 if (lck->lls_sub) { 212 LASSERT(!lovlck->lls_sub[i].sub_is_enqueued);
438 for (i = 0; i < lck->lls_nr; ++i) 213 if (lovlck->lls_sub[i].sub_initialized)
439 /* 214 cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
440 * No sub-locks exists at this point, as sub-lock has
441 * a reference on its parent.
442 */
443 LASSERT(!lck->lls_sub[i].sub_lock);
444 kvfree(lck->lls_sub);
445 } 215 }
446 kmem_cache_free(lov_lock_kmem, lck); 216 kvfree(lovlck);
447}
448
449static int lov_lock_enqueue_wait(const struct lu_env *env,
450 struct lov_lock *lck,
451 struct cl_lock *sublock)
452{
453 struct cl_lock *lock = lck->lls_cl.cls_lock;
454 int result;
455
456 LASSERT(cl_lock_is_mutexed(lock));
457
458 cl_lock_mutex_put(env, lock);
459 result = cl_lock_enqueue_wait(env, sublock, 0);
460 cl_lock_mutex_get(env, lock);
461 return result ?: CLO_REPEAT;
462}
463
464/**
465 * Tries to advance a state machine of a given sub-lock toward enqueuing of
466 * the top-lock.
467 *
468 * \retval 0 if state-transition can proceed
469 * \retval -ve otherwise.
470 */
471static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
472 struct cl_lock *sublock,
473 struct cl_io *io, __u32 enqflags, int last)
474{
475 int result;
476
477 /* first, try to enqueue a sub-lock ... */
478 result = cl_enqueue_try(env, sublock, io, enqflags);
479 if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
480 /* if it is enqueued, try to `wait' on it---maybe it's already
481 * granted
482 */
483 result = cl_wait_try(env, sublock);
484 if (result == CLO_REENQUEUED)
485 result = CLO_WAIT;
486 }
487 /*
488 * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
489 * parallel, otherwise---enqueue has to wait until sub-lock is granted
490 * before proceeding to the next one.
491 */
492 if ((result == CLO_WAIT) && (sublock->cll_state <= CLS_HELD) &&
493 (enqflags & CEF_ASYNC) && (!last || (enqflags & CEF_AGL)))
494 result = 0;
495 return result;
496}
497
498/**
499 * Helper function for lov_lock_enqueue() that creates missing sub-lock.
500 */
501static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
502 struct cl_io *io, struct lov_lock *lck, int idx)
503{
504 struct lov_lock_link *link = NULL;
505 struct cl_lock *sublock;
506 int result;
507
508 LASSERT(parent->cll_depth == 1);
509 cl_lock_mutex_put(env, parent);
510 sublock = lov_sublock_alloc(env, io, lck, idx, &link);
511 if (!IS_ERR(sublock))
512 cl_lock_mutex_get(env, sublock);
513 cl_lock_mutex_get(env, parent);
514
515 if (!IS_ERR(sublock)) {
516 cl_lock_get_trust(sublock);
517 if (parent->cll_state == CLS_QUEUING &&
518 !lck->lls_sub[idx].sub_lock) {
519 lov_sublock_adopt(env, lck, sublock, idx, link);
520 } else {
521 kmem_cache_free(lov_lock_link_kmem, link);
522 /* other thread allocated sub-lock, or enqueue is no
523 * longer going on
524 */
525 cl_lock_mutex_put(env, parent);
526 cl_lock_unhold(env, sublock, "lov-parent", parent);
527 cl_lock_mutex_get(env, parent);
528 }
529 cl_lock_mutex_put(env, sublock);
530 cl_lock_put(env, sublock);
531 result = CLO_REPEAT;
532 } else
533 result = PTR_ERR(sublock);
534 return result;
535} 217}
536 218
537/** 219/**
@@ -543,529 +225,59 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
543 */ 225 */
544static int lov_lock_enqueue(const struct lu_env *env, 226static int lov_lock_enqueue(const struct lu_env *env,
545 const struct cl_lock_slice *slice, 227 const struct cl_lock_slice *slice,
546 struct cl_io *io, __u32 enqflags) 228 struct cl_io *io, struct cl_sync_io *anchor)
547{ 229{
548 struct cl_lock *lock = slice->cls_lock; 230 struct cl_lock *lock = slice->cls_lock;
549 struct lov_lock *lck = cl2lov_lock(slice); 231 struct lov_lock *lovlck = cl2lov_lock(slice);
550 struct cl_lock_closure *closure = lov_closure_get(env, lock);
551 int i; 232 int i;
552 int result; 233 int rc = 0;
553 enum cl_lock_state minstate;
554 234
555 for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) { 235 for (i = 0; i < lovlck->lls_nr; ++i) {
556 int rc; 236 struct lov_lock_sub *lls = &lovlck->lls_sub[i];
557 struct lovsub_lock *sub;
558 struct lov_lock_sub *lls;
559 struct cl_lock *sublock;
560 struct lov_sublock_env *subenv; 237 struct lov_sublock_env *subenv;
561 238
562 if (lock->cll_state != CLS_QUEUING) { 239 subenv = lov_sublock_env_get(env, lock, lls);
563 /* 240 if (IS_ERR(subenv)) {
564 * Lock might have left QUEUING state if previous 241 rc = PTR_ERR(subenv);
565 * iteration released its mutex. Stop enqueing in this
566 * case and let the upper layer to decide what to do.
567 */
568 LASSERT(i > 0 && result != 0);
569 break;
570 }
571
572 lls = &lck->lls_sub[i];
573 sub = lls->sub_lock;
574 /*
575 * Sub-lock might have been canceled, while top-lock was
576 * cached.
577 */
578 if (!sub) {
579 result = lov_sublock_fill(env, lock, io, lck, i);
580 /* lov_sublock_fill() released @lock mutex,
581 * restart.
582 */
583 break; 242 break;
584 } 243 }
585 sublock = sub->lss_cl.cls_lock; 244 rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
586 rc = lov_sublock_lock(env, lck, lls, closure, &subenv); 245 &lls->sub_lock, anchor);
587 if (rc == 0) { 246 lov_sublock_env_put(subenv);
588 lov_sublock_hold(env, lck, i); 247 if (rc != 0)
589 rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
590 subenv->lse_io, enqflags,
591 i == lck->lls_nr - 1);
592 minstate = min(minstate, sublock->cll_state);
593 if (rc == CLO_WAIT) {
594 switch (sublock->cll_state) {
595 case CLS_QUEUING:
596 /* take recursive mutex, the lock is
597 * released in lov_lock_enqueue_wait.
598 */
599 cl_lock_mutex_get(env, sublock);
600 lov_sublock_unlock(env, sub, closure,
601 subenv);
602 rc = lov_lock_enqueue_wait(env, lck,
603 sublock);
604 break;
605 case CLS_CACHED:
606 cl_lock_get(sublock);
607 /* take recursive mutex of sublock */
608 cl_lock_mutex_get(env, sublock);
609 /* need to release all locks in closure
610 * otherwise it may deadlock. LU-2683.
611 */
612 lov_sublock_unlock(env, sub, closure,
613 subenv);
614 /* sublock and parent are held. */
615 rc = lov_sublock_release(env, lck, i,
616 1, rc);
617 cl_lock_mutex_put(env, sublock);
618 cl_lock_put(env, sublock);
619 break;
620 default:
621 lov_sublock_unlock(env, sub, closure,
622 subenv);
623 break;
624 }
625 } else {
626 LASSERT(!sublock->cll_conflict);
627 lov_sublock_unlock(env, sub, closure, subenv);
628 }
629 }
630 result = lov_subresult(result, rc);
631 if (result != 0)
632 break; 248 break;
633 }
634 cl_lock_closure_fini(closure);
635 return result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT;
636}
637
638static int lov_lock_unuse(const struct lu_env *env,
639 const struct cl_lock_slice *slice)
640{
641 struct lov_lock *lck = cl2lov_lock(slice);
642 struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
643 int i;
644 int result;
645
646 for (result = 0, i = 0; i < lck->lls_nr; ++i) {
647 int rc;
648 struct lovsub_lock *sub;
649 struct cl_lock *sublock;
650 struct lov_lock_sub *lls;
651 struct lov_sublock_env *subenv;
652 249
653 /* top-lock state cannot change concurrently, because single 250 lls->sub_is_enqueued = 1;
654 * thread (one that released the last hold) carries unlocking
655 * to the completion.
656 */
657 LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
658 lls = &lck->lls_sub[i];
659 sub = lls->sub_lock;
660 if (!sub)
661 continue;
662
663 sublock = sub->lss_cl.cls_lock;
664 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
665 if (rc == 0) {
666 if (lls->sub_flags & LSF_HELD) {
667 LASSERT(sublock->cll_state == CLS_HELD ||
668 sublock->cll_state == CLS_ENQUEUED);
669 rc = cl_unuse_try(subenv->lse_env, sublock);
670 rc = lov_sublock_release(env, lck, i, 0, rc);
671 }
672 lov_sublock_unlock(env, sub, closure, subenv);
673 }
674 result = lov_subresult(result, rc);
675 } 251 }
676 252 return rc;
677 if (result == 0 && lck->lls_cancel_race) {
678 lck->lls_cancel_race = 0;
679 result = -ESTALE;
680 }
681 cl_lock_closure_fini(closure);
682 return result;
683} 253}
684 254
685static void lov_lock_cancel(const struct lu_env *env, 255static void lov_lock_cancel(const struct lu_env *env,
686 const struct cl_lock_slice *slice) 256 const struct cl_lock_slice *slice)
687{ 257{
688 struct lov_lock *lck = cl2lov_lock(slice); 258 struct cl_lock *lock = slice->cls_lock;
689 struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock); 259 struct lov_lock *lovlck = cl2lov_lock(slice);
690 int i; 260 int i;
691 int result;
692 261
693 for (result = 0, i = 0; i < lck->lls_nr; ++i) { 262 for (i = 0; i < lovlck->lls_nr; ++i) {
694 int rc; 263 struct lov_lock_sub *lls = &lovlck->lls_sub[i];
695 struct lovsub_lock *sub; 264 struct cl_lock *sublock = &lls->sub_lock;
696 struct cl_lock *sublock;
697 struct lov_lock_sub *lls;
698 struct lov_sublock_env *subenv; 265 struct lov_sublock_env *subenv;
699 266
700 /* top-lock state cannot change concurrently, because single 267 if (!lls->sub_is_enqueued)
701 * thread (one that released the last hold) carries unlocking
702 * to the completion.
703 */
704 lls = &lck->lls_sub[i];
705 sub = lls->sub_lock;
706 if (!sub)
707 continue;
708
709 sublock = sub->lss_cl.cls_lock;
710 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
711 if (rc == 0) {
712 if (!(lls->sub_flags & LSF_HELD)) {
713 lov_sublock_unlock(env, sub, closure, subenv);
714 continue;
715 }
716
717 switch (sublock->cll_state) {
718 case CLS_HELD:
719 rc = cl_unuse_try(subenv->lse_env, sublock);
720 lov_sublock_release(env, lck, i, 0, 0);
721 break;
722 default:
723 lov_sublock_release(env, lck, i, 1, 0);
724 break;
725 }
726 lov_sublock_unlock(env, sub, closure, subenv);
727 }
728
729 if (rc == CLO_REPEAT) {
730 --i;
731 continue;
732 }
733
734 result = lov_subresult(result, rc);
735 }
736
737 if (result)
738 CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
739 "lov_lock_cancel fails with %d.\n", result);
740
741 cl_lock_closure_fini(closure);
742}
743
744static int lov_lock_wait(const struct lu_env *env,
745 const struct cl_lock_slice *slice)
746{
747 struct lov_lock *lck = cl2lov_lock(slice);
748 struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
749 enum cl_lock_state minstate;
750 int reenqueued;
751 int result;
752 int i;
753
754again:
755 for (result = 0, minstate = CLS_FREEING, i = 0, reenqueued = 0;
756 i < lck->lls_nr; ++i) {
757 int rc;
758 struct lovsub_lock *sub;
759 struct cl_lock *sublock;
760 struct lov_lock_sub *lls;
761 struct lov_sublock_env *subenv;
762
763 lls = &lck->lls_sub[i];
764 sub = lls->sub_lock;
765 sublock = sub->lss_cl.cls_lock;
766 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
767 if (rc == 0) {
768 LASSERT(sublock->cll_state >= CLS_ENQUEUED);
769 if (sublock->cll_state < CLS_HELD)
770 rc = cl_wait_try(env, sublock);
771
772 minstate = min(minstate, sublock->cll_state);
773 lov_sublock_unlock(env, sub, closure, subenv);
774 }
775 if (rc == CLO_REENQUEUED) {
776 reenqueued++;
777 rc = 0;
778 }
779 result = lov_subresult(result, rc);
780 if (result != 0)
781 break;
782 }
783 /* Each sublock only can be reenqueued once, so will not loop
784 * forever.
785 */
786 if (result == 0 && reenqueued != 0)
787 goto again;
788 cl_lock_closure_fini(closure);
789 return result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT;
790}
791
792static int lov_lock_use(const struct lu_env *env,
793 const struct cl_lock_slice *slice)
794{
795 struct lov_lock *lck = cl2lov_lock(slice);
796 struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
797 int result;
798 int i;
799
800 LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
801
802 for (result = 0, i = 0; i < lck->lls_nr; ++i) {
803 int rc;
804 struct lovsub_lock *sub;
805 struct cl_lock *sublock;
806 struct lov_lock_sub *lls;
807 struct lov_sublock_env *subenv;
808
809 LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
810
811 lls = &lck->lls_sub[i];
812 sub = lls->sub_lock;
813 if (!sub) {
814 /*
815 * Sub-lock might have been canceled, while top-lock was
816 * cached.
817 */
818 result = -ESTALE;
819 break;
820 }
821
822 sublock = sub->lss_cl.cls_lock;
823 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
824 if (rc == 0) {
825 LASSERT(sublock->cll_state != CLS_FREEING);
826 lov_sublock_hold(env, lck, i);
827 if (sublock->cll_state == CLS_CACHED) {
828 rc = cl_use_try(subenv->lse_env, sublock, 0);
829 if (rc != 0)
830 rc = lov_sublock_release(env, lck,
831 i, 1, rc);
832 } else if (sublock->cll_state == CLS_NEW) {
833 /* Sub-lock might have been canceled, while
834 * top-lock was cached.
835 */
836 result = -ESTALE;
837 lov_sublock_release(env, lck, i, 1, result);
838 }
839 lov_sublock_unlock(env, sub, closure, subenv);
840 }
841 result = lov_subresult(result, rc);
842 if (result != 0)
843 break;
844 }
845
846 if (lck->lls_cancel_race) {
847 /*
848 * If there is unlocking happened at the same time, then
849 * sublock_lock state should be FREEING, and lov_sublock_lock
850 * should return CLO_REPEAT. In this case, it should return
851 * ESTALE, and up layer should reset the lock state to be NEW.
852 */
853 lck->lls_cancel_race = 0;
854 LASSERT(result != 0);
855 result = -ESTALE;
856 }
857 cl_lock_closure_fini(closure);
858 return result;
859}
860
861/**
862 * Check if the extent region \a descr is covered by \a child against the
863 * specific \a stripe.
864 */
865static int lov_lock_stripe_is_matching(const struct lu_env *env,
866 struct lov_object *lov, int stripe,
867 const struct cl_lock_descr *child,
868 const struct cl_lock_descr *descr)
869{
870 struct lov_stripe_md *lsm = lov->lo_lsm;
871 u64 start;
872 u64 end;
873 int result;
874
875 if (lov_r0(lov)->lo_nr == 1)
876 return cl_lock_ext_match(child, descr);
877
878 /*
879 * For a multi-stripes object:
880 * - make sure the descr only covers child's stripe, and
881 * - check if extent is matching.
882 */
883 start = cl_offset(&lov->lo_cl, descr->cld_start);
884 end = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
885 result = 0;
886 /* glimpse should work on the object with LOV EA hole. */
887 if (end - start <= lsm->lsm_stripe_size) {
888 int idx;
889
890 idx = lov_stripe_number(lsm, start);
891 if (idx == stripe ||
892 unlikely(!lov_r0(lov)->lo_sub[idx])) {
893 idx = lov_stripe_number(lsm, end);
894 if (idx == stripe ||
895 unlikely(!lov_r0(lov)->lo_sub[idx]))
896 result = 1;
897 }
898 }
899
900 if (result != 0) {
901 struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
902 u64 sub_start;
903 u64 sub_end;
904
905 subd->cld_obj = NULL; /* don't need sub object at all */
906 subd->cld_mode = descr->cld_mode;
907 subd->cld_gid = descr->cld_gid;
908 result = lov_stripe_intersects(lsm, stripe, start, end,
909 &sub_start, &sub_end);
910 LASSERT(result);
911 subd->cld_start = cl_index(child->cld_obj, sub_start);
912 subd->cld_end = cl_index(child->cld_obj, sub_end);
913 result = cl_lock_ext_match(child, subd);
914 }
915 return result;
916}
917
918/**
919 * An implementation of cl_lock_operations::clo_fits_into() method.
920 *
921 * Checks whether a lock (given by \a slice) is suitable for \a
922 * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
923 * O_APPEND write.
924 *
925 * \see ccc_lock_fits_into().
926 */
927static int lov_lock_fits_into(const struct lu_env *env,
928 const struct cl_lock_slice *slice,
929 const struct cl_lock_descr *need,
930 const struct cl_io *io)
931{
932 struct lov_lock *lov = cl2lov_lock(slice);
933 struct lov_object *obj = cl2lov(slice->cls_obj);
934 int result;
935
936 LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
937 LASSERT(lov->lls_nr > 0);
938
939 /* for top lock, it's necessary to match enq flags otherwise it will
940 * run into problem if a sublock is missing and reenqueue.
941 */
942 if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
943 return 0;
944
945 if (need->cld_mode == CLM_GROUP)
946 /*
947 * always allow to match group lock.
948 */
949 result = cl_lock_ext_match(&lov->lls_orig, need);
950 else if (lov->lls_nr == 1) {
951 struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
952
953 result = lov_lock_stripe_is_matching(env,
954 cl2lov(slice->cls_obj),
955 lov->lls_sub[0].sub_stripe,
956 got, need);
957 } else if (io->ci_type != CIT_SETATTR && io->ci_type != CIT_MISC &&
958 !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
959 /*
960 * Multi-stripe locks are only suitable for `quick' IO and for
961 * glimpse.
962 */
963 result = 0;
964 else
965 /*
966 * Most general case: multi-stripe existing lock, and
967 * (potentially) multi-stripe @need lock. Check that @need is
968 * covered by @lov's sub-locks.
969 *
970 * For now, ignore lock expansions made by the server, and
971 * match against original lock extent.
972 */
973 result = cl_lock_ext_match(&lov->lls_orig, need);
974 CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %d %d/%d: %d\n",
975 PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
976 lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
977 result);
978 return result;
979}
980
981void lov_lock_unlink(const struct lu_env *env,
982 struct lov_lock_link *link, struct lovsub_lock *sub)
983{
984 struct lov_lock *lck = link->lll_super;
985 struct cl_lock *parent = lck->lls_cl.cls_lock;
986
987 LASSERT(cl_lock_is_mutexed(parent));
988 LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
989
990 list_del_init(&link->lll_list);
991 LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
992 /* yank this sub-lock from parent's array */
993 lck->lls_sub[link->lll_idx].sub_lock = NULL;
994 LASSERT(lck->lls_nr_filled > 0);
995 lck->lls_nr_filled--;
996 lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
997 cl_lock_put(env, parent);
998 kmem_cache_free(lov_lock_link_kmem, link);
999}
1000
1001struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
1002 struct lov_lock *lck,
1003 struct lovsub_lock *sub)
1004{
1005 struct lov_lock_link *scan;
1006
1007 LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
1008
1009 list_for_each_entry(scan, &sub->lss_parents, lll_list) {
1010 if (scan->lll_super == lck)
1011 return scan;
1012 }
1013 return NULL;
1014}
1015
1016/**
1017 * An implementation of cl_lock_operations::clo_delete() method. This is
1018 * invoked for "top-to-bottom" delete, when lock destruction starts from the
1019 * top-lock, e.g., as a result of inode destruction.
1020 *
1021 * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
1022 * this is done separately elsewhere:
1023 *
1024 * - for inode destruction, lov_object_delete() calls cl_object_kill() for
1025 * each sub-object, purging its locks;
1026 *
1027 * - in other cases (e.g., a fatal error with a top-lock) sub-locks are
1028 * left in the cache.
1029 */
1030static void lov_lock_delete(const struct lu_env *env,
1031 const struct cl_lock_slice *slice)
1032{
1033 struct lov_lock *lck = cl2lov_lock(slice);
1034 struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
1035 struct lov_lock_link *link;
1036 int rc;
1037 int i;
1038
1039 LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
1040
1041 for (i = 0; i < lck->lls_nr; ++i) {
1042 struct lov_lock_sub *lls = &lck->lls_sub[i];
1043 struct lovsub_lock *lsl = lls->sub_lock;
1044
1045 if (!lsl) /* already removed */
1046 continue; 268 continue;
1047 269
1048 rc = lov_sublock_lock(env, lck, lls, closure, NULL); 270 lls->sub_is_enqueued = 0;
1049 if (rc == CLO_REPEAT) { 271 subenv = lov_sublock_env_get(env, lock, lls);
1050 --i; 272 if (!IS_ERR(subenv)) {
1051 continue; 273 cl_lock_cancel(subenv->lse_env, sublock);
274 lov_sublock_env_put(subenv);
275 } else {
276 CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
277 "lov_lock_cancel fails with %ld.\n",
278 PTR_ERR(subenv));
1052 } 279 }
1053
1054 LASSERT(rc == 0);
1055 LASSERT(lsl->lss_cl.cls_lock->cll_state < CLS_FREEING);
1056
1057 if (lls->sub_flags & LSF_HELD)
1058 lov_sublock_release(env, lck, i, 1, 0);
1059
1060 link = lov_lock_link_find(env, lck, lsl);
1061 LASSERT(link);
1062 lov_lock_unlink(env, link, lsl);
1063 LASSERT(!lck->lls_sub[i].sub_lock);
1064
1065 lov_sublock_unlock(env, lsl, closure, NULL);
1066 } 280 }
1067
1068 cl_lock_closure_fini(closure);
1069} 281}
1070 282
1071static int lov_lock_print(const struct lu_env *env, void *cookie, 283static int lov_lock_print(const struct lu_env *env, void *cookie,
@@ -1079,12 +291,8 @@ static int lov_lock_print(const struct lu_env *env, void *cookie,
1079 struct lov_lock_sub *sub; 291 struct lov_lock_sub *sub;
1080 292
1081 sub = &lck->lls_sub[i]; 293 sub = &lck->lls_sub[i];
1082 (*p)(env, cookie, " %d %x: ", i, sub->sub_flags); 294 (*p)(env, cookie, " %d %x: ", i, sub->sub_is_enqueued);
1083 if (sub->sub_lock) 295 cl_lock_print(env, cookie, p, &sub->sub_lock);
1084 cl_lock_print(env, cookie, p,
1085 sub->sub_lock->lss_cl.cls_lock);
1086 else
1087 (*p)(env, cookie, "---\n");
1088 } 296 }
1089 return 0; 297 return 0;
1090} 298}
@@ -1092,12 +300,7 @@ static int lov_lock_print(const struct lu_env *env, void *cookie,
1092static const struct cl_lock_operations lov_lock_ops = { 300static const struct cl_lock_operations lov_lock_ops = {
1093 .clo_fini = lov_lock_fini, 301 .clo_fini = lov_lock_fini,
1094 .clo_enqueue = lov_lock_enqueue, 302 .clo_enqueue = lov_lock_enqueue,
1095 .clo_wait = lov_lock_wait,
1096 .clo_use = lov_lock_use,
1097 .clo_unuse = lov_lock_unuse,
1098 .clo_cancel = lov_lock_cancel, 303 .clo_cancel = lov_lock_cancel,
1099 .clo_fits_into = lov_lock_fits_into,
1100 .clo_delete = lov_lock_delete,
1101 .clo_print = lov_lock_print 304 .clo_print = lov_lock_print
1102}; 305};
1103 306
@@ -1105,14 +308,13 @@ int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
1105 struct cl_lock *lock, const struct cl_io *io) 308 struct cl_lock *lock, const struct cl_io *io)
1106{ 309{
1107 struct lov_lock *lck; 310 struct lov_lock *lck;
1108 int result; 311 int result = 0;
1109 312
1110 lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS); 313 lck = lov_lock_sub_init(env, obj, lock);
1111 if (lck) { 314 if (!IS_ERR(lck))
1112 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops); 315 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
1113 result = lov_lock_sub_init(env, lck, io); 316 else
1114 } else 317 result = PTR_ERR(lck);
1115 result = -ENOMEM;
1116 return result; 318 return result;
1117} 319}
1118 320
@@ -1147,21 +349,9 @@ int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
1147 lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS); 349 lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS);
1148 if (lck) { 350 if (lck) {
1149 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops); 351 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
1150 lck->lls_orig = lock->cll_descr;
1151 result = 0; 352 result = 0;
1152 } 353 }
1153 return result; 354 return result;
1154} 355}
1155 356
1156static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
1157 struct cl_lock *parent)
1158{
1159 struct cl_lock_closure *closure;
1160
1161 closure = &lov_env_info(env)->lti_closure;
1162 LASSERT(list_empty(&closure->clc_list));
1163 cl_lock_closure_init(env, closure, parent, 1);
1164 return closure;
1165}
1166
1167/** @} lov */ 357/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 029cd4d62796..56ef41d17ad7 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -154,6 +154,7 @@ void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
154 valid &= src->o_valid; 154 valid &= src->o_valid;
155 155
156 if (*set) { 156 if (*set) {
157 tgt->o_valid &= valid;
157 if (valid & OBD_MD_FLSIZE) { 158 if (valid & OBD_MD_FLSIZE) {
158 /* this handles sparse files properly */ 159 /* this handles sparse files properly */
159 u64 lov_size; 160 u64 lov_size;
@@ -172,12 +173,22 @@ void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
172 tgt->o_mtime = src->o_mtime; 173 tgt->o_mtime = src->o_mtime;
173 if (valid & OBD_MD_FLDATAVERSION) 174 if (valid & OBD_MD_FLDATAVERSION)
174 tgt->o_data_version += src->o_data_version; 175 tgt->o_data_version += src->o_data_version;
176
177 /* handle flags */
178 if (valid & OBD_MD_FLFLAGS)
179 tgt->o_flags &= src->o_flags;
180 else
181 tgt->o_flags = 0;
175 } else { 182 } else {
176 memcpy(tgt, src, sizeof(*tgt)); 183 memcpy(tgt, src, sizeof(*tgt));
177 tgt->o_oi = lsm->lsm_oi; 184 tgt->o_oi = lsm->lsm_oi;
185 tgt->o_valid = valid;
178 if (valid & OBD_MD_FLSIZE) 186 if (valid & OBD_MD_FLSIZE)
179 tgt->o_size = lov_stripe_size(lsm, src->o_size, 187 tgt->o_size = lov_stripe_size(lsm, src->o_size,
180 stripeno); 188 stripeno);
189 tgt->o_flags = 0;
190 if (valid & OBD_MD_FLFLAGS)
191 tgt->o_flags = src->o_flags;
181 } 192 }
182 193
183 /* data_version needs to be valid on all stripes to be correct! */ 194 /* data_version needs to be valid on all stripes to be correct! */
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 5daa7faf4dda..e15ef2ece893 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -54,7 +54,6 @@
54#include "../include/lprocfs_status.h" 54#include "../include/lprocfs_status.h"
55#include "../include/lustre_param.h" 55#include "../include/lustre_param.h"
56#include "../include/cl_object.h" 56#include "../include/cl_object.h"
57#include "../include/lclient.h" /* for cl_client_lru */
58#include "../include/lustre/ll_fiemap.h" 57#include "../include/lustre/ll_fiemap.h"
59#include "../include/lustre_fid.h" 58#include "../include/lustre_fid.h"
60 59
@@ -124,7 +123,6 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
124static int lov_notify(struct obd_device *obd, struct obd_device *watched, 123static int lov_notify(struct obd_device *obd, struct obd_device *watched,
125 enum obd_notify_event ev, void *data); 124 enum obd_notify_event ev, void *data);
126 125
127#define MAX_STRING_SIZE 128
128int lov_connect_obd(struct obd_device *obd, __u32 index, int activate, 126int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
129 struct obd_connect_data *data) 127 struct obd_connect_data *data)
130{ 128{
@@ -965,7 +963,6 @@ int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
965 CERROR("Unknown command: %d\n", lcfg->lcfg_command); 963 CERROR("Unknown command: %d\n", lcfg->lcfg_command);
966 rc = -EINVAL; 964 rc = -EINVAL;
967 goto out; 965 goto out;
968
969 } 966 }
970 } 967 }
971out: 968out:
@@ -1734,6 +1731,27 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
1734 unsigned int buffer_size = FIEMAP_BUFFER_SIZE; 1731 unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
1735 1732
1736 if (!lsm_has_objects(lsm)) { 1733 if (!lsm_has_objects(lsm)) {
1734 if (lsm && lsm_is_released(lsm) && (fm_key->fiemap.fm_start <
1735 fm_key->oa.o_size)) {
1736 /*
1737 * released file, return a minimal FIEMAP if
1738 * request fits in file-size.
1739 */
1740 fiemap->fm_mapped_extents = 1;
1741 fiemap->fm_extents[0].fe_logical =
1742 fm_key->fiemap.fm_start;
1743 if (fm_key->fiemap.fm_start + fm_key->fiemap.fm_length <
1744 fm_key->oa.o_size) {
1745 fiemap->fm_extents[0].fe_length =
1746 fm_key->fiemap.fm_length;
1747 } else {
1748 fiemap->fm_extents[0].fe_length =
1749 fm_key->oa.o_size - fm_key->fiemap.fm_start;
1750 fiemap->fm_extents[0].fe_flags |=
1751 (FIEMAP_EXTENT_UNKNOWN |
1752 FIEMAP_EXTENT_LAST);
1753 }
1754 }
1737 rc = 0; 1755 rc = 0;
1738 goto out; 1756 goto out;
1739 } 1757 }
@@ -2173,7 +2191,6 @@ void lov_stripe_lock(struct lov_stripe_md *md)
2173 LASSERT(md->lsm_lock_owner == 0); 2191 LASSERT(md->lsm_lock_owner == 0);
2174 md->lsm_lock_owner = current_pid(); 2192 md->lsm_lock_owner = current_pid();
2175} 2193}
2176EXPORT_SYMBOL(lov_stripe_lock);
2177 2194
2178void lov_stripe_unlock(struct lov_stripe_md *md) 2195void lov_stripe_unlock(struct lov_stripe_md *md)
2179 __releases(&md->lsm_lock) 2196 __releases(&md->lsm_lock)
@@ -2182,7 +2199,6 @@ void lov_stripe_unlock(struct lov_stripe_md *md)
2182 md->lsm_lock_owner = 0; 2199 md->lsm_lock_owner = 0;
2183 spin_unlock(&md->lsm_lock); 2200 spin_unlock(&md->lsm_lock);
2184} 2201}
2185EXPORT_SYMBOL(lov_stripe_unlock);
2186 2202
2187static int lov_quotactl(struct obd_device *obd, struct obd_export *exp, 2203static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
2188 struct obd_quotactl *oqctl) 2204 struct obd_quotactl *oqctl)
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 1f8ed95a6d89..561d493b2cdf 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -67,7 +67,7 @@ struct lov_layout_operations {
67 int (*llo_print)(const struct lu_env *env, void *cookie, 67 int (*llo_print)(const struct lu_env *env, void *cookie,
68 lu_printer_t p, const struct lu_object *o); 68 lu_printer_t p, const struct lu_object *o);
69 int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj, 69 int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
70 struct cl_page *page, struct page *vmpage); 70 struct cl_page *page, pgoff_t index);
71 int (*llo_lock_init)(const struct lu_env *env, 71 int (*llo_lock_init)(const struct lu_env *env,
72 struct cl_object *obj, struct cl_lock *lock, 72 struct cl_object *obj, struct cl_lock *lock,
73 const struct cl_io *io); 73 const struct cl_io *io);
@@ -193,6 +193,18 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
193 return result; 193 return result;
194} 194}
195 195
196static int lov_page_slice_fixup(struct lov_object *lov,
197 struct cl_object *stripe)
198{
199 struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
200 struct cl_object *o;
201
202 cl_object_for_each(o, stripe)
203 o->co_slice_off += hdr->coh_page_bufsize;
204
205 return cl_object_header(stripe)->coh_page_bufsize;
206}
207
196static int lov_init_raid0(const struct lu_env *env, 208static int lov_init_raid0(const struct lu_env *env,
197 struct lov_device *dev, struct lov_object *lov, 209 struct lov_device *dev, struct lov_object *lov,
198 const struct cl_object_conf *conf, 210 const struct cl_object_conf *conf,
@@ -222,6 +234,8 @@ static int lov_init_raid0(const struct lu_env *env,
222 r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]), 234 r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]),
223 GFP_NOFS); 235 GFP_NOFS);
224 if (r0->lo_sub) { 236 if (r0->lo_sub) {
237 int psz = 0;
238
225 result = 0; 239 result = 0;
226 subconf->coc_inode = conf->coc_inode; 240 subconf->coc_inode = conf->coc_inode;
227 spin_lock_init(&r0->lo_sub_lock); 241 spin_lock_init(&r0->lo_sub_lock);
@@ -254,13 +268,24 @@ static int lov_init_raid0(const struct lu_env *env,
254 if (result == -EAGAIN) { /* try again */ 268 if (result == -EAGAIN) { /* try again */
255 --i; 269 --i;
256 result = 0; 270 result = 0;
271 continue;
257 } 272 }
258 } else { 273 } else {
259 result = PTR_ERR(stripe); 274 result = PTR_ERR(stripe);
260 } 275 }
276
277 if (result == 0) {
278 int sz = lov_page_slice_fixup(lov, stripe);
279
280 LASSERT(ergo(psz > 0, psz == sz));
281 psz = sz;
282 }
261 } 283 }
262 } else 284 if (result == 0)
285 cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
286 } else {
263 result = -ENOMEM; 287 result = -ENOMEM;
288 }
264out: 289out:
265 return result; 290 return result;
266} 291}
@@ -286,8 +311,6 @@ static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
286 LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED); 311 LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
287 312
288 lov_layout_wait(env, lov); 313 lov_layout_wait(env, lov);
289
290 cl_object_prune(env, &lov->lo_cl);
291 return 0; 314 return 0;
292} 315}
293 316
@@ -355,7 +378,7 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
355 struct lovsub_object *los = r0->lo_sub[i]; 378 struct lovsub_object *los = r0->lo_sub[i];
356 379
357 if (los) { 380 if (los) {
358 cl_locks_prune(env, &los->lso_cl, 1); 381 cl_object_prune(env, &los->lso_cl);
359 /* 382 /*
360 * If top-level object is to be evicted from 383 * If top-level object is to be evicted from
361 * the cache, so are its sub-objects. 384 * the cache, so are its sub-objects.
@@ -364,7 +387,6 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
364 } 387 }
365 } 388 }
366 } 389 }
367 cl_object_prune(env, &lov->lo_cl);
368 return 0; 390 return 0;
369} 391}
370 392
@@ -666,7 +688,6 @@ static int lov_layout_change(const struct lu_env *unused,
666 const struct lov_layout_operations *old_ops; 688 const struct lov_layout_operations *old_ops;
667 const struct lov_layout_operations *new_ops; 689 const struct lov_layout_operations *new_ops;
668 690
669 struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
670 void *cookie; 691 void *cookie;
671 struct lu_env *env; 692 struct lu_env *env;
672 int refcheck; 693 int refcheck;
@@ -691,13 +712,15 @@ static int lov_layout_change(const struct lu_env *unused,
691 old_ops = &lov_dispatch[lov->lo_type]; 712 old_ops = &lov_dispatch[lov->lo_type];
692 new_ops = &lov_dispatch[llt]; 713 new_ops = &lov_dispatch[llt];
693 714
715 result = cl_object_prune(env, &lov->lo_cl);
716 if (result != 0)
717 goto out;
718
694 result = old_ops->llo_delete(env, lov, &lov->u); 719 result = old_ops->llo_delete(env, lov, &lov->u);
695 if (result == 0) { 720 if (result == 0) {
696 old_ops->llo_fini(env, lov, &lov->u); 721 old_ops->llo_fini(env, lov, &lov->u);
697 722
698 LASSERT(atomic_read(&lov->lo_active_ios) == 0); 723 LASSERT(atomic_read(&lov->lo_active_ios) == 0);
699 LASSERT(!hdr->coh_tree.rnode);
700 LASSERT(hdr->coh_pages == 0);
701 724
702 lov->lo_type = LLT_EMPTY; 725 lov->lo_type = LLT_EMPTY;
703 result = new_ops->llo_init(env, 726 result = new_ops->llo_init(env,
@@ -713,6 +736,7 @@ static int lov_layout_change(const struct lu_env *unused,
713 } 736 }
714 } 737 }
715 738
739out:
716 cl_env_put(env, &refcheck); 740 cl_env_put(env, &refcheck);
717 cl_env_reexit(cookie); 741 cl_env_reexit(cookie);
718 return result; 742 return result;
@@ -793,7 +817,8 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
793 goto out; 817 goto out;
794 } 818 }
795 819
796 lov->lo_layout_invalid = lov_layout_change(env, lov, conf); 820 result = lov_layout_change(env, lov, conf);
821 lov->lo_layout_invalid = result != 0;
797 822
798out: 823out:
799 lov_conf_unlock(lov); 824 lov_conf_unlock(lov);
@@ -825,10 +850,10 @@ static int lov_object_print(const struct lu_env *env, void *cookie,
825} 850}
826 851
827int lov_page_init(const struct lu_env *env, struct cl_object *obj, 852int lov_page_init(const struct lu_env *env, struct cl_object *obj,
828 struct cl_page *page, struct page *vmpage) 853 struct cl_page *page, pgoff_t index)
829{ 854{
830 return LOV_2DISPATCH_NOLOCK(cl2lov(obj), 855 return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
831 llo_page_init, env, obj, page, vmpage); 856 index);
832} 857}
833 858
834/** 859/**
@@ -911,8 +936,9 @@ struct lu_object *lov_object_alloc(const struct lu_env *env,
911 * for object with different layouts. 936 * for object with different layouts.
912 */ 937 */
913 obj->lo_ops = &lov_lu_obj_ops; 938 obj->lo_ops = &lov_lu_obj_ops;
914 } else 939 } else {
915 obj = NULL; 940 obj = NULL;
941 }
916 return obj; 942 return obj;
917} 943}
918 944
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
index ae83eb0f6f36..9302f06c34ef 100644
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ b/drivers/staging/lustre/lustre/lov/lov_offset.c
@@ -66,6 +66,18 @@ u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno)
66 return lov_size; 66 return lov_size;
67} 67}
68 68
69/**
70 * Compute file level page index by stripe level page offset
71 */
72pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
73 int stripe)
74{
75 loff_t offset;
76
77 offset = lov_stripe_size(lsm, stripe_index << PAGE_SHIFT, stripe);
78 return offset >> PAGE_SHIFT;
79}
80
69/* we have an offset in file backed by an lov and want to find out where 81/* we have an offset in file backed by an lov and want to find out where
70 * that offset lands in our given stripe of the file. for the easy 82 * that offset lands in our given stripe of the file. for the easy
71 * case where the offset is within the stripe, we just have to scale the 83 * case where the offset is within the stripe, we just have to scale the
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 3925633a99ec..0215ea54df8d 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -136,7 +136,6 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
136 CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", 136 CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
137 lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); 137 lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
138 return -EINVAL; 138 return -EINVAL;
139
140 } 139 }
141 140
142 if (lsm) { 141 if (lsm) {
@@ -444,8 +443,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
444 if (lum.lmm_magic == LOV_USER_MAGIC) { 443 if (lum.lmm_magic == LOV_USER_MAGIC) {
445 /* User request for v1, we need skip lmm_pool_name */ 444 /* User request for v1, we need skip lmm_pool_name */
446 if (lmmk->lmm_magic == LOV_MAGIC_V3) { 445 if (lmmk->lmm_magic == LOV_MAGIC_V3) {
447 memmove((char *)(&lmmk->lmm_stripe_count) + 446 memmove(((struct lov_mds_md_v1 *)lmmk)->lmm_objects,
448 sizeof(lmmk->lmm_stripe_count),
449 ((struct lov_mds_md_v3 *)lmmk)->lmm_objects, 447 ((struct lov_mds_md_v3 *)lmmk)->lmm_objects,
450 lmmk->lmm_stripe_count * 448 lmmk->lmm_stripe_count *
451 sizeof(struct lov_ost_data_v1)); 449 sizeof(struct lov_ost_data_v1));
@@ -457,9 +455,9 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
457 } 455 }
458 456
459 /* User wasn't expecting this many OST entries */ 457 /* User wasn't expecting this many OST entries */
460 if (lum.lmm_stripe_count == 0) 458 if (lum.lmm_stripe_count == 0) {
461 lmm_size = lum_size; 459 lmm_size = lum_size;
462 else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) { 460 } else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
463 rc = -EOVERFLOW; 461 rc = -EOVERFLOW;
464 goto out_set; 462 goto out_set;
465 } 463 }
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index fdcaf8047ad8..0306f00c3f33 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -36,6 +36,7 @@
36 * Implementation of cl_page for LOV layer. 36 * Implementation of cl_page for LOV layer.
37 * 37 *
38 * Author: Nikita Danilov <nikita.danilov@sun.com> 38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
39 */ 40 */
40 41
41#define DEBUG_SUBSYSTEM S_LOV 42#define DEBUG_SUBSYSTEM S_LOV
@@ -52,116 +53,66 @@
52 * 53 *
53 */ 54 */
54 55
55static int lov_page_invariant(const struct cl_page_slice *slice) 56/**
57 * Adjust the stripe index by layout of raid0. @max_index is the maximum
58 * page index covered by an underlying DLM lock.
59 * This function converts max_index from stripe level to file level, and make
60 * sure it's not beyond one stripe.
61 */
62static int lov_raid0_page_is_under_lock(const struct lu_env *env,
63 const struct cl_page_slice *slice,
64 struct cl_io *unused,
65 pgoff_t *max_index)
56{ 66{
57 const struct cl_page *page = slice->cpl_page; 67 struct lov_object *loo = cl2lov(slice->cpl_obj);
58 const struct cl_page *sub = lov_sub_page(slice); 68 struct lov_layout_raid0 *r0 = lov_r0(loo);
59 69 pgoff_t index = *max_index;
60 return ergo(sub, 70 unsigned int pps; /* pages per stripe */
61 page->cp_child == sub &&
62 sub->cp_parent == page &&
63 page->cp_state == sub->cp_state);
64}
65 71
66static void lov_page_fini(const struct lu_env *env, 72 CDEBUG(D_READA, "*max_index = %lu, nr = %d\n", index, r0->lo_nr);
67 struct cl_page_slice *slice) 73 if (index == 0) /* the page is not covered by any lock */
68{ 74 return 0;
69 struct cl_page *sub = lov_sub_page(slice);
70 75
71 LINVRNT(lov_page_invariant(slice)); 76 if (r0->lo_nr == 1) /* single stripe file */
77 return 0;
72 78
73 if (sub) { 79 /* max_index is stripe level, convert it into file level */
74 LASSERT(sub->cp_state == CPS_FREEING); 80 if (index != CL_PAGE_EOF) {
75 lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent); 81 int stripeno = lov_page_stripe(slice->cpl_page);
76 sub->cp_parent = NULL; 82 *max_index = lov_stripe_pgoff(loo->lo_lsm, index, stripeno);
77 slice->cpl_page->cp_child = NULL;
78 cl_page_put(env, sub);
79 } 83 }
80}
81
82static int lov_page_own(const struct lu_env *env,
83 const struct cl_page_slice *slice, struct cl_io *io,
84 int nonblock)
85{
86 struct lov_io *lio = lov_env_io(env);
87 struct lov_io_sub *sub;
88 84
89 LINVRNT(lov_page_invariant(slice)); 85 /* calculate the end of current stripe */
90 LINVRNT(!cl2lov_page(slice)->lps_invalid); 86 pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
87 index = ((slice->cpl_index + pps) & ~(pps - 1)) - 1;
91 88
92 sub = lov_page_subio(env, lio, slice); 89 /* never exceed the end of the stripe */
93 if (!IS_ERR(sub)) { 90 *max_index = min_t(pgoff_t, *max_index, index);
94 lov_sub_page(slice)->cp_owner = sub->sub_io;
95 lov_sub_put(sub);
96 } else
97 LBUG(); /* Arrgh */
98 return 0; 91 return 0;
99} 92}
100 93
101static void lov_page_assume(const struct lu_env *env, 94static int lov_raid0_page_print(const struct lu_env *env,
102 const struct cl_page_slice *slice, struct cl_io *io) 95 const struct cl_page_slice *slice,
103{ 96 void *cookie, lu_printer_t printer)
104 lov_page_own(env, slice, io, 0);
105}
106
107static int lov_page_cache_add(const struct lu_env *env,
108 const struct cl_page_slice *slice,
109 struct cl_io *io)
110{
111 struct lov_io *lio = lov_env_io(env);
112 struct lov_io_sub *sub;
113 int rc = 0;
114
115 LINVRNT(lov_page_invariant(slice));
116 LINVRNT(!cl2lov_page(slice)->lps_invalid);
117
118 sub = lov_page_subio(env, lio, slice);
119 if (!IS_ERR(sub)) {
120 rc = cl_page_cache_add(sub->sub_env, sub->sub_io,
121 slice->cpl_page->cp_child, CRT_WRITE);
122 lov_sub_put(sub);
123 } else {
124 rc = PTR_ERR(sub);
125 CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page, "rc = %d\n", rc);
126 }
127 return rc;
128}
129
130static int lov_page_print(const struct lu_env *env,
131 const struct cl_page_slice *slice,
132 void *cookie, lu_printer_t printer)
133{ 97{
134 struct lov_page *lp = cl2lov_page(slice); 98 struct lov_page *lp = cl2lov_page(slice);
135 99
136 return (*printer)(env, cookie, LUSTRE_LOV_NAME"-page@%p\n", lp); 100 return (*printer)(env, cookie, LUSTRE_LOV_NAME "-page@%p, raid0\n", lp);
137} 101}
138 102
139static const struct cl_page_operations lov_page_ops = { 103static const struct cl_page_operations lov_raid0_page_ops = {
140 .cpo_fini = lov_page_fini, 104 .cpo_is_under_lock = lov_raid0_page_is_under_lock,
141 .cpo_own = lov_page_own, 105 .cpo_print = lov_raid0_page_print
142 .cpo_assume = lov_page_assume,
143 .io = {
144 [CRT_WRITE] = {
145 .cpo_cache_add = lov_page_cache_add
146 }
147 },
148 .cpo_print = lov_page_print
149}; 106};
150 107
151static void lov_empty_page_fini(const struct lu_env *env,
152 struct cl_page_slice *slice)
153{
154 LASSERT(!slice->cpl_page->cp_child);
155}
156
157int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, 108int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
158 struct cl_page *page, struct page *vmpage) 109 struct cl_page *page, pgoff_t index)
159{ 110{
160 struct lov_object *loo = cl2lov(obj); 111 struct lov_object *loo = cl2lov(obj);
161 struct lov_layout_raid0 *r0 = lov_r0(loo); 112 struct lov_layout_raid0 *r0 = lov_r0(loo);
162 struct lov_io *lio = lov_env_io(env); 113 struct lov_io *lio = lov_env_io(env);
163 struct cl_page *subpage;
164 struct cl_object *subobj; 114 struct cl_object *subobj;
115 struct cl_object *o;
165 struct lov_io_sub *sub; 116 struct lov_io_sub *sub;
166 struct lov_page *lpg = cl_object_page_slice(obj, page); 117 struct lov_page *lpg = cl_object_page_slice(obj, page);
167 loff_t offset; 118 loff_t offset;
@@ -169,59 +120,57 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
169 int stripe; 120 int stripe;
170 int rc; 121 int rc;
171 122
172 offset = cl_offset(obj, page->cp_index); 123 offset = cl_offset(obj, index);
173 stripe = lov_stripe_number(loo->lo_lsm, offset); 124 stripe = lov_stripe_number(loo->lo_lsm, offset);
174 LASSERT(stripe < r0->lo_nr); 125 LASSERT(stripe < r0->lo_nr);
175 rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff); 126 rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff);
176 LASSERT(rc == 0); 127 LASSERT(rc == 0);
177 128
178 lpg->lps_invalid = 1; 129 cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_raid0_page_ops);
179 cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);
180 130
181 sub = lov_sub_get(env, lio, stripe); 131 sub = lov_sub_get(env, lio, stripe);
182 if (IS_ERR(sub)) { 132 if (IS_ERR(sub))
183 rc = PTR_ERR(sub); 133 return PTR_ERR(sub);
184 goto out;
185 }
186 134
187 subobj = lovsub2cl(r0->lo_sub[stripe]); 135 subobj = lovsub2cl(r0->lo_sub[stripe]);
188 subpage = cl_page_find_sub(sub->sub_env, subobj, 136 list_for_each_entry(o, &subobj->co_lu.lo_header->loh_layers,
189 cl_index(subobj, suboff), vmpage, page); 137 co_lu.lo_linkage) {
190 lov_sub_put(sub); 138 if (o->co_ops->coo_page_init) {
191 if (IS_ERR(subpage)) { 139 rc = o->co_ops->coo_page_init(sub->sub_env, o, page,
192 rc = PTR_ERR(subpage); 140 cl_index(subobj, suboff));
193 goto out; 141 if (rc != 0)
194 } 142 break;
195 143 }
196 if (likely(subpage->cp_parent == page)) {
197 lu_ref_add(&subpage->cp_reference, "lov", page);
198 lpg->lps_invalid = 0;
199 rc = 0;
200 } else {
201 CL_PAGE_DEBUG(D_ERROR, env, page, "parent page\n");
202 CL_PAGE_DEBUG(D_ERROR, env, subpage, "child page\n");
203 LASSERT(0);
204 } 144 }
145 lov_sub_put(sub);
205 146
206out:
207 return rc; 147 return rc;
208} 148}
209 149
150static int lov_empty_page_print(const struct lu_env *env,
151 const struct cl_page_slice *slice,
152 void *cookie, lu_printer_t printer)
153{
154 struct lov_page *lp = cl2lov_page(slice);
155
156 return (*printer)(env, cookie, LUSTRE_LOV_NAME "-page@%p, empty.\n",
157 lp);
158}
159
210static const struct cl_page_operations lov_empty_page_ops = { 160static const struct cl_page_operations lov_empty_page_ops = {
211 .cpo_fini = lov_empty_page_fini, 161 .cpo_print = lov_empty_page_print
212 .cpo_print = lov_page_print
213}; 162};
214 163
215int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj, 164int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
216 struct cl_page *page, struct page *vmpage) 165 struct cl_page *page, pgoff_t index)
217{ 166{
218 struct lov_page *lpg = cl_object_page_slice(obj, page); 167 struct lov_page *lpg = cl_object_page_slice(obj, page);
219 void *addr; 168 void *addr;
220 169
221 cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops); 170 cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_empty_page_ops);
222 addr = kmap(vmpage); 171 addr = kmap(page->cp_vmpage);
223 memset(addr, 0, cl_page_size(obj)); 172 memset(addr, 0, cl_page_size(obj));
224 kunmap(vmpage); 173 kunmap(page->cp_vmpage);
225 cl_page_export(env, page, 1); 174 cl_page_export(env, page, 1);
226 return 0; 175 return 0;
227} 176}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index 9ae1d6f42d6e..690292ecebdc 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -65,7 +65,6 @@ void lov_pool_putref(struct pool_desc *pool)
65 LASSERT(hlist_unhashed(&pool->pool_hash)); 65 LASSERT(hlist_unhashed(&pool->pool_hash));
66 LASSERT(list_empty(&pool->pool_list)); 66 LASSERT(list_empty(&pool->pool_list));
67 LASSERT(!pool->pool_debugfs_entry); 67 LASSERT(!pool->pool_debugfs_entry);
68 lov_ost_pool_free(&(pool->pool_rr.lqr_pool));
69 lov_ost_pool_free(&(pool->pool_obds)); 68 lov_ost_pool_free(&(pool->pool_obds));
70 kfree(pool); 69 kfree(pool);
71 } 70 }
@@ -424,11 +423,6 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
424 if (rc) 423 if (rc)
425 goto out_err; 424 goto out_err;
426 425
427 memset(&(new_pool->pool_rr), 0, sizeof(struct lov_qos_rr));
428 rc = lov_ost_pool_init(&new_pool->pool_rr.lqr_pool, 0);
429 if (rc)
430 goto out_free_pool_obds;
431
432 INIT_HLIST_NODE(&new_pool->pool_hash); 426 INIT_HLIST_NODE(&new_pool->pool_hash);
433 427
434 /* get ref for debugfs file */ 428 /* get ref for debugfs file */
@@ -469,13 +463,10 @@ out_err:
469 list_del_init(&new_pool->pool_list); 463 list_del_init(&new_pool->pool_list);
470 lov->lov_pool_count--; 464 lov->lov_pool_count--;
471 spin_unlock(&obd->obd_dev_lock); 465 spin_unlock(&obd->obd_dev_lock);
472
473 ldebugfs_remove(&new_pool->pool_debugfs_entry); 466 ldebugfs_remove(&new_pool->pool_debugfs_entry);
474
475 lov_ost_pool_free(&new_pool->pool_rr.lqr_pool);
476out_free_pool_obds:
477 lov_ost_pool_free(&new_pool->pool_obds); 467 lov_ost_pool_free(&new_pool->pool_obds);
478 kfree(new_pool); 468 kfree(new_pool);
469
479 return rc; 470 return rc;
480} 471}
481 472
@@ -543,8 +534,6 @@ int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
543 if (rc) 534 if (rc)
544 goto out; 535 goto out;
545 536
546 pool->pool_rr.lqr_dirty = 1;
547
548 CDEBUG(D_CONFIG, "Added %s to "LOV_POOLNAMEF" as member %d\n", 537 CDEBUG(D_CONFIG, "Added %s to "LOV_POOLNAMEF" as member %d\n",
549 ostname, poolname, pool_tgt_count(pool)); 538 ostname, poolname, pool_tgt_count(pool));
550 539
@@ -589,8 +578,6 @@ int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
589 578
590 lov_ost_pool_remove(&pool->pool_obds, lov_idx); 579 lov_ost_pool_remove(&pool->pool_obds, lov_idx);
591 580
592 pool->pool_rr.lqr_dirty = 1;
593
594 CDEBUG(D_CONFIG, "%s removed from "LOV_POOLNAMEF"\n", ostname, 581 CDEBUG(D_CONFIG, "%s removed from "LOV_POOLNAMEF"\n", ostname,
595 poolname); 582 poolname);
596 583
@@ -599,50 +586,3 @@ out:
599 lov_pool_putref(pool); 586 lov_pool_putref(pool);
600 return rc; 587 return rc;
601} 588}
602
603int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool)
604{
605 int i, rc;
606
607 /* caller may no have a ref on pool if it got the pool
608 * without calling lov_find_pool() (e.g. go through the lov pool
609 * list)
610 */
611 lov_pool_getref(pool);
612
613 down_read(&pool_tgt_rw_sem(pool));
614
615 for (i = 0; i < pool_tgt_count(pool); i++) {
616 if (pool_tgt_array(pool)[i] == idx) {
617 rc = 0;
618 goto out;
619 }
620 }
621 rc = -ENOENT;
622out:
623 up_read(&pool_tgt_rw_sem(pool));
624
625 lov_pool_putref(pool);
626 return rc;
627}
628
629struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname)
630{
631 struct pool_desc *pool;
632
633 pool = NULL;
634 if (poolname[0] != '\0') {
635 pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
636 if (!pool)
637 CWARN("Request for an unknown pool ("LOV_POOLNAMEF")\n",
638 poolname);
639 if (pool && (pool_tgt_count(pool) == 0)) {
640 CWARN("Request for an empty pool ("LOV_POOLNAMEF")\n",
641 poolname);
642 /* pool is ignored, so we remove ref on it */
643 lov_pool_putref(pool);
644 pool = NULL;
645 }
646 }
647 return pool;
648}
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 7178a02d6267..1be4b921c01f 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -52,7 +52,6 @@ static void lov_init_set(struct lov_request_set *set)
52 INIT_LIST_HEAD(&set->set_list); 52 INIT_LIST_HEAD(&set->set_list);
53 atomic_set(&set->set_refcount, 1); 53 atomic_set(&set->set_refcount, 1);
54 init_waitqueue_head(&set->set_waitq); 54 init_waitqueue_head(&set->set_waitq);
55 spin_lock_init(&set->set_lock);
56} 55}
57 56
58void lov_finish_set(struct lov_request_set *set) 57void lov_finish_set(struct lov_request_set *set)
@@ -235,7 +234,6 @@ out:
235 if (tmp_oa) 234 if (tmp_oa)
236 kmem_cache_free(obdo_cachep, tmp_oa); 235 kmem_cache_free(obdo_cachep, tmp_oa);
237 return rc; 236 return rc;
238
239} 237}
240 238
241int lov_fini_getattr_set(struct lov_request_set *set) 239int lov_fini_getattr_set(struct lov_request_set *set)
@@ -363,7 +361,6 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
363 set->set_oi = oinfo; 361 set->set_oi = oinfo;
364 set->set_oi->oi_md = lsm; 362 set->set_oi->oi_md = lsm;
365 set->set_oi->oi_oa = src_oa; 363 set->set_oi->oi_oa = src_oa;
366 set->set_oti = oti;
367 if (oti && src_oa->o_valid & OBD_MD_FLCOOKIE) 364 if (oti && src_oa->o_valid & OBD_MD_FLCOOKIE)
368 set->set_cookies = oti->oti_logcookies; 365 set->set_cookies = oti->oti_logcookies;
369 366
@@ -480,7 +477,6 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
480 lov_init_set(set); 477 lov_init_set(set);
481 478
482 set->set_exp = exp; 479 set->set_exp = exp;
483 set->set_oti = oti;
484 set->set_oi = oinfo; 480 set->set_oi = oinfo;
485 if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) 481 if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
486 set->set_cookies = oti->oti_logcookies; 482 set->set_cookies = oti->oti_logcookies;
@@ -716,12 +712,15 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
716 struct lov_request *req; 712 struct lov_request *req;
717 713
718 if (!lov->lov_tgts[i] || 714 if (!lov->lov_tgts[i] ||
719 (!lov_check_and_wait_active(lov, i) && 715 (oinfo->oi_flags & OBD_STATFS_NODELAY &&
720 (oinfo->oi_flags & OBD_STATFS_NODELAY))) { 716 !lov->lov_tgts[i]->ltd_active)) {
721 CDEBUG(D_HA, "lov idx %d inactive\n", i); 717 CDEBUG(D_HA, "lov idx %d inactive\n", i);
722 continue; 718 continue;
723 } 719 }
724 720
721 if (!lov->lov_tgts[i]->ltd_active)
722 lov_check_and_wait_active(lov, i);
723
725 /* skip targets that have been explicitly disabled by the 724 /* skip targets that have been explicitly disabled by the
726 * administrator 725 * administrator
727 */ 726 */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index c335c020f4f4..35f6b1d66ff4 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -151,8 +151,9 @@ static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
151 if (lsr) { 151 if (lsr) {
152 cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops); 152 cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
153 result = 0; 153 result = 0;
154 } else 154 } else {
155 result = -ENOMEM; 155 result = -ENOMEM;
156 }
156 return result; 157 return result;
157} 158}
158 159
@@ -182,10 +183,12 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
182 d = lovsub2lu_dev(lsd); 183 d = lovsub2lu_dev(lsd);
183 d->ld_ops = &lovsub_lu_ops; 184 d->ld_ops = &lovsub_lu_ops;
184 lsd->acid_cl.cd_ops = &lovsub_cl_ops; 185 lsd->acid_cl.cd_ops = &lovsub_cl_ops;
185 } else 186 } else {
186 d = ERR_PTR(result); 187 d = ERR_PTR(result);
187 } else 188 }
189 } else {
188 d = ERR_PTR(-ENOMEM); 190 d = ERR_PTR(-ENOMEM);
191 }
189 return d; 192 return d;
190} 193}
191 194
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
index 3bb0c9068a90..e92edfb618b7 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
@@ -62,391 +62,8 @@ static void lovsub_lock_fini(const struct lu_env *env,
62 kmem_cache_free(lovsub_lock_kmem, lsl); 62 kmem_cache_free(lovsub_lock_kmem, lsl);
63} 63}
64 64
65static void lovsub_parent_lock(const struct lu_env *env, struct lov_lock *lov)
66{
67 struct cl_lock *parent;
68
69 parent = lov->lls_cl.cls_lock;
70 cl_lock_get(parent);
71 lu_ref_add(&parent->cll_reference, "lovsub-parent", current);
72 cl_lock_mutex_get(env, parent);
73}
74
75static void lovsub_parent_unlock(const struct lu_env *env, struct lov_lock *lov)
76{
77 struct cl_lock *parent;
78
79 parent = lov->lls_cl.cls_lock;
80 cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
81 lu_ref_del(&parent->cll_reference, "lovsub-parent", current);
82 cl_lock_put(env, parent);
83}
84
85/**
86 * Implements cl_lock_operations::clo_state() method for lovsub layer, which
87 * method is called whenever sub-lock state changes. Propagates state change
88 * to the top-locks.
89 */
90static void lovsub_lock_state(const struct lu_env *env,
91 const struct cl_lock_slice *slice,
92 enum cl_lock_state state)
93{
94 struct lovsub_lock *sub = cl2lovsub_lock(slice);
95 struct lov_lock_link *scan;
96
97 LASSERT(cl_lock_is_mutexed(slice->cls_lock));
98
99 list_for_each_entry(scan, &sub->lss_parents, lll_list) {
100 struct lov_lock *lov = scan->lll_super;
101 struct cl_lock *parent = lov->lls_cl.cls_lock;
102
103 if (sub->lss_active != parent) {
104 lovsub_parent_lock(env, lov);
105 cl_lock_signal(env, parent);
106 lovsub_parent_unlock(env, lov);
107 }
108 }
109}
110
111/**
112 * Implementation of cl_lock_operation::clo_weigh() estimating lock weight by
113 * asking parent lock.
114 */
115static unsigned long lovsub_lock_weigh(const struct lu_env *env,
116 const struct cl_lock_slice *slice)
117{
118 struct lovsub_lock *lock = cl2lovsub_lock(slice);
119 struct lov_lock *lov;
120 unsigned long dumbbell;
121
122 LASSERT(cl_lock_is_mutexed(slice->cls_lock));
123
124 if (!list_empty(&lock->lss_parents)) {
125 /*
126 * It is not clear whether all parents have to be asked and
127 * their estimations summed, or it is enough to ask one. For
128 * the current usages, one is always enough.
129 */
130 lov = container_of(lock->lss_parents.next,
131 struct lov_lock_link, lll_list)->lll_super;
132
133 lovsub_parent_lock(env, lov);
134 dumbbell = cl_lock_weigh(env, lov->lls_cl.cls_lock);
135 lovsub_parent_unlock(env, lov);
136 } else
137 dumbbell = 0;
138
139 return dumbbell;
140}
141
142/**
143 * Maps start/end offsets within a stripe, to offsets within a file.
144 */
145static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
146 struct lov_object *lov,
147 int stripe, struct cl_lock_descr *out)
148{
149 pgoff_t size; /* stripe size in pages */
150 pgoff_t skip; /* how many pages in every stripe are occupied by
151 * "other" stripes
152 */
153 pgoff_t start;
154 pgoff_t end;
155
156 start = in->cld_start;
157 end = in->cld_end;
158
159 if (lov->lo_lsm->lsm_stripe_count > 1) {
160 size = cl_index(lov2cl(lov), lov->lo_lsm->lsm_stripe_size);
161 skip = (lov->lo_lsm->lsm_stripe_count - 1) * size;
162
163 /* XXX overflow check here? */
164 start += start/size * skip + stripe * size;
165
166 if (end != CL_PAGE_EOF) {
167 end += end/size * skip + stripe * size;
168 /*
169 * And check for overflow...
170 */
171 if (end < in->cld_end)
172 end = CL_PAGE_EOF;
173 }
174 }
175 out->cld_start = start;
176 out->cld_end = end;
177}
178
179/**
180 * Adjusts parent lock extent when a sub-lock is attached to a parent. This is
181 * called in two ways:
182 *
183 * - as part of receive call-back, when server returns granted extent to
184 * the client, and
185 *
186 * - when top-lock finds existing sub-lock in the cache.
187 *
188 * Note, that lock mode is not propagated to the parent: i.e., if CLM_READ
189 * top-lock matches CLM_WRITE sub-lock, top-lock is still CLM_READ.
190 */
191int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
192 struct lovsub_lock *sublock,
193 const struct cl_lock_descr *d, int idx)
194{
195 struct cl_lock *parent;
196 struct lovsub_object *subobj;
197 struct cl_lock_descr *pd;
198 struct cl_lock_descr *parent_descr;
199 int result;
200
201 parent = lov->lls_cl.cls_lock;
202 parent_descr = &parent->cll_descr;
203 LASSERT(cl_lock_mode_match(d->cld_mode, parent_descr->cld_mode));
204
205 subobj = cl2lovsub(sublock->lss_cl.cls_obj);
206 pd = &lov_env_info(env)->lti_ldescr;
207
208 pd->cld_obj = parent_descr->cld_obj;
209 pd->cld_mode = parent_descr->cld_mode;
210 pd->cld_gid = parent_descr->cld_gid;
211 lovsub_lock_descr_map(d, subobj->lso_super, subobj->lso_index, pd);
212 lov->lls_sub[idx].sub_got = *d;
213 /*
214 * Notify top-lock about modification, if lock description changes
215 * materially.
216 */
217 if (!cl_lock_ext_match(parent_descr, pd))
218 result = cl_lock_modify(env, parent, pd);
219 else
220 result = 0;
221 return result;
222}
223
224static int lovsub_lock_modify(const struct lu_env *env,
225 const struct cl_lock_slice *s,
226 const struct cl_lock_descr *d)
227{
228 struct lovsub_lock *lock = cl2lovsub_lock(s);
229 struct lov_lock_link *scan;
230 struct lov_lock *lov;
231 int result = 0;
232
233 LASSERT(cl_lock_mode_match(d->cld_mode,
234 s->cls_lock->cll_descr.cld_mode));
235 list_for_each_entry(scan, &lock->lss_parents, lll_list) {
236 int rc;
237
238 lov = scan->lll_super;
239 lovsub_parent_lock(env, lov);
240 rc = lov_sublock_modify(env, lov, lock, d, scan->lll_idx);
241 lovsub_parent_unlock(env, lov);
242 result = result ?: rc;
243 }
244 return result;
245}
246
247static int lovsub_lock_closure(const struct lu_env *env,
248 const struct cl_lock_slice *slice,
249 struct cl_lock_closure *closure)
250{
251 struct lovsub_lock *sub;
252 struct cl_lock *parent;
253 struct lov_lock_link *scan;
254 int result;
255
256 LASSERT(cl_lock_is_mutexed(slice->cls_lock));
257
258 sub = cl2lovsub_lock(slice);
259 result = 0;
260
261 list_for_each_entry(scan, &sub->lss_parents, lll_list) {
262 parent = scan->lll_super->lls_cl.cls_lock;
263 result = cl_lock_closure_build(env, parent, closure);
264 if (result != 0)
265 break;
266 }
267 return result;
268}
269
270/**
271 * A helper function for lovsub_lock_delete() that deals with a given parent
272 * top-lock.
273 */
274static int lovsub_lock_delete_one(const struct lu_env *env,
275 struct cl_lock *child, struct lov_lock *lov)
276{
277 struct cl_lock *parent;
278 int result;
279
280 parent = lov->lls_cl.cls_lock;
281 if (parent->cll_error)
282 return 0;
283
284 result = 0;
285 switch (parent->cll_state) {
286 case CLS_ENQUEUED:
287 /* See LU-1355 for the case that a glimpse lock is
288 * interrupted by signal
289 */
290 LASSERT(parent->cll_flags & CLF_CANCELLED);
291 break;
292 case CLS_QUEUING:
293 case CLS_FREEING:
294 cl_lock_signal(env, parent);
295 break;
296 case CLS_INTRANSIT:
297 /*
298 * Here lies a problem: a sub-lock is canceled while top-lock
299 * is being unlocked. Top-lock cannot be moved into CLS_NEW
300 * state, because unlocking has to succeed eventually by
301 * placing lock into CLS_CACHED (or failing it), see
302 * cl_unuse_try(). Nor can top-lock be left in CLS_CACHED
303 * state, because lov maintains an invariant that all
304 * sub-locks exist in CLS_CACHED (this allows cached top-lock
305 * to be reused immediately). Nor can we wait for top-lock
306 * state to change, because this can be synchronous to the
307 * current thread.
308 *
309 * We know for sure that lov_lock_unuse() will be called at
310 * least one more time to finish un-using, so leave a mark on
311 * the top-lock, that will be seen by the next call to
312 * lov_lock_unuse().
313 */
314 if (cl_lock_is_intransit(parent))
315 lov->lls_cancel_race = 1;
316 break;
317 case CLS_CACHED:
318 /*
319 * if a sub-lock is canceled move its top-lock into CLS_NEW
320 * state to preserve an invariant that a top-lock in
321 * CLS_CACHED is immediately ready for re-use (i.e., has all
322 * sub-locks), and so that next attempt to re-use the top-lock
323 * enqueues missing sub-lock.
324 */
325 cl_lock_state_set(env, parent, CLS_NEW);
326 /* fall through */
327 case CLS_NEW:
328 /*
329 * if last sub-lock is canceled, destroy the top-lock (which
330 * is now `empty') proactively.
331 */
332 if (lov->lls_nr_filled == 0) {
333 /* ... but unfortunately, this cannot be done easily,
334 * as cancellation of a top-lock might acquire mutices
335 * of its other sub-locks, violating lock ordering,
336 * see cl_lock_{cancel,delete}() preconditions.
337 *
338 * To work around this, the mutex of this sub-lock is
339 * released, top-lock is destroyed, and sub-lock mutex
340 * acquired again. The list of parents has to be
341 * re-scanned from the beginning after this.
342 *
343 * Only do this if no mutices other than on @child and
344 * @parent are held by the current thread.
345 *
346 * TODO: The lock modal here is too complex, because
347 * the lock may be canceled and deleted by voluntarily:
348 * cl_lock_request
349 * -> osc_lock_enqueue_wait
350 * -> osc_lock_cancel_wait
351 * -> cl_lock_delete
352 * -> lovsub_lock_delete
353 * -> cl_lock_cancel/delete
354 * -> ...
355 *
356 * The better choice is to spawn a kernel thread for
357 * this purpose. -jay
358 */
359 if (cl_lock_nr_mutexed(env) == 2) {
360 cl_lock_mutex_put(env, child);
361 cl_lock_cancel(env, parent);
362 cl_lock_delete(env, parent);
363 result = 1;
364 }
365 }
366 break;
367 case CLS_HELD:
368 CL_LOCK_DEBUG(D_ERROR, env, parent, "Delete CLS_HELD lock\n");
369 default:
370 CERROR("Impossible state: %d\n", parent->cll_state);
371 LBUG();
372 break;
373 }
374
375 return result;
376}
377
378/**
379 * An implementation of cl_lock_operations::clo_delete() method. This is
380 * invoked in "bottom-to-top" delete, when lock destruction starts from the
381 * sub-lock (e.g, as a result of ldlm lock LRU policy).
382 */
383static void lovsub_lock_delete(const struct lu_env *env,
384 const struct cl_lock_slice *slice)
385{
386 struct cl_lock *child = slice->cls_lock;
387 struct lovsub_lock *sub = cl2lovsub_lock(slice);
388 int restart;
389
390 LASSERT(cl_lock_is_mutexed(child));
391
392 /*
393 * Destruction of a sub-lock might take multiple iterations, because
394 * when the last sub-lock of a given top-lock is deleted, top-lock is
395 * canceled proactively, and this requires to release sub-lock
396 * mutex. Once sub-lock mutex has been released, list of its parents
397 * has to be re-scanned from the beginning.
398 */
399 do {
400 struct lov_lock *lov;
401 struct lov_lock_link *scan;
402 struct lov_lock_link *temp;
403 struct lov_lock_sub *subdata;
404
405 restart = 0;
406 list_for_each_entry_safe(scan, temp,
407 &sub->lss_parents, lll_list) {
408 lov = scan->lll_super;
409 subdata = &lov->lls_sub[scan->lll_idx];
410 lovsub_parent_lock(env, lov);
411 subdata->sub_got = subdata->sub_descr;
412 lov_lock_unlink(env, scan, sub);
413 restart = lovsub_lock_delete_one(env, child, lov);
414 lovsub_parent_unlock(env, lov);
415
416 if (restart) {
417 cl_lock_mutex_get(env, child);
418 break;
419 }
420 }
421 } while (restart);
422}
423
424static int lovsub_lock_print(const struct lu_env *env, void *cookie,
425 lu_printer_t p, const struct cl_lock_slice *slice)
426{
427 struct lovsub_lock *sub = cl2lovsub_lock(slice);
428 struct lov_lock *lov;
429 struct lov_lock_link *scan;
430
431 list_for_each_entry(scan, &sub->lss_parents, lll_list) {
432 lov = scan->lll_super;
433 (*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
434 if (lov)
435 cl_lock_descr_print(env, cookie, p,
436 &lov->lls_cl.cls_lock->cll_descr);
437 (*p)(env, cookie, "] ");
438 }
439 return 0;
440}
441
442static const struct cl_lock_operations lovsub_lock_ops = { 65static const struct cl_lock_operations lovsub_lock_ops = {
443 .clo_fini = lovsub_lock_fini, 66 .clo_fini = lovsub_lock_fini,
444 .clo_state = lovsub_lock_state,
445 .clo_delete = lovsub_lock_delete,
446 .clo_modify = lovsub_lock_modify,
447 .clo_closure = lovsub_lock_closure,
448 .clo_weigh = lovsub_lock_weigh,
449 .clo_print = lovsub_lock_print
450}; 67};
451 68
452int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, 69int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
@@ -460,8 +77,9 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
460 INIT_LIST_HEAD(&lsk->lss_parents); 77 INIT_LIST_HEAD(&lsk->lss_parents);
461 cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops); 78 cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
462 result = 0; 79 result = 0;
463 } else 80 } else {
464 result = -ENOMEM; 81 result = -ENOMEM;
82 }
465 return result; 83 return result;
466} 84}
467 85
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index 6c5430d938d0..bcaae1e5b840 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -67,10 +67,10 @@ int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
67 lu_object_add(obj, below); 67 lu_object_add(obj, below);
68 cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page)); 68 cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page));
69 result = 0; 69 result = 0;
70 } else 70 } else {
71 result = -ENOMEM; 71 result = -ENOMEM;
72 }
72 return result; 73 return result;
73
74} 74}
75 75
76static void lovsub_object_free(const struct lu_env *env, struct lu_object *obj) 76static void lovsub_object_free(const struct lu_env *env, struct lu_object *obj)
@@ -154,8 +154,9 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env,
154 lu_object_add_top(&hdr->coh_lu, obj); 154 lu_object_add_top(&hdr->coh_lu, obj);
155 los->lso_cl.co_ops = &lovsub_ops; 155 los->lso_cl.co_ops = &lovsub_ops;
156 obj->lo_ops = &lovsub_lu_obj_ops; 156 obj->lo_ops = &lovsub_lu_obj_ops;
157 } else 157 } else {
158 obj = NULL; 158 obj = NULL;
159 }
159 return obj; 160 return obj;
160} 161}
161 162
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c
index 2d945532b78e..9badedcce2bf 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_page.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c
@@ -60,11 +60,11 @@ static const struct cl_page_operations lovsub_page_ops = {
60}; 60};
61 61
62int lovsub_page_init(const struct lu_env *env, struct cl_object *obj, 62int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
63 struct cl_page *page, struct page *unused) 63 struct cl_page *page, pgoff_t index)
64{ 64{
65 struct lovsub_page *lsb = cl_object_page_slice(obj, page); 65 struct lovsub_page *lsb = cl_object_page_slice(obj, page);
66 66
67 cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops); 67 cl_page_slice_add(page, &lsb->lsb_cl, obj, index, &lovsub_page_ops);
68 return 0; 68 return 0;
69} 69}
70 70
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index 38f267a60f59..5c7a15dd7bd2 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -49,9 +49,9 @@ static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
49 obd_kobj); 49 obd_kobj);
50 struct client_obd *cli = &dev->u.cli; 50 struct client_obd *cli = &dev->u.cli;
51 51
52 client_obd_list_lock(&cli->cl_loi_list_lock); 52 spin_lock(&cli->cl_loi_list_lock);
53 len = sprintf(buf, "%u\n", cli->cl_max_rpcs_in_flight); 53 len = sprintf(buf, "%u\n", cli->cl_max_rpcs_in_flight);
54 client_obd_list_unlock(&cli->cl_loi_list_lock); 54 spin_unlock(&cli->cl_loi_list_lock);
55 55
56 return len; 56 return len;
57} 57}
@@ -74,9 +74,9 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
74 if (val < 1 || val > MDC_MAX_RIF_MAX) 74 if (val < 1 || val > MDC_MAX_RIF_MAX)
75 return -ERANGE; 75 return -ERANGE;
76 76
77 client_obd_list_lock(&cli->cl_loi_list_lock); 77 spin_lock(&cli->cl_loi_list_lock);
78 cli->cl_max_rpcs_in_flight = val; 78 cli->cl_max_rpcs_in_flight = val;
79 client_obd_list_unlock(&cli->cl_loi_list_lock); 79 spin_unlock(&cli->cl_loi_list_lock);
80 80
81 return count; 81 return count;
82} 82}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index b3bfdcb73670..856c54e03b6b 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -279,8 +279,7 @@ static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
279 rec->sa_atime = LTIME_S(op_data->op_attr.ia_atime); 279 rec->sa_atime = LTIME_S(op_data->op_attr.ia_atime);
280 rec->sa_mtime = LTIME_S(op_data->op_attr.ia_mtime); 280 rec->sa_mtime = LTIME_S(op_data->op_attr.ia_mtime);
281 rec->sa_ctime = LTIME_S(op_data->op_attr.ia_ctime); 281 rec->sa_ctime = LTIME_S(op_data->op_attr.ia_ctime);
282 rec->sa_attr_flags = 282 rec->sa_attr_flags = op_data->op_attr_flags;
283 ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags;
284 if ((op_data->op_attr.ia_valid & ATTR_GID) && 283 if ((op_data->op_attr.ia_valid & ATTR_GID) &&
285 in_group_p(op_data->op_attr.ia_gid)) 284 in_group_p(op_data->op_attr.ia_gid))
286 rec->sa_suppgid = 285 rec->sa_suppgid =
@@ -439,7 +438,6 @@ void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
439 char *tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME); 438 char *tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
440 439
441 LOGL0(op_data->op_name, op_data->op_namelen, tmp); 440 LOGL0(op_data->op_name, op_data->op_namelen, tmp);
442
443 } 441 }
444} 442}
445 443
@@ -455,7 +453,7 @@ static void mdc_hsm_release_pack(struct ptlrpc_request *req,
455 lock = ldlm_handle2lock(&op_data->op_lease_handle); 453 lock = ldlm_handle2lock(&op_data->op_lease_handle);
456 if (lock) { 454 if (lock) {
457 data->cd_handle = lock->l_remote_handle; 455 data->cd_handle = lock->l_remote_handle;
458 ldlm_lock_put(lock); 456 LDLM_LOCK_PUT(lock);
459 } 457 }
460 ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL); 458 ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
461 459
@@ -481,9 +479,9 @@ static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
481{ 479{
482 int rc; 480 int rc;
483 481
484 client_obd_list_lock(&cli->cl_loi_list_lock); 482 spin_lock(&cli->cl_loi_list_lock);
485 rc = list_empty(&mcw->mcw_entry); 483 rc = list_empty(&mcw->mcw_entry);
486 client_obd_list_unlock(&cli->cl_loi_list_lock); 484 spin_unlock(&cli->cl_loi_list_lock);
487 return rc; 485 return rc;
488}; 486};
489 487
@@ -497,23 +495,23 @@ int mdc_enter_request(struct client_obd *cli)
497 struct mdc_cache_waiter mcw; 495 struct mdc_cache_waiter mcw;
498 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); 496 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
499 497
500 client_obd_list_lock(&cli->cl_loi_list_lock); 498 spin_lock(&cli->cl_loi_list_lock);
501 if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) { 499 if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
502 list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters); 500 list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
503 init_waitqueue_head(&mcw.mcw_waitq); 501 init_waitqueue_head(&mcw.mcw_waitq);
504 client_obd_list_unlock(&cli->cl_loi_list_lock); 502 spin_unlock(&cli->cl_loi_list_lock);
505 rc = l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw), 503 rc = l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw),
506 &lwi); 504 &lwi);
507 if (rc) { 505 if (rc) {
508 client_obd_list_lock(&cli->cl_loi_list_lock); 506 spin_lock(&cli->cl_loi_list_lock);
509 if (list_empty(&mcw.mcw_entry)) 507 if (list_empty(&mcw.mcw_entry))
510 cli->cl_r_in_flight--; 508 cli->cl_r_in_flight--;
511 list_del_init(&mcw.mcw_entry); 509 list_del_init(&mcw.mcw_entry);
512 client_obd_list_unlock(&cli->cl_loi_list_lock); 510 spin_unlock(&cli->cl_loi_list_lock);
513 } 511 }
514 } else { 512 } else {
515 cli->cl_r_in_flight++; 513 cli->cl_r_in_flight++;
516 client_obd_list_unlock(&cli->cl_loi_list_lock); 514 spin_unlock(&cli->cl_loi_list_lock);
517 } 515 }
518 return rc; 516 return rc;
519} 517}
@@ -523,7 +521,7 @@ void mdc_exit_request(struct client_obd *cli)
523 struct list_head *l, *tmp; 521 struct list_head *l, *tmp;
524 struct mdc_cache_waiter *mcw; 522 struct mdc_cache_waiter *mcw;
525 523
526 client_obd_list_lock(&cli->cl_loi_list_lock); 524 spin_lock(&cli->cl_loi_list_lock);
527 cli->cl_r_in_flight--; 525 cli->cl_r_in_flight--;
528 list_for_each_safe(l, tmp, &cli->cl_cache_waiters) { 526 list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
529 if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) { 527 if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
@@ -538,5 +536,5 @@ void mdc_exit_request(struct client_obd *cli)
538 } 536 }
539 /* Empty waiting list? Decrease reqs in-flight number */ 537 /* Empty waiting list? Decrease reqs in-flight number */
540 538
541 client_obd_list_unlock(&cli->cl_loi_list_lock); 539 spin_unlock(&cli->cl_loi_list_lock);
542} 540}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index 958a164f620d..3b1bc9111b93 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -869,7 +869,9 @@ resend:
869 * (explicits or automatically generated by Kernel to clean 869 * (explicits or automatically generated by Kernel to clean
870 * current FLocks upon exit) that can't be trashed 870 * current FLocks upon exit) that can't be trashed
871 */ 871 */
872 if ((rc == -EINTR) || (rc == -ETIMEDOUT)) 872 if (((rc == -EINTR) || (rc == -ETIMEDOUT)) &&
873 (einfo->ei_type == LDLM_FLOCK) &&
874 (einfo->ei_mode == LCK_NL))
873 goto resend; 875 goto resend;
874 return rc; 876 return rc;
875 } 877 }
@@ -963,7 +965,6 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
963 if (fid_is_sane(&op_data->op_fid2) && 965 if (fid_is_sane(&op_data->op_fid2) &&
964 it->it_create_mode & M_CHECK_STALE && 966 it->it_create_mode & M_CHECK_STALE &&
965 it->it_op != IT_GETATTR) { 967 it->it_op != IT_GETATTR) {
966
967 /* Also: did we find the same inode? */ 968 /* Also: did we find the same inode? */
968 /* sever can return one of two fids: 969 /* sever can return one of two fids:
969 * op_fid2 - new allocated fid - if file is created. 970 * op_fid2 - new allocated fid - if file is created.
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index b91d3ff18b02..86b7445365f4 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -142,9 +142,8 @@ static int mdc_getattr_common(struct obd_export *exp,
142 142
143 CDEBUG(D_NET, "mode: %o\n", body->mode); 143 CDEBUG(D_NET, "mode: %o\n", body->mode);
144 144
145 mdc_update_max_ea_from_body(exp, body);
145 if (body->eadatasize != 0) { 146 if (body->eadatasize != 0) {
146 mdc_update_max_ea_from_body(exp, body);
147
148 eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD, 147 eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
149 body->eadatasize); 148 body->eadatasize);
150 if (!eadata) 149 if (!eadata)
@@ -1169,7 +1168,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
1169 goto out; 1168 goto out;
1170 } 1169 }
1171 1170
1172 mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0); 1171 mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
1173 1172
1174 /* Copy hsm_progress struct */ 1173 /* Copy hsm_progress struct */
1175 req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS); 1174 req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
@@ -1203,7 +1202,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
1203 goto out; 1202 goto out;
1204 } 1203 }
1205 1204
1206 mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0); 1205 mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
1207 1206
1208 /* Copy hsm_progress struct */ 1207 /* Copy hsm_progress struct */
1209 archive_mask = req_capsule_client_get(&req->rq_pill, 1208 archive_mask = req_capsule_client_get(&req->rq_pill,
@@ -1278,7 +1277,7 @@ static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
1278 goto out; 1277 goto out;
1279 } 1278 }
1280 1279
1281 mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0); 1280 mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
1282 1281
1283 ptlrpc_request_set_replen(req); 1282 ptlrpc_request_set_replen(req);
1284 1283
@@ -1395,7 +1394,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
1395 return rc; 1394 return rc;
1396 } 1395 }
1397 1396
1398 mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0); 1397 mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
1399 1398
1400 /* Copy hsm_request struct */ 1399 /* Copy hsm_request struct */
1401 req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST); 1400 req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
@@ -1952,7 +1951,7 @@ static void lustre_swab_hal(struct hsm_action_list *h)
1952 __swab32s(&h->hal_count); 1951 __swab32s(&h->hal_count);
1953 __swab32s(&h->hal_archive_id); 1952 __swab32s(&h->hal_archive_id);
1954 __swab64s(&h->hal_flags); 1953 __swab64s(&h->hal_flags);
1955 hai = hai_zero(h); 1954 hai = hai_first(h);
1956 for (i = 0; i < h->hal_count; i++, hai = hai_next(hai)) 1955 for (i = 0; i < h->hal_count; i++, hai = hai_next(hai))
1957 lustre_swab_hai(hai); 1956 lustre_swab_hai(hai);
1958} 1957}
@@ -2249,7 +2248,7 @@ static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
2249 * recovery, non zero value will be return if the lock can be canceled, 2248 * recovery, non zero value will be return if the lock can be canceled,
2250 * or zero returned for not 2249 * or zero returned for not
2251 */ 2250 */
2252static int mdc_cancel_for_recovery(struct ldlm_lock *lock) 2251static int mdc_cancel_weight(struct ldlm_lock *lock)
2253{ 2252{
2254 if (lock->l_resource->lr_type != LDLM_IBITS) 2253 if (lock->l_resource->lr_type != LDLM_IBITS)
2255 return 0; 2254 return 0;
@@ -2314,12 +2313,14 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
2314 return -ENOMEM; 2313 return -ENOMEM;
2315 mdc_init_rpc_lock(cli->cl_rpc_lock); 2314 mdc_init_rpc_lock(cli->cl_rpc_lock);
2316 2315
2317 ptlrpcd_addref(); 2316 rc = ptlrpcd_addref();
2317 if (rc < 0)
2318 goto err_rpc_lock;
2318 2319
2319 cli->cl_close_lock = kzalloc(sizeof(*cli->cl_close_lock), GFP_NOFS); 2320 cli->cl_close_lock = kzalloc(sizeof(*cli->cl_close_lock), GFP_NOFS);
2320 if (!cli->cl_close_lock) { 2321 if (!cli->cl_close_lock) {
2321 rc = -ENOMEM; 2322 rc = -ENOMEM;
2322 goto err_rpc_lock; 2323 goto err_ptlrpcd_decref;
2323 } 2324 }
2324 mdc_init_rpc_lock(cli->cl_close_lock); 2325 mdc_init_rpc_lock(cli->cl_close_lock);
2325 2326
@@ -2331,7 +2332,7 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
2331 sptlrpc_lprocfs_cliobd_attach(obd); 2332 sptlrpc_lprocfs_cliobd_attach(obd);
2332 ptlrpc_lprocfs_register_obd(obd); 2333 ptlrpc_lprocfs_register_obd(obd);
2333 2334
2334 ns_register_cancel(obd->obd_namespace, mdc_cancel_for_recovery); 2335 ns_register_cancel(obd->obd_namespace, mdc_cancel_weight);
2335 2336
2336 obd->obd_namespace->ns_lvbo = &inode_lvbo; 2337 obd->obd_namespace->ns_lvbo = &inode_lvbo;
2337 2338
@@ -2345,9 +2346,10 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
2345 2346
2346err_close_lock: 2347err_close_lock:
2347 kfree(cli->cl_close_lock); 2348 kfree(cli->cl_close_lock);
2349err_ptlrpcd_decref:
2350 ptlrpcd_decref();
2348err_rpc_lock: 2351err_rpc_lock:
2349 kfree(cli->cl_rpc_lock); 2352 kfree(cli->cl_rpc_lock);
2350 ptlrpcd_decref();
2351 return rc; 2353 return rc;
2352} 2354}
2353 2355
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 3924b095bfb0..2311a437c441 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -502,8 +502,12 @@ static void do_requeue(struct config_llog_data *cld)
502 */ 502 */
503 down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem); 503 down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
504 if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) { 504 if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
505 int rc;
506
505 CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname); 507 CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
506 mgc_process_log(cld->cld_mgcexp->exp_obd, cld); 508 rc = mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
509 if (rc && rc != -ENOENT)
510 CERROR("failed processing log: %d\n", rc);
507 } else { 511 } else {
508 CDEBUG(D_MGC, "disconnecting, won't update log %s\n", 512 CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
509 cld->cld_logname); 513 cld->cld_logname);
@@ -734,7 +738,9 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
734 struct task_struct *task; 738 struct task_struct *task;
735 int rc; 739 int rc;
736 740
737 ptlrpcd_addref(); 741 rc = ptlrpcd_addref();
742 if (rc < 0)
743 goto err_noref;
738 744
739 rc = client_obd_setup(obd, lcfg); 745 rc = client_obd_setup(obd, lcfg);
740 if (rc) 746 if (rc)
@@ -773,6 +779,7 @@ err_cleanup:
773 client_obd_cleanup(obd); 779 client_obd_cleanup(obd);
774err_decref: 780err_decref:
775 ptlrpcd_decref(); 781 ptlrpcd_decref();
782err_noref:
776 return rc; 783 return rc;
777} 784}
778 785
@@ -1720,7 +1727,6 @@ static int mgc_process_config(struct obd_device *obd, u32 len, void *buf)
1720 CERROR("Unknown command: %d\n", lcfg->lcfg_command); 1727 CERROR("Unknown command: %d\n", lcfg->lcfg_command);
1721 rc = -EINVAL; 1728 rc = -EINVAL;
1722 goto out; 1729 goto out;
1723
1724 } 1730 }
1725 } 1731 }
1726out: 1732out:
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index f5128b4f176f..583fb5f33889 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -36,6 +36,7 @@
36 * Client IO. 36 * Client IO.
37 * 37 *
38 * Author: Nikita Danilov <nikita.danilov@sun.com> 38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
39 */ 40 */
40 41
41#define DEBUG_SUBSYSTEM S_CLASS 42#define DEBUG_SUBSYSTEM S_CLASS
@@ -132,6 +133,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
132 case CIT_WRITE: 133 case CIT_WRITE:
133 break; 134 break;
134 case CIT_FAULT: 135 case CIT_FAULT:
136 break;
135 case CIT_FSYNC: 137 case CIT_FSYNC:
136 LASSERT(!io->ci_need_restart); 138 LASSERT(!io->ci_need_restart);
137 break; 139 break;
@@ -159,7 +161,6 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
159 161
160 io->ci_type = iot; 162 io->ci_type = iot;
161 INIT_LIST_HEAD(&io->ci_lockset.cls_todo); 163 INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
162 INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
163 INIT_LIST_HEAD(&io->ci_lockset.cls_done); 164 INIT_LIST_HEAD(&io->ci_lockset.cls_done);
164 INIT_LIST_HEAD(&io->ci_layers); 165 INIT_LIST_HEAD(&io->ci_layers);
165 166
@@ -241,37 +242,7 @@ static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
241 const struct cl_lock_descr *d1) 242 const struct cl_lock_descr *d1)
242{ 243{
243 return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu), 244 return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
244 lu_object_fid(&d1->cld_obj->co_lu)) ?: 245 lu_object_fid(&d1->cld_obj->co_lu));
245 __diff_normalize(d0->cld_start, d1->cld_start);
246}
247
248static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
249 const struct cl_lock_descr *d1)
250{
251 int ret;
252
253 ret = lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
254 lu_object_fid(&d1->cld_obj->co_lu));
255 if (ret)
256 return ret;
257 if (d0->cld_end < d1->cld_start)
258 return -1;
259 if (d0->cld_start > d0->cld_end)
260 return 1;
261 return 0;
262}
263
264static void cl_lock_descr_merge(struct cl_lock_descr *d0,
265 const struct cl_lock_descr *d1)
266{
267 d0->cld_start = min(d0->cld_start, d1->cld_start);
268 d0->cld_end = max(d0->cld_end, d1->cld_end);
269
270 if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
271 d0->cld_mode = CLM_WRITE;
272
273 if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
274 d0->cld_mode = CLM_GROUP;
275} 246}
276 247
277/* 248/*
@@ -320,33 +291,35 @@ static void cl_io_locks_sort(struct cl_io *io)
320 } while (!done); 291 } while (!done);
321} 292}
322 293
323/** 294static void cl_lock_descr_merge(struct cl_lock_descr *d0,
324 * Check whether \a queue contains locks matching \a need. 295 const struct cl_lock_descr *d1)
325 *
326 * \retval +ve there is a matching lock in the \a queue
327 * \retval 0 there are no matching locks in the \a queue
328 */
329int cl_queue_match(const struct list_head *queue,
330 const struct cl_lock_descr *need)
331{ 296{
332 struct cl_io_lock_link *scan; 297 d0->cld_start = min(d0->cld_start, d1->cld_start);
298 d0->cld_end = max(d0->cld_end, d1->cld_end);
333 299
334 list_for_each_entry(scan, queue, cill_linkage) { 300 if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
335 if (cl_lock_descr_match(&scan->cill_descr, need)) 301 d0->cld_mode = CLM_WRITE;
336 return 1; 302
337 } 303 if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
338 return 0; 304 d0->cld_mode = CLM_GROUP;
339} 305}
340EXPORT_SYMBOL(cl_queue_match);
341 306
342static int cl_queue_merge(const struct list_head *queue, 307static int cl_lockset_merge(const struct cl_lockset *set,
343 const struct cl_lock_descr *need) 308 const struct cl_lock_descr *need)
344{ 309{
345 struct cl_io_lock_link *scan; 310 struct cl_io_lock_link *scan;
346 311
347 list_for_each_entry(scan, queue, cill_linkage) { 312 list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
348 if (cl_lock_descr_cmp(&scan->cill_descr, need)) 313 if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
349 continue; 314 continue;
315
316 /* Merge locks for the same object because ldlm lock server
317 * may expand the lock extent, otherwise there is a deadlock
318 * case if two conflicted locks are queueud for the same object
319 * and lock server expands one lock to overlap the another.
320 * The side effect is that it can generate a multi-stripe lock
321 * that may cause casacading problem
322 */
350 cl_lock_descr_merge(&scan->cill_descr, need); 323 cl_lock_descr_merge(&scan->cill_descr, need);
351 CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n", 324 CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
352 scan->cill_descr.cld_mode, scan->cill_descr.cld_start, 325 scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
@@ -356,87 +329,20 @@ static int cl_queue_merge(const struct list_head *queue,
356 return 0; 329 return 0;
357} 330}
358 331
359static int cl_lockset_match(const struct cl_lockset *set,
360 const struct cl_lock_descr *need)
361{
362 return cl_queue_match(&set->cls_curr, need) ||
363 cl_queue_match(&set->cls_done, need);
364}
365
366static int cl_lockset_merge(const struct cl_lockset *set,
367 const struct cl_lock_descr *need)
368{
369 return cl_queue_merge(&set->cls_todo, need) ||
370 cl_lockset_match(set, need);
371}
372
373static int cl_lockset_lock_one(const struct lu_env *env,
374 struct cl_io *io, struct cl_lockset *set,
375 struct cl_io_lock_link *link)
376{
377 struct cl_lock *lock;
378 int result;
379
380 lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
381
382 if (!IS_ERR(lock)) {
383 link->cill_lock = lock;
384 list_move(&link->cill_linkage, &set->cls_curr);
385 if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
386 result = cl_wait(env, lock);
387 if (result == 0)
388 list_move(&link->cill_linkage, &set->cls_done);
389 } else
390 result = 0;
391 } else
392 result = PTR_ERR(lock);
393 return result;
394}
395
396static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
397 struct cl_io_lock_link *link)
398{
399 struct cl_lock *lock = link->cill_lock;
400
401 list_del_init(&link->cill_linkage);
402 if (lock) {
403 cl_lock_release(env, lock, "io", io);
404 link->cill_lock = NULL;
405 }
406 if (link->cill_fini)
407 link->cill_fini(env, link);
408}
409
410static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io, 332static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
411 struct cl_lockset *set) 333 struct cl_lockset *set)
412{ 334{
413 struct cl_io_lock_link *link; 335 struct cl_io_lock_link *link;
414 struct cl_io_lock_link *temp; 336 struct cl_io_lock_link *temp;
415 struct cl_lock *lock;
416 int result; 337 int result;
417 338
418 result = 0; 339 result = 0;
419 list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) { 340 list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
420 if (!cl_lockset_match(set, &link->cill_descr)) { 341 result = cl_lock_request(env, io, &link->cill_lock);
421 /* XXX some locking to guarantee that locks aren't 342 if (result < 0)
422 * expanded in between. 343 break;
423 */ 344
424 result = cl_lockset_lock_one(env, io, set, link); 345 list_move(&link->cill_linkage, &set->cls_done);
425 if (result != 0)
426 break;
427 } else
428 cl_lock_link_fini(env, io, link);
429 }
430 if (result == 0) {
431 list_for_each_entry_safe(link, temp,
432 &set->cls_curr, cill_linkage) {
433 lock = link->cill_lock;
434 result = cl_wait(env, lock);
435 if (result == 0)
436 list_move(&link->cill_linkage, &set->cls_done);
437 else
438 break;
439 }
440 } 346 }
441 return result; 347 return result;
442} 348}
@@ -492,16 +398,19 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
492 398
493 set = &io->ci_lockset; 399 set = &io->ci_lockset;
494 400
495 list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) 401 list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
496 cl_lock_link_fini(env, io, link); 402 list_del_init(&link->cill_linkage);
497 403 if (link->cill_fini)
498 list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage) 404 link->cill_fini(env, link);
499 cl_lock_link_fini(env, io, link); 405 }
500 406
501 list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) { 407 list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
502 cl_unuse(env, link->cill_lock); 408 list_del_init(&link->cill_linkage);
503 cl_lock_link_fini(env, io, link); 409 cl_lock_release(env, &link->cill_lock);
410 if (link->cill_fini)
411 link->cill_fini(env, link);
504 } 412 }
413
505 cl_io_for_each_reverse(scan, io) { 414 cl_io_for_each_reverse(scan, io) {
506 if (scan->cis_iop->op[io->ci_type].cio_unlock) 415 if (scan->cis_iop->op[io->ci_type].cio_unlock)
507 scan->cis_iop->op[io->ci_type].cio_unlock(env, scan); 416 scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
@@ -595,9 +504,9 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
595{ 504{
596 int result; 505 int result;
597 506
598 if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr)) 507 if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr)) {
599 result = 1; 508 result = 1;
600 else { 509 } else {
601 list_add(&link->cill_linkage, &io->ci_lockset.cls_todo); 510 list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
602 result = 0; 511 result = 0;
603 } 512 }
@@ -627,8 +536,9 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
627 result = cl_io_lock_add(env, io, link); 536 result = cl_io_lock_add(env, io, link);
628 if (result) /* lock match */ 537 if (result) /* lock match */
629 link->cill_fini(env, link); 538 link->cill_fini(env, link);
630 } else 539 } else {
631 result = -ENOMEM; 540 result = -ENOMEM;
541 }
632 542
633 return result; 543 return result;
634} 544}
@@ -692,42 +602,6 @@ cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
692} 602}
693 603
694/** 604/**
695 * True iff \a page is within \a io range.
696 */
697static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
698{
699 int result = 1;
700 loff_t start;
701 loff_t end;
702 pgoff_t idx;
703
704 idx = page->cp_index;
705 switch (io->ci_type) {
706 case CIT_READ:
707 case CIT_WRITE:
708 /*
709 * check that [start, end) and [pos, pos + count) extents
710 * overlap.
711 */
712 if (!cl_io_is_append(io)) {
713 const struct cl_io_rw_common *crw = &(io->u.ci_rw);
714
715 start = cl_offset(page->cp_obj, idx);
716 end = cl_offset(page->cp_obj, idx + 1);
717 result = crw->crw_pos < end &&
718 start < crw->crw_pos + crw->crw_count;
719 }
720 break;
721 case CIT_FAULT:
722 result = io->u.ci_fault.ft_index == idx;
723 break;
724 default:
725 LBUG();
726 }
727 return result;
728}
729
730/**
731 * Called by read io, when page has to be read from the server. 605 * Called by read io, when page has to be read from the server.
732 * 606 *
733 * \see cl_io_operations::cio_read_page() 607 * \see cl_io_operations::cio_read_page()
@@ -742,7 +616,6 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
742 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT); 616 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
743 LINVRNT(cl_page_is_owned(page, io)); 617 LINVRNT(cl_page_is_owned(page, io));
744 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED); 618 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
745 LINVRNT(cl_page_in_io(page, io));
746 LINVRNT(cl_io_invariant(io)); 619 LINVRNT(cl_io_invariant(io));
747 620
748 queue = &io->ci_queue; 621 queue = &io->ci_queue;
@@ -769,7 +642,7 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
769 break; 642 break;
770 } 643 }
771 } 644 }
772 if (result == 0) 645 if (result == 0 && queue->c2_qin.pl_nr > 0)
773 result = cl_io_submit_rw(env, io, CRT_READ, queue); 646 result = cl_io_submit_rw(env, io, CRT_READ, queue);
774 /* 647 /*
775 * Unlock unsent pages in case of error. 648 * Unlock unsent pages in case of error.
@@ -781,77 +654,29 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
781EXPORT_SYMBOL(cl_io_read_page); 654EXPORT_SYMBOL(cl_io_read_page);
782 655
783/** 656/**
784 * Called by write io to prepare page to receive data from user buffer. 657 * Commit a list of contiguous pages into writeback cache.
785 * 658 *
786 * \see cl_io_operations::cio_prepare_write() 659 * \returns 0 if all pages committed, or errcode if error occurred.
660 * \see cl_io_operations::cio_commit_async()
787 */ 661 */
788int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, 662int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
789 struct cl_page *page, unsigned from, unsigned to) 663 struct cl_page_list *queue, int from, int to,
664 cl_commit_cbt cb)
790{ 665{
791 const struct cl_io_slice *scan; 666 const struct cl_io_slice *scan;
792 int result = 0; 667 int result = 0;
793 668
794 LINVRNT(io->ci_type == CIT_WRITE);
795 LINVRNT(cl_page_is_owned(page, io));
796 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
797 LINVRNT(cl_io_invariant(io));
798 LASSERT(cl_page_in_io(page, io));
799
800 cl_io_for_each_reverse(scan, io) {
801 if (scan->cis_iop->cio_prepare_write) {
802 const struct cl_page_slice *slice;
803
804 slice = cl_io_slice_page(scan, page);
805 result = scan->cis_iop->cio_prepare_write(env, scan,
806 slice,
807 from, to);
808 if (result != 0)
809 break;
810 }
811 }
812 return result;
813}
814EXPORT_SYMBOL(cl_io_prepare_write);
815
816/**
817 * Called by write io after user data were copied into a page.
818 *
819 * \see cl_io_operations::cio_commit_write()
820 */
821int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
822 struct cl_page *page, unsigned from, unsigned to)
823{
824 const struct cl_io_slice *scan;
825 int result = 0;
826
827 LINVRNT(io->ci_type == CIT_WRITE);
828 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
829 LINVRNT(cl_io_invariant(io));
830 /*
831 * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
832 * already called cl_page_cache_add(), moving page into CPS_CACHED
833 * state. Better (and more general) way of dealing with such situation
834 * is needed.
835 */
836 LASSERT(cl_page_is_owned(page, io) || page->cp_parent);
837 LASSERT(cl_page_in_io(page, io));
838
839 cl_io_for_each(scan, io) { 669 cl_io_for_each(scan, io) {
840 if (scan->cis_iop->cio_commit_write) { 670 if (!scan->cis_iop->cio_commit_async)
841 const struct cl_page_slice *slice; 671 continue;
842 672 result = scan->cis_iop->cio_commit_async(env, scan, queue,
843 slice = cl_io_slice_page(scan, page); 673 from, to, cb);
844 result = scan->cis_iop->cio_commit_write(env, scan, 674 if (result != 0)
845 slice, 675 break;
846 from, to);
847 if (result != 0)
848 break;
849 }
850 } 676 }
851 LINVRNT(result <= 0);
852 return result; 677 return result;
853} 678}
854EXPORT_SYMBOL(cl_io_commit_write); 679EXPORT_SYMBOL(cl_io_commit_async);
855 680
856/** 681/**
857 * Submits a list of pages for immediate io. 682 * Submits a list of pages for immediate io.
@@ -869,13 +694,10 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
869 const struct cl_io_slice *scan; 694 const struct cl_io_slice *scan;
870 int result = 0; 695 int result = 0;
871 696
872 LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
873
874 cl_io_for_each(scan, io) { 697 cl_io_for_each(scan, io) {
875 if (!scan->cis_iop->req_op[crt].cio_submit) 698 if (!scan->cis_iop->cio_submit)
876 continue; 699 continue;
877 result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt, 700 result = scan->cis_iop->cio_submit(env, scan, crt, queue);
878 queue);
879 if (result != 0) 701 if (result != 0)
880 break; 702 break;
881 } 703 }
@@ -887,6 +709,9 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
887} 709}
888EXPORT_SYMBOL(cl_io_submit_rw); 710EXPORT_SYMBOL(cl_io_submit_rw);
889 711
712static void cl_page_list_assume(const struct lu_env *env,
713 struct cl_io *io, struct cl_page_list *plist);
714
890/** 715/**
891 * Submit a sync_io and wait for the IO to be finished, or error happens. 716 * Submit a sync_io and wait for the IO to be finished, or error happens.
892 * If \a timeout is zero, it means to wait for the IO unconditionally. 717 * If \a timeout is zero, it means to wait for the IO unconditionally.
@@ -904,7 +729,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
904 pg->cp_sync_io = anchor; 729 pg->cp_sync_io = anchor;
905 } 730 }
906 731
907 cl_sync_io_init(anchor, queue->c2_qin.pl_nr); 732 cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
908 rc = cl_io_submit_rw(env, io, iot, queue); 733 rc = cl_io_submit_rw(env, io, iot, queue);
909 if (rc == 0) { 734 if (rc == 0) {
910 /* 735 /*
@@ -915,12 +740,12 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
915 */ 740 */
916 cl_page_list_for_each(pg, &queue->c2_qin) { 741 cl_page_list_for_each(pg, &queue->c2_qin) {
917 pg->cp_sync_io = NULL; 742 pg->cp_sync_io = NULL;
918 cl_sync_io_note(anchor, 1); 743 cl_sync_io_note(env, anchor, 1);
919 } 744 }
920 745
921 /* wait for the IO to be finished. */ 746 /* wait for the IO to be finished. */
922 rc = cl_sync_io_wait(env, io, &queue->c2_qout, 747 rc = cl_sync_io_wait(env, anchor, timeout);
923 anchor, timeout); 748 cl_page_list_assume(env, io, &queue->c2_qout);
924 } else { 749 } else {
925 LASSERT(list_empty(&queue->c2_qout.pl_pages)); 750 LASSERT(list_empty(&queue->c2_qout.pl_pages));
926 cl_page_list_for_each(pg, &queue->c2_qin) 751 cl_page_list_for_each(pg, &queue->c2_qin)
@@ -931,26 +756,6 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
931EXPORT_SYMBOL(cl_io_submit_sync); 756EXPORT_SYMBOL(cl_io_submit_sync);
932 757
933/** 758/**
934 * Cancel an IO which has been submitted by cl_io_submit_rw.
935 */
936static int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
937 struct cl_page_list *queue)
938{
939 struct cl_page *page;
940 int result = 0;
941
942 CERROR("Canceling ongoing page transmission\n");
943 cl_page_list_for_each(page, queue) {
944 int rc;
945
946 LINVRNT(cl_page_in_io(page, io));
947 rc = cl_page_cancel(env, page);
948 result = result ?: rc;
949 }
950 return result;
951}
952
953/**
954 * Main io loop. 759 * Main io loop.
955 * 760 *
956 * Pumps io through iterations calling 761 * Pumps io through iterations calling
@@ -1072,8 +877,8 @@ EXPORT_SYMBOL(cl_page_list_add);
1072/** 877/**
1073 * Removes a page from a page list. 878 * Removes a page from a page list.
1074 */ 879 */
1075static void cl_page_list_del(const struct lu_env *env, 880void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
1076 struct cl_page_list *plist, struct cl_page *page) 881 struct cl_page *page)
1077{ 882{
1078 LASSERT(plist->pl_nr > 0); 883 LASSERT(plist->pl_nr > 0);
1079 LINVRNT(plist->pl_owner == current); 884 LINVRNT(plist->pl_owner == current);
@@ -1086,6 +891,7 @@ static void cl_page_list_del(const struct lu_env *env,
1086 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist); 891 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
1087 cl_page_put(env, page); 892 cl_page_put(env, page);
1088} 893}
894EXPORT_SYMBOL(cl_page_list_del);
1089 895
1090/** 896/**
1091 * Moves a page from one page list to another. 897 * Moves a page from one page list to another.
@@ -1106,6 +912,24 @@ void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
1106EXPORT_SYMBOL(cl_page_list_move); 912EXPORT_SYMBOL(cl_page_list_move);
1107 913
1108/** 914/**
915 * Moves a page from one page list to the head of another list.
916 */
917void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
918 struct cl_page *page)
919{
920 LASSERT(src->pl_nr > 0);
921 LINVRNT(dst->pl_owner == current);
922 LINVRNT(src->pl_owner == current);
923
924 list_move(&page->cp_batch, &dst->pl_pages);
925 --src->pl_nr;
926 ++dst->pl_nr;
927 lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
928 src, dst);
929}
930EXPORT_SYMBOL(cl_page_list_move_head);
931
932/**
1109 * splice the cl_page_list, just as list head does 933 * splice the cl_page_list, just as list head does
1110 */ 934 */
1111void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head) 935void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
@@ -1162,8 +986,7 @@ EXPORT_SYMBOL(cl_page_list_disown);
1162/** 986/**
1163 * Releases pages from queue. 987 * Releases pages from queue.
1164 */ 988 */
1165static void cl_page_list_fini(const struct lu_env *env, 989void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
1166 struct cl_page_list *plist)
1167{ 990{
1168 struct cl_page *page; 991 struct cl_page *page;
1169 struct cl_page *temp; 992 struct cl_page *temp;
@@ -1174,6 +997,7 @@ static void cl_page_list_fini(const struct lu_env *env,
1174 cl_page_list_del(env, plist, page); 997 cl_page_list_del(env, plist, page);
1175 LASSERT(plist->pl_nr == 0); 998 LASSERT(plist->pl_nr == 0);
1176} 999}
1000EXPORT_SYMBOL(cl_page_list_fini);
1177 1001
1178/** 1002/**
1179 * Assumes all pages in a queue. 1003 * Assumes all pages in a queue.
@@ -1260,7 +1084,7 @@ EXPORT_SYMBOL(cl_2queue_init_page);
1260/** 1084/**
1261 * Returns top-level io. 1085 * Returns top-level io.
1262 * 1086 *
1263 * \see cl_object_top(), cl_page_top(). 1087 * \see cl_object_top()
1264 */ 1088 */
1265struct cl_io *cl_io_top(struct cl_io *io) 1089struct cl_io *cl_io_top(struct cl_io *io)
1266{ 1090{
@@ -1323,19 +1147,14 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req,
1323 int result; 1147 int result;
1324 1148
1325 result = 0; 1149 result = 0;
1326 page = cl_page_top(page); 1150 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
1327 do { 1151 dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
1328 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { 1152 if (dev->cd_ops->cdo_req_init) {
1329 dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev); 1153 result = dev->cd_ops->cdo_req_init(env, dev, req);
1330 if (dev->cd_ops->cdo_req_init) { 1154 if (result != 0)
1331 result = dev->cd_ops->cdo_req_init(env, 1155 break;
1332 dev, req);
1333 if (result != 0)
1334 break;
1335 }
1336 } 1156 }
1337 page = page->cp_child; 1157 }
1338 } while (page && result == 0);
1339 return result; 1158 return result;
1340} 1159}
1341 1160
@@ -1384,14 +1203,16 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
1384 if (req->crq_o) { 1203 if (req->crq_o) {
1385 req->crq_nrobjs = nr_objects; 1204 req->crq_nrobjs = nr_objects;
1386 result = cl_req_init(env, req, page); 1205 result = cl_req_init(env, req, page);
1387 } else 1206 } else {
1388 result = -ENOMEM; 1207 result = -ENOMEM;
1208 }
1389 if (result != 0) { 1209 if (result != 0) {
1390 cl_req_completion(env, req, result); 1210 cl_req_completion(env, req, result);
1391 req = ERR_PTR(result); 1211 req = ERR_PTR(result);
1392 } 1212 }
1393 } else 1213 } else {
1394 req = ERR_PTR(-ENOMEM); 1214 req = ERR_PTR(-ENOMEM);
1215 }
1395 return req; 1216 return req;
1396} 1217}
1397EXPORT_SYMBOL(cl_req_alloc); 1218EXPORT_SYMBOL(cl_req_alloc);
@@ -1406,8 +1227,6 @@ void cl_req_page_add(const struct lu_env *env,
1406 struct cl_req_obj *rqo; 1227 struct cl_req_obj *rqo;
1407 int i; 1228 int i;
1408 1229
1409 page = cl_page_top(page);
1410
1411 LASSERT(list_empty(&page->cp_flight)); 1230 LASSERT(list_empty(&page->cp_flight));
1412 LASSERT(!page->cp_req); 1231 LASSERT(!page->cp_req);
1413 1232
@@ -1438,8 +1257,6 @@ void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
1438{ 1257{
1439 struct cl_req *req = page->cp_req; 1258 struct cl_req *req = page->cp_req;
1440 1259
1441 page = cl_page_top(page);
1442
1443 LASSERT(!list_empty(&page->cp_flight)); 1260 LASSERT(!list_empty(&page->cp_flight));
1444 LASSERT(req->crq_nrpages > 0); 1261 LASSERT(req->crq_nrpages > 0);
1445 1262
@@ -1511,25 +1328,39 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
1511} 1328}
1512EXPORT_SYMBOL(cl_req_attr_set); 1329EXPORT_SYMBOL(cl_req_attr_set);
1513 1330
1331/* cl_sync_io_callback assumes the caller must call cl_sync_io_wait() to
1332 * wait for the IO to finish.
1333 */
1334void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
1335{
1336 wake_up_all(&anchor->csi_waitq);
1337
1338 /* it's safe to nuke or reuse anchor now */
1339 atomic_set(&anchor->csi_barrier, 0);
1340}
1341EXPORT_SYMBOL(cl_sync_io_end);
1514 1342
1515/** 1343/**
1516 * Initialize synchronous io wait anchor, for transfer of \a nrpages pages. 1344 * Initialize synchronous io wait anchor
1517 */ 1345 */
1518void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages) 1346void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
1347 void (*end)(const struct lu_env *, struct cl_sync_io *))
1519{ 1348{
1349 memset(anchor, 0, sizeof(*anchor));
1520 init_waitqueue_head(&anchor->csi_waitq); 1350 init_waitqueue_head(&anchor->csi_waitq);
1521 atomic_set(&anchor->csi_sync_nr, nrpages); 1351 atomic_set(&anchor->csi_sync_nr, nr);
1522 atomic_set(&anchor->csi_barrier, nrpages > 0); 1352 atomic_set(&anchor->csi_barrier, nr > 0);
1523 anchor->csi_sync_rc = 0; 1353 anchor->csi_sync_rc = 0;
1354 anchor->csi_end_io = end;
1355 LASSERT(end);
1524} 1356}
1525EXPORT_SYMBOL(cl_sync_io_init); 1357EXPORT_SYMBOL(cl_sync_io_init);
1526 1358
1527/** 1359/**
1528 * Wait until all transfer completes. Transfer completion routine has to call 1360 * Wait until all IO completes. Transfer completion routine has to call
1529 * cl_sync_io_note() for every page. 1361 * cl_sync_io_note() for every entity.
1530 */ 1362 */
1531int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io, 1363int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
1532 struct cl_page_list *queue, struct cl_sync_io *anchor,
1533 long timeout) 1364 long timeout)
1534{ 1365{
1535 struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout), 1366 struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
@@ -1542,11 +1373,9 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
1542 atomic_read(&anchor->csi_sync_nr) == 0, 1373 atomic_read(&anchor->csi_sync_nr) == 0,
1543 &lwi); 1374 &lwi);
1544 if (rc < 0) { 1375 if (rc < 0) {
1545 CERROR("SYNC IO failed with error: %d, try to cancel %d remaining pages\n", 1376 CERROR("IO failed: %d, still wait for %d remaining entries\n",
1546 rc, atomic_read(&anchor->csi_sync_nr)); 1377 rc, atomic_read(&anchor->csi_sync_nr));
1547 1378
1548 (void)cl_io_cancel(env, io, queue);
1549
1550 lwi = (struct l_wait_info) { 0 }; 1379 lwi = (struct l_wait_info) { 0 };
1551 (void)l_wait_event(anchor->csi_waitq, 1380 (void)l_wait_event(anchor->csi_waitq,
1552 atomic_read(&anchor->csi_sync_nr) == 0, 1381 atomic_read(&anchor->csi_sync_nr) == 0,
@@ -1555,14 +1384,12 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
1555 rc = anchor->csi_sync_rc; 1384 rc = anchor->csi_sync_rc;
1556 } 1385 }
1557 LASSERT(atomic_read(&anchor->csi_sync_nr) == 0); 1386 LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
1558 cl_page_list_assume(env, io, queue);
1559 1387
1560 /* wait until cl_sync_io_note() has done wakeup */ 1388 /* wait until cl_sync_io_note() has done wakeup */
1561 while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) { 1389 while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
1562 cpu_relax(); 1390 cpu_relax();
1563 } 1391 }
1564 1392
1565 POISON(anchor, 0x5a, sizeof(*anchor));
1566 return rc; 1393 return rc;
1567} 1394}
1568EXPORT_SYMBOL(cl_sync_io_wait); 1395EXPORT_SYMBOL(cl_sync_io_wait);
@@ -1570,7 +1397,8 @@ EXPORT_SYMBOL(cl_sync_io_wait);
1570/** 1397/**
1571 * Indicate that transfer of a single page completed. 1398 * Indicate that transfer of a single page completed.
1572 */ 1399 */
1573void cl_sync_io_note(struct cl_sync_io *anchor, int ioret) 1400void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
1401 int ioret)
1574{ 1402{
1575 if (anchor->csi_sync_rc == 0 && ioret < 0) 1403 if (anchor->csi_sync_rc == 0 && ioret < 0)
1576 anchor->csi_sync_rc = ioret; 1404 anchor->csi_sync_rc = ioret;
@@ -1581,9 +1409,9 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
1581 */ 1409 */
1582 LASSERT(atomic_read(&anchor->csi_sync_nr) > 0); 1410 LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
1583 if (atomic_dec_and_test(&anchor->csi_sync_nr)) { 1411 if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
1584 wake_up_all(&anchor->csi_waitq); 1412 LASSERT(anchor->csi_end_io);
1585 /* it's safe to nuke or reuse anchor now */ 1413 anchor->csi_end_io(env, anchor);
1586 atomic_set(&anchor->csi_barrier, 0); 1414 /* Can't access anchor any more */
1587 } 1415 }
1588} 1416}
1589EXPORT_SYMBOL(cl_sync_io_note); 1417EXPORT_SYMBOL(cl_sync_io_note);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index aec644eb4db9..26a576b63a72 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -36,6 +36,7 @@
36 * Client Extent Lock. 36 * Client Extent Lock.
37 * 37 *
38 * Author: Nikita Danilov <nikita.danilov@sun.com> 38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
39 */ 40 */
40 41
41#define DEBUG_SUBSYSTEM S_CLASS 42#define DEBUG_SUBSYSTEM S_CLASS
@@ -47,138 +48,18 @@
47#include "../include/cl_object.h" 48#include "../include/cl_object.h"
48#include "cl_internal.h" 49#include "cl_internal.h"
49 50
50/** Lock class of cl_lock::cll_guard */
51static struct lock_class_key cl_lock_guard_class;
52static struct kmem_cache *cl_lock_kmem;
53
54static struct lu_kmem_descr cl_lock_caches[] = {
55 {
56 .ckd_cache = &cl_lock_kmem,
57 .ckd_name = "cl_lock_kmem",
58 .ckd_size = sizeof (struct cl_lock)
59 },
60 {
61 .ckd_cache = NULL
62 }
63};
64
65#define CS_LOCK_INC(o, item)
66#define CS_LOCK_DEC(o, item)
67#define CS_LOCKSTATE_INC(o, state)
68#define CS_LOCKSTATE_DEC(o, state)
69
70/**
71 * Basic lock invariant that is maintained at all times. Caller either has a
72 * reference to \a lock, or somehow assures that \a lock cannot be freed.
73 *
74 * \see cl_lock_invariant()
75 */
76static int cl_lock_invariant_trusted(const struct lu_env *env,
77 const struct cl_lock *lock)
78{
79 return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
80 atomic_read(&lock->cll_ref) >= lock->cll_holds &&
81 lock->cll_holds >= lock->cll_users &&
82 lock->cll_holds >= 0 &&
83 lock->cll_users >= 0 &&
84 lock->cll_depth >= 0;
85}
86
87/**
88 * Stronger lock invariant, checking that caller has a reference on a lock.
89 *
90 * \see cl_lock_invariant_trusted()
91 */
92static int cl_lock_invariant(const struct lu_env *env,
93 const struct cl_lock *lock)
94{
95 int result;
96
97 result = atomic_read(&lock->cll_ref) > 0 &&
98 cl_lock_invariant_trusted(env, lock);
99 if (!result && env)
100 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken\n");
101 return result;
102}
103
104/**
105 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
106 */
107static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
108{
109 return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
110}
111
112/**
113 * Returns a set of counters for this lock, depending on a lock nesting.
114 */
115static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
116 const struct cl_lock *lock)
117{
118 struct cl_thread_info *info;
119 enum clt_nesting_level nesting;
120
121 info = cl_env_info(env);
122 nesting = cl_lock_nesting(lock);
123 LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
124 return &info->clt_counters[nesting];
125}
126
127static void cl_lock_trace0(int level, const struct lu_env *env, 51static void cl_lock_trace0(int level, const struct lu_env *env,
128 const char *prefix, const struct cl_lock *lock, 52 const char *prefix, const struct cl_lock *lock,
129 const char *func, const int line) 53 const char *func, const int line)
130{ 54{
131 struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj); 55 struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
132 56
133 CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)(%p/%d/%d) at %s():%d\n", 57 CDEBUG(level, "%s: %p (%p/%d) at %s():%d\n",
134 prefix, lock, atomic_read(&lock->cll_ref), 58 prefix, lock, env, h->coh_nesting, func, line);
135 lock->cll_guarder, lock->cll_depth,
136 lock->cll_state, lock->cll_error, lock->cll_holds,
137 lock->cll_users, lock->cll_flags,
138 env, h->coh_nesting, cl_lock_nr_mutexed(env),
139 func, line);
140} 59}
141 60#define cl_lock_trace(level, env, prefix, lock) \
142#define cl_lock_trace(level, env, prefix, lock) \
143 cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__) 61 cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__)
144 62
145#define RETIP ((unsigned long)__builtin_return_address(0))
146
147#ifdef CONFIG_LOCKDEP
148static struct lock_class_key cl_lock_key;
149
150static void cl_lock_lockdep_init(struct cl_lock *lock)
151{
152 lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
153}
154
155static void cl_lock_lockdep_acquire(const struct lu_env *env,
156 struct cl_lock *lock, __u32 enqflags)
157{
158 cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
159 lock_map_acquire(&lock->dep_map);
160}
161
162static void cl_lock_lockdep_release(const struct lu_env *env,
163 struct cl_lock *lock)
164{
165 cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
166 lock_release(&lock->dep_map, 0, RETIP);
167}
168
169#else /* !CONFIG_LOCKDEP */
170
171static void cl_lock_lockdep_init(struct cl_lock *lock)
172{}
173static void cl_lock_lockdep_acquire(const struct lu_env *env,
174 struct cl_lock *lock, __u32 enqflags)
175{}
176static void cl_lock_lockdep_release(const struct lu_env *env,
177 struct cl_lock *lock)
178{}
179
180#endif /* !CONFIG_LOCKDEP */
181
182/** 63/**
183 * Adds lock slice to the compound lock. 64 * Adds lock slice to the compound lock.
184 * 65 *
@@ -199,62 +80,10 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
199} 80}
200EXPORT_SYMBOL(cl_lock_slice_add); 81EXPORT_SYMBOL(cl_lock_slice_add);
201 82
202/** 83void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock)
203 * Returns true iff a lock with the mode \a has provides at least the same
204 * guarantees as a lock with the mode \a need.
205 */
206int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
207{
208 LINVRNT(need == CLM_READ || need == CLM_WRITE ||
209 need == CLM_PHANTOM || need == CLM_GROUP);
210 LINVRNT(has == CLM_READ || has == CLM_WRITE ||
211 has == CLM_PHANTOM || has == CLM_GROUP);
212 CLASSERT(CLM_PHANTOM < CLM_READ);
213 CLASSERT(CLM_READ < CLM_WRITE);
214 CLASSERT(CLM_WRITE < CLM_GROUP);
215
216 if (has != CLM_GROUP)
217 return need <= has;
218 else
219 return need == has;
220}
221EXPORT_SYMBOL(cl_lock_mode_match);
222
223/**
224 * Returns true iff extent portions of lock descriptions match.
225 */
226int cl_lock_ext_match(const struct cl_lock_descr *has,
227 const struct cl_lock_descr *need)
228{
229 return
230 has->cld_start <= need->cld_start &&
231 has->cld_end >= need->cld_end &&
232 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
233 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
234}
235EXPORT_SYMBOL(cl_lock_ext_match);
236
237/**
238 * Returns true iff a lock with the description \a has provides at least the
239 * same guarantees as a lock with the description \a need.
240 */
241int cl_lock_descr_match(const struct cl_lock_descr *has,
242 const struct cl_lock_descr *need)
243{ 84{
244 return 85 cl_lock_trace(D_DLMTRACE, env, "destroy lock", lock);
245 cl_object_same(has->cld_obj, need->cld_obj) &&
246 cl_lock_ext_match(has, need);
247}
248EXPORT_SYMBOL(cl_lock_descr_match);
249 86
250static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
251{
252 struct cl_object *obj = lock->cll_descr.cld_obj;
253
254 LINVRNT(!cl_lock_is_mutexed(lock));
255
256 cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
257 might_sleep();
258 while (!list_empty(&lock->cll_layers)) { 87 while (!list_empty(&lock->cll_layers)) {
259 struct cl_lock_slice *slice; 88 struct cl_lock_slice *slice;
260 89
@@ -263,350 +92,36 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
263 list_del_init(lock->cll_layers.next); 92 list_del_init(lock->cll_layers.next);
264 slice->cls_ops->clo_fini(env, slice); 93 slice->cls_ops->clo_fini(env, slice);
265 } 94 }
266 CS_LOCK_DEC(obj, total); 95 POISON(lock, 0x5a, sizeof(*lock));
267 CS_LOCKSTATE_DEC(obj, lock->cll_state);
268 lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
269 cl_object_put(env, obj);
270 lu_ref_fini(&lock->cll_reference);
271 lu_ref_fini(&lock->cll_holders);
272 mutex_destroy(&lock->cll_guard);
273 kmem_cache_free(cl_lock_kmem, lock);
274}
275
276/**
277 * Releases a reference on a lock.
278 *
279 * When last reference is released, lock is returned to the cache, unless it
280 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
281 * immediately.
282 *
283 * \see cl_object_put(), cl_page_put()
284 */
285void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
286{
287 struct cl_object *obj;
288
289 LINVRNT(cl_lock_invariant(env, lock));
290 obj = lock->cll_descr.cld_obj;
291 LINVRNT(obj);
292
293 CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
294 atomic_read(&lock->cll_ref), lock, RETIP);
295
296 if (atomic_dec_and_test(&lock->cll_ref)) {
297 if (lock->cll_state == CLS_FREEING) {
298 LASSERT(list_empty(&lock->cll_linkage));
299 cl_lock_free(env, lock);
300 }
301 CS_LOCK_DEC(obj, busy);
302 }
303}
304EXPORT_SYMBOL(cl_lock_put);
305
306/**
307 * Acquires an additional reference to a lock.
308 *
309 * This can be called only by caller already possessing a reference to \a
310 * lock.
311 *
312 * \see cl_object_get(), cl_page_get()
313 */
314void cl_lock_get(struct cl_lock *lock)
315{
316 LINVRNT(cl_lock_invariant(NULL, lock));
317 CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
318 atomic_read(&lock->cll_ref), lock, RETIP);
319 atomic_inc(&lock->cll_ref);
320}
321EXPORT_SYMBOL(cl_lock_get);
322
323/**
324 * Acquires a reference to a lock.
325 *
326 * This is much like cl_lock_get(), except that this function can be used to
327 * acquire initial reference to the cached lock. Caller has to deal with all
328 * possible races. Use with care!
329 *
330 * \see cl_page_get_trust()
331 */
332void cl_lock_get_trust(struct cl_lock *lock)
333{
334 CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
335 atomic_read(&lock->cll_ref), lock, RETIP);
336 if (atomic_inc_return(&lock->cll_ref) == 1)
337 CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
338}
339EXPORT_SYMBOL(cl_lock_get_trust);
340
341/**
342 * Helper function destroying the lock that wasn't completely initialized.
343 *
344 * Other threads can acquire references to the top-lock through its
345 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
346 */
347static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
348{
349 cl_lock_mutex_get(env, lock);
350 cl_lock_cancel(env, lock);
351 cl_lock_delete(env, lock);
352 cl_lock_mutex_put(env, lock);
353 cl_lock_put(env, lock);
354}
355
356static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
357 struct cl_object *obj,
358 const struct cl_io *io,
359 const struct cl_lock_descr *descr)
360{
361 struct cl_lock *lock;
362 struct lu_object_header *head;
363
364 lock = kmem_cache_zalloc(cl_lock_kmem, GFP_NOFS);
365 if (lock) {
366 atomic_set(&lock->cll_ref, 1);
367 lock->cll_descr = *descr;
368 lock->cll_state = CLS_NEW;
369 cl_object_get(obj);
370 lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
371 lock);
372 INIT_LIST_HEAD(&lock->cll_layers);
373 INIT_LIST_HEAD(&lock->cll_linkage);
374 INIT_LIST_HEAD(&lock->cll_inclosure);
375 lu_ref_init(&lock->cll_reference);
376 lu_ref_init(&lock->cll_holders);
377 mutex_init(&lock->cll_guard);
378 lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
379 init_waitqueue_head(&lock->cll_wq);
380 head = obj->co_lu.lo_header;
381 CS_LOCKSTATE_INC(obj, CLS_NEW);
382 CS_LOCK_INC(obj, total);
383 CS_LOCK_INC(obj, create);
384 cl_lock_lockdep_init(lock);
385 list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
386 int err;
387
388 err = obj->co_ops->coo_lock_init(env, obj, lock, io);
389 if (err != 0) {
390 cl_lock_finish(env, lock);
391 lock = ERR_PTR(err);
392 break;
393 }
394 }
395 } else
396 lock = ERR_PTR(-ENOMEM);
397 return lock;
398}
399
400/**
401 * Transfer the lock into INTRANSIT state and return the original state.
402 *
403 * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
404 * \post state: CLS_INTRANSIT
405 * \see CLS_INTRANSIT
406 */
407static enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
408 struct cl_lock *lock)
409{
410 enum cl_lock_state state = lock->cll_state;
411
412 LASSERT(cl_lock_is_mutexed(lock));
413 LASSERT(state != CLS_INTRANSIT);
414 LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
415 "Malformed lock state %d.\n", state);
416
417 cl_lock_state_set(env, lock, CLS_INTRANSIT);
418 lock->cll_intransit_owner = current;
419 cl_lock_hold_add(env, lock, "intransit", current);
420 return state;
421}
422
423/**
424 * Exit the intransit state and restore the lock state to the original state
425 */
426static void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
427 enum cl_lock_state state)
428{
429 LASSERT(cl_lock_is_mutexed(lock));
430 LASSERT(lock->cll_state == CLS_INTRANSIT);
431 LASSERT(state != CLS_INTRANSIT);
432 LASSERT(lock->cll_intransit_owner == current);
433
434 lock->cll_intransit_owner = NULL;
435 cl_lock_state_set(env, lock, state);
436 cl_lock_unhold(env, lock, "intransit", current);
437}
438
439/**
440 * Checking whether the lock is intransit state
441 */
442int cl_lock_is_intransit(struct cl_lock *lock)
443{
444 LASSERT(cl_lock_is_mutexed(lock));
445 return lock->cll_state == CLS_INTRANSIT &&
446 lock->cll_intransit_owner != current;
447}
448EXPORT_SYMBOL(cl_lock_is_intransit);
449/**
450 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
451 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
452 * cover multiple stripes and can trigger cascading timeouts.
453 */
454static int cl_lock_fits_into(const struct lu_env *env,
455 const struct cl_lock *lock,
456 const struct cl_lock_descr *need,
457 const struct cl_io *io)
458{
459 const struct cl_lock_slice *slice;
460
461 LINVRNT(cl_lock_invariant_trusted(env, lock));
462 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
463 if (slice->cls_ops->clo_fits_into &&
464 !slice->cls_ops->clo_fits_into(env, slice, need, io))
465 return 0;
466 }
467 return 1;
468} 96}
97EXPORT_SYMBOL(cl_lock_fini);
469 98
470static struct cl_lock *cl_lock_lookup(const struct lu_env *env, 99int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
471 struct cl_object *obj, 100 const struct cl_io *io)
472 const struct cl_io *io,
473 const struct cl_lock_descr *need)
474{ 101{
475 struct cl_lock *lock; 102 struct cl_object *obj = lock->cll_descr.cld_obj;
476 struct cl_object_header *head; 103 struct cl_object *scan;
477 104 int result = 0;
478 head = cl_object_header(obj);
479 assert_spin_locked(&head->coh_lock_guard);
480 CS_LOCK_INC(obj, lookup);
481 list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
482 int matched;
483
484 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
485 lock->cll_state < CLS_FREEING &&
486 lock->cll_error == 0 &&
487 !(lock->cll_flags & CLF_CANCELLED) &&
488 cl_lock_fits_into(env, lock, need, io);
489 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
490 PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
491 matched);
492 if (matched) {
493 cl_lock_get_trust(lock);
494 CS_LOCK_INC(obj, hit);
495 return lock;
496 }
497 }
498 return NULL;
499}
500
501/**
502 * Returns a lock matching description \a need.
503 *
504 * This is the main entry point into the cl_lock caching interface. First, a
505 * cache (implemented as a per-object linked list) is consulted. If lock is
506 * found there, it is returned immediately. Otherwise new lock is allocated
507 * and returned. In any case, additional reference to lock is acquired.
508 *
509 * \see cl_object_find(), cl_page_find()
510 */
511static struct cl_lock *cl_lock_find(const struct lu_env *env,
512 const struct cl_io *io,
513 const struct cl_lock_descr *need)
514{
515 struct cl_object_header *head;
516 struct cl_object *obj;
517 struct cl_lock *lock;
518
519 obj = need->cld_obj;
520 head = cl_object_header(obj);
521
522 spin_lock(&head->coh_lock_guard);
523 lock = cl_lock_lookup(env, obj, io, need);
524 spin_unlock(&head->coh_lock_guard);
525 105
526 if (!lock) { 106 /* Make sure cl_lock::cll_descr is initialized. */
527 lock = cl_lock_alloc(env, obj, io, need); 107 LASSERT(obj);
528 if (!IS_ERR(lock)) {
529 struct cl_lock *ghost;
530 108
531 spin_lock(&head->coh_lock_guard); 109 INIT_LIST_HEAD(&lock->cll_layers);
532 ghost = cl_lock_lookup(env, obj, io, need); 110 list_for_each_entry(scan, &obj->co_lu.lo_header->loh_layers,
533 if (!ghost) { 111 co_lu.lo_linkage) {
534 cl_lock_get_trust(lock); 112 result = scan->co_ops->coo_lock_init(env, scan, lock, io);
535 list_add_tail(&lock->cll_linkage, 113 if (result != 0) {
536 &head->coh_locks); 114 cl_lock_fini(env, lock);
537 spin_unlock(&head->coh_lock_guard); 115 break;
538 CS_LOCK_INC(obj, busy);
539 } else {
540 spin_unlock(&head->coh_lock_guard);
541 /*
542 * Other threads can acquire references to the
543 * top-lock through its sub-locks. Hence, it
544 * cannot be cl_lock_free()-ed immediately.
545 */
546 cl_lock_finish(env, lock);
547 lock = ghost;
548 }
549 } 116 }
550 } 117 }
551 return lock;
552}
553 118
554/** 119 return result;
555 * Returns existing lock matching given description. This is similar to
556 * cl_lock_find() except that no new lock is created, and returned lock is
557 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
558 */
559struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
560 const struct cl_lock_descr *need,
561 const char *scope, const void *source)
562{
563 struct cl_object_header *head;
564 struct cl_object *obj;
565 struct cl_lock *lock;
566
567 obj = need->cld_obj;
568 head = cl_object_header(obj);
569
570 do {
571 spin_lock(&head->coh_lock_guard);
572 lock = cl_lock_lookup(env, obj, io, need);
573 spin_unlock(&head->coh_lock_guard);
574 if (!lock)
575 return NULL;
576
577 cl_lock_mutex_get(env, lock);
578 if (lock->cll_state == CLS_INTRANSIT)
579 /* Don't care return value. */
580 cl_lock_state_wait(env, lock);
581 if (lock->cll_state == CLS_FREEING) {
582 cl_lock_mutex_put(env, lock);
583 cl_lock_put(env, lock);
584 lock = NULL;
585 }
586 } while (!lock);
587
588 cl_lock_hold_add(env, lock, scope, source);
589 cl_lock_user_add(env, lock);
590 if (lock->cll_state == CLS_CACHED)
591 cl_use_try(env, lock, 1);
592 if (lock->cll_state == CLS_HELD) {
593 cl_lock_mutex_put(env, lock);
594 cl_lock_lockdep_acquire(env, lock, 0);
595 cl_lock_put(env, lock);
596 } else {
597 cl_unuse_try(env, lock);
598 cl_lock_unhold(env, lock, scope, source);
599 cl_lock_mutex_put(env, lock);
600 cl_lock_put(env, lock);
601 lock = NULL;
602 }
603
604 return lock;
605} 120}
606EXPORT_SYMBOL(cl_lock_peek); 121EXPORT_SYMBOL(cl_lock_init);
607 122
608/** 123/**
609 * Returns a slice within a lock, corresponding to the given layer in the 124 * Returns a slice with a lock, corresponding to the given layer in the
610 * device stack. 125 * device stack.
611 * 126 *
612 * \see cl_page_at() 127 * \see cl_page_at()
@@ -616,8 +131,6 @@ const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
616{ 131{
617 const struct cl_lock_slice *slice; 132 const struct cl_lock_slice *slice;
618 133
619 LINVRNT(cl_lock_invariant_trusted(NULL, lock));
620
621 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { 134 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
622 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype) 135 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
623 return slice; 136 return slice;
@@ -626,1537 +139,96 @@ const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
626} 139}
627EXPORT_SYMBOL(cl_lock_at); 140EXPORT_SYMBOL(cl_lock_at);
628 141
629static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock) 142void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
630{
631 struct cl_thread_counters *counters;
632
633 counters = cl_lock_counters(env, lock);
634 lock->cll_depth++;
635 counters->ctc_nr_locks_locked++;
636 lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
637 cl_lock_trace(D_TRACE, env, "got mutex", lock);
638}
639
640/**
641 * Locks cl_lock object.
642 *
643 * This is used to manipulate cl_lock fields, and to serialize state
644 * transitions in the lock state machine.
645 *
646 * \post cl_lock_is_mutexed(lock)
647 *
648 * \see cl_lock_mutex_put()
649 */
650void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
651{
652 LINVRNT(cl_lock_invariant(env, lock));
653
654 if (lock->cll_guarder == current) {
655 LINVRNT(cl_lock_is_mutexed(lock));
656 LINVRNT(lock->cll_depth > 0);
657 } else {
658 struct cl_object_header *hdr;
659 struct cl_thread_info *info;
660 int i;
661
662 LINVRNT(lock->cll_guarder != current);
663 hdr = cl_object_header(lock->cll_descr.cld_obj);
664 /*
665 * Check that mutices are taken in the bottom-to-top order.
666 */
667 info = cl_env_info(env);
668 for (i = 0; i < hdr->coh_nesting; ++i)
669 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
670 mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
671 lock->cll_guarder = current;
672 LINVRNT(lock->cll_depth == 0);
673 }
674 cl_lock_mutex_tail(env, lock);
675}
676EXPORT_SYMBOL(cl_lock_mutex_get);
677
678/**
679 * Try-locks cl_lock object.
680 *
681 * \retval 0 \a lock was successfully locked
682 *
683 * \retval -EBUSY \a lock cannot be locked right now
684 *
685 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
686 *
687 * \see cl_lock_mutex_get()
688 */
689static int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
690{
691 int result;
692
693 LINVRNT(cl_lock_invariant_trusted(env, lock));
694
695 result = 0;
696 if (lock->cll_guarder == current) {
697 LINVRNT(lock->cll_depth > 0);
698 cl_lock_mutex_tail(env, lock);
699 } else if (mutex_trylock(&lock->cll_guard)) {
700 LINVRNT(lock->cll_depth == 0);
701 lock->cll_guarder = current;
702 cl_lock_mutex_tail(env, lock);
703 } else
704 result = -EBUSY;
705 return result;
706}
707
708/**
709 {* Unlocks cl_lock object.
710 *
711 * \pre cl_lock_is_mutexed(lock)
712 *
713 * \see cl_lock_mutex_get()
714 */
715void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
716{
717 struct cl_thread_counters *counters;
718
719 LINVRNT(cl_lock_invariant(env, lock));
720 LINVRNT(cl_lock_is_mutexed(lock));
721 LINVRNT(lock->cll_guarder == current);
722 LINVRNT(lock->cll_depth > 0);
723
724 counters = cl_lock_counters(env, lock);
725 LINVRNT(counters->ctc_nr_locks_locked > 0);
726
727 cl_lock_trace(D_TRACE, env, "put mutex", lock);
728 lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
729 counters->ctc_nr_locks_locked--;
730 if (--lock->cll_depth == 0) {
731 lock->cll_guarder = NULL;
732 mutex_unlock(&lock->cll_guard);
733 }
734}
735EXPORT_SYMBOL(cl_lock_mutex_put);
736
737/**
738 * Returns true iff lock's mutex is owned by the current thread.
739 */
740int cl_lock_is_mutexed(struct cl_lock *lock)
741{
742 return lock->cll_guarder == current;
743}
744EXPORT_SYMBOL(cl_lock_is_mutexed);
745
746/**
747 * Returns number of cl_lock mutices held by the current thread (environment).
748 */
749int cl_lock_nr_mutexed(const struct lu_env *env)
750{
751 struct cl_thread_info *info;
752 int i;
753 int locked;
754
755 /*
756 * NOTE: if summation across all nesting levels (currently 2) proves
757 * too expensive, a summary counter can be added to
758 * struct cl_thread_info.
759 */
760 info = cl_env_info(env);
761 for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
762 locked += info->clt_counters[i].ctc_nr_locks_locked;
763 return locked;
764}
765EXPORT_SYMBOL(cl_lock_nr_mutexed);
766
767static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
768{
769 LINVRNT(cl_lock_is_mutexed(lock));
770 LINVRNT(cl_lock_invariant(env, lock));
771 if (!(lock->cll_flags & CLF_CANCELLED)) {
772 const struct cl_lock_slice *slice;
773
774 lock->cll_flags |= CLF_CANCELLED;
775 list_for_each_entry_reverse(slice, &lock->cll_layers,
776 cls_linkage) {
777 if (slice->cls_ops->clo_cancel)
778 slice->cls_ops->clo_cancel(env, slice);
779 }
780 }
781}
782
783static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
784{
785 struct cl_object_header *head;
786 const struct cl_lock_slice *slice;
787
788 LINVRNT(cl_lock_is_mutexed(lock));
789 LINVRNT(cl_lock_invariant(env, lock));
790
791 if (lock->cll_state < CLS_FREEING) {
792 bool in_cache;
793
794 LASSERT(lock->cll_state != CLS_INTRANSIT);
795 cl_lock_state_set(env, lock, CLS_FREEING);
796
797 head = cl_object_header(lock->cll_descr.cld_obj);
798
799 spin_lock(&head->coh_lock_guard);
800 in_cache = !list_empty(&lock->cll_linkage);
801 if (in_cache)
802 list_del_init(&lock->cll_linkage);
803 spin_unlock(&head->coh_lock_guard);
804
805 if (in_cache) /* coh_locks cache holds a refcount. */
806 cl_lock_put(env, lock);
807
808 /*
809 * From now on, no new references to this lock can be acquired
810 * by cl_lock_lookup().
811 */
812 list_for_each_entry_reverse(slice, &lock->cll_layers,
813 cls_linkage) {
814 if (slice->cls_ops->clo_delete)
815 slice->cls_ops->clo_delete(env, slice);
816 }
817 /*
818 * From now on, no new references to this lock can be acquired
819 * by layer-specific means (like a pointer from struct
820 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
821 * lov).
822 *
823 * Lock will be finally freed in cl_lock_put() when last of
824 * existing references goes away.
825 */
826 }
827}
828
829/**
830 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
831 * top-lock (nesting == 0) accounts for this modification in the per-thread
832 * debugging counters. Sub-lock holds can be released by a thread different
833 * from one that acquired it.
834 */
835static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
836 int delta)
837{
838 struct cl_thread_counters *counters;
839 enum clt_nesting_level nesting;
840
841 lock->cll_holds += delta;
842 nesting = cl_lock_nesting(lock);
843 if (nesting == CNL_TOP) {
844 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
845 counters->ctc_nr_held += delta;
846 LASSERT(counters->ctc_nr_held >= 0);
847 }
848}
849
850/**
851 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
852 * cl_lock_hold_mod() for the explanation of the debugging code.
853 */
854static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
855 int delta)
856{
857 struct cl_thread_counters *counters;
858 enum clt_nesting_level nesting;
859
860 lock->cll_users += delta;
861 nesting = cl_lock_nesting(lock);
862 if (nesting == CNL_TOP) {
863 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
864 counters->ctc_nr_used += delta;
865 LASSERT(counters->ctc_nr_used >= 0);
866 }
867}
868
869void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
870 const char *scope, const void *source)
871{
872 LINVRNT(cl_lock_is_mutexed(lock));
873 LINVRNT(cl_lock_invariant(env, lock));
874 LASSERT(lock->cll_holds > 0);
875
876 cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
877 lu_ref_del(&lock->cll_holders, scope, source);
878 cl_lock_hold_mod(env, lock, -1);
879 if (lock->cll_holds == 0) {
880 CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
881 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
882 lock->cll_descr.cld_mode == CLM_GROUP ||
883 lock->cll_state != CLS_CACHED)
884 /*
885 * If lock is still phantom or grouplock when user is
886 * done with it---destroy the lock.
887 */
888 lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
889 if (lock->cll_flags & CLF_CANCELPEND) {
890 lock->cll_flags &= ~CLF_CANCELPEND;
891 cl_lock_cancel0(env, lock);
892 }
893 if (lock->cll_flags & CLF_DOOMED) {
894 /* no longer doomed: it's dead... Jim. */
895 lock->cll_flags &= ~CLF_DOOMED;
896 cl_lock_delete0(env, lock);
897 }
898 }
899}
900EXPORT_SYMBOL(cl_lock_hold_release);
901
902/**
903 * Waits until lock state is changed.
904 *
905 * This function is called with cl_lock mutex locked, atomically releases
906 * mutex and goes to sleep, waiting for a lock state change (signaled by
907 * cl_lock_signal()), and re-acquires the mutex before return.
908 *
909 * This function is used to wait until lock state machine makes some progress
910 * and to emulate synchronous operations on top of asynchronous lock
911 * interface.
912 *
913 * \retval -EINTR wait was interrupted
914 *
915 * \retval 0 wait wasn't interrupted
916 *
917 * \pre cl_lock_is_mutexed(lock)
918 *
919 * \see cl_lock_signal()
920 */
921int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
922{
923 wait_queue_t waiter;
924 sigset_t blocked;
925 int result;
926
927 LINVRNT(cl_lock_is_mutexed(lock));
928 LINVRNT(cl_lock_invariant(env, lock));
929 LASSERT(lock->cll_depth == 1);
930 LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
931
932 cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
933 result = lock->cll_error;
934 if (result == 0) {
935 /* To avoid being interrupted by the 'non-fatal' signals
936 * (SIGCHLD, for instance), we'd block them temporarily.
937 * LU-305
938 */
939 blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
940
941 init_waitqueue_entry(&waiter, current);
942 add_wait_queue(&lock->cll_wq, &waiter);
943 set_current_state(TASK_INTERRUPTIBLE);
944 cl_lock_mutex_put(env, lock);
945
946 LASSERT(cl_lock_nr_mutexed(env) == 0);
947
948 /* Returning ERESTARTSYS instead of EINTR so syscalls
949 * can be restarted if signals are pending here
950 */
951 result = -ERESTARTSYS;
952 if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
953 schedule();
954 if (!cfs_signal_pending())
955 result = 0;
956 }
957
958 cl_lock_mutex_get(env, lock);
959 set_current_state(TASK_RUNNING);
960 remove_wait_queue(&lock->cll_wq, &waiter);
961
962 /* Restore old blocked signals */
963 cfs_restore_sigs(blocked);
964 }
965 return result;
966}
967EXPORT_SYMBOL(cl_lock_state_wait);
968
969static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
970 enum cl_lock_state state)
971{
972 const struct cl_lock_slice *slice;
973
974 LINVRNT(cl_lock_is_mutexed(lock));
975 LINVRNT(cl_lock_invariant(env, lock));
976
977 list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
978 if (slice->cls_ops->clo_state)
979 slice->cls_ops->clo_state(env, slice, state);
980 wake_up_all(&lock->cll_wq);
981}
982
983/**
984 * Notifies waiters that lock state changed.
985 *
986 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
987 * layers about state change by calling cl_lock_operations::clo_state()
988 * top-to-bottom.
989 */
990void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
991{
992 cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
993 cl_lock_state_signal(env, lock, lock->cll_state);
994}
995EXPORT_SYMBOL(cl_lock_signal);
996
997/**
998 * Changes lock state.
999 *
1000 * This function is invoked to notify layers that lock state changed, possible
1001 * as a result of an asynchronous event such as call-back reception.
1002 *
1003 * \post lock->cll_state == state
1004 *
1005 * \see cl_lock_operations::clo_state()
1006 */
1007void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1008 enum cl_lock_state state)
1009{
1010 LASSERT(lock->cll_state <= state ||
1011 (lock->cll_state == CLS_CACHED &&
1012 (state == CLS_HELD || /* lock found in cache */
1013 state == CLS_NEW || /* sub-lock canceled */
1014 state == CLS_INTRANSIT)) ||
1015 /* lock is in transit state */
1016 lock->cll_state == CLS_INTRANSIT);
1017
1018 if (lock->cll_state != state) {
1019 CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
1020 CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
1021
1022 cl_lock_state_signal(env, lock, state);
1023 lock->cll_state = state;
1024 }
1025}
1026EXPORT_SYMBOL(cl_lock_state_set);
1027
1028static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1029{
1030 const struct cl_lock_slice *slice;
1031 int result;
1032
1033 do {
1034 result = 0;
1035
1036 LINVRNT(cl_lock_is_mutexed(lock));
1037 LINVRNT(cl_lock_invariant(env, lock));
1038 LASSERT(lock->cll_state == CLS_INTRANSIT);
1039
1040 result = -ENOSYS;
1041 list_for_each_entry_reverse(slice, &lock->cll_layers,
1042 cls_linkage) {
1043 if (slice->cls_ops->clo_unuse) {
1044 result = slice->cls_ops->clo_unuse(env, slice);
1045 if (result != 0)
1046 break;
1047 }
1048 }
1049 LASSERT(result != -ENOSYS);
1050 } while (result == CLO_REPEAT);
1051
1052 return result;
1053}
1054
1055/**
1056 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1057 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1058 * @atomic = 1, it must unuse the lock to recovery the lock to keep the
1059 * use process atomic
1060 */
1061int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1062{ 143{
1063 const struct cl_lock_slice *slice; 144 const struct cl_lock_slice *slice;
1064 int result;
1065 enum cl_lock_state state;
1066
1067 cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1068
1069 LASSERT(lock->cll_state == CLS_CACHED);
1070 if (lock->cll_error)
1071 return lock->cll_error;
1072
1073 result = -ENOSYS;
1074 state = cl_lock_intransit(env, lock);
1075 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1076 if (slice->cls_ops->clo_use) {
1077 result = slice->cls_ops->clo_use(env, slice);
1078 if (result != 0)
1079 break;
1080 }
1081 }
1082 LASSERT(result != -ENOSYS);
1083
1084 LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
1085 lock->cll_state);
1086
1087 if (result == 0) {
1088 state = CLS_HELD;
1089 } else {
1090 if (result == -ESTALE) {
1091 /*
1092 * ESTALE means sublock being cancelled
1093 * at this time, and set lock state to
1094 * be NEW here and ask the caller to repeat.
1095 */
1096 state = CLS_NEW;
1097 result = CLO_REPEAT;
1098 }
1099
1100 /* @atomic means back-off-on-failure. */
1101 if (atomic) {
1102 int rc;
1103
1104 rc = cl_unuse_try_internal(env, lock);
1105 /* Vet the results. */
1106 if (rc < 0 && result > 0)
1107 result = rc;
1108 }
1109 145
146 cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
147 list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
148 if (slice->cls_ops->clo_cancel)
149 slice->cls_ops->clo_cancel(env, slice);
1110 } 150 }
1111 cl_lock_extransit(env, lock, state);
1112 return result;
1113} 151}
1114EXPORT_SYMBOL(cl_use_try); 152EXPORT_SYMBOL(cl_lock_cancel);
1115 153
1116/** 154/**
1117 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers 155 * Enqueue a lock.
1118 * top-to-bottom. 156 * \param anchor: if we need to wait for resources before getting the lock,
157 * use @anchor for the purpose.
158 * \retval 0 enqueue successfully
159 * \retval <0 error code
1119 */ 160 */
1120static int cl_enqueue_kick(const struct lu_env *env, 161int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
1121 struct cl_lock *lock, 162 struct cl_lock *lock, struct cl_sync_io *anchor)
1122 struct cl_io *io, __u32 flags)
1123{ 163{
1124 int result;
1125 const struct cl_lock_slice *slice; 164 const struct cl_lock_slice *slice;
165 int rc = -ENOSYS;
1126 166
1127 result = -ENOSYS;
1128 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { 167 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1129 if (slice->cls_ops->clo_enqueue) { 168 if (!slice->cls_ops->clo_enqueue)
1130 result = slice->cls_ops->clo_enqueue(env, 169 continue;
1131 slice, io, flags);
1132 if (result != 0)
1133 break;
1134 }
1135 }
1136 LASSERT(result != -ENOSYS);
1137 return result;
1138}
1139
1140/**
1141 * Tries to enqueue a lock.
1142 *
1143 * This function is called repeatedly by cl_enqueue() until either lock is
1144 * enqueued, or error occurs. This function does not block waiting for
1145 * networking communication to complete.
1146 *
1147 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1148 * lock->cll_state == CLS_HELD)
1149 *
1150 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1151 * \see cl_lock_state::CLS_ENQUEUED
1152 */
1153int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1154 struct cl_io *io, __u32 flags)
1155{
1156 int result;
1157
1158 cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1159 do {
1160 LINVRNT(cl_lock_is_mutexed(lock));
1161
1162 result = lock->cll_error;
1163 if (result != 0)
1164 break;
1165
1166 switch (lock->cll_state) {
1167 case CLS_NEW:
1168 cl_lock_state_set(env, lock, CLS_QUEUING);
1169 /* fall-through */
1170 case CLS_QUEUING:
1171 /* kick layers. */
1172 result = cl_enqueue_kick(env, lock, io, flags);
1173 /* For AGL case, the cl_lock::cll_state may
1174 * become CLS_HELD already.
1175 */
1176 if (result == 0 && lock->cll_state == CLS_QUEUING)
1177 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1178 break;
1179 case CLS_INTRANSIT:
1180 LASSERT(cl_lock_is_intransit(lock));
1181 result = CLO_WAIT;
1182 break;
1183 case CLS_CACHED:
1184 /* yank lock from the cache. */
1185 result = cl_use_try(env, lock, 0);
1186 break;
1187 case CLS_ENQUEUED:
1188 case CLS_HELD:
1189 result = 0;
1190 break;
1191 default:
1192 case CLS_FREEING:
1193 /*
1194 * impossible, only held locks with increased
1195 * ->cll_holds can be enqueued, and they cannot be
1196 * freed.
1197 */
1198 LBUG();
1199 }
1200 } while (result == CLO_REPEAT);
1201 return result;
1202}
1203EXPORT_SYMBOL(cl_enqueue_try);
1204
1205/**
1206 * Cancel the conflicting lock found during previous enqueue.
1207 *
1208 * \retval 0 conflicting lock has been canceled.
1209 * \retval -ve error code.
1210 */
1211int cl_lock_enqueue_wait(const struct lu_env *env,
1212 struct cl_lock *lock,
1213 int keep_mutex)
1214{
1215 struct cl_lock *conflict;
1216 int rc = 0;
1217
1218 LASSERT(cl_lock_is_mutexed(lock));
1219 LASSERT(lock->cll_state == CLS_QUEUING);
1220 LASSERT(lock->cll_conflict);
1221
1222 conflict = lock->cll_conflict;
1223 lock->cll_conflict = NULL;
1224 170
1225 cl_lock_mutex_put(env, lock); 171 rc = slice->cls_ops->clo_enqueue(env, slice, io, anchor);
1226 LASSERT(cl_lock_nr_mutexed(env) == 0);
1227
1228 cl_lock_mutex_get(env, conflict);
1229 cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
1230 cl_lock_cancel(env, conflict);
1231 cl_lock_delete(env, conflict);
1232
1233 while (conflict->cll_state != CLS_FREEING) {
1234 rc = cl_lock_state_wait(env, conflict);
1235 if (rc != 0) 172 if (rc != 0)
1236 break; 173 break;
1237 }
1238 cl_lock_mutex_put(env, conflict);
1239 lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
1240 cl_lock_put(env, conflict);
1241
1242 if (keep_mutex)
1243 cl_lock_mutex_get(env, lock);
1244
1245 LASSERT(rc <= 0);
1246 return rc;
1247}
1248EXPORT_SYMBOL(cl_lock_enqueue_wait);
1249
1250static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1251 struct cl_io *io, __u32 enqflags)
1252{
1253 int result;
1254
1255 LINVRNT(cl_lock_is_mutexed(lock));
1256 LINVRNT(cl_lock_invariant(env, lock));
1257 LASSERT(lock->cll_holds > 0);
1258
1259 cl_lock_user_add(env, lock);
1260 do {
1261 result = cl_enqueue_try(env, lock, io, enqflags);
1262 if (result == CLO_WAIT) {
1263 if (lock->cll_conflict)
1264 result = cl_lock_enqueue_wait(env, lock, 1);
1265 else
1266 result = cl_lock_state_wait(env, lock);
1267 if (result == 0)
1268 continue;
1269 }
1270 break;
1271 } while (1);
1272 if (result != 0)
1273 cl_unuse_try(env, lock);
1274 LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
1275 lock->cll_state == CLS_ENQUEUED ||
1276 lock->cll_state == CLS_HELD));
1277 return result;
1278}
1279
1280/**
1281 * Tries to unlock a lock.
1282 *
1283 * This function is called to release underlying resource:
1284 * 1. for top lock, the resource is sublocks it held;
1285 * 2. for sublock, the resource is the reference to dlmlock.
1286 *
1287 * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
1288 *
1289 * \see cl_unuse() cl_lock_operations::clo_unuse()
1290 * \see cl_lock_state::CLS_CACHED
1291 */
1292int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1293{
1294 int result;
1295 enum cl_lock_state state = CLS_NEW;
1296
1297 cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1298
1299 if (lock->cll_users > 1) {
1300 cl_lock_user_del(env, lock);
1301 return 0;
1302 }
1303
1304 /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
1305 * underlying resources.
1306 */
1307 if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
1308 cl_lock_user_del(env, lock);
1309 return 0;
1310 }
1311
1312 /*
1313 * New lock users (->cll_users) are not protecting unlocking
1314 * from proceeding. From this point, lock eventually reaches
1315 * CLS_CACHED, is reinitialized to CLS_NEW or fails into
1316 * CLS_FREEING.
1317 */
1318 state = cl_lock_intransit(env, lock);
1319
1320 result = cl_unuse_try_internal(env, lock);
1321 LASSERT(lock->cll_state == CLS_INTRANSIT);
1322 LASSERT(result != CLO_WAIT);
1323 cl_lock_user_del(env, lock);
1324 if (result == 0 || result == -ESTALE) {
1325 /*
1326 * Return lock back to the cache. This is the only
1327 * place where lock is moved into CLS_CACHED state.
1328 *
1329 * If one of ->clo_unuse() methods returned -ESTALE, lock
1330 * cannot be placed into cache and has to be
1331 * re-initialized. This happens e.g., when a sub-lock was
1332 * canceled while unlocking was in progress.
1333 */
1334 if (state == CLS_HELD && result == 0)
1335 state = CLS_CACHED;
1336 else
1337 state = CLS_NEW;
1338 cl_lock_extransit(env, lock, state);
1339
1340 /*
1341 * Hide -ESTALE error.
1342 * If the lock is a glimpse lock, and it has multiple
1343 * stripes. Assuming that one of its sublock returned -ENAVAIL,
1344 * and other sublocks are matched write locks. In this case,
1345 * we can't set this lock to error because otherwise some of
1346 * its sublocks may not be canceled. This causes some dirty
1347 * pages won't be written to OSTs. -jay
1348 */
1349 result = 0;
1350 } else {
1351 CERROR("result = %d, this is unlikely!\n", result);
1352 state = CLS_NEW;
1353 cl_lock_extransit(env, lock, state);
1354 }
1355 return result ?: lock->cll_error;
1356}
1357EXPORT_SYMBOL(cl_unuse_try);
1358
1359static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1360{
1361 int result;
1362
1363 result = cl_unuse_try(env, lock);
1364 if (result)
1365 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1366}
1367
/**
 * Unlocks a lock: takes the lock mutex, unuses the lock, and releases the
 * lockdep annotation acquired at enqueue time.
 */
void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
{
	cl_lock_mutex_get(env, lock);
	cl_unuse_locked(env, lock);
	cl_lock_mutex_put(env, lock);
	cl_lock_lockdep_release(env, lock);
}
EXPORT_SYMBOL(cl_unuse);
1379
1380/**
1381 * Tries to wait for a lock.
1382 *
1383 * This function is called repeatedly by cl_wait() until either lock is
1384 * granted, or error occurs. This function does not block waiting for network
1385 * communication to complete.
1386 *
1387 * \see cl_wait() cl_lock_operations::clo_wait()
1388 * \see cl_lock_state::CLS_HELD
1389 */
1390int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1391{
1392 const struct cl_lock_slice *slice;
1393 int result;
1394
1395 cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1396 do {
1397 LINVRNT(cl_lock_is_mutexed(lock));
1398 LINVRNT(cl_lock_invariant(env, lock));
1399 LASSERTF(lock->cll_state == CLS_QUEUING ||
1400 lock->cll_state == CLS_ENQUEUED ||
1401 lock->cll_state == CLS_HELD ||
1402 lock->cll_state == CLS_INTRANSIT,
1403 "lock state: %d\n", lock->cll_state);
1404 LASSERT(lock->cll_users > 0);
1405 LASSERT(lock->cll_holds > 0);
1406
1407 result = lock->cll_error;
1408 if (result != 0)
1409 break;
1410
1411 if (cl_lock_is_intransit(lock)) {
1412 result = CLO_WAIT;
1413 break;
1414 }
1415
1416 if (lock->cll_state == CLS_HELD)
1417 /* nothing to do */
1418 break;
1419
1420 result = -ENOSYS;
1421 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1422 if (slice->cls_ops->clo_wait) {
1423 result = slice->cls_ops->clo_wait(env, slice);
1424 if (result != 0)
1425 break;
1426 }
1427 }
1428 LASSERT(result != -ENOSYS);
1429 if (result == 0) {
1430 LASSERT(lock->cll_state != CLS_INTRANSIT);
1431 cl_lock_state_set(env, lock, CLS_HELD);
1432 }
1433 } while (result == CLO_REPEAT);
1434 return result;
1435}
1436EXPORT_SYMBOL(cl_wait_try);
1437
1438/**
1439 * Waits until enqueued lock is granted.
1440 *
1441 * \pre current thread or io owns a hold on the lock
1442 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1443 * lock->cll_state == CLS_HELD)
1444 *
1445 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1446 */
1447int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1448{
1449 int result;
1450
1451 cl_lock_mutex_get(env, lock);
1452
1453 LINVRNT(cl_lock_invariant(env, lock));
1454 LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
1455 "Wrong state %d\n", lock->cll_state);
1456 LASSERT(lock->cll_holds > 0);
1457
1458 do {
1459 result = cl_wait_try(env, lock);
1460 if (result == CLO_WAIT) {
1461 result = cl_lock_state_wait(env, lock);
1462 if (result == 0)
1463 continue;
1464 }
1465 break;
1466 } while (1);
1467 if (result < 0) {
1468 cl_unuse_try(env, lock);
1469 cl_lock_lockdep_release(env, lock);
1470 }
1471 cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1472 cl_lock_mutex_put(env, lock);
1473 LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1474 return result;
1475}
1476EXPORT_SYMBOL(cl_wait);
1477
1478/**
1479 * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
1480 * value.
1481 */
1482unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1483{
1484 const struct cl_lock_slice *slice;
1485 unsigned long pound;
1486 unsigned long ounce;
1487
1488 LINVRNT(cl_lock_is_mutexed(lock));
1489 LINVRNT(cl_lock_invariant(env, lock));
1490
1491 pound = 0;
1492 list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1493 if (slice->cls_ops->clo_weigh) {
1494 ounce = slice->cls_ops->clo_weigh(env, slice);
1495 pound += ounce;
1496 if (pound < ounce) /* over-weight^Wflow */
1497 pound = ~0UL;
1498 }
1499 }
1500 return pound;
1501}
1502EXPORT_SYMBOL(cl_lock_weigh);
1503
1504/**
1505 * Notifies layers that lock description changed.
1506 *
1507 * The server can grant client a lock different from one that was requested
1508 * (e.g., larger in extent). This method is called when actually granted lock
1509 * description becomes known to let layers to accommodate for changed lock
1510 * description.
1511 *
1512 * \see cl_lock_operations::clo_modify()
1513 */
1514int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1515 const struct cl_lock_descr *desc)
1516{
1517 const struct cl_lock_slice *slice;
1518 struct cl_object *obj = lock->cll_descr.cld_obj;
1519 struct cl_object_header *hdr = cl_object_header(obj);
1520 int result;
1521
1522 cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1523 /* don't allow object to change */
1524 LASSERT(obj == desc->cld_obj);
1525 LINVRNT(cl_lock_is_mutexed(lock));
1526 LINVRNT(cl_lock_invariant(env, lock));
1527
1528 list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1529 if (slice->cls_ops->clo_modify) {
1530 result = slice->cls_ops->clo_modify(env, slice, desc);
1531 if (result != 0)
1532 return result;
1533 }
1534 }
1535 CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1536 PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1537 /*
1538 * Just replace description in place. Nothing more is needed for
1539 * now. If locks were indexed according to their extent and/or mode,
1540 * that index would have to be updated here.
1541 */
1542 spin_lock(&hdr->coh_lock_guard);
1543 lock->cll_descr = *desc;
1544 spin_unlock(&hdr->coh_lock_guard);
1545 return 0;
1546}
1547EXPORT_SYMBOL(cl_lock_modify);
1548
1549/**
1550 * Initializes lock closure with a given origin.
1551 *
1552 * \see cl_lock_closure
1553 */
1554void cl_lock_closure_init(const struct lu_env *env,
1555 struct cl_lock_closure *closure,
1556 struct cl_lock *origin, int wait)
1557{
1558 LINVRNT(cl_lock_is_mutexed(origin));
1559 LINVRNT(cl_lock_invariant(env, origin));
1560
1561 INIT_LIST_HEAD(&closure->clc_list);
1562 closure->clc_origin = origin;
1563 closure->clc_wait = wait;
1564 closure->clc_nr = 0;
1565}
1566EXPORT_SYMBOL(cl_lock_closure_init);
1567
1568/**
1569 * Builds a closure of \a lock.
1570 *
1571 * Building of a closure consists of adding initial lock (\a lock) into it,
1572 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1573 * methods might call cl_lock_closure_build() recursively again, adding more
1574 * locks to the closure, etc.
1575 *
1576 * \see cl_lock_closure
1577 */
1578int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1579 struct cl_lock_closure *closure)
1580{
1581 const struct cl_lock_slice *slice;
1582 int result;
1583
1584 LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1585 LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1586
1587 result = cl_lock_enclosure(env, lock, closure);
1588 if (result == 0) {
1589 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1590 if (slice->cls_ops->clo_closure) {
1591 result = slice->cls_ops->clo_closure(env, slice,
1592 closure);
1593 if (result != 0)
1594 break;
1595 }
1596 }
1597 }
1598 if (result != 0)
1599 cl_lock_disclosure(env, closure);
1600 return result;
1601}
1602EXPORT_SYMBOL(cl_lock_closure_build);
1603
1604/**
1605 * Adds new lock to a closure.
1606 *
1607 * Try-locks \a lock and if succeeded, adds it to the closure (never more than
1608 * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
1609 * until next try-lock is likely to succeed.
1610 */
1611int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1612 struct cl_lock_closure *closure)
1613{
1614 int result = 0;
1615
1616 cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1617 if (!cl_lock_mutex_try(env, lock)) {
1618 /*
1619 * If lock->cll_inclosure is not empty, lock is already in
1620 * this closure.
1621 */
1622 if (list_empty(&lock->cll_inclosure)) {
1623 cl_lock_get_trust(lock);
1624 lu_ref_add(&lock->cll_reference, "closure", closure);
1625 list_add(&lock->cll_inclosure, &closure->clc_list);
1626 closure->clc_nr++;
1627 } else
1628 cl_lock_mutex_put(env, lock);
1629 result = 0;
1630 } else {
1631 cl_lock_disclosure(env, closure);
1632 if (closure->clc_wait) {
1633 cl_lock_get_trust(lock);
1634 lu_ref_add(&lock->cll_reference, "closure-w", closure);
1635 cl_lock_mutex_put(env, closure->clc_origin);
1636
1637 LASSERT(cl_lock_nr_mutexed(env) == 0);
1638 cl_lock_mutex_get(env, lock);
1639 cl_lock_mutex_put(env, lock);
1640
1641 cl_lock_mutex_get(env, closure->clc_origin);
1642 lu_ref_del(&lock->cll_reference, "closure-w", closure);
1643 cl_lock_put(env, lock);
1644 }
1645 result = CLO_REPEAT;
1646 }
1647 return result;
1648}
1649EXPORT_SYMBOL(cl_lock_enclosure);
1650
1651/** Releases mutices of enclosed locks. */
1652void cl_lock_disclosure(const struct lu_env *env,
1653 struct cl_lock_closure *closure)
1654{
1655 struct cl_lock *scan;
1656 struct cl_lock *temp;
1657
1658 cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1659 list_for_each_entry_safe(scan, temp, &closure->clc_list,
1660 cll_inclosure) {
1661 list_del_init(&scan->cll_inclosure);
1662 cl_lock_mutex_put(env, scan);
1663 lu_ref_del(&scan->cll_reference, "closure", closure);
1664 cl_lock_put(env, scan);
1665 closure->clc_nr--;
1666 }
1667 LASSERT(closure->clc_nr == 0);
1668}
1669EXPORT_SYMBOL(cl_lock_disclosure);
1670
1671/** Finalizes a closure. */
1672void cl_lock_closure_fini(struct cl_lock_closure *closure)
1673{
1674 LASSERT(closure->clc_nr == 0);
1675 LASSERT(list_empty(&closure->clc_list));
1676}
1677EXPORT_SYMBOL(cl_lock_closure_fini);
1678
1679/**
1680 * Destroys this lock. Notifies layers (bottom-to-top) that lock is being
1681 * destroyed, then destroy the lock. If there are holds on the lock, postpone
1682 * destruction until all holds are released. This is called when a decision is
1683 * made to destroy the lock in the future. E.g., when a blocking AST is
1684 * received on it, or fatal communication error happens.
1685 *
1686 * Caller must have a reference on this lock to prevent a situation, when
1687 * deleted lock lingers in memory for indefinite time, because nobody calls
1688 * cl_lock_put() to finish it.
1689 *
1690 * \pre atomic_read(&lock->cll_ref) > 0
1691 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1692 * cl_lock_nr_mutexed(env) == 1)
1693 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1694 * held, as deletion of sub-locks might require releasing a top-lock
1695 * mutex]
1696 *
1697 * \see cl_lock_operations::clo_delete()
1698 * \see cl_lock::cll_holds
1699 */
1700void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1701{
1702 LINVRNT(cl_lock_is_mutexed(lock));
1703 LINVRNT(cl_lock_invariant(env, lock));
1704 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1705 cl_lock_nr_mutexed(env) == 1));
1706
1707 cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1708 if (lock->cll_holds == 0)
1709 cl_lock_delete0(env, lock);
1710 else
1711 lock->cll_flags |= CLF_DOOMED;
1712}
1713EXPORT_SYMBOL(cl_lock_delete);
1714
1715/**
1716 * Mark lock as irrecoverably failed, and mark it for destruction. This
1717 * happens when, e.g., server fails to grant a lock to us, or networking
1718 * time-out happens.
1719 *
1720 * \pre atomic_read(&lock->cll_ref) > 0
1721 *
1722 * \see clo_lock_delete()
1723 * \see cl_lock::cll_holds
1724 */
1725void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1726{
1727 LINVRNT(cl_lock_is_mutexed(lock));
1728 LINVRNT(cl_lock_invariant(env, lock));
1729
1730 if (lock->cll_error == 0 && error != 0) {
1731 cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1732 lock->cll_error = error;
1733 cl_lock_signal(env, lock);
1734 cl_lock_cancel(env, lock);
1735 cl_lock_delete(env, lock);
1736 }
1737}
1738EXPORT_SYMBOL(cl_lock_error);
1739
1740/**
1741 * Cancels this lock. Notifies layers
1742 * (bottom-to-top) that lock is being cancelled, then destroy the lock. If
1743 * there are holds on the lock, postpone cancellation until
1744 * all holds are released.
1745 *
1746 * Cancellation notification is delivered to layers at most once.
1747 *
1748 * \see cl_lock_operations::clo_cancel()
1749 * \see cl_lock::cll_holds
1750 */
1751void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1752{
1753 LINVRNT(cl_lock_is_mutexed(lock));
1754 LINVRNT(cl_lock_invariant(env, lock));
1755
1756 cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1757 if (lock->cll_holds == 0)
1758 cl_lock_cancel0(env, lock);
1759 else
1760 lock->cll_flags |= CLF_CANCELPEND;
1761}
1762EXPORT_SYMBOL(cl_lock_cancel);
1763
1764/**
1765 * Finds an existing lock covering given index and optionally different from a
1766 * given \a except lock.
1767 */
1768struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
1769 struct cl_object *obj, pgoff_t index,
1770 struct cl_lock *except,
1771 int pending, int canceld)
1772{
1773 struct cl_object_header *head;
1774 struct cl_lock *scan;
1775 struct cl_lock *lock;
1776 struct cl_lock_descr *need;
1777
1778 head = cl_object_header(obj);
1779 need = &cl_env_info(env)->clt_descr;
1780 lock = NULL;
1781
1782 need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
1783 * not PHANTOM
1784 */
1785 need->cld_start = need->cld_end = index;
1786 need->cld_enq_flags = 0;
1787
1788 spin_lock(&head->coh_lock_guard);
1789 /* It is fine to match any group lock since there could be only one
1790 * with a uniq gid and it conflicts with all other lock modes too
1791 */
1792 list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1793 if (scan != except &&
1794 (scan->cll_descr.cld_mode == CLM_GROUP ||
1795 cl_lock_ext_match(&scan->cll_descr, need)) &&
1796 scan->cll_state >= CLS_HELD &&
1797 scan->cll_state < CLS_FREEING &&
1798 /*
1799 * This check is racy as the lock can be canceled right
1800 * after it is done, but this is fine, because page exists
1801 * already.
1802 */
1803 (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1804 (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1805 /* Don't increase cs_hit here since this
1806 * is just a helper function.
1807 */
1808 cl_lock_get_trust(scan);
1809 lock = scan;
1810 break;
1811 } 174 }
1812 } 175 return rc;
1813 spin_unlock(&head->coh_lock_guard);
1814 return lock;
1815}
1816EXPORT_SYMBOL(cl_lock_at_pgoff);
1817
1818/**
1819 * Calculate the page offset at the layer of @lock.
1820 * At the time of this writing, @page is top page and @lock is sub lock.
1821 */
1822static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
1823{
1824 struct lu_device_type *dtype;
1825 const struct cl_page_slice *slice;
1826
1827 dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
1828 slice = cl_page_at(page, dtype);
1829 return slice->cpl_page->cp_index;
1830} 176}
177EXPORT_SYMBOL(cl_lock_enqueue);
1831 178
1832/** 179/**
1833 * Check if page @page is covered by an extra lock or discard it. 180 * Main high-level entry point of cl_lock interface that finds existing or
181 * enqueues new lock matching given description.
1834 */ 182 */
1835static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io, 183int cl_lock_request(const struct lu_env *env, struct cl_io *io,
1836 struct cl_page *page, void *cbdata) 184 struct cl_lock *lock)
1837{ 185{
1838 struct cl_thread_info *info = cl_env_info(env); 186 struct cl_sync_io *anchor = NULL;
1839 struct cl_lock *lock = cbdata; 187 __u32 enq_flags = lock->cll_descr.cld_enq_flags;
1840 pgoff_t index = pgoff_at_lock(page, lock); 188 int rc;
1841 189
1842 if (index >= info->clt_fn_index) { 190 rc = cl_lock_init(env, lock, io);
1843 struct cl_lock *tmp; 191 if (rc < 0)
192 return rc;
1844 193
1845 /* refresh non-overlapped index */ 194 if ((enq_flags & CEF_ASYNC) && !(enq_flags & CEF_AGL)) {
1846 tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index, 195 anchor = &cl_env_info(env)->clt_anchor;
1847 lock, 1, 0); 196 cl_sync_io_init(anchor, 1, cl_sync_io_end);
1848 if (tmp) {
1849 /* Cache the first-non-overlapped index so as to skip
1850 * all pages within [index, clt_fn_index). This
1851 * is safe because if tmp lock is canceled, it will
1852 * discard these pages.
1853 */
1854 info->clt_fn_index = tmp->cll_descr.cld_end + 1;
1855 if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
1856 info->clt_fn_index = CL_PAGE_EOF;
1857 cl_lock_put(env, tmp);
1858 } else if (cl_page_own(env, io, page) == 0) {
1859 /* discard the page */
1860 cl_page_unmap(env, io, page);
1861 cl_page_discard(env, io, page);
1862 cl_page_disown(env, io, page);
1863 } else {
1864 LASSERT(page->cp_state == CPS_FREEING);
1865 }
1866 } 197 }
1867 198
1868 info->clt_next_index = index + 1; 199 rc = cl_lock_enqueue(env, io, lock, anchor);
1869 return CLP_GANG_OKAY;
1870}
1871 200
1872static int discard_cb(const struct lu_env *env, struct cl_io *io, 201 if (anchor) {
1873 struct cl_page *page, void *cbdata) 202 int rc2;
1874{
1875 struct cl_thread_info *info = cl_env_info(env);
1876 struct cl_lock *lock = cbdata;
1877 203
1878 LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE); 204 /* drop the reference count held at initialization time */
1879 KLASSERT(ergo(page->cp_type == CPT_CACHEABLE, 205 cl_sync_io_note(env, anchor, 0);
1880 !PageWriteback(cl_page_vmpage(env, page)))); 206 rc2 = cl_sync_io_wait(env, anchor, 0);
1881 KLASSERT(ergo(page->cp_type == CPT_CACHEABLE, 207 if (rc2 < 0 && rc == 0)
1882 !PageDirty(cl_page_vmpage(env, page)))); 208 rc = rc2;
1883
1884 info->clt_next_index = pgoff_at_lock(page, lock) + 1;
1885 if (cl_page_own(env, io, page) == 0) {
1886 /* discard the page */
1887 cl_page_unmap(env, io, page);
1888 cl_page_discard(env, io, page);
1889 cl_page_disown(env, io, page);
1890 } else {
1891 LASSERT(page->cp_state == CPS_FREEING);
1892 } 209 }
1893 210
1894 return CLP_GANG_OKAY; 211 if (rc < 0)
1895} 212 cl_lock_release(env, lock);
1896 213
1897/** 214 return rc;
1898 * Discard pages protected by the given lock. This function traverses radix
1899 * tree to find all covering pages and discard them. If a page is being covered
1900 * by other locks, it should remain in cache.
1901 *
1902 * If error happens on any step, the process continues anyway (the reasoning
1903 * behind this being that lock cancellation cannot be delayed indefinitely).
1904 */
1905int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
1906{
1907 struct cl_thread_info *info = cl_env_info(env);
1908 struct cl_io *io = &info->clt_io;
1909 struct cl_lock_descr *descr = &lock->cll_descr;
1910 cl_page_gang_cb_t cb;
1911 int res;
1912 int result;
1913
1914 LINVRNT(cl_lock_invariant(env, lock));
1915
1916 io->ci_obj = cl_object_top(descr->cld_obj);
1917 io->ci_ignore_layout = 1;
1918 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
1919 if (result != 0)
1920 goto out;
1921
1922 cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
1923 info->clt_fn_index = info->clt_next_index = descr->cld_start;
1924 do {
1925 res = cl_page_gang_lookup(env, descr->cld_obj, io,
1926 info->clt_next_index, descr->cld_end,
1927 cb, (void *)lock);
1928 if (info->clt_next_index > descr->cld_end)
1929 break;
1930
1931 if (res == CLP_GANG_RESCHED)
1932 cond_resched();
1933 } while (res != CLP_GANG_OKAY);
1934out:
1935 cl_io_fini(env, io);
1936 return result;
1937}
1938EXPORT_SYMBOL(cl_lock_discard_pages);
1939
1940/**
1941 * Eliminate all locks for a given object.
1942 *
1943 * Caller has to guarantee that no lock is in active use.
1944 *
1945 * \param cancel when this is set, cl_locks_prune() cancels locks before
1946 * destroying.
1947 */
1948void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
1949{
1950 struct cl_object_header *head;
1951 struct cl_lock *lock;
1952
1953 head = cl_object_header(obj);
1954 /*
1955 * If locks are destroyed without cancellation, all pages must be
1956 * already destroyed (as otherwise they will be left unprotected).
1957 */
1958 LASSERT(ergo(!cancel,
1959 !head->coh_tree.rnode && head->coh_pages == 0));
1960
1961 spin_lock(&head->coh_lock_guard);
1962 while (!list_empty(&head->coh_locks)) {
1963 lock = container_of(head->coh_locks.next,
1964 struct cl_lock, cll_linkage);
1965 cl_lock_get_trust(lock);
1966 spin_unlock(&head->coh_lock_guard);
1967 lu_ref_add(&lock->cll_reference, "prune", current);
1968
1969again:
1970 cl_lock_mutex_get(env, lock);
1971 if (lock->cll_state < CLS_FREEING) {
1972 LASSERT(lock->cll_users <= 1);
1973 if (unlikely(lock->cll_users == 1)) {
1974 struct l_wait_info lwi = { 0 };
1975
1976 cl_lock_mutex_put(env, lock);
1977 l_wait_event(lock->cll_wq,
1978 lock->cll_users == 0,
1979 &lwi);
1980 goto again;
1981 }
1982
1983 if (cancel)
1984 cl_lock_cancel(env, lock);
1985 cl_lock_delete(env, lock);
1986 }
1987 cl_lock_mutex_put(env, lock);
1988 lu_ref_del(&lock->cll_reference, "prune", current);
1989 cl_lock_put(env, lock);
1990 spin_lock(&head->coh_lock_guard);
1991 }
1992 spin_unlock(&head->coh_lock_guard);
1993}
1994EXPORT_SYMBOL(cl_locks_prune);
1995
1996static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
1997 const struct cl_io *io,
1998 const struct cl_lock_descr *need,
1999 const char *scope, const void *source)
2000{
2001 struct cl_lock *lock;
2002
2003 while (1) {
2004 lock = cl_lock_find(env, io, need);
2005 if (IS_ERR(lock))
2006 break;
2007 cl_lock_mutex_get(env, lock);
2008 if (lock->cll_state < CLS_FREEING &&
2009 !(lock->cll_flags & CLF_CANCELLED)) {
2010 cl_lock_hold_mod(env, lock, 1);
2011 lu_ref_add(&lock->cll_holders, scope, source);
2012 lu_ref_add(&lock->cll_reference, scope, source);
2013 break;
2014 }
2015 cl_lock_mutex_put(env, lock);
2016 cl_lock_put(env, lock);
2017 }
2018 return lock;
2019}
2020
/**
 * Returns a lock matching \a need description with a reference and a hold on
 * it.
 *
 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
 * guarantees that lock is not in the CLS_FREEING state on return.
 */
struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
			     const struct cl_lock_descr *need,
			     const char *scope, const void *source)
{
	struct cl_lock *lock;

	lock = cl_lock_hold_mutex(env, io, need, scope, source);
	if (!IS_ERR(lock))
		cl_lock_mutex_put(env, lock);
	return lock;
}
EXPORT_SYMBOL(cl_lock_hold);
2040
2041/**
2042 * Main high-level entry point of cl_lock interface that finds existing or
2043 * enqueues new lock matching given description.
2044 */
2045struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2046 const struct cl_lock_descr *need,
2047 const char *scope, const void *source)
2048{
2049 struct cl_lock *lock;
2050 int rc;
2051 __u32 enqflags = need->cld_enq_flags;
2052
2053 do {
2054 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2055 if (IS_ERR(lock))
2056 break;
2057
2058 rc = cl_enqueue_locked(env, lock, io, enqflags);
2059 if (rc == 0) {
2060 if (cl_lock_fits_into(env, lock, need, io)) {
2061 if (!(enqflags & CEF_AGL)) {
2062 cl_lock_mutex_put(env, lock);
2063 cl_lock_lockdep_acquire(env, lock,
2064 enqflags);
2065 break;
2066 }
2067 rc = 1;
2068 }
2069 cl_unuse_locked(env, lock);
2070 }
2071 cl_lock_trace(D_DLMTRACE, env,
2072 rc <= 0 ? "enqueue failed" : "agl succeed", lock);
2073 cl_lock_hold_release(env, lock, scope, source);
2074 cl_lock_mutex_put(env, lock);
2075 lu_ref_del(&lock->cll_reference, scope, source);
2076 cl_lock_put(env, lock);
2077 if (rc > 0) {
2078 LASSERT(enqflags & CEF_AGL);
2079 lock = NULL;
2080 } else if (rc != 0) {
2081 lock = ERR_PTR(rc);
2082 }
2083 } while (rc == 0);
2084 return lock;
2085} 215}
2086EXPORT_SYMBOL(cl_lock_request); 216EXPORT_SYMBOL(cl_lock_request);
2087 217
2088/** 218/**
2089 * Adds a hold to a known lock.
2090 */
2091void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2092 const char *scope, const void *source)
2093{
2094 LINVRNT(cl_lock_is_mutexed(lock));
2095 LINVRNT(cl_lock_invariant(env, lock));
2096 LASSERT(lock->cll_state != CLS_FREEING);
2097
2098 cl_lock_hold_mod(env, lock, 1);
2099 cl_lock_get(lock);
2100 lu_ref_add(&lock->cll_holders, scope, source);
2101 lu_ref_add(&lock->cll_reference, scope, source);
2102}
2103EXPORT_SYMBOL(cl_lock_hold_add);
2104
2105/**
2106 * Releases a hold and a reference on a lock, on which caller acquired a
2107 * mutex.
2108 */
2109void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2110 const char *scope, const void *source)
2111{
2112 LINVRNT(cl_lock_invariant(env, lock));
2113 cl_lock_hold_release(env, lock, scope, source);
2114 lu_ref_del(&lock->cll_reference, scope, source);
2115 cl_lock_put(env, lock);
2116}
2117EXPORT_SYMBOL(cl_lock_unhold);
2118
2119/**
2120 * Releases a hold and a reference on a lock, obtained by cl_lock_hold(). 219 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2121 */ 220 */
2122void cl_lock_release(const struct lu_env *env, struct cl_lock *lock, 221void cl_lock_release(const struct lu_env *env, struct cl_lock *lock)
2123 const char *scope, const void *source)
2124{ 222{
2125 LINVRNT(cl_lock_invariant(env, lock));
2126 cl_lock_trace(D_DLMTRACE, env, "release lock", lock); 223 cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2127 cl_lock_mutex_get(env, lock); 224 cl_lock_cancel(env, lock);
2128 cl_lock_hold_release(env, lock, scope, source); 225 cl_lock_fini(env, lock);
2129 cl_lock_mutex_put(env, lock);
2130 lu_ref_del(&lock->cll_reference, scope, source);
2131 cl_lock_put(env, lock);
2132} 226}
2133EXPORT_SYMBOL(cl_lock_release); 227EXPORT_SYMBOL(cl_lock_release);
2134 228
2135void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2136{
2137 LINVRNT(cl_lock_is_mutexed(lock));
2138 LINVRNT(cl_lock_invariant(env, lock));
2139
2140 cl_lock_used_mod(env, lock, 1);
2141}
2142EXPORT_SYMBOL(cl_lock_user_add);
2143
2144void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2145{
2146 LINVRNT(cl_lock_is_mutexed(lock));
2147 LINVRNT(cl_lock_invariant(env, lock));
2148 LASSERT(lock->cll_users > 0);
2149
2150 cl_lock_used_mod(env, lock, -1);
2151 if (lock->cll_users == 0)
2152 wake_up_all(&lock->cll_wq);
2153}
2154EXPORT_SYMBOL(cl_lock_user_del);
2155
2156const char *cl_lock_mode_name(const enum cl_lock_mode mode) 229const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2157{ 230{
2158 static const char *names[] = { 231 static const char *names[] = {
2159 [CLM_PHANTOM] = "P",
2160 [CLM_READ] = "R", 232 [CLM_READ] = "R",
2161 [CLM_WRITE] = "W", 233 [CLM_WRITE] = "W",
2162 [CLM_GROUP] = "G" 234 [CLM_GROUP] = "G"
@@ -2189,10 +261,8 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
2189 lu_printer_t printer, const struct cl_lock *lock) 261 lu_printer_t printer, const struct cl_lock *lock)
2190{ 262{
2191 const struct cl_lock_slice *slice; 263 const struct cl_lock_slice *slice;
2192 (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ", 264
2193 lock, atomic_read(&lock->cll_ref), 265 (*printer)(env, cookie, "lock@%p", lock);
2194 lock->cll_state, lock->cll_error, lock->cll_holds,
2195 lock->cll_users, lock->cll_flags);
2196 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr); 266 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2197 (*printer)(env, cookie, " {\n"); 267 (*printer)(env, cookie, " {\n");
2198 268
@@ -2207,13 +277,3 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
2207 (*printer)(env, cookie, "} lock@%p\n", lock); 277 (*printer)(env, cookie, "} lock@%p\n", lock);
2208} 278}
2209EXPORT_SYMBOL(cl_lock_print); 279EXPORT_SYMBOL(cl_lock_print);
2210
2211int cl_lock_init(void)
2212{
2213 return lu_kmem_init(cl_lock_caches);
2214}
2215
2216void cl_lock_fini(void)
2217{
2218 lu_kmem_fini(cl_lock_caches);
2219}
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 43e299d4d416..5940f30318ec 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -36,6 +36,7 @@
36 * Client Lustre Object. 36 * Client Lustre Object.
37 * 37 *
38 * Author: Nikita Danilov <nikita.danilov@sun.com> 38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
39 */ 40 */
40 41
41/* 42/*
@@ -43,8 +44,6 @@
43 * 44 *
44 * i_mutex 45 * i_mutex
45 * PG_locked 46 * PG_locked
46 * ->coh_page_guard
47 * ->coh_lock_guard
48 * ->coh_attr_guard 47 * ->coh_attr_guard
49 * ->ls_guard 48 * ->ls_guard
50 */ 49 */
@@ -63,10 +62,6 @@
63 62
64static struct kmem_cache *cl_env_kmem; 63static struct kmem_cache *cl_env_kmem;
65 64
66/** Lock class of cl_object_header::coh_page_guard */
67static struct lock_class_key cl_page_guard_class;
68/** Lock class of cl_object_header::coh_lock_guard */
69static struct lock_class_key cl_lock_guard_class;
70/** Lock class of cl_object_header::coh_attr_guard */ 65/** Lock class of cl_object_header::coh_attr_guard */
71static struct lock_class_key cl_attr_guard_class; 66static struct lock_class_key cl_attr_guard_class;
72 67
@@ -81,17 +76,9 @@ int cl_object_header_init(struct cl_object_header *h)
81 76
82 result = lu_object_header_init(&h->coh_lu); 77 result = lu_object_header_init(&h->coh_lu);
83 if (result == 0) { 78 if (result == 0) {
84 spin_lock_init(&h->coh_page_guard);
85 spin_lock_init(&h->coh_lock_guard);
86 spin_lock_init(&h->coh_attr_guard); 79 spin_lock_init(&h->coh_attr_guard);
87 lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
88 lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
89 lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class); 80 lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
90 h->coh_pages = 0; 81 h->coh_page_bufsize = 0;
91 /* XXX hard coded GFP_* mask. */
92 INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
93 INIT_LIST_HEAD(&h->coh_locks);
94 h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
95 } 82 }
96 return result; 83 return result;
97} 84}
@@ -145,7 +132,7 @@ EXPORT_SYMBOL(cl_object_get);
145/** 132/**
146 * Returns the top-object for a given \a o. 133 * Returns the top-object for a given \a o.
147 * 134 *
148 * \see cl_page_top(), cl_io_top() 135 * \see cl_io_top()
149 */ 136 */
150struct cl_object *cl_object_top(struct cl_object *o) 137struct cl_object *cl_object_top(struct cl_object *o)
151{ 138{
@@ -315,6 +302,29 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
315EXPORT_SYMBOL(cl_conf_set); 302EXPORT_SYMBOL(cl_conf_set);
316 303
317/** 304/**
305 * Prunes caches of pages and locks for this object.
306 */
307int cl_object_prune(const struct lu_env *env, struct cl_object *obj)
308{
309 struct lu_object_header *top;
310 struct cl_object *o;
311 int result;
312
313 top = obj->co_lu.lo_header;
314 result = 0;
315 list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
316 if (o->co_ops->coo_prune) {
317 result = o->co_ops->coo_prune(env, o);
318 if (result != 0)
319 break;
320 }
321 }
322
323 return result;
324}
325EXPORT_SYMBOL(cl_object_prune);
326
327/**
318 * Helper function removing all object locks, and marking object for 328 * Helper function removing all object locks, and marking object for
319 * deletion. All object pages must have been deleted at this point. 329 * deletion. All object pages must have been deleted at this point.
320 * 330 *
@@ -323,34 +333,12 @@ EXPORT_SYMBOL(cl_conf_set);
323 */ 333 */
324void cl_object_kill(const struct lu_env *env, struct cl_object *obj) 334void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
325{ 335{
326 struct cl_object_header *hdr; 336 struct cl_object_header *hdr = cl_object_header(obj);
327
328 hdr = cl_object_header(obj);
329 LASSERT(!hdr->coh_tree.rnode);
330 LASSERT(hdr->coh_pages == 0);
331 337
332 set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags); 338 set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
333 /*
334 * Destroy all locks. Object destruction (including cl_inode_fini())
335 * cannot cancel the locks, because in the case of a local client,
336 * where client and server share the same thread running
337 * prune_icache(), this can dead-lock with ldlm_cancel_handler()
338 * waiting on __wait_on_freeing_inode().
339 */
340 cl_locks_prune(env, obj, 0);
341} 339}
342EXPORT_SYMBOL(cl_object_kill); 340EXPORT_SYMBOL(cl_object_kill);
343 341
344/**
345 * Prunes caches of pages and locks for this object.
346 */
347void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
348{
349 cl_pages_prune(env, obj);
350 cl_locks_prune(env, obj, 1);
351}
352EXPORT_SYMBOL(cl_object_prune);
353
354void cache_stats_init(struct cache_stats *cs, const char *name) 342void cache_stats_init(struct cache_stats *cs, const char *name)
355{ 343{
356 int i; 344 int i;
@@ -383,6 +371,8 @@ static int cache_stats_print(const struct cache_stats *cs,
383 return 0; 371 return 0;
384} 372}
385 373
374static void cl_env_percpu_refill(void);
375
386/** 376/**
387 * Initialize client site. 377 * Initialize client site.
388 * 378 *
@@ -397,11 +387,9 @@ int cl_site_init(struct cl_site *s, struct cl_device *d)
397 result = lu_site_init(&s->cs_lu, &d->cd_lu_dev); 387 result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
398 if (result == 0) { 388 if (result == 0) {
399 cache_stats_init(&s->cs_pages, "pages"); 389 cache_stats_init(&s->cs_pages, "pages");
400 cache_stats_init(&s->cs_locks, "locks");
401 for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i) 390 for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
402 atomic_set(&s->cs_pages_state[0], 0); 391 atomic_set(&s->cs_pages_state[0], 0);
403 for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i) 392 cl_env_percpu_refill();
404 atomic_set(&s->cs_locks_state[i], 0);
405 } 393 }
406 return result; 394 return result;
407} 395}
@@ -435,15 +423,6 @@ int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
435 [CPS_PAGEIN] = "r", 423 [CPS_PAGEIN] = "r",
436 [CPS_FREEING] = "f" 424 [CPS_FREEING] = "f"
437 }; 425 };
438 static const char *lstate[] = {
439 [CLS_NEW] = "n",
440 [CLS_QUEUING] = "q",
441 [CLS_ENQUEUED] = "e",
442 [CLS_HELD] = "h",
443 [CLS_INTRANSIT] = "t",
444 [CLS_CACHED] = "c",
445 [CLS_FREEING] = "f"
446 };
447/* 426/*
448 lookup hit total busy create 427 lookup hit total busy create
449pages: ...... ...... ...... ...... ...... [...... ...... ...... ......] 428pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
@@ -457,12 +436,6 @@ locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
457 seq_printf(m, "%s: %u ", pstate[i], 436 seq_printf(m, "%s: %u ", pstate[i],
458 atomic_read(&site->cs_pages_state[i])); 437 atomic_read(&site->cs_pages_state[i]));
459 seq_printf(m, "]\n"); 438 seq_printf(m, "]\n");
460 cache_stats_print(&site->cs_locks, m, 0);
461 seq_printf(m, " [");
462 for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
463 seq_printf(m, "%s: %u ", lstate[i],
464 atomic_read(&site->cs_locks_state[i]));
465 seq_printf(m, "]\n");
466 cache_stats_print(&cl_env_stats, m, 0); 439 cache_stats_print(&cl_env_stats, m, 0);
467 seq_printf(m, "\n"); 440 seq_printf(m, "\n");
468 return 0; 441 return 0;
@@ -492,6 +465,13 @@ EXPORT_SYMBOL(cl_site_stats_print);
492 * bz20044, bz22683. 465 * bz20044, bz22683.
493 */ 466 */
494 467
468static LIST_HEAD(cl_envs);
469static unsigned int cl_envs_cached_nr;
470static unsigned int cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
471 * for now.
472 */
473static DEFINE_SPINLOCK(cl_envs_guard);
474
495struct cl_env { 475struct cl_env {
496 void *ce_magic; 476 void *ce_magic;
497 struct lu_env ce_lu; 477 struct lu_env ce_lu;
@@ -674,8 +654,9 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
674 lu_context_enter(&cle->ce_ses); 654 lu_context_enter(&cle->ce_ses);
675 env->le_ses = &cle->ce_ses; 655 env->le_ses = &cle->ce_ses;
676 cl_env_init0(cle, debug); 656 cl_env_init0(cle, debug);
677 } else 657 } else {
678 lu_env_fini(env); 658 lu_env_fini(env);
659 }
679 } 660 }
680 if (rc != 0) { 661 if (rc != 0) {
681 kmem_cache_free(cl_env_kmem, cle); 662 kmem_cache_free(cl_env_kmem, cle);
@@ -684,8 +665,9 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
684 CL_ENV_INC(create); 665 CL_ENV_INC(create);
685 CL_ENV_INC(total); 666 CL_ENV_INC(total);
686 } 667 }
687 } else 668 } else {
688 env = ERR_PTR(-ENOMEM); 669 env = ERR_PTR(-ENOMEM);
670 }
689 return env; 671 return env;
690} 672}
691 673
@@ -697,6 +679,39 @@ static void cl_env_fini(struct cl_env *cle)
697 kmem_cache_free(cl_env_kmem, cle); 679 kmem_cache_free(cl_env_kmem, cle);
698} 680}
699 681
682static struct lu_env *cl_env_obtain(void *debug)
683{
684 struct cl_env *cle;
685 struct lu_env *env;
686
687 spin_lock(&cl_envs_guard);
688 LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
689 if (cl_envs_cached_nr > 0) {
690 int rc;
691
692 cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
693 list_del_init(&cle->ce_linkage);
694 cl_envs_cached_nr--;
695 spin_unlock(&cl_envs_guard);
696
697 env = &cle->ce_lu;
698 rc = lu_env_refill(env);
699 if (rc == 0) {
700 cl_env_init0(cle, debug);
701 lu_context_enter(&env->le_ctx);
702 lu_context_enter(&cle->ce_ses);
703 } else {
704 cl_env_fini(cle);
705 env = ERR_PTR(rc);
706 }
707 } else {
708 spin_unlock(&cl_envs_guard);
709 env = cl_env_new(lu_context_tags_default,
710 lu_session_tags_default, debug);
711 }
712 return env;
713}
714
700static inline struct cl_env *cl_env_container(struct lu_env *env) 715static inline struct cl_env *cl_env_container(struct lu_env *env)
701{ 716{
702 return container_of(env, struct cl_env, ce_lu); 717 return container_of(env, struct cl_env, ce_lu);
@@ -727,6 +742,8 @@ static struct lu_env *cl_env_peek(int *refcheck)
727 * Returns lu_env: if there already is an environment associated with the 742 * Returns lu_env: if there already is an environment associated with the
728 * current thread, it is returned, otherwise, new environment is allocated. 743 * current thread, it is returned, otherwise, new environment is allocated.
729 * 744 *
745 * Allocations are amortized through the global cache of environments.
746 *
730 * \param refcheck pointer to a counter used to detect environment leaks. In 747 * \param refcheck pointer to a counter used to detect environment leaks. In
731 * the usual case cl_env_get() and cl_env_put() are called in the same lexical 748 * the usual case cl_env_get() and cl_env_put() are called in the same lexical
732 * scope and pointer to the same integer is passed as \a refcheck. This is 749 * scope and pointer to the same integer is passed as \a refcheck. This is
@@ -740,10 +757,7 @@ struct lu_env *cl_env_get(int *refcheck)
740 757
741 env = cl_env_peek(refcheck); 758 env = cl_env_peek(refcheck);
742 if (!env) { 759 if (!env) {
743 env = cl_env_new(lu_context_tags_default, 760 env = cl_env_obtain(__builtin_return_address(0));
744 lu_session_tags_default,
745 __builtin_return_address(0));
746
747 if (!IS_ERR(env)) { 761 if (!IS_ERR(env)) {
748 struct cl_env *cle; 762 struct cl_env *cle;
749 763
@@ -787,6 +801,32 @@ static void cl_env_exit(struct cl_env *cle)
787} 801}
788 802
789/** 803/**
804 * Finalizes and frees a given number of cached environments. This is done to
805 * (1) free some memory (not currently hooked into VM), or (2) release
806 * references to modules.
807 */
808unsigned int cl_env_cache_purge(unsigned int nr)
809{
810 struct cl_env *cle;
811
812 spin_lock(&cl_envs_guard);
813 for (; !list_empty(&cl_envs) && nr > 0; --nr) {
814 cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
815 list_del_init(&cle->ce_linkage);
816 LASSERT(cl_envs_cached_nr > 0);
817 cl_envs_cached_nr--;
818 spin_unlock(&cl_envs_guard);
819
820 cl_env_fini(cle);
821 spin_lock(&cl_envs_guard);
822 }
823 LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
824 spin_unlock(&cl_envs_guard);
825 return nr;
826}
827EXPORT_SYMBOL(cl_env_cache_purge);
828
829/**
790 * Release an environment. 830 * Release an environment.
791 * 831 *
792 * Decrement \a env reference counter. When counter drops to 0, nothing in 832 * Decrement \a env reference counter. When counter drops to 0, nothing in
@@ -808,7 +848,22 @@ void cl_env_put(struct lu_env *env, int *refcheck)
808 cl_env_detach(cle); 848 cl_env_detach(cle);
809 cle->ce_debug = NULL; 849 cle->ce_debug = NULL;
810 cl_env_exit(cle); 850 cl_env_exit(cle);
811 cl_env_fini(cle); 851 /*
852 * Don't bother to take a lock here.
853 *
854 * Return environment to the cache only when it was allocated
855 * with the standard tags.
856 */
857 if (cl_envs_cached_nr < cl_envs_cached_max &&
858 (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
859 (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
860 spin_lock(&cl_envs_guard);
861 list_add(&cle->ce_linkage, &cl_envs);
862 cl_envs_cached_nr++;
863 spin_unlock(&cl_envs_guard);
864 } else {
865 cl_env_fini(cle);
866 }
812 } 867 }
813} 868}
814EXPORT_SYMBOL(cl_env_put); 869EXPORT_SYMBOL(cl_env_put);
@@ -914,6 +969,104 @@ void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
914} 969}
915EXPORT_SYMBOL(cl_lvb2attr); 970EXPORT_SYMBOL(cl_lvb2attr);
916 971
972static struct cl_env cl_env_percpu[NR_CPUS];
973
974static int cl_env_percpu_init(void)
975{
976 struct cl_env *cle;
977 int tags = LCT_REMEMBER | LCT_NOREF;
978 int i, j;
979 int rc = 0;
980
981 for_each_possible_cpu(i) {
982 struct lu_env *env;
983
984 cle = &cl_env_percpu[i];
985 env = &cle->ce_lu;
986
987 INIT_LIST_HEAD(&cle->ce_linkage);
988 cle->ce_magic = &cl_env_init0;
989 rc = lu_env_init(env, LCT_CL_THREAD | tags);
990 if (rc == 0) {
991 rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
992 if (rc == 0) {
993 lu_context_enter(&cle->ce_ses);
994 env->le_ses = &cle->ce_ses;
995 } else {
996 lu_env_fini(env);
997 }
998 }
999 if (rc != 0)
1000 break;
1001 }
1002 if (rc != 0) {
1003 /* Indices 0 to i (excluding i) were correctly initialized,
1004 * thus we must uninitialize up to i, the rest are undefined.
1005 */
1006 for (j = 0; j < i; j++) {
1007 cle = &cl_env_percpu[i];
1008 lu_context_exit(&cle->ce_ses);
1009 lu_context_fini(&cle->ce_ses);
1010 lu_env_fini(&cle->ce_lu);
1011 }
1012 }
1013
1014 return rc;
1015}
1016
1017static void cl_env_percpu_fini(void)
1018{
1019 int i;
1020
1021 for_each_possible_cpu(i) {
1022 struct cl_env *cle = &cl_env_percpu[i];
1023
1024 lu_context_exit(&cle->ce_ses);
1025 lu_context_fini(&cle->ce_ses);
1026 lu_env_fini(&cle->ce_lu);
1027 }
1028}
1029
1030static void cl_env_percpu_refill(void)
1031{
1032 int i;
1033
1034 for_each_possible_cpu(i)
1035 lu_env_refill(&cl_env_percpu[i].ce_lu);
1036}
1037
1038void cl_env_percpu_put(struct lu_env *env)
1039{
1040 struct cl_env *cle;
1041 int cpu;
1042
1043 cpu = smp_processor_id();
1044 cle = cl_env_container(env);
1045 LASSERT(cle == &cl_env_percpu[cpu]);
1046
1047 cle->ce_ref--;
1048 LASSERT(cle->ce_ref == 0);
1049
1050 CL_ENV_DEC(busy);
1051 cl_env_detach(cle);
1052 cle->ce_debug = NULL;
1053
1054 put_cpu();
1055}
1056EXPORT_SYMBOL(cl_env_percpu_put);
1057
1058struct lu_env *cl_env_percpu_get(void)
1059{
1060 struct cl_env *cle;
1061
1062 cle = &cl_env_percpu[get_cpu()];
1063 cl_env_init0(cle, __builtin_return_address(0));
1064
1065 cl_env_attach(cle);
1066 return &cle->ce_lu;
1067}
1068EXPORT_SYMBOL(cl_env_percpu_get);
1069
917/***************************************************************************** 1070/*****************************************************************************
918 * 1071 *
919 * Temporary prototype thing: mirror obd-devices into cl devices. 1072 * Temporary prototype thing: mirror obd-devices into cl devices.
@@ -944,8 +1097,9 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
944 CERROR("can't init device '%s', %d\n", typename, rc); 1097 CERROR("can't init device '%s', %d\n", typename, rc);
945 d = ERR_PTR(rc); 1098 d = ERR_PTR(rc);
946 } 1099 }
947 } else 1100 } else {
948 CERROR("Cannot allocate device: '%s'\n", typename); 1101 CERROR("Cannot allocate device: '%s'\n", typename);
1102 }
949 return lu2cl_dev(d); 1103 return lu2cl_dev(d);
950} 1104}
951EXPORT_SYMBOL(cl_type_setup); 1105EXPORT_SYMBOL(cl_type_setup);
@@ -959,12 +1113,6 @@ void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
959} 1113}
960EXPORT_SYMBOL(cl_stack_fini); 1114EXPORT_SYMBOL(cl_stack_fini);
961 1115
962int cl_lock_init(void);
963void cl_lock_fini(void);
964
965int cl_page_init(void);
966void cl_page_fini(void);
967
968static struct lu_context_key cl_key; 1116static struct lu_context_key cl_key;
969 1117
970struct cl_thread_info *cl_env_info(const struct lu_env *env) 1118struct cl_thread_info *cl_env_info(const struct lu_env *env)
@@ -1059,17 +1207,13 @@ int cl_global_init(void)
1059 if (result) 1207 if (result)
1060 goto out_kmem; 1208 goto out_kmem;
1061 1209
1062 result = cl_lock_init(); 1210 result = cl_env_percpu_init();
1063 if (result) 1211 if (result)
1212 /* no cl_env_percpu_fini on error */
1064 goto out_context; 1213 goto out_context;
1065 1214
1066 result = cl_page_init();
1067 if (result)
1068 goto out_lock;
1069
1070 return 0; 1215 return 0;
1071out_lock: 1216
1072 cl_lock_fini();
1073out_context: 1217out_context:
1074 lu_context_key_degister(&cl_key); 1218 lu_context_key_degister(&cl_key);
1075out_kmem: 1219out_kmem:
@@ -1084,8 +1228,7 @@ out_store:
1084 */ 1228 */
1085void cl_global_fini(void) 1229void cl_global_fini(void)
1086{ 1230{
1087 cl_lock_fini(); 1231 cl_env_percpu_fini();
1088 cl_page_fini();
1089 lu_context_key_degister(&cl_key); 1232 lu_context_key_degister(&cl_key);
1090 lu_kmem_fini(cl_object_caches); 1233 lu_kmem_fini(cl_object_caches);
1091 cl_env_store_fini(); 1234 cl_env_store_fini();
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 394580016638..b754f516e557 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -36,6 +36,7 @@
36 * Client Lustre Page. 36 * Client Lustre Page.
37 * 37 *
38 * Author: Nikita Danilov <nikita.danilov@sun.com> 38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
39 */ 40 */
40 41
41#define DEBUG_SUBSYSTEM S_CLASS 42#define DEBUG_SUBSYSTEM S_CLASS
@@ -48,8 +49,7 @@
48#include "../include/cl_object.h" 49#include "../include/cl_object.h"
49#include "cl_internal.h" 50#include "cl_internal.h"
50 51
51static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg, 52static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
52 int radix);
53 53
54# define PASSERT(env, page, expr) \ 54# define PASSERT(env, page, expr) \
55 do { \ 55 do { \
@@ -63,24 +63,11 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
63 ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp)) 63 ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
64 64
65/** 65/**
66 * Internal version of cl_page_top, it should be called if the page is
67 * known to be not freed, says with page referenced, or radix tree lock held,
68 * or page owned.
69 */
70static struct cl_page *cl_page_top_trusted(struct cl_page *page)
71{
72 while (page->cp_parent)
73 page = page->cp_parent;
74 return page;
75}
76
77/**
78 * Internal version of cl_page_get(). 66 * Internal version of cl_page_get().
79 * 67 *
80 * This function can be used to obtain initial reference to previously 68 * This function can be used to obtain initial reference to previously
81 * unreferenced cached object. It can be called only if concurrent page 69 * unreferenced cached object. It can be called only if concurrent page
82 * reclamation is somehow prevented, e.g., by locking page radix-tree 70 * reclamation is somehow prevented, e.g., by keeping a lock on a VM page,
83 * (cl_object_header::hdr->coh_page_guard), or by keeping a lock on a VM page,
84 * associated with \a page. 71 * associated with \a page.
85 * 72 *
86 * Use with care! Not exported. 73 * Use with care! Not exported.
@@ -103,142 +90,12 @@ cl_page_at_trusted(const struct cl_page *page,
103{ 90{
104 const struct cl_page_slice *slice; 91 const struct cl_page_slice *slice;
105 92
106 page = cl_page_top_trusted((struct cl_page *)page); 93 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
107 do { 94 if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
108 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { 95 return slice;
109 if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
110 return slice;
111 }
112 page = page->cp_child;
113 } while (page);
114 return NULL;
115}
116
117/**
118 * Returns a page with given index in the given object, or NULL if no page is
119 * found. Acquires a reference on \a page.
120 *
121 * Locking: called under cl_object_header::coh_page_guard spin-lock.
122 */
123struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
124{
125 struct cl_page *page;
126
127 assert_spin_locked(&hdr->coh_page_guard);
128
129 page = radix_tree_lookup(&hdr->coh_tree, index);
130 if (page)
131 cl_page_get_trust(page);
132 return page;
133}
134EXPORT_SYMBOL(cl_page_lookup);
135
136/**
137 * Returns a list of pages by a given [start, end] of \a obj.
138 *
139 * \param resched If not NULL, then we give up before hogging CPU for too
140 * long and set *resched = 1, in that case caller should implement a retry
141 * logic.
142 *
143 * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
144 * crucial in the face of [offset, EOF] locks.
145 *
146 * Return at least one page in @queue unless there is no covered page.
147 */
148int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
149 struct cl_io *io, pgoff_t start, pgoff_t end,
150 cl_page_gang_cb_t cb, void *cbdata)
151{
152 struct cl_object_header *hdr;
153 struct cl_page *page;
154 struct cl_page **pvec;
155 const struct cl_page_slice *slice;
156 const struct lu_device_type *dtype;
157 pgoff_t idx;
158 unsigned int nr;
159 unsigned int i;
160 unsigned int j;
161 int res = CLP_GANG_OKAY;
162 int tree_lock = 1;
163
164 idx = start;
165 hdr = cl_object_header(obj);
166 pvec = cl_env_info(env)->clt_pvec;
167 dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
168 spin_lock(&hdr->coh_page_guard);
169 while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
170 idx, CLT_PVEC_SIZE)) > 0) {
171 int end_of_region = 0;
172
173 idx = pvec[nr - 1]->cp_index + 1;
174 for (i = 0, j = 0; i < nr; ++i) {
175 page = pvec[i];
176 pvec[i] = NULL;
177
178 LASSERT(page->cp_type == CPT_CACHEABLE);
179 if (page->cp_index > end) {
180 end_of_region = 1;
181 break;
182 }
183 if (page->cp_state == CPS_FREEING)
184 continue;
185
186 slice = cl_page_at_trusted(page, dtype);
187 /*
188 * Pages for lsm-less file has no underneath sub-page
189 * for osc, in case of ...
190 */
191 PASSERT(env, page, slice);
192
193 page = slice->cpl_page;
194 /*
195 * Can safely call cl_page_get_trust() under
196 * radix-tree spin-lock.
197 *
198 * XXX not true, because @page is from object another
199 * than @hdr and protected by different tree lock.
200 */
201 cl_page_get_trust(page);
202 lu_ref_add_atomic(&page->cp_reference,
203 "gang_lookup", current);
204 pvec[j++] = page;
205 }
206
207 /*
208 * Here a delicate locking dance is performed. Current thread
209 * holds a reference to a page, but has to own it before it
210 * can be placed into queue. Owning implies waiting, so
211 * radix-tree lock is to be released. After a wait one has to
212 * check that pages weren't truncated (cl_page_own() returns
213 * error in the latter case).
214 */
215 spin_unlock(&hdr->coh_page_guard);
216 tree_lock = 0;
217
218 for (i = 0; i < j; ++i) {
219 page = pvec[i];
220 if (res == CLP_GANG_OKAY)
221 res = (*cb)(env, io, page, cbdata);
222 lu_ref_del(&page->cp_reference,
223 "gang_lookup", current);
224 cl_page_put(env, page);
225 }
226 if (nr < CLT_PVEC_SIZE || end_of_region)
227 break;
228
229 if (res == CLP_GANG_OKAY && need_resched())
230 res = CLP_GANG_RESCHED;
231 if (res != CLP_GANG_OKAY)
232 break;
233
234 spin_lock(&hdr->coh_page_guard);
235 tree_lock = 1;
236 } 96 }
237 if (tree_lock) 97 return NULL;
238 spin_unlock(&hdr->coh_page_guard);
239 return res;
240} 98}
241EXPORT_SYMBOL(cl_page_gang_lookup);
242 99
243static void cl_page_free(const struct lu_env *env, struct cl_page *page) 100static void cl_page_free(const struct lu_env *env, struct cl_page *page)
244{ 101{
@@ -247,17 +104,16 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
247 PASSERT(env, page, list_empty(&page->cp_batch)); 104 PASSERT(env, page, list_empty(&page->cp_batch));
248 PASSERT(env, page, !page->cp_owner); 105 PASSERT(env, page, !page->cp_owner);
249 PASSERT(env, page, !page->cp_req); 106 PASSERT(env, page, !page->cp_req);
250 PASSERT(env, page, !page->cp_parent);
251 PASSERT(env, page, page->cp_state == CPS_FREEING); 107 PASSERT(env, page, page->cp_state == CPS_FREEING);
252 108
253 might_sleep();
254 while (!list_empty(&page->cp_layers)) { 109 while (!list_empty(&page->cp_layers)) {
255 struct cl_page_slice *slice; 110 struct cl_page_slice *slice;
256 111
257 slice = list_entry(page->cp_layers.next, 112 slice = list_entry(page->cp_layers.next,
258 struct cl_page_slice, cpl_linkage); 113 struct cl_page_slice, cpl_linkage);
259 list_del_init(page->cp_layers.next); 114 list_del_init(page->cp_layers.next);
260 slice->cpl_ops->cpo_fini(env, slice); 115 if (unlikely(slice->cpl_ops->cpo_fini))
116 slice->cpl_ops->cpo_fini(env, slice);
261 } 117 }
262 lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page); 118 lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
263 cl_object_put(env, obj); 119 cl_object_put(env, obj);
@@ -276,10 +132,10 @@ static inline void cl_page_state_set_trust(struct cl_page *page,
276 *(enum cl_page_state *)&page->cp_state = state; 132 *(enum cl_page_state *)&page->cp_state = state;
277} 133}
278 134
279static struct cl_page *cl_page_alloc(const struct lu_env *env, 135struct cl_page *cl_page_alloc(const struct lu_env *env,
280 struct cl_object *o, pgoff_t ind, 136 struct cl_object *o, pgoff_t ind,
281 struct page *vmpage, 137 struct page *vmpage,
282 enum cl_page_type type) 138 enum cl_page_type type)
283{ 139{
284 struct cl_page *page; 140 struct cl_page *page;
285 struct lu_object_header *head; 141 struct lu_object_header *head;
@@ -289,13 +145,11 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
289 int result = 0; 145 int result = 0;
290 146
291 atomic_set(&page->cp_ref, 1); 147 atomic_set(&page->cp_ref, 1);
292 if (type == CPT_CACHEABLE) /* for radix tree */
293 atomic_inc(&page->cp_ref);
294 page->cp_obj = o; 148 page->cp_obj = o;
295 cl_object_get(o); 149 cl_object_get(o);
296 lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page", 150 lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
297 page); 151 page);
298 page->cp_index = ind; 152 page->cp_vmpage = vmpage;
299 cl_page_state_set_trust(page, CPS_CACHED); 153 cl_page_state_set_trust(page, CPS_CACHED);
300 page->cp_type = type; 154 page->cp_type = type;
301 INIT_LIST_HEAD(&page->cp_layers); 155 INIT_LIST_HEAD(&page->cp_layers);
@@ -306,10 +160,10 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
306 head = o->co_lu.lo_header; 160 head = o->co_lu.lo_header;
307 list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) { 161 list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
308 if (o->co_ops->coo_page_init) { 162 if (o->co_ops->coo_page_init) {
309 result = o->co_ops->coo_page_init(env, o, 163 result = o->co_ops->coo_page_init(env, o, page,
310 page, vmpage); 164 ind);
311 if (result != 0) { 165 if (result != 0) {
312 cl_page_delete0(env, page, 0); 166 cl_page_delete0(env, page);
313 cl_page_free(env, page); 167 cl_page_free(env, page);
314 page = ERR_PTR(result); 168 page = ERR_PTR(result);
315 break; 169 break;
@@ -321,6 +175,7 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
321 } 175 }
322 return page; 176 return page;
323} 177}
178EXPORT_SYMBOL(cl_page_alloc);
324 179
325/** 180/**
326 * Returns a cl_page with index \a idx at the object \a o, and associated with 181 * Returns a cl_page with index \a idx at the object \a o, and associated with
@@ -333,16 +188,13 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
333 * 188 *
334 * \see cl_object_find(), cl_lock_find() 189 * \see cl_object_find(), cl_lock_find()
335 */ 190 */
336static struct cl_page *cl_page_find0(const struct lu_env *env, 191struct cl_page *cl_page_find(const struct lu_env *env,
337 struct cl_object *o, 192 struct cl_object *o,
338 pgoff_t idx, struct page *vmpage, 193 pgoff_t idx, struct page *vmpage,
339 enum cl_page_type type, 194 enum cl_page_type type)
340 struct cl_page *parent)
341{ 195{
342 struct cl_page *page = NULL; 196 struct cl_page *page = NULL;
343 struct cl_page *ghost = NULL;
344 struct cl_object_header *hdr; 197 struct cl_object_header *hdr;
345 int err;
346 198
347 LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT); 199 LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
348 might_sleep(); 200 might_sleep();
@@ -368,120 +220,25 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
368 * reference on it. 220 * reference on it.
369 */ 221 */
370 page = cl_vmpage_page(vmpage, o); 222 page = cl_vmpage_page(vmpage, o);
371 PINVRNT(env, page,
372 ergo(page,
373 cl_page_vmpage(env, page) == vmpage &&
374 (void *)radix_tree_lookup(&hdr->coh_tree,
375 idx) == page));
376 }
377 223
378 if (page) 224 if (page)
379 return page; 225 return page;
226 }
380 227
381 /* allocate and initialize cl_page */ 228 /* allocate and initialize cl_page */
382 page = cl_page_alloc(env, o, idx, vmpage, type); 229 page = cl_page_alloc(env, o, idx, vmpage, type);
383 if (IS_ERR(page))
384 return page;
385
386 if (type == CPT_TRANSIENT) {
387 if (parent) {
388 LASSERT(!page->cp_parent);
389 page->cp_parent = parent;
390 parent->cp_child = page;
391 }
392 return page;
393 }
394
395 /*
396 * XXX optimization: use radix_tree_preload() here, and change tree
397 * gfp mask to GFP_KERNEL in cl_object_header_init().
398 */
399 spin_lock(&hdr->coh_page_guard);
400 err = radix_tree_insert(&hdr->coh_tree, idx, page);
401 if (err != 0) {
402 ghost = page;
403 /*
404 * Noted by Jay: a lock on \a vmpage protects cl_page_find()
405 * from this race, but
406 *
407 * 0. it's better to have cl_page interface "locally
408 * consistent" so that its correctness can be reasoned
409 * about without appealing to the (obscure world of) VM
410 * locking.
411 *
412 * 1. handling this race allows ->coh_tree to remain
413 * consistent even when VM locking is somehow busted,
414 * which is very useful during diagnosing and debugging.
415 */
416 page = ERR_PTR(err);
417 CL_PAGE_DEBUG(D_ERROR, env, ghost,
418 "fail to insert into radix tree: %d\n", err);
419 } else {
420 if (parent) {
421 LASSERT(!page->cp_parent);
422 page->cp_parent = parent;
423 parent->cp_child = page;
424 }
425 hdr->coh_pages++;
426 }
427 spin_unlock(&hdr->coh_page_guard);
428
429 if (unlikely(ghost)) {
430 cl_page_delete0(env, ghost, 0);
431 cl_page_free(env, ghost);
432 }
433 return page; 230 return page;
434} 231}
435
436struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
437 pgoff_t idx, struct page *vmpage,
438 enum cl_page_type type)
439{
440 return cl_page_find0(env, o, idx, vmpage, type, NULL);
441}
442EXPORT_SYMBOL(cl_page_find); 232EXPORT_SYMBOL(cl_page_find);
443 233
444struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
445 pgoff_t idx, struct page *vmpage,
446 struct cl_page *parent)
447{
448 return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
449}
450EXPORT_SYMBOL(cl_page_find_sub);
451
452static inline int cl_page_invariant(const struct cl_page *pg) 234static inline int cl_page_invariant(const struct cl_page *pg)
453{ 235{
454 struct cl_object_header *header;
455 struct cl_page *parent;
456 struct cl_page *child;
457 struct cl_io *owner;
458
459 /* 236 /*
460 * Page invariant is protected by a VM lock. 237 * Page invariant is protected by a VM lock.
461 */ 238 */
462 LINVRNT(cl_page_is_vmlocked(NULL, pg)); 239 LINVRNT(cl_page_is_vmlocked(NULL, pg));
463 240
464 header = cl_object_header(pg->cp_obj); 241 return cl_page_in_use_noref(pg);
465 parent = pg->cp_parent;
466 child = pg->cp_child;
467 owner = pg->cp_owner;
468
469 return cl_page_in_use(pg) &&
470 ergo(parent, parent->cp_child == pg) &&
471 ergo(child, child->cp_parent == pg) &&
472 ergo(child, pg->cp_obj != child->cp_obj) &&
473 ergo(parent, pg->cp_obj != parent->cp_obj) &&
474 ergo(owner && parent,
475 parent->cp_owner == pg->cp_owner->ci_parent) &&
476 ergo(owner && child, child->cp_owner->ci_parent == owner) &&
477 /*
478 * Either page is early in initialization (has neither child
479 * nor parent yet), or it is in the object radix tree.
480 */
481 ergo(pg->cp_state < CPS_FREEING && pg->cp_type == CPT_CACHEABLE,
482 (void *)radix_tree_lookup(&header->coh_tree,
483 pg->cp_index) == pg ||
484 (!child && !parent));
485} 242}
486 243
487static void cl_page_state_set0(const struct lu_env *env, 244static void cl_page_state_set0(const struct lu_env *env,
@@ -534,13 +291,9 @@ static void cl_page_state_set0(const struct lu_env *env,
534 old = page->cp_state; 291 old = page->cp_state;
535 PASSERT(env, page, allowed_transitions[old][state]); 292 PASSERT(env, page, allowed_transitions[old][state]);
536 CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state); 293 CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
537 for (; page; page = page->cp_child) { 294 PASSERT(env, page, page->cp_state == old);
538 PASSERT(env, page, page->cp_state == old); 295 PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner));
539 PASSERT(env, page, 296 cl_page_state_set_trust(page, state);
540 equi(state == CPS_OWNED, page->cp_owner));
541
542 cl_page_state_set_trust(page, state);
543 }
544} 297}
545 298
546static void cl_page_state_set(const struct lu_env *env, 299static void cl_page_state_set(const struct lu_env *env,
@@ -574,8 +327,6 @@ EXPORT_SYMBOL(cl_page_get);
574 */ 327 */
575void cl_page_put(const struct lu_env *env, struct cl_page *page) 328void cl_page_put(const struct lu_env *env, struct cl_page *page)
576{ 329{
577 PASSERT(env, page, atomic_read(&page->cp_ref) > !!page->cp_parent);
578
579 CL_PAGE_HEADER(D_TRACE, env, page, "%d\n", 330 CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
580 atomic_read(&page->cp_ref)); 331 atomic_read(&page->cp_ref));
581 332
@@ -595,34 +346,10 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
595EXPORT_SYMBOL(cl_page_put); 346EXPORT_SYMBOL(cl_page_put);
596 347
597/** 348/**
598 * Returns a VM page associated with a given cl_page.
599 */
600struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
601{
602 const struct cl_page_slice *slice;
603
604 /*
605 * Find uppermost layer with ->cpo_vmpage() method, and return its
606 * result.
607 */
608 page = cl_page_top(page);
609 do {
610 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
611 if (slice->cpl_ops->cpo_vmpage)
612 return slice->cpl_ops->cpo_vmpage(env, slice);
613 }
614 page = page->cp_child;
615 } while (page);
616 LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
617}
618EXPORT_SYMBOL(cl_page_vmpage);
619
620/**
621 * Returns a cl_page associated with a VM page, and given cl_object. 349 * Returns a cl_page associated with a VM page, and given cl_object.
622 */ 350 */
623struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj) 351struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
624{ 352{
625 struct cl_page *top;
626 struct cl_page *page; 353 struct cl_page *page;
627 354
628 KLASSERT(PageLocked(vmpage)); 355 KLASSERT(PageLocked(vmpage));
@@ -633,36 +360,15 @@ struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
633 * bottom-to-top pass. 360 * bottom-to-top pass.
634 */ 361 */
635 362
636 /* 363 page = (struct cl_page *)vmpage->private;
637 * This loop assumes that ->private points to the top-most page. This 364 if (page) {
638 * can be rectified easily. 365 cl_page_get_trust(page);
639 */ 366 LASSERT(page->cp_type == CPT_CACHEABLE);
640 top = (struct cl_page *)vmpage->private;
641 if (!top)
642 return NULL;
643
644 for (page = top; page; page = page->cp_child) {
645 if (cl_object_same(page->cp_obj, obj)) {
646 cl_page_get_trust(page);
647 break;
648 }
649 } 367 }
650 LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
651 return page; 368 return page;
652} 369}
653EXPORT_SYMBOL(cl_vmpage_page); 370EXPORT_SYMBOL(cl_vmpage_page);
654 371
655/**
656 * Returns the top-page for a given page.
657 *
658 * \see cl_object_top(), cl_io_top()
659 */
660struct cl_page *cl_page_top(struct cl_page *page)
661{
662 return cl_page_top_trusted(page);
663}
664EXPORT_SYMBOL(cl_page_top);
665
666const struct cl_page_slice *cl_page_at(const struct cl_page *page, 372const struct cl_page_slice *cl_page_at(const struct cl_page *page,
667 const struct lu_device_type *dtype) 373 const struct lu_device_type *dtype)
668{ 374{
@@ -682,26 +388,43 @@ EXPORT_SYMBOL(cl_page_at);
682 int (*__method)_proto; \ 388 int (*__method)_proto; \
683 \ 389 \
684 __result = 0; \ 390 __result = 0; \
685 __page = cl_page_top(__page); \ 391 list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
686 do { \ 392 __method = *(void **)((char *)__scan->cpl_ops + __op); \
687 list_for_each_entry(__scan, &__page->cp_layers, \ 393 if (__method) { \
688 cpl_linkage) { \ 394 __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
689 __method = *(void **)((char *)__scan->cpl_ops + \ 395 if (__result != 0) \
690 __op); \ 396 break; \
691 if (__method) { \ 397 } \
692 __result = (*__method)(__env, __scan, \ 398 } \
693 ## __VA_ARGS__); \
694 if (__result != 0) \
695 break; \
696 } \
697 } \
698 __page = __page->cp_child; \
699 } while (__page && __result == 0); \
700 if (__result > 0) \ 399 if (__result > 0) \
701 __result = 0; \ 400 __result = 0; \
702 __result; \ 401 __result; \
703}) 402})
704 403
404#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...) \
405({ \
406 const struct lu_env *__env = (_env); \
407 struct cl_page *__page = (_page); \
408 const struct cl_page_slice *__scan; \
409 int __result; \
410 ptrdiff_t __op = (_op); \
411 int (*__method)_proto; \
412 \
413 __result = 0; \
414 list_for_each_entry_reverse(__scan, &__page->cp_layers, \
415 cpl_linkage) { \
416 __method = *(void **)((char *)__scan->cpl_ops + __op); \
417 if (__method) { \
418 __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
419 if (__result != 0) \
420 break; \
421 } \
422 } \
423 if (__result > 0) \
424 __result = 0; \
425 __result; \
426})
427
705#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \ 428#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
706do { \ 429do { \
707 const struct lu_env *__env = (_env); \ 430 const struct lu_env *__env = (_env); \
@@ -710,18 +433,11 @@ do { \
710 ptrdiff_t __op = (_op); \ 433 ptrdiff_t __op = (_op); \
711 void (*__method)_proto; \ 434 void (*__method)_proto; \
712 \ 435 \
713 __page = cl_page_top(__page); \ 436 list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
714 do { \ 437 __method = *(void **)((char *)__scan->cpl_ops + __op); \
715 list_for_each_entry(__scan, &__page->cp_layers, \ 438 if (__method) \
716 cpl_linkage) { \ 439 (*__method)(__env, __scan, ## __VA_ARGS__); \
717 __method = *(void **)((char *)__scan->cpl_ops + \ 440 } \
718 __op); \
719 if (__method) \
720 (*__method)(__env, __scan, \
721 ## __VA_ARGS__); \
722 } \
723 __page = __page->cp_child; \
724 } while (__page); \
725} while (0) 441} while (0)
726 442
727#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \ 443#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
@@ -732,20 +448,11 @@ do { \
732 ptrdiff_t __op = (_op); \ 448 ptrdiff_t __op = (_op); \
733 void (*__method)_proto; \ 449 void (*__method)_proto; \
734 \ 450 \
735 /* get to the bottom page. */ \ 451 list_for_each_entry_reverse(__scan, &__page->cp_layers, cpl_linkage) { \
736 while (__page->cp_child) \ 452 __method = *(void **)((char *)__scan->cpl_ops + __op); \
737 __page = __page->cp_child; \ 453 if (__method) \
738 do { \ 454 (*__method)(__env, __scan, ## __VA_ARGS__); \
739 list_for_each_entry_reverse(__scan, &__page->cp_layers, \ 455 } \
740 cpl_linkage) { \
741 __method = *(void **)((char *)__scan->cpl_ops + \
742 __op); \
743 if (__method) \
744 (*__method)(__env, __scan, \
745 ## __VA_ARGS__); \
746 } \
747 __page = __page->cp_parent; \
748 } while (__page); \
749} while (0) 456} while (0)
750 457
751static int cl_page_invoke(const struct lu_env *env, 458static int cl_page_invoke(const struct lu_env *env,
@@ -771,20 +478,17 @@ static void cl_page_invoid(const struct lu_env *env,
771 478
772static void cl_page_owner_clear(struct cl_page *page) 479static void cl_page_owner_clear(struct cl_page *page)
773{ 480{
774 for (page = cl_page_top(page); page; page = page->cp_child) { 481 if (page->cp_owner) {
775 if (page->cp_owner) { 482 LASSERT(page->cp_owner->ci_owned_nr > 0);
776 LASSERT(page->cp_owner->ci_owned_nr > 0); 483 page->cp_owner->ci_owned_nr--;
777 page->cp_owner->ci_owned_nr--; 484 page->cp_owner = NULL;
778 page->cp_owner = NULL; 485 page->cp_task = NULL;
779 page->cp_task = NULL;
780 }
781 } 486 }
782} 487}
783 488
784static void cl_page_owner_set(struct cl_page *page) 489static void cl_page_owner_set(struct cl_page *page)
785{ 490{
786 for (page = cl_page_top(page); page; page = page->cp_child) 491 page->cp_owner->ci_owned_nr++;
787 page->cp_owner->ci_owned_nr++;
788} 492}
789 493
790void cl_page_disown0(const struct lu_env *env, 494void cl_page_disown0(const struct lu_env *env,
@@ -794,7 +498,7 @@ void cl_page_disown0(const struct lu_env *env,
794 498
795 state = pg->cp_state; 499 state = pg->cp_state;
796 PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING); 500 PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
797 PINVRNT(env, pg, cl_page_invariant(pg)); 501 PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
798 cl_page_owner_clear(pg); 502 cl_page_owner_clear(pg);
799 503
800 if (state == CPS_OWNED) 504 if (state == CPS_OWNED)
@@ -815,8 +519,9 @@ void cl_page_disown0(const struct lu_env *env,
815 */ 519 */
816int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io) 520int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
817{ 521{
522 struct cl_io *top = cl_io_top((struct cl_io *)io);
818 LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj)); 523 LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
819 return pg->cp_state == CPS_OWNED && pg->cp_owner == io; 524 return pg->cp_state == CPS_OWNED && pg->cp_owner == top;
820} 525}
821EXPORT_SYMBOL(cl_page_is_owned); 526EXPORT_SYMBOL(cl_page_is_owned);
822 527
@@ -847,7 +552,6 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
847 552
848 PINVRNT(env, pg, !cl_page_is_owned(pg, io)); 553 PINVRNT(env, pg, !cl_page_is_owned(pg, io));
849 554
850 pg = cl_page_top(pg);
851 io = cl_io_top(io); 555 io = cl_io_top(io);
852 556
853 if (pg->cp_state == CPS_FREEING) { 557 if (pg->cp_state == CPS_FREEING) {
@@ -861,7 +565,7 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
861 if (result == 0) { 565 if (result == 0) {
862 PASSERT(env, pg, !pg->cp_owner); 566 PASSERT(env, pg, !pg->cp_owner);
863 PASSERT(env, pg, !pg->cp_req); 567 PASSERT(env, pg, !pg->cp_req);
864 pg->cp_owner = io; 568 pg->cp_owner = cl_io_top(io);
865 pg->cp_task = current; 569 pg->cp_task = current;
866 cl_page_owner_set(pg); 570 cl_page_owner_set(pg);
867 if (pg->cp_state != CPS_FREEING) { 571 if (pg->cp_state != CPS_FREEING) {
@@ -914,12 +618,11 @@ void cl_page_assume(const struct lu_env *env,
914{ 618{
915 PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj)); 619 PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
916 620
917 pg = cl_page_top(pg);
918 io = cl_io_top(io); 621 io = cl_io_top(io);
919 622
920 cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume)); 623 cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
921 PASSERT(env, pg, !pg->cp_owner); 624 PASSERT(env, pg, !pg->cp_owner);
922 pg->cp_owner = io; 625 pg->cp_owner = cl_io_top(io);
923 pg->cp_task = current; 626 pg->cp_task = current;
924 cl_page_owner_set(pg); 627 cl_page_owner_set(pg);
925 cl_page_state_set(env, pg, CPS_OWNED); 628 cl_page_state_set(env, pg, CPS_OWNED);
@@ -943,7 +646,6 @@ void cl_page_unassume(const struct lu_env *env,
943 PINVRNT(env, pg, cl_page_is_owned(pg, io)); 646 PINVRNT(env, pg, cl_page_is_owned(pg, io));
944 PINVRNT(env, pg, cl_page_invariant(pg)); 647 PINVRNT(env, pg, cl_page_invariant(pg));
945 648
946 pg = cl_page_top(pg);
947 io = cl_io_top(io); 649 io = cl_io_top(io);
948 cl_page_owner_clear(pg); 650 cl_page_owner_clear(pg);
949 cl_page_state_set(env, pg, CPS_CACHED); 651 cl_page_state_set(env, pg, CPS_CACHED);
@@ -968,9 +670,9 @@ EXPORT_SYMBOL(cl_page_unassume);
968void cl_page_disown(const struct lu_env *env, 670void cl_page_disown(const struct lu_env *env,
969 struct cl_io *io, struct cl_page *pg) 671 struct cl_io *io, struct cl_page *pg)
970{ 672{
971 PINVRNT(env, pg, cl_page_is_owned(pg, io)); 673 PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
674 pg->cp_state == CPS_FREEING);
972 675
973 pg = cl_page_top(pg);
974 io = cl_io_top(io); 676 io = cl_io_top(io);
975 cl_page_disown0(env, io, pg); 677 cl_page_disown0(env, io, pg);
976} 678}
@@ -1001,12 +703,8 @@ EXPORT_SYMBOL(cl_page_discard);
1001 * pages, e.g,. in a error handling cl_page_find()->cl_page_delete0() 703 * pages, e.g,. in a error handling cl_page_find()->cl_page_delete0()
1002 * path. Doesn't check page invariant. 704 * path. Doesn't check page invariant.
1003 */ 705 */
1004static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg, 706static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
1005 int radix)
1006{ 707{
1007 struct cl_page *tmp = pg;
1008
1009 PASSERT(env, pg, pg == cl_page_top(pg));
1010 PASSERT(env, pg, pg->cp_state != CPS_FREEING); 708 PASSERT(env, pg, pg->cp_state != CPS_FREEING);
1011 709
1012 /* 710 /*
@@ -1014,41 +712,11 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
1014 */ 712 */
1015 cl_page_owner_clear(pg); 713 cl_page_owner_clear(pg);
1016 714
1017 /*
1018 * unexport the page firstly before freeing it so that
1019 * the page content is considered to be invalid.
1020 * We have to do this because a CPS_FREEING cl_page may
1021 * be NOT under the protection of a cl_lock.
1022 * Afterwards, if this page is found by other threads, then this
1023 * page will be forced to reread.
1024 */
1025 cl_page_export(env, pg, 0);
1026 cl_page_state_set0(env, pg, CPS_FREEING); 715 cl_page_state_set0(env, pg, CPS_FREEING);
1027 716
1028 CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete), 717 CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
1029 (const struct lu_env *, const struct cl_page_slice *)); 718 (const struct lu_env *,
1030 719 const struct cl_page_slice *));
1031 if (tmp->cp_type == CPT_CACHEABLE) {
1032 if (!radix)
1033 /* !radix means that @pg is not yet in the radix tree,
1034 * skip removing it.
1035 */
1036 tmp = pg->cp_child;
1037 for (; tmp; tmp = tmp->cp_child) {
1038 void *value;
1039 struct cl_object_header *hdr;
1040
1041 hdr = cl_object_header(tmp->cp_obj);
1042 spin_lock(&hdr->coh_page_guard);
1043 value = radix_tree_delete(&hdr->coh_tree,
1044 tmp->cp_index);
1045 PASSERT(env, tmp, value == tmp);
1046 PASSERT(env, tmp, hdr->coh_pages > 0);
1047 hdr->coh_pages--;
1048 spin_unlock(&hdr->coh_page_guard);
1049 cl_page_put(env, tmp);
1050 }
1051 }
1052} 720}
1053 721
1054/** 722/**
@@ -1070,7 +738,6 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
1070 * Once page reaches cl_page_state::CPS_FREEING, all remaining references will 738 * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
1071 * drain after some time, at which point page will be recycled. 739 * drain after some time, at which point page will be recycled.
1072 * 740 *
1073 * \pre pg == cl_page_top(pg)
1074 * \pre VM page is locked 741 * \pre VM page is locked
1075 * \post pg->cp_state == CPS_FREEING 742 * \post pg->cp_state == CPS_FREEING
1076 * 743 *
@@ -1079,30 +746,11 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
1079void cl_page_delete(const struct lu_env *env, struct cl_page *pg) 746void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
1080{ 747{
1081 PINVRNT(env, pg, cl_page_invariant(pg)); 748 PINVRNT(env, pg, cl_page_invariant(pg));
1082 cl_page_delete0(env, pg, 1); 749 cl_page_delete0(env, pg);
1083} 750}
1084EXPORT_SYMBOL(cl_page_delete); 751EXPORT_SYMBOL(cl_page_delete);
1085 752
1086/** 753/**
1087 * Unmaps page from user virtual memory.
1088 *
1089 * Calls cl_page_operations::cpo_unmap() through all layers top-to-bottom. The
1090 * layer responsible for VM interaction has to unmap page from user space
1091 * virtual memory.
1092 *
1093 * \see cl_page_operations::cpo_unmap()
1094 */
1095int cl_page_unmap(const struct lu_env *env,
1096 struct cl_io *io, struct cl_page *pg)
1097{
1098 PINVRNT(env, pg, cl_page_is_owned(pg, io));
1099 PINVRNT(env, pg, cl_page_invariant(pg));
1100
1101 return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap));
1102}
1103EXPORT_SYMBOL(cl_page_unmap);
1104
1105/**
1106 * Marks page up-to-date. 754 * Marks page up-to-date.
1107 * 755 *
1108 * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The 756 * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
@@ -1129,7 +777,6 @@ int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
1129 int result; 777 int result;
1130 const struct cl_page_slice *slice; 778 const struct cl_page_slice *slice;
1131 779
1132 pg = cl_page_top_trusted((struct cl_page *)pg);
1133 slice = container_of(pg->cp_layers.next, 780 slice = container_of(pg->cp_layers.next,
1134 const struct cl_page_slice, cpl_linkage); 781 const struct cl_page_slice, cpl_linkage);
1135 PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked); 782 PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked);
@@ -1241,7 +888,7 @@ void cl_page_completion(const struct lu_env *env,
1241 cl_page_put(env, pg); 888 cl_page_put(env, pg);
1242 889
1243 if (anchor) 890 if (anchor)
1244 cl_sync_io_note(anchor, ioret); 891 cl_sync_io_note(env, anchor, ioret);
1245} 892}
1246EXPORT_SYMBOL(cl_page_completion); 893EXPORT_SYMBOL(cl_page_completion);
1247 894
@@ -1276,44 +923,6 @@ int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
1276EXPORT_SYMBOL(cl_page_make_ready); 923EXPORT_SYMBOL(cl_page_make_ready);
1277 924
1278/** 925/**
1279 * Notify layers that high level io decided to place this page into a cache
1280 * for future transfer.
1281 *
1282 * The layer implementing transfer engine (osc) has to register this page in
1283 * its queues.
1284 *
1285 * \pre cl_page_is_owned(pg, io)
1286 * \post cl_page_is_owned(pg, io)
1287 *
1288 * \see cl_page_operations::cpo_cache_add()
1289 */
1290int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
1291 struct cl_page *pg, enum cl_req_type crt)
1292{
1293 const struct cl_page_slice *scan;
1294 int result = 0;
1295
1296 PINVRNT(env, pg, crt < CRT_NR);
1297 PINVRNT(env, pg, cl_page_is_owned(pg, io));
1298 PINVRNT(env, pg, cl_page_invariant(pg));
1299
1300 if (crt >= CRT_NR)
1301 return -EINVAL;
1302
1303 list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
1304 if (!scan->cpl_ops->io[crt].cpo_cache_add)
1305 continue;
1306
1307 result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io);
1308 if (result != 0)
1309 break;
1310 }
1311 CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
1312 return result;
1313}
1314EXPORT_SYMBOL(cl_page_cache_add);
1315
1316/**
1317 * Called if a pge is being written back by kernel's intention. 926 * Called if a pge is being written back by kernel's intention.
1318 * 927 *
1319 * \pre cl_page_is_owned(pg, io) 928 * \pre cl_page_is_owned(pg, io)
@@ -1344,68 +953,21 @@ EXPORT_SYMBOL(cl_page_flush);
1344 * \see cl_page_operations::cpo_is_under_lock() 953 * \see cl_page_operations::cpo_is_under_lock()
1345 */ 954 */
1346int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, 955int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
1347 struct cl_page *page) 956 struct cl_page *page, pgoff_t *max_index)
1348{ 957{
1349 int rc; 958 int rc;
1350 959
1351 PINVRNT(env, page, cl_page_invariant(page)); 960 PINVRNT(env, page, cl_page_invariant(page));
1352 961
1353 rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock), 962 rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
1354 (const struct lu_env *, 963 (const struct lu_env *,
1355 const struct cl_page_slice *, struct cl_io *), 964 const struct cl_page_slice *,
1356 io); 965 struct cl_io *, pgoff_t *),
1357 PASSERT(env, page, rc != 0); 966 io, max_index);
1358 return rc; 967 return rc;
1359} 968}
1360EXPORT_SYMBOL(cl_page_is_under_lock); 969EXPORT_SYMBOL(cl_page_is_under_lock);
1361 970
1362static int page_prune_cb(const struct lu_env *env, struct cl_io *io,
1363 struct cl_page *page, void *cbdata)
1364{
1365 cl_page_own(env, io, page);
1366 cl_page_unmap(env, io, page);
1367 cl_page_discard(env, io, page);
1368 cl_page_disown(env, io, page);
1369 return CLP_GANG_OKAY;
1370}
1371
1372/**
1373 * Purges all cached pages belonging to the object \a obj.
1374 */
1375int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
1376{
1377 struct cl_thread_info *info;
1378 struct cl_object *obj = cl_object_top(clobj);
1379 struct cl_io *io;
1380 int result;
1381
1382 info = cl_env_info(env);
1383 io = &info->clt_io;
1384
1385 /*
1386 * initialize the io. This is ugly since we never do IO in this
1387 * function, we just make cl_page_list functions happy. -jay
1388 */
1389 io->ci_obj = obj;
1390 io->ci_ignore_layout = 1;
1391 result = cl_io_init(env, io, CIT_MISC, obj);
1392 if (result != 0) {
1393 cl_io_fini(env, io);
1394 return io->ci_result;
1395 }
1396
1397 do {
1398 result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
1399 page_prune_cb, NULL);
1400 if (result == CLP_GANG_RESCHED)
1401 cond_resched();
1402 } while (result != CLP_GANG_OKAY);
1403
1404 cl_io_fini(env, io);
1405 return result;
1406}
1407EXPORT_SYMBOL(cl_pages_prune);
1408
1409/** 971/**
1410 * Tells transfer engine that only part of a page is to be transmitted. 972 * Tells transfer engine that only part of a page is to be transmitted.
1411 * 973 *
@@ -1431,9 +993,8 @@ void cl_page_header_print(const struct lu_env *env, void *cookie,
1431 lu_printer_t printer, const struct cl_page *pg) 993 lu_printer_t printer, const struct cl_page *pg)
1432{ 994{
1433 (*printer)(env, cookie, 995 (*printer)(env, cookie,
1434 "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n", 996 "page@%p[%d %p %d %d %d %p %p %#x]\n",
1435 pg, atomic_read(&pg->cp_ref), pg->cp_obj, 997 pg, atomic_read(&pg->cp_ref), pg->cp_obj,
1436 pg->cp_index, pg->cp_parent, pg->cp_child,
1437 pg->cp_state, pg->cp_error, pg->cp_type, 998 pg->cp_state, pg->cp_error, pg->cp_type,
1438 pg->cp_owner, pg->cp_req, pg->cp_flags); 999 pg->cp_owner, pg->cp_req, pg->cp_flags);
1439} 1000}
@@ -1445,11 +1006,7 @@ EXPORT_SYMBOL(cl_page_header_print);
1445void cl_page_print(const struct lu_env *env, void *cookie, 1006void cl_page_print(const struct lu_env *env, void *cookie,
1446 lu_printer_t printer, const struct cl_page *pg) 1007 lu_printer_t printer, const struct cl_page *pg)
1447{ 1008{
1448 struct cl_page *scan; 1009 cl_page_header_print(env, cookie, printer, pg);
1449
1450 for (scan = cl_page_top((struct cl_page *)pg); scan;
1451 scan = scan->cp_child)
1452 cl_page_header_print(env, cookie, printer, scan);
1453 CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print), 1010 CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
1454 (const struct lu_env *env, 1011 (const struct lu_env *env,
1455 const struct cl_page_slice *slice, 1012 const struct cl_page_slice *slice,
@@ -1509,21 +1066,13 @@ EXPORT_SYMBOL(cl_page_size);
1509 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add() 1066 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
1510 */ 1067 */
1511void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice, 1068void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
1512 struct cl_object *obj, 1069 struct cl_object *obj, pgoff_t index,
1513 const struct cl_page_operations *ops) 1070 const struct cl_page_operations *ops)
1514{ 1071{
1515 list_add_tail(&slice->cpl_linkage, &page->cp_layers); 1072 list_add_tail(&slice->cpl_linkage, &page->cp_layers);
1516 slice->cpl_obj = obj; 1073 slice->cpl_obj = obj;
1074 slice->cpl_index = index;
1517 slice->cpl_ops = ops; 1075 slice->cpl_ops = ops;
1518 slice->cpl_page = page; 1076 slice->cpl_page = page;
1519} 1077}
1520EXPORT_SYMBOL(cl_page_slice_add); 1078EXPORT_SYMBOL(cl_page_slice_add);
1521
1522int cl_page_init(void)
1523{
1524 return 0;
1525}
1526
1527void cl_page_fini(void)
1528{
1529}
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index c2cf015962dd..f48816af8be7 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -60,6 +60,8 @@ unsigned int obd_dump_on_eviction;
60EXPORT_SYMBOL(obd_dump_on_eviction); 60EXPORT_SYMBOL(obd_dump_on_eviction);
61unsigned int obd_max_dirty_pages = 256; 61unsigned int obd_max_dirty_pages = 256;
62EXPORT_SYMBOL(obd_max_dirty_pages); 62EXPORT_SYMBOL(obd_max_dirty_pages);
63atomic_t obd_unstable_pages;
64EXPORT_SYMBOL(obd_unstable_pages);
63atomic_t obd_dirty_pages; 65atomic_t obd_dirty_pages;
64EXPORT_SYMBOL(obd_dirty_pages); 66EXPORT_SYMBOL(obd_dirty_pages);
65unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */ 67unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */
@@ -335,7 +337,6 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
335 err = 0; 337 err = 0;
336 goto out; 338 goto out;
337 } 339 }
338
339 } 340 }
340 341
341 if (data->ioc_dev == OBD_DEV_BY_DEVNAME) { 342 if (data->ioc_dev == OBD_DEV_BY_DEVNAME) {
@@ -461,7 +462,7 @@ static int obd_init_checks(void)
461 CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len); 462 CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
462 ret = -EINVAL; 463 ret = -EINVAL;
463 } 464 }
464 if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) { 465 if ((u64val & ~PAGE_MASK) >= PAGE_SIZE) {
465 CWARN("mask failed: u64val %llu >= %llu\n", u64val, 466 CWARN("mask failed: u64val %llu >= %llu\n", u64val,
466 (__u64)PAGE_SIZE); 467 (__u64)PAGE_SIZE);
467 ret = -EINVAL; 468 ret = -EINVAL;
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 43a7f7a79b35..e4edfb2c0a20 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -68,8 +68,8 @@ int block_debug_check(char *who, void *addr, int end, __u64 off, __u64 id)
68 68
69 LASSERT(addr); 69 LASSERT(addr);
70 70
71 ne_off = le64_to_cpu (off); 71 ne_off = le64_to_cpu(off);
72 id = le64_to_cpu (id); 72 id = le64_to_cpu(id);
73 if (memcmp(addr, (char *)&ne_off, LPDS)) { 73 if (memcmp(addr, (char *)&ne_off, LPDS)) {
74 CDEBUG(D_ERROR, "%s: id %#llx offset %llu off: %#llx != %#llx\n", 74 CDEBUG(D_ERROR, "%s: id %#llx offset %llu off: %#llx != %#llx\n",
75 who, id, off, *(__u64 *)addr, ne_off); 75 who, id, off, *(__u64 *)addr, ne_off);
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index cf97b8f06764..d95f11d62a32 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -604,7 +604,6 @@ int obd_init_caches(void)
604 out: 604 out:
605 obd_cleanup_caches(); 605 obd_cleanup_caches();
606 return -ENOMEM; 606 return -ENOMEM;
607
608} 607}
609 608
610/* map connection to client */ 609/* map connection to client */
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 8eddf206f1ed..2cd4522462d9 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -158,9 +158,7 @@ int obd_ioctl_popdata(void __user *arg, void *data, int len)
158{ 158{
159 int err; 159 int err;
160 160
161 err = copy_to_user(arg, data, len); 161 err = copy_to_user(arg, data, len) ? -EFAULT : 0;
162 if (err)
163 err = -EFAULT;
164 return err; 162 return err;
165} 163}
166EXPORT_SYMBOL(obd_ioctl_popdata); 164EXPORT_SYMBOL(obd_ioctl_popdata);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 992573eae1b1..79194d8cb587 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -265,7 +265,6 @@ repeat:
265 for (rec = (struct llog_rec_hdr *)buf; 265 for (rec = (struct llog_rec_hdr *)buf;
266 (char *)rec < buf + LLOG_CHUNK_SIZE; 266 (char *)rec < buf + LLOG_CHUNK_SIZE;
267 rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)) { 267 rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)) {
268
269 CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n", 268 CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
270 rec, rec->lrh_type); 269 rec, rec->lrh_type);
271 270
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index d93f42fee420..5a1eae1de2ec 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -49,7 +49,7 @@
49static const char * const obd_connect_names[] = { 49static const char * const obd_connect_names[] = {
50 "read_only", 50 "read_only",
51 "lov_index", 51 "lov_index",
52 "unused", 52 "connect_from_mds",
53 "write_grant", 53 "write_grant",
54 "server_lock", 54 "server_lock",
55 "version", 55 "version",
@@ -122,6 +122,56 @@ int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
122} 122}
123EXPORT_SYMBOL(obd_connect_flags2str); 123EXPORT_SYMBOL(obd_connect_flags2str);
124 124
125static void obd_connect_data_seqprint(struct seq_file *m,
126 struct obd_connect_data *ocd)
127{
128 int flags;
129
130 LASSERT(ocd);
131 flags = ocd->ocd_connect_flags;
132
133 seq_printf(m, " connect_data:\n"
134 " flags: %llx\n"
135 " instance: %u\n",
136 ocd->ocd_connect_flags,
137 ocd->ocd_instance);
138 if (flags & OBD_CONNECT_VERSION)
139 seq_printf(m, " target_version: %u.%u.%u.%u\n",
140 OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
141 OBD_OCD_VERSION_MINOR(ocd->ocd_version),
142 OBD_OCD_VERSION_PATCH(ocd->ocd_version),
143 OBD_OCD_VERSION_FIX(ocd->ocd_version));
144 if (flags & OBD_CONNECT_MDS)
145 seq_printf(m, " mdt_index: %d\n", ocd->ocd_group);
146 if (flags & OBD_CONNECT_GRANT)
147 seq_printf(m, " initial_grant: %d\n", ocd->ocd_grant);
148 if (flags & OBD_CONNECT_INDEX)
149 seq_printf(m, " target_index: %u\n", ocd->ocd_index);
150 if (flags & OBD_CONNECT_BRW_SIZE)
151 seq_printf(m, " max_brw_size: %d\n", ocd->ocd_brw_size);
152 if (flags & OBD_CONNECT_IBITS)
153 seq_printf(m, " ibits_known: %llx\n",
154 ocd->ocd_ibits_known);
155 if (flags & OBD_CONNECT_GRANT_PARAM)
156 seq_printf(m, " grant_block_size: %d\n"
157 " grant_inode_size: %d\n"
158 " grant_extent_overhead: %d\n",
159 ocd->ocd_blocksize,
160 ocd->ocd_inodespace,
161 ocd->ocd_grant_extent);
162 if (flags & OBD_CONNECT_TRANSNO)
163 seq_printf(m, " first_transno: %llx\n",
164 ocd->ocd_transno);
165 if (flags & OBD_CONNECT_CKSUM)
166 seq_printf(m, " cksum_types: %#x\n",
167 ocd->ocd_cksum_types);
168 if (flags & OBD_CONNECT_MAX_EASIZE)
169 seq_printf(m, " max_easize: %d\n", ocd->ocd_max_easize);
170 if (flags & OBD_CONNECT_MAXBYTES)
171 seq_printf(m, " max_object_bytes: %llx\n",
172 ocd->ocd_maxbytes);
173}
174
125int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val, 175int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
126 int mult) 176 int mult)
127{ 177{
@@ -624,6 +674,7 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
624 struct obd_device *obd = data; 674 struct obd_device *obd = data;
625 struct obd_import *imp; 675 struct obd_import *imp;
626 struct obd_import_conn *conn; 676 struct obd_import_conn *conn;
677 struct obd_connect_data *ocd;
627 int j; 678 int j;
628 int k; 679 int k;
629 int rw = 0; 680 int rw = 0;
@@ -635,9 +686,9 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
635 return rc; 686 return rc;
636 687
637 imp = obd->u.cli.cl_import; 688 imp = obd->u.cli.cl_import;
689 ocd = &imp->imp_connect_data;
638 690
639 seq_printf(m, 691 seq_printf(m, "import:\n"
640 "import:\n"
641 " name: %s\n" 692 " name: %s\n"
642 " target: %s\n" 693 " target: %s\n"
643 " state: %s\n" 694 " state: %s\n"
@@ -649,9 +700,9 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
649 imp->imp_connect_data.ocd_instance); 700 imp->imp_connect_data.ocd_instance);
650 obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags, 701 obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags,
651 ", "); 702 ", ");
652 seq_printf(m, 703 seq_printf(m, " ]\n");
653 " ]\n" 704 obd_connect_data_seqprint(m, ocd);
654 " import_flags: [ "); 705 seq_printf(m, " import_flags: [ ");
655 obd_import_flags2str(imp, m); 706 obd_import_flags2str(imp, m);
656 707
657 seq_printf(m, 708 seq_printf(m,
@@ -694,8 +745,9 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
694 745
695 do_div(sum, ret.lc_count); 746 do_div(sum, ret.lc_count);
696 ret.lc_sum = sum; 747 ret.lc_sum = sum;
697 } else 748 } else {
698 ret.lc_sum = 0; 749 ret.lc_sum = 0;
750 }
699 seq_printf(m, 751 seq_printf(m,
700 " rpcs:\n" 752 " rpcs:\n"
701 " inflight: %u\n" 753 " inflight: %u\n"
@@ -1471,10 +1523,10 @@ EXPORT_SYMBOL(lprocfs_oh_tally);
1471 1523
1472void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value) 1524void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value)
1473{ 1525{
1474 unsigned int val; 1526 unsigned int val = 0;
1475 1527
1476 for (val = 0; ((1 << val) < value) && (val <= OBD_HIST_MAX); val++) 1528 if (likely(value != 0))
1477 ; 1529 val = min(fls(value - 1), OBD_HIST_MAX);
1478 1530
1479 lprocfs_oh_tally(oh, val); 1531 lprocfs_oh_tally(oh, val);
1480} 1532}
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 978568ada8e9..e04385760f21 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -55,6 +55,7 @@
55#include "../include/lustre_disk.h" 55#include "../include/lustre_disk.h"
56#include "../include/lustre_fid.h" 56#include "../include/lustre_fid.h"
57#include "../include/lu_object.h" 57#include "../include/lu_object.h"
58#include "../include/cl_object.h"
58#include "../include/lu_ref.h" 59#include "../include/lu_ref.h"
59#include <linux/list.h> 60#include <linux/list.h>
60 61
@@ -103,7 +104,6 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
103 104
104 if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) { 105 if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
105 if (lu_object_is_dying(top)) { 106 if (lu_object_is_dying(top)) {
106
107 /* 107 /*
108 * somebody may be waiting for this, currently only 108 * somebody may be waiting for this, currently only
109 * used for cl_object, see cl_object_put_last(). 109 * used for cl_object, see cl_object_put_last().
@@ -357,7 +357,6 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
357 357
358 if (count > 0 && --count == 0) 358 if (count > 0 && --count == 0)
359 break; 359 break;
360
361 } 360 }
362 cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1); 361 cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
363 cond_resched(); 362 cond_resched();
@@ -715,8 +714,9 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env,
715 obj = lu_object_locate(top->lo_header, dev->ld_type); 714 obj = lu_object_locate(top->lo_header, dev->ld_type);
716 if (!obj) 715 if (!obj)
717 lu_object_put(env, top); 716 lu_object_put(env, top);
718 } else 717 } else {
719 obj = top; 718 obj = top;
719 }
720 return obj; 720 return obj;
721} 721}
722EXPORT_SYMBOL(lu_object_find_slice); 722EXPORT_SYMBOL(lu_object_find_slice);
@@ -935,7 +935,7 @@ static void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
935 * Initialize site \a s, with \a d as the top level device. 935 * Initialize site \a s, with \a d as the top level device.
936 */ 936 */
937#define LU_SITE_BITS_MIN 12 937#define LU_SITE_BITS_MIN 12
938#define LU_SITE_BITS_MAX 24 938#define LU_SITE_BITS_MAX 19
939/** 939/**
940 * total 256 buckets, we don't want too many buckets because: 940 * total 256 buckets, we don't want too many buckets because:
941 * - consume too much memory 941 * - consume too much memory
@@ -1468,6 +1468,7 @@ void lu_context_key_quiesce(struct lu_context_key *key)
1468 /* 1468 /*
1469 * XXX layering violation. 1469 * XXX layering violation.
1470 */ 1470 */
1471 cl_env_cache_purge(~0);
1471 key->lct_tags |= LCT_QUIESCENT; 1472 key->lct_tags |= LCT_QUIESCENT;
1472 /* 1473 /*
1473 * XXX memory barrier has to go here. 1474 * XXX memory barrier has to go here.
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index 5f812460b3ea..b1abe023bb35 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -163,8 +163,9 @@ int class_del_uuid(const char *uuid)
163 break; 163 break;
164 } 164 }
165 } 165 }
166 } else 166 } else {
167 list_splice_init(&g_uuid_list, &deathrow); 167 list_splice_init(&g_uuid_list, &deathrow);
168 }
168 spin_unlock(&g_uuid_lock); 169 spin_unlock(&g_uuid_lock);
169 170
170 if (uuid && list_empty(&deathrow)) { 171 if (uuid && list_empty(&deathrow)) {
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 5395e994deab..cb1d65c3d95d 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -606,7 +606,7 @@ static int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
606 return rc; 606 return rc;
607} 607}
608 608
609LIST_HEAD(lustre_profile_list); 609static LIST_HEAD(lustre_profile_list);
610 610
611struct lustre_profile *class_get_profile(const char *prof) 611struct lustre_profile *class_get_profile(const char *prof)
612{ 612{
@@ -961,7 +961,6 @@ int class_process_config(struct lustre_cfg *lcfg)
961 default: { 961 default: {
962 err = obd_process_config(obd, sizeof(*lcfg), lcfg); 962 err = obd_process_config(obd, sizeof(*lcfg), lcfg);
963 goto out; 963 goto out;
964
965 } 964 }
966 } 965 }
967out: 966out:
@@ -1001,7 +1000,13 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
1001 for (i = 1; i < lcfg->lcfg_bufcount; i++) { 1000 for (i = 1; i < lcfg->lcfg_bufcount; i++) {
1002 key = lustre_cfg_buf(lcfg, i); 1001 key = lustre_cfg_buf(lcfg, i);
1003 /* Strip off prefix */ 1002 /* Strip off prefix */
1004 class_match_param(key, prefix, &key); 1003 if (class_match_param(key, prefix, &key)) {
1004 /*
1005 * If the prefix doesn't match, return error so we
1006 * can pass it down the stack
1007 */
1008 return -ENOSYS;
1009 }
1005 sval = strchr(key, '='); 1010 sval = strchr(key, '=');
1006 if (!sval || (*(sval + 1) == 0)) { 1011 if (!sval || (*(sval + 1) == 0)) {
1007 CERROR("Can't parse param %s (missing '=')\n", key); 1012 CERROR("Can't parse param %s (missing '=')\n", key);
@@ -1034,18 +1039,14 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
1034 j++; 1039 j++;
1035 } 1040 }
1036 if (!matched) { 1041 if (!matched) {
1037 /* If the prefix doesn't match, return error so we 1042 CERROR("%.*s: %s unknown param %s\n",
1038 * can pass it down the stack 1043 (int)strlen(prefix) - 1, prefix,
1039 */
1040 if (strnchr(key, keylen, '.'))
1041 return -ENOSYS;
1042 CERROR("%s: unknown param %s\n",
1043 (char *)lustre_cfg_string(lcfg, 0), key); 1044 (char *)lustre_cfg_string(lcfg, 0), key);
1044 /* rc = -EINVAL; continue parsing other params */ 1045 /* rc = -EINVAL; continue parsing other params */
1045 skip++; 1046 skip++;
1046 } else if (rc < 0) { 1047 } else if (rc < 0) {
1047 CERROR("writing proc entry %s err %d\n", 1048 CERROR("%s: error writing proc entry '%s': rc = %d\n",
1048 var->name, rc); 1049 prefix, var->name, rc);
1049 rc = 0; 1050 rc = 0;
1050 } else { 1051 } else {
1051 CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n", 1052 CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n",
@@ -1350,6 +1351,7 @@ static int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf,
1350 lustre_cfg_string(lcfg, i)); 1351 lustre_cfg_string(lcfg, i));
1351 } 1352 }
1352 } 1353 }
1354 ptr += snprintf(ptr, end - ptr, "\n");
1353 /* return consumed bytes */ 1355 /* return consumed bytes */
1354 rc = ptr - buf; 1356 rc = ptr - buf;
1355 return rc; 1357 return rc;
@@ -1368,7 +1370,7 @@ int class_config_dump_handler(const struct lu_env *env,
1368 1370
1369 if (rec->lrh_type == OBD_CFG_REC) { 1371 if (rec->lrh_type == OBD_CFG_REC) {
1370 class_config_parse_rec(rec, outstr, 256); 1372 class_config_parse_rec(rec, outstr, 256);
1371 LCONSOLE(D_WARNING, " %s\n", outstr); 1373 LCONSOLE(D_WARNING, " %s", outstr);
1372 } else { 1374 } else {
1373 LCONSOLE(D_WARNING, "unhandled lrh_type: %#x\n", rec->lrh_type); 1375 LCONSOLE(D_WARNING, "unhandled lrh_type: %#x\n", rec->lrh_type);
1374 rc = -EINVAL; 1376 rc = -EINVAL;
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index d3e28a389ac1..e0c90adc72a7 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -102,7 +102,7 @@ int lustre_process_log(struct super_block *sb, char *logname,
102 LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s' failed from the MGS (%d). Make sure this client and the MGS are running compatible versions of Lustre.\n", 102 LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s' failed from the MGS (%d). Make sure this client and the MGS are running compatible versions of Lustre.\n",
103 mgc->obd_name, logname, rc); 103 mgc->obd_name, logname, rc);
104 104
105 if (rc) 105 else if (rc)
106 LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' failed (%d). This may be the result of communication errors between this node and the MGS, a bad configuration, or other errors. See the syslog for more information.\n", 106 LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' failed (%d). This may be the result of communication errors between this node and the MGS, a bad configuration, or other errors. See the syslog for more information.\n",
107 mgc->obd_name, logname, 107 mgc->obd_name, logname,
108 rc); 108 rc);
@@ -307,7 +307,8 @@ int lustre_start_mgc(struct super_block *sb)
307 while (class_parse_nid(ptr, &nid, &ptr) == 0) { 307 while (class_parse_nid(ptr, &nid, &ptr) == 0) {
308 rc = do_lcfg(mgcname, nid, 308 rc = do_lcfg(mgcname, nid,
309 LCFG_ADD_UUID, niduuid, NULL, NULL, NULL); 309 LCFG_ADD_UUID, niduuid, NULL, NULL, NULL);
310 i++; 310 if (!rc)
311 i++;
311 /* Stop at the first failover nid */ 312 /* Stop at the first failover nid */
312 if (*ptr == ':') 313 if (*ptr == ':')
313 break; 314 break;
@@ -345,16 +346,18 @@ int lustre_start_mgc(struct super_block *sb)
345 sprintf(niduuid, "%s_%x", mgcname, i); 346 sprintf(niduuid, "%s_%x", mgcname, i);
346 j = 0; 347 j = 0;
347 while (class_parse_nid_quiet(ptr, &nid, &ptr) == 0) { 348 while (class_parse_nid_quiet(ptr, &nid, &ptr) == 0) {
348 j++; 349 rc = do_lcfg(mgcname, nid, LCFG_ADD_UUID, niduuid,
349 rc = do_lcfg(mgcname, nid, 350 NULL, NULL, NULL);
350 LCFG_ADD_UUID, niduuid, NULL, NULL, NULL); 351 if (!rc)
352 ++j;
351 if (*ptr == ':') 353 if (*ptr == ':')
352 break; 354 break;
353 } 355 }
354 if (j > 0) { 356 if (j > 0) {
355 rc = do_lcfg(mgcname, 0, LCFG_ADD_CONN, 357 rc = do_lcfg(mgcname, 0, LCFG_ADD_CONN,
356 niduuid, NULL, NULL, NULL); 358 niduuid, NULL, NULL, NULL);
357 i++; 359 if (!rc)
360 i++;
358 } else { 361 } else {
359 /* at ":/fsname" */ 362 /* at ":/fsname" */
360 break; 363 break;
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index e6436cb4ac62..748e33f017d5 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -185,8 +185,7 @@ void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, u32 valid)
185 op_data->op_attr.ia_valid |= ATTR_BLOCKS; 185 op_data->op_attr.ia_valid |= ATTR_BLOCKS;
186 } 186 }
187 if (valid & OBD_MD_FLFLAGS) { 187 if (valid & OBD_MD_FLFLAGS) {
188 ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = 188 op_data->op_attr_flags = oa->o_flags;
189 oa->o_flags;
190 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG; 189 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
191 } 190 }
192} 191}
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 1e83669c204d..91ef06f17934 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -81,7 +81,6 @@ struct echo_object_conf {
81struct echo_page { 81struct echo_page {
82 struct cl_page_slice ep_cl; 82 struct cl_page_slice ep_cl;
83 struct mutex ep_lock; 83 struct mutex ep_lock;
84 struct page *ep_vmpage;
85}; 84};
86 85
87struct echo_lock { 86struct echo_lock {
@@ -164,15 +163,13 @@ static int cl_echo_object_put(struct echo_object *eco);
164static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, 163static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
165 struct page **pages, int npages, int async); 164 struct page **pages, int npages, int async);
166 165
167static struct echo_thread_info *echo_env_info(const struct lu_env *env);
168
169struct echo_thread_info { 166struct echo_thread_info {
170 struct echo_object_conf eti_conf; 167 struct echo_object_conf eti_conf;
171 struct lustre_md eti_md; 168 struct lustre_md eti_md;
172 169
173 struct cl_2queue eti_queue; 170 struct cl_2queue eti_queue;
174 struct cl_io eti_io; 171 struct cl_io eti_io;
175 struct cl_lock_descr eti_descr; 172 struct cl_lock eti_lock;
176 struct lu_fid eti_fid; 173 struct lu_fid eti_fid;
177 struct lu_fid eti_fid2; 174 struct lu_fid eti_fid2;
178}; 175};
@@ -219,12 +216,6 @@ static struct lu_kmem_descr echo_caches[] = {
219 * 216 *
220 * @{ 217 * @{
221 */ 218 */
222static struct page *echo_page_vmpage(const struct lu_env *env,
223 const struct cl_page_slice *slice)
224{
225 return cl2echo_page(slice)->ep_vmpage;
226}
227
228static int echo_page_own(const struct lu_env *env, 219static int echo_page_own(const struct lu_env *env,
229 const struct cl_page_slice *slice, 220 const struct cl_page_slice *slice,
230 struct cl_io *io, int nonblock) 221 struct cl_io *io, int nonblock)
@@ -273,12 +264,10 @@ static void echo_page_completion(const struct lu_env *env,
273static void echo_page_fini(const struct lu_env *env, 264static void echo_page_fini(const struct lu_env *env,
274 struct cl_page_slice *slice) 265 struct cl_page_slice *slice)
275{ 266{
276 struct echo_page *ep = cl2echo_page(slice);
277 struct echo_object *eco = cl2echo_obj(slice->cpl_obj); 267 struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
278 struct page *vmpage = ep->ep_vmpage;
279 268
280 atomic_dec(&eco->eo_npages); 269 atomic_dec(&eco->eo_npages);
281 put_page(vmpage); 270 put_page(slice->cpl_page->cp_vmpage);
282} 271}
283 272
284static int echo_page_prep(const struct lu_env *env, 273static int echo_page_prep(const struct lu_env *env,
@@ -295,7 +284,8 @@ static int echo_page_print(const struct lu_env *env,
295 struct echo_page *ep = cl2echo_page(slice); 284 struct echo_page *ep = cl2echo_page(slice);
296 285
297 (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n", 286 (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
298 ep, mutex_is_locked(&ep->ep_lock), ep->ep_vmpage); 287 ep, mutex_is_locked(&ep->ep_lock),
288 slice->cpl_page->cp_vmpage);
299 return 0; 289 return 0;
300} 290}
301 291
@@ -303,7 +293,6 @@ static const struct cl_page_operations echo_page_ops = {
303 .cpo_own = echo_page_own, 293 .cpo_own = echo_page_own,
304 .cpo_disown = echo_page_disown, 294 .cpo_disown = echo_page_disown,
305 .cpo_discard = echo_page_discard, 295 .cpo_discard = echo_page_discard,
306 .cpo_vmpage = echo_page_vmpage,
307 .cpo_fini = echo_page_fini, 296 .cpo_fini = echo_page_fini,
308 .cpo_print = echo_page_print, 297 .cpo_print = echo_page_print,
309 .cpo_is_vmlocked = echo_page_is_vmlocked, 298 .cpo_is_vmlocked = echo_page_is_vmlocked,
@@ -336,26 +325,8 @@ static void echo_lock_fini(const struct lu_env *env,
336 kmem_cache_free(echo_lock_kmem, ecl); 325 kmem_cache_free(echo_lock_kmem, ecl);
337} 326}
338 327
339static void echo_lock_delete(const struct lu_env *env,
340 const struct cl_lock_slice *slice)
341{
342 struct echo_lock *ecl = cl2echo_lock(slice);
343
344 LASSERT(list_empty(&ecl->el_chain));
345}
346
347static int echo_lock_fits_into(const struct lu_env *env,
348 const struct cl_lock_slice *slice,
349 const struct cl_lock_descr *need,
350 const struct cl_io *unused)
351{
352 return 1;
353}
354
355static struct cl_lock_operations echo_lock_ops = { 328static struct cl_lock_operations echo_lock_ops = {
356 .clo_fini = echo_lock_fini, 329 .clo_fini = echo_lock_fini,
357 .clo_delete = echo_lock_delete,
358 .clo_fits_into = echo_lock_fits_into
359}; 330};
360 331
361/** @} echo_lock */ 332/** @} echo_lock */
@@ -367,15 +338,14 @@ static struct cl_lock_operations echo_lock_ops = {
367 * @{ 338 * @{
368 */ 339 */
369static int echo_page_init(const struct lu_env *env, struct cl_object *obj, 340static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
370 struct cl_page *page, struct page *vmpage) 341 struct cl_page *page, pgoff_t index)
371{ 342{
372 struct echo_page *ep = cl_object_page_slice(obj, page); 343 struct echo_page *ep = cl_object_page_slice(obj, page);
373 struct echo_object *eco = cl2echo_obj(obj); 344 struct echo_object *eco = cl2echo_obj(obj);
374 345
375 ep->ep_vmpage = vmpage; 346 get_page(page->cp_vmpage);
376 get_page(vmpage);
377 mutex_init(&ep->ep_lock); 347 mutex_init(&ep->ep_lock);
378 cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops); 348 cl_page_slice_add(page, &ep->ep_cl, obj, index, &echo_page_ops);
379 atomic_inc(&eco->eo_npages); 349 atomic_inc(&eco->eo_npages);
380 return 0; 350 return 0;
381} 351}
@@ -568,6 +538,8 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env,
568 538
569 obj = &echo_obj2cl(eco)->co_lu; 539 obj = &echo_obj2cl(eco)->co_lu;
570 cl_object_header_init(hdr); 540 cl_object_header_init(hdr);
541 hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
542
571 lu_object_init(obj, &hdr->coh_lu, dev); 543 lu_object_init(obj, &hdr->coh_lu, dev);
572 lu_object_add_top(&hdr->coh_lu, obj); 544 lu_object_add_top(&hdr->coh_lu, obj);
573 545
@@ -694,8 +666,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
694 struct obd_device *obd = NULL; /* to keep compiler happy */ 666 struct obd_device *obd = NULL; /* to keep compiler happy */
695 struct obd_device *tgt; 667 struct obd_device *tgt;
696 const char *tgt_type_name; 668 const char *tgt_type_name;
697 int rc; 669 int rc, err;
698 int cleanup = 0;
699 670
700 ed = kzalloc(sizeof(*ed), GFP_NOFS); 671 ed = kzalloc(sizeof(*ed), GFP_NOFS);
701 if (!ed) { 672 if (!ed) {
@@ -703,16 +674,14 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
703 goto out; 674 goto out;
704 } 675 }
705 676
706 cleanup = 1;
707 cd = &ed->ed_cl; 677 cd = &ed->ed_cl;
708 rc = cl_device_init(cd, t); 678 rc = cl_device_init(cd, t);
709 if (rc) 679 if (rc)
710 goto out; 680 goto out_free;
711 681
712 cd->cd_lu_dev.ld_ops = &echo_device_lu_ops; 682 cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
713 cd->cd_ops = &echo_device_cl_ops; 683 cd->cd_ops = &echo_device_cl_ops;
714 684
715 cleanup = 2;
716 obd = class_name2obd(lustre_cfg_string(cfg, 0)); 685 obd = class_name2obd(lustre_cfg_string(cfg, 0));
717 LASSERT(obd); 686 LASSERT(obd);
718 LASSERT(env); 687 LASSERT(env);
@@ -722,28 +691,25 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
722 CERROR("Can not find tgt device %s\n", 691 CERROR("Can not find tgt device %s\n",
723 lustre_cfg_string(cfg, 1)); 692 lustre_cfg_string(cfg, 1));
724 rc = -ENODEV; 693 rc = -ENODEV;
725 goto out; 694 goto out_device_fini;
726 } 695 }
727 696
728 next = tgt->obd_lu_dev; 697 next = tgt->obd_lu_dev;
729 if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) { 698 if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
730 CERROR("echo MDT client must be run on server\n"); 699 CERROR("echo MDT client must be run on server\n");
731 rc = -EOPNOTSUPP; 700 rc = -EOPNOTSUPP;
732 goto out; 701 goto out_device_fini;
733 } 702 }
734 703
735 rc = echo_site_init(env, ed); 704 rc = echo_site_init(env, ed);
736 if (rc) 705 if (rc)
737 goto out; 706 goto out_device_fini;
738
739 cleanup = 3;
740 707
741 rc = echo_client_setup(env, obd, cfg); 708 rc = echo_client_setup(env, obd, cfg);
742 if (rc) 709 if (rc)
743 goto out; 710 goto out_site_fini;
744 711
745 ed->ed_ec = &obd->u.echo_client; 712 ed->ed_ec = &obd->u.echo_client;
746 cleanup = 4;
747 713
748 /* if echo client is to be stacked upon ost device, the next is 714 /* if echo client is to be stacked upon ost device, the next is
749 * NULL since ost is not a clio device so far 715 * NULL since ost is not a clio device so far
@@ -755,7 +721,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
755 if (next) { 721 if (next) {
756 if (next->ld_site) { 722 if (next->ld_site) {
757 rc = -EBUSY; 723 rc = -EBUSY;
758 goto out; 724 goto out_cleanup;
759 } 725 }
760 726
761 next->ld_site = &ed->ed_site->cs_lu; 727 next->ld_site = &ed->ed_site->cs_lu;
@@ -763,7 +729,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
763 next->ld_type->ldt_name, 729 next->ld_type->ldt_name,
764 NULL); 730 NULL);
765 if (rc) 731 if (rc)
766 goto out; 732 goto out_cleanup;
767 733
768 } else { 734 } else {
769 LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0); 735 LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
@@ -771,27 +737,19 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
771 737
772 ed->ed_next = next; 738 ed->ed_next = next;
773 return &cd->cd_lu_dev; 739 return &cd->cd_lu_dev;
774out:
775 switch (cleanup) {
776 case 4: {
777 int rc2;
778
779 rc2 = echo_client_cleanup(obd);
780 if (rc2)
781 CERROR("Cleanup obd device %s error(%d)\n",
782 obd->obd_name, rc2);
783 }
784 740
785 case 3: 741out_cleanup:
786 echo_site_fini(env, ed); 742 err = echo_client_cleanup(obd);
787 case 2: 743 if (err)
788 cl_device_fini(&ed->ed_cl); 744 CERROR("Cleanup obd device %s error(%d)\n",
789 case 1: 745 obd->obd_name, err);
790 kfree(ed); 746out_site_fini:
791 case 0: 747 echo_site_fini(env, ed);
792 default: 748out_device_fini:
793 break; 749 cl_device_fini(&ed->ed_cl);
794 } 750out_free:
751 kfree(ed);
752out:
795 return ERR_PTR(rc); 753 return ERR_PTR(rc);
796} 754}
797 755
@@ -819,16 +777,7 @@ static void echo_lock_release(const struct lu_env *env,
819{ 777{
820 struct cl_lock *clk = echo_lock2cl(ecl); 778 struct cl_lock *clk = echo_lock2cl(ecl);
821 779
822 cl_lock_get(clk); 780 cl_lock_release(env, clk);
823 cl_unuse(env, clk);
824 cl_lock_release(env, clk, "ec enqueue", ecl->el_object);
825 if (!still_used) {
826 cl_lock_mutex_get(env, clk);
827 cl_lock_cancel(env, clk);
828 cl_lock_delete(env, clk);
829 cl_lock_mutex_put(env, clk);
830 }
831 cl_lock_put(env, clk);
832} 781}
833 782
834static struct lu_device *echo_device_free(const struct lu_env *env, 783static struct lu_device *echo_device_free(const struct lu_env *env,
@@ -1022,9 +971,11 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
1022 971
1023 info = echo_env_info(env); 972 info = echo_env_info(env);
1024 io = &info->eti_io; 973 io = &info->eti_io;
1025 descr = &info->eti_descr; 974 lck = &info->eti_lock;
1026 obj = echo_obj2cl(eco); 975 obj = echo_obj2cl(eco);
1027 976
977 memset(lck, 0, sizeof(*lck));
978 descr = &lck->cll_descr;
1028 descr->cld_obj = obj; 979 descr->cld_obj = obj;
1029 descr->cld_start = cl_index(obj, start); 980 descr->cld_start = cl_index(obj, start);
1030 descr->cld_end = cl_index(obj, end); 981 descr->cld_end = cl_index(obj, end);
@@ -1032,25 +983,20 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
1032 descr->cld_enq_flags = enqflags; 983 descr->cld_enq_flags = enqflags;
1033 io->ci_obj = obj; 984 io->ci_obj = obj;
1034 985
1035 lck = cl_lock_request(env, io, descr, "ec enqueue", eco); 986 rc = cl_lock_request(env, io, lck);
1036 if (lck) { 987 if (rc == 0) {
1037 struct echo_client_obd *ec = eco->eo_dev->ed_ec; 988 struct echo_client_obd *ec = eco->eo_dev->ed_ec;
1038 struct echo_lock *el; 989 struct echo_lock *el;
1039 990
1040 rc = cl_wait(env, lck); 991 el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
1041 if (rc == 0) { 992 spin_lock(&ec->ec_lock);
1042 el = cl2echo_lock(cl_lock_at(lck, &echo_device_type)); 993 if (list_empty(&el->el_chain)) {
1043 spin_lock(&ec->ec_lock); 994 list_add(&el->el_chain, &ec->ec_locks);
1044 if (list_empty(&el->el_chain)) { 995 el->el_cookie = ++ec->ec_unique;
1045 list_add(&el->el_chain, &ec->ec_locks);
1046 el->el_cookie = ++ec->ec_unique;
1047 }
1048 atomic_inc(&el->el_refcount);
1049 *cookie = el->el_cookie;
1050 spin_unlock(&ec->ec_lock);
1051 } else {
1052 cl_lock_release(env, lck, "ec enqueue", current);
1053 } 996 }
997 atomic_inc(&el->el_refcount);
998 *cookie = el->el_cookie;
999 spin_unlock(&ec->ec_lock);
1054 } 1000 }
1055 return rc; 1001 return rc;
1056} 1002}
@@ -1085,22 +1031,17 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
1085 return 0; 1031 return 0;
1086} 1032}
1087 1033
1088static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io, 1034static void echo_commit_callback(const struct lu_env *env, struct cl_io *io,
1089 enum cl_req_type unused, struct cl_2queue *queue) 1035 struct cl_page *page)
1090{ 1036{
1091 struct cl_page *clp; 1037 struct echo_thread_info *info;
1092 struct cl_page *temp; 1038 struct cl_2queue *queue;
1093 int result = 0;
1094 1039
1095 cl_page_list_for_each_safe(clp, temp, &queue->c2_qin) { 1040 info = echo_env_info(env);
1096 int rc; 1041 LASSERT(io == &info->eti_io);
1097 1042
1098 rc = cl_page_cache_add(env, io, clp, CRT_WRITE); 1043 queue = &info->eti_queue;
1099 if (rc == 0) 1044 cl_page_list_add(&queue->c2_qout, page);
1100 continue;
1101 result = result ?: rc;
1102 }
1103 return result;
1104} 1045}
1105 1046
1106static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, 1047static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
@@ -1119,7 +1060,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
1119 int rc; 1060 int rc;
1120 int i; 1061 int i;
1121 1062
1122 LASSERT((offset & ~CFS_PAGE_MASK) == 0); 1063 LASSERT((offset & ~PAGE_MASK) == 0);
1123 LASSERT(ed->ed_next); 1064 LASSERT(ed->ed_next);
1124 env = cl_env_get(&refcheck); 1065 env = cl_env_get(&refcheck);
1125 if (IS_ERR(env)) 1066 if (IS_ERR(env))
@@ -1179,7 +1120,9 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
1179 1120
1180 async = async && (typ == CRT_WRITE); 1121 async = async && (typ == CRT_WRITE);
1181 if (async) 1122 if (async)
1182 rc = cl_echo_async_brw(env, io, typ, queue); 1123 rc = cl_io_commit_async(env, io, &queue->c2_qin,
1124 0, PAGE_SIZE,
1125 echo_commit_callback);
1183 else 1126 else
1184 rc = cl_io_submit_sync(env, io, typ, queue, 0); 1127 rc = cl_io_submit_sync(env, io, typ, queue, 0);
1185 CDEBUG(D_INFO, "echo_client %s write returns %d\n", 1128 CDEBUG(D_INFO, "echo_client %s write returns %d\n",
@@ -1387,7 +1330,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
1387 LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ); 1330 LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
1388 1331
1389 if (count <= 0 || 1332 if (count <= 0 ||
1390 (count & (~CFS_PAGE_MASK)) != 0) 1333 (count & (~PAGE_MASK)) != 0)
1391 return -EINVAL; 1334 return -EINVAL;
1392 1335
1393 /* XXX think again with misaligned I/O */ 1336 /* XXX think again with misaligned I/O */
@@ -1409,7 +1352,6 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
1409 for (i = 0, pgp = pga, off = offset; 1352 for (i = 0, pgp = pga, off = offset;
1410 i < npages; 1353 i < npages;
1411 i++, pgp++, off += PAGE_SIZE) { 1354 i++, pgp++, off += PAGE_SIZE) {
1412
1413 LASSERT(!pgp->pg); /* for cleanup */ 1355 LASSERT(!pgp->pg); /* for cleanup */
1414 1356
1415 rc = -ENOMEM; 1357 rc = -ENOMEM;
@@ -1470,7 +1412,7 @@ static int echo_client_prep_commit(const struct lu_env *env,
1470 u64 npages, tot_pages; 1412 u64 npages, tot_pages;
1471 int i, ret = 0, brw_flags = 0; 1413 int i, ret = 0, brw_flags = 0;
1472 1414
1473 if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0) 1415 if (count <= 0 || (count & (~PAGE_MASK)) != 0)
1474 return -EINVAL; 1416 return -EINVAL;
1475 1417
1476 npages = batch >> PAGE_SHIFT; 1418 npages = batch >> PAGE_SHIFT;
@@ -1886,7 +1828,6 @@ static int __init obdecho_init(void)
1886static void /*__exit*/ obdecho_exit(void) 1828static void /*__exit*/ obdecho_exit(void)
1887{ 1829{
1888 echo_client_exit(); 1830 echo_client_exit();
1889
1890} 1831}
1891 1832
1892MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); 1833MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index a3358c39b2f1..33a113213bf5 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -121,9 +121,9 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
121 atomic_add(added, &osc_pool_req_count); 121 atomic_add(added, &osc_pool_req_count);
122 } 122 }
123 123
124 client_obd_list_lock(&cli->cl_loi_list_lock); 124 spin_lock(&cli->cl_loi_list_lock);
125 cli->cl_max_rpcs_in_flight = val; 125 cli->cl_max_rpcs_in_flight = val;
126 client_obd_list_unlock(&cli->cl_loi_list_lock); 126 spin_unlock(&cli->cl_loi_list_lock);
127 127
128 return count; 128 return count;
129} 129}
@@ -139,9 +139,9 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj,
139 long val; 139 long val;
140 int mult; 140 int mult;
141 141
142 client_obd_list_lock(&cli->cl_loi_list_lock); 142 spin_lock(&cli->cl_loi_list_lock);
143 val = cli->cl_dirty_max; 143 val = cli->cl_dirty_max;
144 client_obd_list_unlock(&cli->cl_loi_list_lock); 144 spin_unlock(&cli->cl_loi_list_lock);
145 145
146 mult = 1 << 20; 146 mult = 1 << 20;
147 return lprocfs_read_frac_helper(buf, PAGE_SIZE, val, mult); 147 return lprocfs_read_frac_helper(buf, PAGE_SIZE, val, mult);
@@ -169,10 +169,10 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
169 pages_number > totalram_pages / 4) /* 1/4 of RAM */ 169 pages_number > totalram_pages / 4) /* 1/4 of RAM */
170 return -ERANGE; 170 return -ERANGE;
171 171
172 client_obd_list_lock(&cli->cl_loi_list_lock); 172 spin_lock(&cli->cl_loi_list_lock);
173 cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT); 173 cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
174 osc_wake_cache_waiters(cli); 174 osc_wake_cache_waiters(cli);
175 client_obd_list_unlock(&cli->cl_loi_list_lock); 175 spin_unlock(&cli->cl_loi_list_lock);
176 176
177 return count; 177 return count;
178} 178}
@@ -222,8 +222,16 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
222 return -ERANGE; 222 return -ERANGE;
223 223
224 rc = atomic_read(&cli->cl_lru_in_list) - pages_number; 224 rc = atomic_read(&cli->cl_lru_in_list) - pages_number;
225 if (rc > 0) 225 if (rc > 0) {
226 (void)osc_lru_shrink(cli, rc); 226 struct lu_env *env;
227 int refcheck;
228
229 env = cl_env_get(&refcheck);
230 if (!IS_ERR(env)) {
231 (void)osc_lru_shrink(env, cli, rc, true);
232 cl_env_put(env, &refcheck);
233 }
234 }
227 235
228 return count; 236 return count;
229} 237}
@@ -239,9 +247,9 @@ static ssize_t cur_dirty_bytes_show(struct kobject *kobj,
239 struct client_obd *cli = &dev->u.cli; 247 struct client_obd *cli = &dev->u.cli;
240 int len; 248 int len;
241 249
242 client_obd_list_lock(&cli->cl_loi_list_lock); 250 spin_lock(&cli->cl_loi_list_lock);
243 len = sprintf(buf, "%lu\n", cli->cl_dirty); 251 len = sprintf(buf, "%lu\n", cli->cl_dirty);
244 client_obd_list_unlock(&cli->cl_loi_list_lock); 252 spin_unlock(&cli->cl_loi_list_lock);
245 253
246 return len; 254 return len;
247} 255}
@@ -256,9 +264,9 @@ static ssize_t cur_grant_bytes_show(struct kobject *kobj,
256 struct client_obd *cli = &dev->u.cli; 264 struct client_obd *cli = &dev->u.cli;
257 int len; 265 int len;
258 266
259 client_obd_list_lock(&cli->cl_loi_list_lock); 267 spin_lock(&cli->cl_loi_list_lock);
260 len = sprintf(buf, "%lu\n", cli->cl_avail_grant); 268 len = sprintf(buf, "%lu\n", cli->cl_avail_grant);
261 client_obd_list_unlock(&cli->cl_loi_list_lock); 269 spin_unlock(&cli->cl_loi_list_lock);
262 270
263 return len; 271 return len;
264} 272}
@@ -279,12 +287,12 @@ static ssize_t cur_grant_bytes_store(struct kobject *kobj,
279 return rc; 287 return rc;
280 288
281 /* this is only for shrinking grant */ 289 /* this is only for shrinking grant */
282 client_obd_list_lock(&cli->cl_loi_list_lock); 290 spin_lock(&cli->cl_loi_list_lock);
283 if (val >= cli->cl_avail_grant) { 291 if (val >= cli->cl_avail_grant) {
284 client_obd_list_unlock(&cli->cl_loi_list_lock); 292 spin_unlock(&cli->cl_loi_list_lock);
285 return -EINVAL; 293 return -EINVAL;
286 } 294 }
287 client_obd_list_unlock(&cli->cl_loi_list_lock); 295 spin_unlock(&cli->cl_loi_list_lock);
288 296
289 if (cli->cl_import->imp_state == LUSTRE_IMP_FULL) 297 if (cli->cl_import->imp_state == LUSTRE_IMP_FULL)
290 rc = osc_shrink_grant_to_target(cli, val); 298 rc = osc_shrink_grant_to_target(cli, val);
@@ -303,9 +311,9 @@ static ssize_t cur_lost_grant_bytes_show(struct kobject *kobj,
303 struct client_obd *cli = &dev->u.cli; 311 struct client_obd *cli = &dev->u.cli;
304 int len; 312 int len;
305 313
306 client_obd_list_lock(&cli->cl_loi_list_lock); 314 spin_lock(&cli->cl_loi_list_lock);
307 len = sprintf(buf, "%lu\n", cli->cl_lost_grant); 315 len = sprintf(buf, "%lu\n", cli->cl_lost_grant);
308 client_obd_list_unlock(&cli->cl_loi_list_lock); 316 spin_unlock(&cli->cl_loi_list_lock);
309 317
310 return len; 318 return len;
311} 319}
@@ -577,14 +585,31 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
577 if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) { 585 if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
578 return -ERANGE; 586 return -ERANGE;
579 } 587 }
580 client_obd_list_lock(&cli->cl_loi_list_lock); 588 spin_lock(&cli->cl_loi_list_lock);
581 cli->cl_max_pages_per_rpc = val; 589 cli->cl_max_pages_per_rpc = val;
582 client_obd_list_unlock(&cli->cl_loi_list_lock); 590 spin_unlock(&cli->cl_loi_list_lock);
583 591
584 return count; 592 return count;
585} 593}
586LUSTRE_RW_ATTR(max_pages_per_rpc); 594LUSTRE_RW_ATTR(max_pages_per_rpc);
587 595
596static ssize_t unstable_stats_show(struct kobject *kobj,
597 struct attribute *attr,
598 char *buf)
599{
600 struct obd_device *dev = container_of(kobj, struct obd_device,
601 obd_kobj);
602 struct client_obd *cli = &dev->u.cli;
603 int pages, mb;
604
605 pages = atomic_read(&cli->cl_unstable_count);
606 mb = (pages * PAGE_SIZE) >> 20;
607
608 return sprintf(buf, "unstable_pages: %8d\n"
609 "unstable_mb: %8d\n", pages, mb);
610}
611LUSTRE_RO_ATTR(unstable_stats);
612
588LPROC_SEQ_FOPS_RO_TYPE(osc, connect_flags); 613LPROC_SEQ_FOPS_RO_TYPE(osc, connect_flags);
589LPROC_SEQ_FOPS_RO_TYPE(osc, server_uuid); 614LPROC_SEQ_FOPS_RO_TYPE(osc, server_uuid);
590LPROC_SEQ_FOPS_RO_TYPE(osc, conn_uuid); 615LPROC_SEQ_FOPS_RO_TYPE(osc, conn_uuid);
@@ -623,7 +648,7 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
623 648
624 ktime_get_real_ts64(&now); 649 ktime_get_real_ts64(&now);
625 650
626 client_obd_list_lock(&cli->cl_loi_list_lock); 651 spin_lock(&cli->cl_loi_list_lock);
627 652
628 seq_printf(seq, "snapshot_time: %llu.%9lu (secs.usecs)\n", 653 seq_printf(seq, "snapshot_time: %llu.%9lu (secs.usecs)\n",
629 (s64)now.tv_sec, (unsigned long)now.tv_nsec); 654 (s64)now.tv_sec, (unsigned long)now.tv_nsec);
@@ -707,7 +732,7 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
707 break; 732 break;
708 } 733 }
709 734
710 client_obd_list_unlock(&cli->cl_loi_list_lock); 735 spin_unlock(&cli->cl_loi_list_lock);
711 736
712 return 0; 737 return 0;
713} 738}
@@ -794,6 +819,7 @@ static struct attribute *osc_attrs[] = {
794 &lustre_attr_max_pages_per_rpc.attr, 819 &lustre_attr_max_pages_per_rpc.attr,
795 &lustre_attr_max_rpcs_in_flight.attr, 820 &lustre_attr_max_rpcs_in_flight.attr,
796 &lustre_attr_resend_count.attr, 821 &lustre_attr_resend_count.attr,
822 &lustre_attr_unstable_stats.attr,
797 NULL, 823 NULL,
798}; 824};
799 825
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 5f25bf83dcfc..5a14bea961b4 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -76,6 +76,8 @@ static inline char *ext_flags(struct osc_extent *ext, char *flags)
76 *buf++ = ext->oe_rw ? 'r' : 'w'; 76 *buf++ = ext->oe_rw ? 'r' : 'w';
77 if (ext->oe_intree) 77 if (ext->oe_intree)
78 *buf++ = 'i'; 78 *buf++ = 'i';
79 if (ext->oe_sync)
80 *buf++ = 'S';
79 if (ext->oe_srvlock) 81 if (ext->oe_srvlock)
80 *buf++ = 's'; 82 *buf++ = 's';
81 if (ext->oe_hp) 83 if (ext->oe_hp)
@@ -121,9 +123,13 @@ static const char *oes_strings[] = {
121 __ext->oe_grants, __ext->oe_nr_pages, \ 123 __ext->oe_grants, __ext->oe_nr_pages, \
122 list_empty_marker(&__ext->oe_pages), \ 124 list_empty_marker(&__ext->oe_pages), \
123 waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \ 125 waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \
124 __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner, \ 126 __ext->oe_dlmlock, __ext->oe_mppr, __ext->oe_owner, \
125 /* ----- part 4 ----- */ \ 127 /* ----- part 4 ----- */ \
126 ## __VA_ARGS__); \ 128 ## __VA_ARGS__); \
129 if (lvl == D_ERROR && __ext->oe_dlmlock) \
130 LDLM_ERROR(__ext->oe_dlmlock, "extent: %p\n", __ext); \
131 else \
132 LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p\n", __ext); \
127} while (0) 133} while (0)
128 134
129#undef EASSERTF 135#undef EASSERTF
@@ -240,20 +246,25 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
240 goto out; 246 goto out;
241 } 247 }
242 248
243 if (!ext->oe_osclock && ext->oe_grants > 0) { 249 if (ext->oe_sync && ext->oe_grants > 0) {
244 rc = 90; 250 rc = 90;
245 goto out; 251 goto out;
246 } 252 }
247 253
248 if (ext->oe_osclock) { 254 if (ext->oe_dlmlock) {
249 struct cl_lock_descr *descr; 255 struct ldlm_extent *extent;
250 256
251 descr = &ext->oe_osclock->cll_descr; 257 extent = &ext->oe_dlmlock->l_policy_data.l_extent;
252 if (!(descr->cld_start <= ext->oe_start && 258 if (!(extent->start <= cl_offset(osc2cl(obj), ext->oe_start) &&
253 descr->cld_end >= ext->oe_max_end)) { 259 extent->end >= cl_offset(osc2cl(obj), ext->oe_max_end))) {
254 rc = 100; 260 rc = 100;
255 goto out; 261 goto out;
256 } 262 }
263
264 if (!(ext->oe_dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))) {
265 rc = 102;
266 goto out;
267 }
257 } 268 }
258 269
259 if (ext->oe_nr_pages > ext->oe_mppr) { 270 if (ext->oe_nr_pages > ext->oe_mppr) {
@@ -276,7 +287,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
276 287
277 page_count = 0; 288 page_count = 0;
278 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { 289 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
279 pgoff_t index = oap2cl_page(oap)->cp_index; 290 pgoff_t index = osc_index(oap2osc(oap));
280 ++page_count; 291 ++page_count;
281 if (index > ext->oe_end || index < ext->oe_start) { 292 if (index > ext->oe_end || index < ext->oe_start) {
282 rc = 110; 293 rc = 110;
@@ -359,7 +370,7 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
359 ext->oe_state = OES_INV; 370 ext->oe_state = OES_INV;
360 INIT_LIST_HEAD(&ext->oe_pages); 371 INIT_LIST_HEAD(&ext->oe_pages);
361 init_waitqueue_head(&ext->oe_waitq); 372 init_waitqueue_head(&ext->oe_waitq);
362 ext->oe_osclock = NULL; 373 ext->oe_dlmlock = NULL;
363 374
364 return ext; 375 return ext;
365} 376}
@@ -385,9 +396,11 @@ static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
385 LASSERT(ext->oe_state == OES_INV); 396 LASSERT(ext->oe_state == OES_INV);
386 LASSERT(!ext->oe_intree); 397 LASSERT(!ext->oe_intree);
387 398
388 if (ext->oe_osclock) { 399 if (ext->oe_dlmlock) {
389 cl_lock_put(env, ext->oe_osclock); 400 lu_ref_add(&ext->oe_dlmlock->l_reference,
390 ext->oe_osclock = NULL; 401 "osc_extent", ext);
402 LDLM_LOCK_PUT(ext->oe_dlmlock);
403 ext->oe_dlmlock = NULL;
391 } 404 }
392 osc_extent_free(ext); 405 osc_extent_free(ext);
393 } 406 }
@@ -543,7 +556,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
543 if (cur->oe_max_end != victim->oe_max_end) 556 if (cur->oe_max_end != victim->oe_max_end)
544 return -ERANGE; 557 return -ERANGE;
545 558
546 LASSERT(cur->oe_osclock == victim->oe_osclock); 559 LASSERT(cur->oe_dlmlock == victim->oe_dlmlock);
547 ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT; 560 ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
548 chunk_start = cur->oe_start >> ppc_bits; 561 chunk_start = cur->oe_start >> ppc_bits;
549 chunk_end = cur->oe_end >> ppc_bits; 562 chunk_end = cur->oe_end >> ppc_bits;
@@ -624,10 +637,10 @@ static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
624static struct osc_extent *osc_extent_find(const struct lu_env *env, 637static struct osc_extent *osc_extent_find(const struct lu_env *env,
625 struct osc_object *obj, pgoff_t index, 638 struct osc_object *obj, pgoff_t index,
626 int *grants) 639 int *grants)
627
628{ 640{
629 struct client_obd *cli = osc_cli(obj); 641 struct client_obd *cli = osc_cli(obj);
630 struct cl_lock *lock; 642 struct osc_lock *olck;
643 struct cl_lock_descr *descr;
631 struct osc_extent *cur; 644 struct osc_extent *cur;
632 struct osc_extent *ext; 645 struct osc_extent *ext;
633 struct osc_extent *conflict = NULL; 646 struct osc_extent *conflict = NULL;
@@ -644,8 +657,12 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
644 if (!cur) 657 if (!cur)
645 return ERR_PTR(-ENOMEM); 658 return ERR_PTR(-ENOMEM);
646 659
647 lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0); 660 olck = osc_env_io(env)->oi_write_osclock;
648 LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE); 661 LASSERTF(olck, "page %lu is not covered by lock\n", index);
662 LASSERT(olck->ols_state == OLS_GRANTED);
663
664 descr = &olck->ols_cl.cls_lock->cll_descr;
665 LASSERT(descr->cld_mode >= CLM_WRITE);
649 666
650 LASSERT(cli->cl_chunkbits >= PAGE_SHIFT); 667 LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
651 ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; 668 ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
@@ -657,19 +674,23 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
657 max_pages = cli->cl_max_pages_per_rpc; 674 max_pages = cli->cl_max_pages_per_rpc;
658 LASSERT((max_pages & ~chunk_mask) == 0); 675 LASSERT((max_pages & ~chunk_mask) == 0);
659 max_end = index - (index % max_pages) + max_pages - 1; 676 max_end = index - (index % max_pages) + max_pages - 1;
660 max_end = min_t(pgoff_t, max_end, lock->cll_descr.cld_end); 677 max_end = min_t(pgoff_t, max_end, descr->cld_end);
661 678
662 /* initialize new extent by parameters so far */ 679 /* initialize new extent by parameters so far */
663 cur->oe_max_end = max_end; 680 cur->oe_max_end = max_end;
664 cur->oe_start = index & chunk_mask; 681 cur->oe_start = index & chunk_mask;
665 cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1; 682 cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1;
666 if (cur->oe_start < lock->cll_descr.cld_start) 683 if (cur->oe_start < descr->cld_start)
667 cur->oe_start = lock->cll_descr.cld_start; 684 cur->oe_start = descr->cld_start;
668 if (cur->oe_end > max_end) 685 if (cur->oe_end > max_end)
669 cur->oe_end = max_end; 686 cur->oe_end = max_end;
670 cur->oe_osclock = lock;
671 cur->oe_grants = 0; 687 cur->oe_grants = 0;
672 cur->oe_mppr = max_pages; 688 cur->oe_mppr = max_pages;
689 if (olck->ols_dlmlock) {
690 LASSERT(olck->ols_hold);
691 cur->oe_dlmlock = LDLM_LOCK_GET(olck->ols_dlmlock);
692 lu_ref_add(&olck->ols_dlmlock->l_reference, "osc_extent", cur);
693 }
673 694
674 /* grants has been allocated by caller */ 695 /* grants has been allocated by caller */
675 LASSERTF(*grants >= chunksize + cli->cl_extent_tax, 696 LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
@@ -691,7 +712,7 @@ restart:
691 break; 712 break;
692 713
693 /* if covering by different locks, no chance to match */ 714 /* if covering by different locks, no chance to match */
694 if (lock != ext->oe_osclock) { 715 if (olck->ols_dlmlock != ext->oe_dlmlock) {
695 EASSERTF(!overlapped(ext, cur), ext, 716 EASSERTF(!overlapped(ext, cur), ext,
696 EXTSTR"\n", EXTPARA(cur)); 717 EXTSTR"\n", EXTPARA(cur));
697 718
@@ -795,7 +816,7 @@ restart:
795 if (found) { 816 if (found) {
796 LASSERT(!conflict); 817 LASSERT(!conflict);
797 if (!IS_ERR(found)) { 818 if (!IS_ERR(found)) {
798 LASSERT(found->oe_osclock == cur->oe_osclock); 819 LASSERT(found->oe_dlmlock == cur->oe_dlmlock);
799 OSC_EXTENT_DUMP(D_CACHE, found, 820 OSC_EXTENT_DUMP(D_CACHE, found,
800 "found caching ext for %lu.\n", index); 821 "found caching ext for %lu.\n", index);
801 } 822 }
@@ -810,7 +831,7 @@ restart:
810 found = osc_extent_hold(cur); 831 found = osc_extent_hold(cur);
811 osc_extent_insert(obj, cur); 832 osc_extent_insert(obj, cur);
812 OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n", 833 OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n",
813 index, lock->cll_descr.cld_end); 834 index, descr->cld_end);
814 } 835 }
815 osc_object_unlock(obj); 836 osc_object_unlock(obj);
816 837
@@ -856,6 +877,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
856 877
857 ext->oe_rc = rc ?: ext->oe_nr_pages; 878 ext->oe_rc = rc ?: ext->oe_nr_pages;
858 EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext); 879 EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
880
881 osc_lru_add_batch(cli, &ext->oe_pages);
859 list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) { 882 list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
860 list_del_init(&oap->oap_rpc_item); 883 list_del_init(&oap->oap_rpc_item);
861 list_del_init(&oap->oap_pending_item); 884 list_del_init(&oap->oap_pending_item);
@@ -877,10 +900,9 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
877 * span a whole chunk on the OST side, or our accounting goes 900 * span a whole chunk on the OST side, or our accounting goes
878 * wrong. Should match the code in filter_grant_check. 901 * wrong. Should match the code in filter_grant_check.
879 */ 902 */
880 int offset = oap->oap_page_off & ~CFS_PAGE_MASK; 903 int offset = last_off & ~PAGE_MASK;
881 int count = oap->oap_count + (offset & (blocksize - 1)); 904 int count = last_count + (offset & (blocksize - 1));
882 int end = (offset + oap->oap_count) & (blocksize - 1); 905 int end = (offset + last_count) & (blocksize - 1);
883
884 if (end) 906 if (end)
885 count += blocksize - end; 907 count += blocksize - end;
886 908
@@ -943,7 +965,7 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
943 "%s: wait ext to %d timedout, recovery in progress?\n", 965 "%s: wait ext to %d timedout, recovery in progress?\n",
944 osc_export(obj)->exp_obd->obd_name, state); 966 osc_export(obj)->exp_obd->obd_name, state);
945 967
946 lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); 968 lwi = LWI_INTR(NULL, NULL);
947 rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state), 969 rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
948 &lwi); 970 &lwi);
949 } 971 }
@@ -990,19 +1012,19 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
990 1012
991 /* discard all pages with index greater then trunc_index */ 1013 /* discard all pages with index greater then trunc_index */
992 list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) { 1014 list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
993 struct cl_page *sub = oap2cl_page(oap); 1015 pgoff_t index = osc_index(oap2osc(oap));
994 struct cl_page *page = cl_page_top(sub); 1016 struct cl_page *page = oap2cl_page(oap);
995 1017
996 LASSERT(list_empty(&oap->oap_rpc_item)); 1018 LASSERT(list_empty(&oap->oap_rpc_item));
997 1019
998 /* only discard the pages with their index greater than 1020 /* only discard the pages with their index greater than
999 * trunc_index, and ... 1021 * trunc_index, and ...
1000 */ 1022 */
1001 if (sub->cp_index < trunc_index || 1023 if (index < trunc_index ||
1002 (sub->cp_index == trunc_index && partial)) { 1024 (index == trunc_index && partial)) {
1003 /* accounting how many pages remaining in the chunk 1025 /* accounting how many pages remaining in the chunk
1004 * so that we can calculate grants correctly. */ 1026 * so that we can calculate grants correctly. */
1005 if (sub->cp_index >> ppc_bits == trunc_chunk) 1027 if (index >> ppc_bits == trunc_chunk)
1006 ++pages_in_chunk; 1028 ++pages_in_chunk;
1007 continue; 1029 continue;
1008 } 1030 }
@@ -1013,7 +1035,6 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
1013 lu_ref_add(&page->cp_reference, "truncate", current); 1035 lu_ref_add(&page->cp_reference, "truncate", current);
1014 1036
1015 if (cl_page_own(env, io, page) == 0) { 1037 if (cl_page_own(env, io, page) == 0) {
1016 cl_page_unmap(env, io, page);
1017 cl_page_discard(env, io, page); 1038 cl_page_discard(env, io, page);
1018 cl_page_disown(env, io, page); 1039 cl_page_disown(env, io, page);
1019 } else { 1040 } else {
@@ -1126,7 +1147,9 @@ static int osc_extent_make_ready(const struct lu_env *env,
1126 last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE); 1147 last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
1127 LASSERT(last->oap_count > 0); 1148 LASSERT(last->oap_count > 0);
1128 LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE); 1149 LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
1150 spin_lock(&last->oap_lock);
1129 last->oap_async_flags |= ASYNC_COUNT_STABLE; 1151 last->oap_async_flags |= ASYNC_COUNT_STABLE;
1152 spin_unlock(&last->oap_lock);
1130 } 1153 }
1131 1154
1132 /* for the rest of pages, we don't need to call osf_refresh_count() 1155 /* for the rest of pages, we don't need to call osf_refresh_count()
@@ -1135,7 +1158,9 @@ static int osc_extent_make_ready(const struct lu_env *env,
1135 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { 1158 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
1136 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) { 1159 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
1137 oap->oap_count = PAGE_SIZE - oap->oap_page_off; 1160 oap->oap_count = PAGE_SIZE - oap->oap_page_off;
1161 spin_lock(&last->oap_lock);
1138 oap->oap_async_flags |= ASYNC_COUNT_STABLE; 1162 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
1163 spin_unlock(&last->oap_lock);
1139 } 1164 }
1140 } 1165 }
1141 1166
@@ -1256,7 +1281,7 @@ static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
1256 int cmd) 1281 int cmd)
1257{ 1282{
1258 struct osc_page *opg = oap2osc_page(oap); 1283 struct osc_page *opg = oap2osc_page(oap);
1259 struct cl_page *page = cl_page_top(oap2cl_page(oap)); 1284 struct cl_page *page = oap2cl_page(oap);
1260 int result; 1285 int result;
1261 1286
1262 LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */ 1287 LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
@@ -1271,7 +1296,7 @@ static int osc_refresh_count(const struct lu_env *env,
1271 struct osc_async_page *oap, int cmd) 1296 struct osc_async_page *oap, int cmd)
1272{ 1297{
1273 struct osc_page *opg = oap2osc_page(oap); 1298 struct osc_page *opg = oap2osc_page(oap);
1274 struct cl_page *page = oap2cl_page(oap); 1299 pgoff_t index = osc_index(oap2osc(oap));
1275 struct cl_object *obj; 1300 struct cl_object *obj;
1276 struct cl_attr *attr = &osc_env_info(env)->oti_attr; 1301 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
1277 1302
@@ -1288,10 +1313,10 @@ static int osc_refresh_count(const struct lu_env *env,
1288 if (result < 0) 1313 if (result < 0)
1289 return result; 1314 return result;
1290 kms = attr->cat_kms; 1315 kms = attr->cat_kms;
1291 if (cl_offset(obj, page->cp_index) >= kms) 1316 if (cl_offset(obj, index) >= kms)
1292 /* catch race with truncate */ 1317 /* catch race with truncate */
1293 return 0; 1318 return 0;
1294 else if (cl_offset(obj, page->cp_index + 1) > kms) 1319 else if (cl_offset(obj, index + 1) > kms)
1295 /* catch sub-page write at end of file */ 1320 /* catch sub-page write at end of file */
1296 return kms % PAGE_SIZE; 1321 return kms % PAGE_SIZE;
1297 else 1322 else
@@ -1302,14 +1327,16 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
1302 int cmd, int rc) 1327 int cmd, int rc)
1303{ 1328{
1304 struct osc_page *opg = oap2osc_page(oap); 1329 struct osc_page *opg = oap2osc_page(oap);
1305 struct cl_page *page = cl_page_top(oap2cl_page(oap)); 1330 struct cl_page *page = oap2cl_page(oap);
1306 struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj); 1331 struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
1307 enum cl_req_type crt; 1332 enum cl_req_type crt;
1308 int srvlock; 1333 int srvlock;
1309 1334
1310 cmd &= ~OBD_BRW_NOQUOTA; 1335 cmd &= ~OBD_BRW_NOQUOTA;
1311 LASSERT(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ)); 1336 LASSERTF(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ),
1312 LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE)); 1337 "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
1338 LASSERTF(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE),
1339 "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
1313 LASSERT(opg->ops_transfer_pinned); 1340 LASSERT(opg->ops_transfer_pinned);
1314 1341
1315 /* 1342 /*
@@ -1358,22 +1385,28 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
1358 return 0; 1385 return 0;
1359} 1386}
1360 1387
1361#define OSC_DUMP_GRANT(cli, fmt, args...) do { \ 1388#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
1362 struct client_obd *__tmp = (cli); \ 1389 struct client_obd *__tmp = (cli); \
1363 CDEBUG(D_CACHE, "%s: { dirty: %ld/%ld dirty_pages: %d/%d " \ 1390 CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %d/%d " \
1364 "dropped: %ld avail: %ld, reserved: %ld, flight: %d } " fmt, \ 1391 "unstable_pages: %d/%d dropped: %ld avail: %ld, " \
1392 "reserved: %ld, flight: %d } lru {in list: %d, " \
1393 "left: %d, waiters: %d }" fmt, \
1365 __tmp->cl_import->imp_obd->obd_name, \ 1394 __tmp->cl_import->imp_obd->obd_name, \
1366 __tmp->cl_dirty, __tmp->cl_dirty_max, \ 1395 __tmp->cl_dirty, __tmp->cl_dirty_max, \
1367 atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \ 1396 atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \
1397 atomic_read(&obd_unstable_pages), obd_max_dirty_pages, \
1368 __tmp->cl_lost_grant, __tmp->cl_avail_grant, \ 1398 __tmp->cl_lost_grant, __tmp->cl_avail_grant, \
1369 __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, ##args); \ 1399 __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, \
1400 atomic_read(&__tmp->cl_lru_in_list), \
1401 atomic_read(&__tmp->cl_lru_busy), \
1402 atomic_read(&__tmp->cl_lru_shrinkers), ##args); \
1370} while (0) 1403} while (0)
1371 1404
1372/* caller must hold loi_list_lock */ 1405/* caller must hold loi_list_lock */
1373static void osc_consume_write_grant(struct client_obd *cli, 1406static void osc_consume_write_grant(struct client_obd *cli,
1374 struct brw_page *pga) 1407 struct brw_page *pga)
1375{ 1408{
1376 assert_spin_locked(&cli->cl_loi_list_lock.lock); 1409 assert_spin_locked(&cli->cl_loi_list_lock);
1377 LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT)); 1410 LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
1378 atomic_inc(&obd_dirty_pages); 1411 atomic_inc(&obd_dirty_pages);
1379 cli->cl_dirty += PAGE_SIZE; 1412 cli->cl_dirty += PAGE_SIZE;
@@ -1389,7 +1422,7 @@ static void osc_consume_write_grant(struct client_obd *cli,
1389static void osc_release_write_grant(struct client_obd *cli, 1422static void osc_release_write_grant(struct client_obd *cli,
1390 struct brw_page *pga) 1423 struct brw_page *pga)
1391{ 1424{
1392 assert_spin_locked(&cli->cl_loi_list_lock.lock); 1425 assert_spin_locked(&cli->cl_loi_list_lock);
1393 if (!(pga->flag & OBD_BRW_FROM_GRANT)) { 1426 if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
1394 return; 1427 return;
1395 } 1428 }
@@ -1408,7 +1441,7 @@ static void osc_release_write_grant(struct client_obd *cli,
1408 * To avoid sleeping with object lock held, it's good for us allocate enough 1441 * To avoid sleeping with object lock held, it's good for us allocate enough
1409 * grants before entering into critical section. 1442 * grants before entering into critical section.
1410 * 1443 *
1411 * client_obd_list_lock held by caller 1444 * spin_lock held by caller
1412 */ 1445 */
1413static int osc_reserve_grant(struct client_obd *cli, unsigned int bytes) 1446static int osc_reserve_grant(struct client_obd *cli, unsigned int bytes)
1414{ 1447{
@@ -1442,11 +1475,11 @@ static void __osc_unreserve_grant(struct client_obd *cli,
1442static void osc_unreserve_grant(struct client_obd *cli, 1475static void osc_unreserve_grant(struct client_obd *cli,
1443 unsigned int reserved, unsigned int unused) 1476 unsigned int reserved, unsigned int unused)
1444{ 1477{
1445 client_obd_list_lock(&cli->cl_loi_list_lock); 1478 spin_lock(&cli->cl_loi_list_lock);
1446 __osc_unreserve_grant(cli, reserved, unused); 1479 __osc_unreserve_grant(cli, reserved, unused);
1447 if (unused > 0) 1480 if (unused > 0)
1448 osc_wake_cache_waiters(cli); 1481 osc_wake_cache_waiters(cli);
1449 client_obd_list_unlock(&cli->cl_loi_list_lock); 1482 spin_unlock(&cli->cl_loi_list_lock);
1450} 1483}
1451 1484
1452/** 1485/**
@@ -1467,7 +1500,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
1467{ 1500{
1468 int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax; 1501 int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
1469 1502
1470 client_obd_list_lock(&cli->cl_loi_list_lock); 1503 spin_lock(&cli->cl_loi_list_lock);
1471 atomic_sub(nr_pages, &obd_dirty_pages); 1504 atomic_sub(nr_pages, &obd_dirty_pages);
1472 cli->cl_dirty -= nr_pages << PAGE_SHIFT; 1505 cli->cl_dirty -= nr_pages << PAGE_SHIFT;
1473 cli->cl_lost_grant += lost_grant; 1506 cli->cl_lost_grant += lost_grant;
@@ -1479,7 +1512,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
1479 cli->cl_avail_grant += grant; 1512 cli->cl_avail_grant += grant;
1480 } 1513 }
1481 osc_wake_cache_waiters(cli); 1514 osc_wake_cache_waiters(cli);
1482 client_obd_list_unlock(&cli->cl_loi_list_lock); 1515 spin_unlock(&cli->cl_loi_list_lock);
1483 CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n", 1516 CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
1484 lost_grant, cli->cl_lost_grant, 1517 lost_grant, cli->cl_lost_grant,
1485 cli->cl_avail_grant, cli->cl_dirty); 1518 cli->cl_avail_grant, cli->cl_dirty);
@@ -1491,9 +1524,9 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
1491 */ 1524 */
1492static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap) 1525static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
1493{ 1526{
1494 client_obd_list_lock(&cli->cl_loi_list_lock); 1527 spin_lock(&cli->cl_loi_list_lock);
1495 osc_release_write_grant(cli, &oap->oap_brw_page); 1528 osc_release_write_grant(cli, &oap->oap_brw_page);
1496 client_obd_list_unlock(&cli->cl_loi_list_lock); 1529 spin_unlock(&cli->cl_loi_list_lock);
1497} 1530}
1498 1531
1499/** 1532/**
@@ -1506,14 +1539,15 @@ static int osc_enter_cache_try(struct client_obd *cli,
1506{ 1539{
1507 int rc; 1540 int rc;
1508 1541
1509 OSC_DUMP_GRANT(cli, "need:%d.\n", bytes); 1542 OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
1510 1543
1511 rc = osc_reserve_grant(cli, bytes); 1544 rc = osc_reserve_grant(cli, bytes);
1512 if (rc < 0) 1545 if (rc < 0)
1513 return 0; 1546 return 0;
1514 1547
1515 if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max && 1548 if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
1516 atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) { 1549 atomic_read(&obd_unstable_pages) + 1 +
1550 atomic_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
1517 osc_consume_write_grant(cli, &oap->oap_brw_page); 1551 osc_consume_write_grant(cli, &oap->oap_brw_page);
1518 if (transient) { 1552 if (transient) {
1519 cli->cl_dirty_transit += PAGE_SIZE; 1553 cli->cl_dirty_transit += PAGE_SIZE;
@@ -1532,9 +1566,9 @@ static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
1532{ 1566{
1533 int rc; 1567 int rc;
1534 1568
1535 client_obd_list_lock(&cli->cl_loi_list_lock); 1569 spin_lock(&cli->cl_loi_list_lock);
1536 rc = list_empty(&ocw->ocw_entry); 1570 rc = list_empty(&ocw->ocw_entry);
1537 client_obd_list_unlock(&cli->cl_loi_list_lock); 1571 spin_unlock(&cli->cl_loi_list_lock);
1538 return rc; 1572 return rc;
1539} 1573}
1540 1574
@@ -1551,12 +1585,13 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
1551 struct osc_object *osc = oap->oap_obj; 1585 struct osc_object *osc = oap->oap_obj;
1552 struct lov_oinfo *loi = osc->oo_oinfo; 1586 struct lov_oinfo *loi = osc->oo_oinfo;
1553 struct osc_cache_waiter ocw; 1587 struct osc_cache_waiter ocw;
1554 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); 1588 struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
1589 LWI_ON_SIGNAL_NOOP, NULL);
1555 int rc = -EDQUOT; 1590 int rc = -EDQUOT;
1556 1591
1557 OSC_DUMP_GRANT(cli, "need:%d.\n", bytes); 1592 OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
1558 1593
1559 client_obd_list_lock(&cli->cl_loi_list_lock); 1594 spin_lock(&cli->cl_loi_list_lock);
1560 1595
1561 /* force the caller to try sync io. this can jump the list 1596 /* force the caller to try sync io. this can jump the list
1562 * of queued writes and create a discontiguous rpc stream 1597 * of queued writes and create a discontiguous rpc stream
@@ -1587,7 +1622,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
1587 while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) { 1622 while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
1588 list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters); 1623 list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
1589 ocw.ocw_rc = 0; 1624 ocw.ocw_rc = 0;
1590 client_obd_list_unlock(&cli->cl_loi_list_lock); 1625 spin_unlock(&cli->cl_loi_list_lock);
1591 1626
1592 osc_io_unplug_async(env, cli, NULL); 1627 osc_io_unplug_async(env, cli, NULL);
1593 1628
@@ -1596,10 +1631,17 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
1596 1631
1597 rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi); 1632 rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
1598 1633
1599 client_obd_list_lock(&cli->cl_loi_list_lock); 1634 spin_lock(&cli->cl_loi_list_lock);
1600 1635
1601 /* l_wait_event is interrupted by signal */ 1636 /* l_wait_event is interrupted by signal, or timed out */
1602 if (rc < 0) { 1637 if (rc < 0) {
1638 if (rc == -ETIMEDOUT) {
1639 OSC_DUMP_GRANT(D_ERROR, cli,
1640 "try to reserve %d.\n", bytes);
1641 osc_extent_tree_dump(D_ERROR, osc);
1642 rc = -EDQUOT;
1643 }
1644
1603 list_del_init(&ocw.ocw_entry); 1645 list_del_init(&ocw.ocw_entry);
1604 goto out; 1646 goto out;
1605 } 1647 }
@@ -1615,8 +1657,8 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
1615 } 1657 }
1616 } 1658 }
1617out: 1659out:
1618 client_obd_list_unlock(&cli->cl_loi_list_lock); 1660 spin_unlock(&cli->cl_loi_list_lock);
1619 OSC_DUMP_GRANT(cli, "returned %d.\n", rc); 1661 OSC_DUMP_GRANT(D_CACHE, cli, "returned %d.\n", rc);
1620 return rc; 1662 return rc;
1621} 1663}
1622 1664
@@ -1633,8 +1675,8 @@ void osc_wake_cache_waiters(struct client_obd *cli)
1633 ocw->ocw_rc = -EDQUOT; 1675 ocw->ocw_rc = -EDQUOT;
1634 /* we can't dirty more */ 1676 /* we can't dirty more */
1635 if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) || 1677 if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
1636 (atomic_read(&obd_dirty_pages) + 1 > 1678 (atomic_read(&obd_unstable_pages) + 1 +
1637 obd_max_dirty_pages)) { 1679 atomic_read(&obd_dirty_pages) > obd_max_dirty_pages)) {
1638 CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n", 1680 CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
1639 cli->cl_dirty, 1681 cli->cl_dirty,
1640 cli->cl_dirty_max, obd_max_dirty_pages); 1682 cli->cl_dirty_max, obd_max_dirty_pages);
@@ -1776,9 +1818,9 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
1776{ 1818{
1777 int is_ready; 1819 int is_ready;
1778 1820
1779 client_obd_list_lock(&cli->cl_loi_list_lock); 1821 spin_lock(&cli->cl_loi_list_lock);
1780 is_ready = __osc_list_maint(cli, osc); 1822 is_ready = __osc_list_maint(cli, osc);
1781 client_obd_list_unlock(&cli->cl_loi_list_lock); 1823 spin_unlock(&cli->cl_loi_list_lock);
1782 1824
1783 return is_ready; 1825 return is_ready;
1784} 1826}
@@ -1799,13 +1841,101 @@ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
1799 ar->ar_force_sync = 1; 1841 ar->ar_force_sync = 1;
1800 ar->ar_min_xid = ptlrpc_sample_next_xid(); 1842 ar->ar_min_xid = ptlrpc_sample_next_xid();
1801 return; 1843 return;
1802
1803 } 1844 }
1804 1845
1805 if (ar->ar_force_sync && (xid >= ar->ar_min_xid)) 1846 if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
1806 ar->ar_force_sync = 0; 1847 ar->ar_force_sync = 0;
1807} 1848}
1808 1849
1850/**
1851 * Performs "unstable" page accounting. This function balances the
1852 * increment operations performed in osc_inc_unstable_pages. It is
1853 * registered as the RPC request callback, and is executed when the
1854 * bulk RPC is committed on the server. Thus at this point, the pages
1855 * involved in the bulk transfer are no longer considered unstable.
1856 */
1857void osc_dec_unstable_pages(struct ptlrpc_request *req)
1858{
1859 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
1860 struct ptlrpc_bulk_desc *desc = req->rq_bulk;
1861 int page_count = desc->bd_iov_count;
1862 int i;
1863
1864 /* No unstable page tracking */
1865 if (!cli->cl_cache)
1866 return;
1867
1868 LASSERT(page_count >= 0);
1869
1870 for (i = 0; i < page_count; i++)
1871 dec_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
1872
1873 atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
1874 LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
1875
1876 atomic_sub(page_count, &cli->cl_unstable_count);
1877 LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
1878
1879 atomic_sub(page_count, &obd_unstable_pages);
1880 LASSERT(atomic_read(&obd_unstable_pages) >= 0);
1881
1882 spin_lock(&req->rq_lock);
1883 req->rq_committed = 1;
1884 req->rq_unstable = 0;
1885 spin_unlock(&req->rq_lock);
1886
1887 wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
1888}
1889
1890/* "unstable" page accounting. See: osc_dec_unstable_pages. */
1891void osc_inc_unstable_pages(struct ptlrpc_request *req)
1892{
1893 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
1894 struct ptlrpc_bulk_desc *desc = req->rq_bulk;
1895 long page_count = desc->bd_iov_count;
1896 int i;
1897
1898 /* No unstable page tracking */
1899 if (!cli->cl_cache)
1900 return;
1901
1902 LASSERT(page_count >= 0);
1903
1904 for (i = 0; i < page_count; i++)
1905 inc_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
1906
1907 LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
1908 atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
1909
1910 LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
1911 atomic_add(page_count, &cli->cl_unstable_count);
1912
1913 LASSERT(atomic_read(&obd_unstable_pages) >= 0);
1914 atomic_add(page_count, &obd_unstable_pages);
1915
1916 spin_lock(&req->rq_lock);
1917
1918 /*
1919 * If the request has already been committed (i.e. brw_commit
1920 * called via rq_commit_cb), we need to undo the unstable page
1921 * increments we just performed because rq_commit_cb wont be
1922 * called again. Otherwise, just set the commit callback so the
1923 * unstable page accounting is properly updated when the request
1924 * is committed
1925 */
1926 if (req->rq_committed) {
1927 /* Drop lock before calling osc_dec_unstable_pages */
1928 spin_unlock(&req->rq_lock);
1929 osc_dec_unstable_pages(req);
1930 spin_lock(&req->rq_lock);
1931 } else {
1932 req->rq_unstable = 1;
1933 req->rq_commit_cb = osc_dec_unstable_pages;
1934 }
1935
1936 spin_unlock(&req->rq_lock);
1937}
1938
1809/* this must be called holding the loi list lock to give coverage to exit_cache, 1939/* this must be called holding the loi list lock to give coverage to exit_cache,
1810 * async_flag maintenance, and oap_request 1940 * async_flag maintenance, and oap_request
1811 */ 1941 */
@@ -1817,6 +1947,9 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
1817 __u64 xid = 0; 1947 __u64 xid = 0;
1818 1948
1819 if (oap->oap_request) { 1949 if (oap->oap_request) {
1950 if (!rc)
1951 osc_inc_unstable_pages(oap->oap_request);
1952
1820 xid = ptlrpc_req_xid(oap->oap_request); 1953 xid = ptlrpc_req_xid(oap->oap_request);
1821 ptlrpc_req_finished(oap->oap_request); 1954 ptlrpc_req_finished(oap->oap_request);
1822 oap->oap_request = NULL; 1955 oap->oap_request = NULL;
@@ -1829,10 +1962,10 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
1829 oap->oap_interrupted = 0; 1962 oap->oap_interrupted = 0;
1830 1963
1831 if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) { 1964 if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
1832 client_obd_list_lock(&cli->cl_loi_list_lock); 1965 spin_lock(&cli->cl_loi_list_lock);
1833 osc_process_ar(&cli->cl_ar, xid, rc); 1966 osc_process_ar(&cli->cl_ar, xid, rc);
1834 osc_process_ar(&loi->loi_ar, xid, rc); 1967 osc_process_ar(&loi->loi_ar, xid, rc);
1835 client_obd_list_unlock(&cli->cl_loi_list_lock); 1968 spin_unlock(&cli->cl_loi_list_lock);
1836 } 1969 }
1837 1970
1838 rc = osc_completion(env, oap, oap->oap_cmd, rc); 1971 rc = osc_completion(env, oap, oap->oap_cmd, rc);
@@ -2133,9 +2266,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
2133 } 2266 }
2134 2267
2135 cl_object_get(obj); 2268 cl_object_get(obj);
2136 client_obd_list_unlock(&cli->cl_loi_list_lock); 2269 spin_unlock(&cli->cl_loi_list_lock);
2137 lu_object_ref_add_at(&obj->co_lu, &link, "check", 2270 lu_object_ref_add_at(&obj->co_lu, &link, "check", current);
2138 current);
2139 2271
2140 /* attempt some read/write balancing by alternating between 2272 /* attempt some read/write balancing by alternating between
2141 * reads and writes in an object. The makes_rpc checks here 2273 * reads and writes in an object. The makes_rpc checks here
@@ -2178,11 +2310,10 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
2178 osc_object_unlock(osc); 2310 osc_object_unlock(osc);
2179 2311
2180 osc_list_maint(cli, osc); 2312 osc_list_maint(cli, osc);
2181 lu_object_ref_del_at(&obj->co_lu, &link, "check", 2313 lu_object_ref_del_at(&obj->co_lu, &link, "check", current);
2182 current);
2183 cl_object_put(env, obj); 2314 cl_object_put(env, obj);
2184 2315
2185 client_obd_list_lock(&cli->cl_loi_list_lock); 2316 spin_lock(&cli->cl_loi_list_lock);
2186 } 2317 }
2187} 2318}
2188 2319
@@ -2199,9 +2330,9 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
2199 * potential stack overrun problem. LU-2859 2330 * potential stack overrun problem. LU-2859
2200 */ 2331 */
2201 atomic_inc(&cli->cl_lru_shrinkers); 2332 atomic_inc(&cli->cl_lru_shrinkers);
2202 client_obd_list_lock(&cli->cl_loi_list_lock); 2333 spin_lock(&cli->cl_loi_list_lock);
2203 osc_check_rpcs(env, cli); 2334 osc_check_rpcs(env, cli);
2204 client_obd_list_unlock(&cli->cl_loi_list_lock); 2335 spin_unlock(&cli->cl_loi_list_lock);
2205 atomic_dec(&cli->cl_lru_shrinkers); 2336 atomic_dec(&cli->cl_lru_shrinkers);
2206 } else { 2337 } else {
2207 CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli); 2338 CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
@@ -2238,7 +2369,7 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
2238 2369
2239 oap->oap_page = page; 2370 oap->oap_page = page;
2240 oap->oap_obj_off = offset; 2371 oap->oap_obj_off = offset;
2241 LASSERT(!(offset & ~CFS_PAGE_MASK)); 2372 LASSERT(!(offset & ~PAGE_MASK));
2242 2373
2243 if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE)) 2374 if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE))
2244 oap->oap_brw_flags = OBD_BRW_NOQUOTA; 2375 oap->oap_brw_flags = OBD_BRW_NOQUOTA;
@@ -2306,16 +2437,23 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
2306 return rc; 2437 return rc;
2307 } 2438 }
2308 2439
2440 if (osc_over_unstable_soft_limit(cli))
2441 brw_flags |= OBD_BRW_SOFT_SYNC;
2442
2309 oap->oap_cmd = cmd; 2443 oap->oap_cmd = cmd;
2310 oap->oap_page_off = ops->ops_from; 2444 oap->oap_page_off = ops->ops_from;
2311 oap->oap_count = ops->ops_to - ops->ops_from; 2445 oap->oap_count = ops->ops_to - ops->ops_from;
2446 /*
2447 * No need to hold a lock here,
2448 * since this page is not in any list yet.
2449 */
2312 oap->oap_async_flags = 0; 2450 oap->oap_async_flags = 0;
2313 oap->oap_brw_flags = brw_flags; 2451 oap->oap_brw_flags = brw_flags;
2314 2452
2315 OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n", 2453 OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n",
2316 oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK); 2454 oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK);
2317 2455
2318 index = oap2cl_page(oap)->cp_index; 2456 index = osc_index(oap2osc(oap));
2319 2457
2320 /* Add this page into extent by the following steps: 2458 /* Add this page into extent by the following steps:
2321 * 1. if there exists an active extent for this IO, mostly this page 2459 * 1. if there exists an active extent for this IO, mostly this page
@@ -2334,9 +2472,9 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
2334 grants = 0; 2472 grants = 0;
2335 2473
2336 /* it doesn't need any grant to dirty this page */ 2474 /* it doesn't need any grant to dirty this page */
2337 client_obd_list_lock(&cli->cl_loi_list_lock); 2475 spin_lock(&cli->cl_loi_list_lock);
2338 rc = osc_enter_cache_try(cli, oap, grants, 0); 2476 rc = osc_enter_cache_try(cli, oap, grants, 0);
2339 client_obd_list_unlock(&cli->cl_loi_list_lock); 2477 spin_unlock(&cli->cl_loi_list_lock);
2340 if (rc == 0) { /* try failed */ 2478 if (rc == 0) { /* try failed */
2341 grants = 0; 2479 grants = 0;
2342 need_release = 1; 2480 need_release = 1;
@@ -2427,21 +2565,21 @@ int osc_teardown_async_page(const struct lu_env *env,
2427 LASSERT(oap->oap_magic == OAP_MAGIC); 2565 LASSERT(oap->oap_magic == OAP_MAGIC);
2428 2566
2429 CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n", 2567 CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
2430 oap, ops, oap2cl_page(oap)->cp_index); 2568 oap, ops, osc_index(oap2osc(oap)));
2431 2569
2432 osc_object_lock(obj); 2570 osc_object_lock(obj);
2433 if (!list_empty(&oap->oap_rpc_item)) { 2571 if (!list_empty(&oap->oap_rpc_item)) {
2434 CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap); 2572 CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
2435 rc = -EBUSY; 2573 rc = -EBUSY;
2436 } else if (!list_empty(&oap->oap_pending_item)) { 2574 } else if (!list_empty(&oap->oap_pending_item)) {
2437 ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index); 2575 ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
2438 /* only truncated pages are allowed to be taken out. 2576 /* only truncated pages are allowed to be taken out.
2439 * See osc_extent_truncate() and osc_cache_truncate_start() 2577 * See osc_extent_truncate() and osc_cache_truncate_start()
2440 * for details. 2578 * for details.
2441 */ 2579 */
2442 if (ext && ext->oe_state != OES_TRUNC) { 2580 if (ext && ext->oe_state != OES_TRUNC) {
2443 OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n", 2581 OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
2444 oap2cl_page(oap)->cp_index); 2582 osc_index(oap2osc(oap)));
2445 rc = -EBUSY; 2583 rc = -EBUSY;
2446 } 2584 }
2447 } 2585 }
@@ -2464,7 +2602,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
2464 struct osc_extent *ext = NULL; 2602 struct osc_extent *ext = NULL;
2465 struct osc_object *obj = cl2osc(ops->ops_cl.cpl_obj); 2603 struct osc_object *obj = cl2osc(ops->ops_cl.cpl_obj);
2466 struct cl_page *cp = ops->ops_cl.cpl_page; 2604 struct cl_page *cp = ops->ops_cl.cpl_page;
2467 pgoff_t index = cp->cp_index; 2605 pgoff_t index = osc_index(ops);
2468 struct osc_async_page *oap = &ops->ops_oap; 2606 struct osc_async_page *oap = &ops->ops_oap;
2469 bool unplug = false; 2607 bool unplug = false;
2470 int rc = 0; 2608 int rc = 0;
@@ -2479,8 +2617,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
2479 switch (ext->oe_state) { 2617 switch (ext->oe_state) {
2480 case OES_RPC: 2618 case OES_RPC:
2481 case OES_LOCK_DONE: 2619 case OES_LOCK_DONE:
2482 CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(cp), 2620 CL_PAGE_DEBUG(D_ERROR, env, cp, "flush an in-rpc page?\n");
2483 "flush an in-rpc page?\n");
2484 LASSERT(0); 2621 LASSERT(0);
2485 break; 2622 break;
2486 case OES_LOCKING: 2623 case OES_LOCKING:
@@ -2506,7 +2643,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
2506 break; 2643 break;
2507 } 2644 }
2508 2645
2509 rc = cl_page_prep(env, io, cl_page_top(cp), CRT_WRITE); 2646 rc = cl_page_prep(env, io, cp, CRT_WRITE);
2510 if (rc) 2647 if (rc)
2511 goto out; 2648 goto out;
2512 2649
@@ -2550,7 +2687,7 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
2550 struct osc_extent *ext; 2687 struct osc_extent *ext;
2551 struct osc_extent *found = NULL; 2688 struct osc_extent *found = NULL;
2552 struct list_head *plist; 2689 struct list_head *plist;
2553 pgoff_t index = oap2cl_page(oap)->cp_index; 2690 pgoff_t index = osc_index(ops);
2554 int rc = -EBUSY; 2691 int rc = -EBUSY;
2555 int cmd; 2692 int cmd;
2556 2693
@@ -2613,12 +2750,12 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
2613 pgoff_t end = 0; 2750 pgoff_t end = 0;
2614 2751
2615 list_for_each_entry(oap, list, oap_pending_item) { 2752 list_for_each_entry(oap, list, oap_pending_item) {
2616 struct cl_page *cp = oap2cl_page(oap); 2753 pgoff_t index = osc_index(oap2osc(oap));
2617 2754
2618 if (cp->cp_index > end) 2755 if (index > end)
2619 end = cp->cp_index; 2756 end = index;
2620 if (cp->cp_index < start) 2757 if (index < start)
2621 start = cp->cp_index; 2758 start = index;
2622 ++page_count; 2759 ++page_count;
2623 mppr <<= (page_count > mppr); 2760 mppr <<= (page_count > mppr);
2624 } 2761 }
@@ -2633,6 +2770,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
2633 } 2770 }
2634 2771
2635 ext->oe_rw = !!(cmd & OBD_BRW_READ); 2772 ext->oe_rw = !!(cmd & OBD_BRW_READ);
2773 ext->oe_sync = 1;
2636 ext->oe_urgent = 1; 2774 ext->oe_urgent = 1;
2637 ext->oe_start = start; 2775 ext->oe_start = start;
2638 ext->oe_end = ext->oe_max_end = end; 2776 ext->oe_end = ext->oe_max_end = end;
@@ -2988,7 +3126,200 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
2988 result = rc; 3126 result = rc;
2989 } 3127 }
2990 3128
2991 OSC_IO_DEBUG(obj, "cache page out.\n"); 3129 OSC_IO_DEBUG(obj, "pageout [%lu, %lu], %d.\n", start, end, result);
3130 return result;
3131}
3132
3133/**
3134 * Returns a list of pages by a given [start, end] of \a obj.
3135 *
3136 * \param resched If not NULL, then we give up before hogging CPU for too
3137 * long and set *resched = 1, in that case caller should implement a retry
3138 * logic.
3139 *
3140 * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
3141 * crucial in the face of [offset, EOF] locks.
3142 *
3143 * Return at least one page in @queue unless there is no covered page.
3144 */
3145int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
3146 struct osc_object *osc, pgoff_t start, pgoff_t end,
3147 osc_page_gang_cbt cb, void *cbdata)
3148{
3149 struct osc_page *ops;
3150 void **pvec;
3151 pgoff_t idx;
3152 unsigned int nr;
3153 unsigned int i;
3154 unsigned int j;
3155 int res = CLP_GANG_OKAY;
3156 bool tree_lock = true;
3157
3158 idx = start;
3159 pvec = osc_env_info(env)->oti_pvec;
3160 spin_lock(&osc->oo_tree_lock);
3161 while ((nr = radix_tree_gang_lookup(&osc->oo_tree, pvec,
3162 idx, OTI_PVEC_SIZE)) > 0) {
3163 struct cl_page *page;
3164 bool end_of_region = false;
3165
3166 for (i = 0, j = 0; i < nr; ++i) {
3167 ops = pvec[i];
3168 pvec[i] = NULL;
3169
3170 idx = osc_index(ops);
3171 if (idx > end) {
3172 end_of_region = true;
3173 break;
3174 }
3175
3176 page = ops->ops_cl.cpl_page;
3177 LASSERT(page->cp_type == CPT_CACHEABLE);
3178 if (page->cp_state == CPS_FREEING)
3179 continue;
3180
3181 cl_page_get(page);
3182 lu_ref_add_atomic(&page->cp_reference,
3183 "gang_lookup", current);
3184 pvec[j++] = ops;
3185 }
3186 ++idx;
3187
3188 /*
3189 * Here a delicate locking dance is performed. Current thread
3190 * holds a reference to a page, but has to own it before it
3191 * can be placed into queue. Owning implies waiting, so
3192 * radix-tree lock is to be released. After a wait one has to
3193 * check that pages weren't truncated (cl_page_own() returns
3194 * error in the latter case).
3195 */
3196 spin_unlock(&osc->oo_tree_lock);
3197 tree_lock = false;
3198
3199 for (i = 0; i < j; ++i) {
3200 ops = pvec[i];
3201 if (res == CLP_GANG_OKAY)
3202 res = (*cb)(env, io, ops, cbdata);
3203
3204 page = ops->ops_cl.cpl_page;
3205 lu_ref_del(&page->cp_reference, "gang_lookup", current);
3206 cl_page_put(env, page);
3207 }
3208 if (nr < OTI_PVEC_SIZE || end_of_region)
3209 break;
3210
3211 if (res == CLP_GANG_OKAY && need_resched())
3212 res = CLP_GANG_RESCHED;
3213 if (res != CLP_GANG_OKAY)
3214 break;
3215
3216 spin_lock(&osc->oo_tree_lock);
3217 tree_lock = true;
3218 }
3219 if (tree_lock)
3220 spin_unlock(&osc->oo_tree_lock);
3221 return res;
3222}
3223
3224/**
3225 * Check if page @page is covered by an extra lock or discard it.
3226 */
3227static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
3228 struct osc_page *ops, void *cbdata)
3229{
3230 struct osc_thread_info *info = osc_env_info(env);
3231 struct osc_object *osc = cbdata;
3232 pgoff_t index;
3233
3234 index = osc_index(ops);
3235 if (index >= info->oti_fn_index) {
3236 struct ldlm_lock *tmp;
3237 struct cl_page *page = ops->ops_cl.cpl_page;
3238
3239 /* refresh non-overlapped index */
3240 tmp = osc_dlmlock_at_pgoff(env, osc, index, 0, 0);
3241 if (tmp) {
3242 __u64 end = tmp->l_policy_data.l_extent.end;
3243 /* Cache the first-non-overlapped index so as to skip
3244 * all pages within [index, oti_fn_index). This is safe
3245 * because if tmp lock is canceled, it will discard
3246 * these pages.
3247 */
3248 info->oti_fn_index = cl_index(osc2cl(osc), end + 1);
3249 if (end == OBD_OBJECT_EOF)
3250 info->oti_fn_index = CL_PAGE_EOF;
3251 LDLM_LOCK_PUT(tmp);
3252 } else if (cl_page_own(env, io, page) == 0) {
3253 /* discard the page */
3254 cl_page_discard(env, io, page);
3255 cl_page_disown(env, io, page);
3256 } else {
3257 LASSERT(page->cp_state == CPS_FREEING);
3258 }
3259 }
3260
3261 info->oti_next_index = index + 1;
3262 return CLP_GANG_OKAY;
3263}
3264
3265static int discard_cb(const struct lu_env *env, struct cl_io *io,
3266 struct osc_page *ops, void *cbdata)
3267{
3268 struct osc_thread_info *info = osc_env_info(env);
3269 struct cl_page *page = ops->ops_cl.cpl_page;
3270
3271 /* page is top page. */
3272 info->oti_next_index = osc_index(ops) + 1;
3273 if (cl_page_own(env, io, page) == 0) {
3274 KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
3275 !PageDirty(cl_page_vmpage(page))));
3276
3277 /* discard the page */
3278 cl_page_discard(env, io, page);
3279 cl_page_disown(env, io, page);
3280 } else {
3281 LASSERT(page->cp_state == CPS_FREEING);
3282 }
3283
3284 return CLP_GANG_OKAY;
3285}
3286
3287/**
3288 * Discard pages protected by the given lock. This function traverses radix
3289 * tree to find all covering pages and discard them. If a page is being covered
3290 * by other locks, it should remain in cache.
3291 *
3292 * If error happens on any step, the process continues anyway (the reasoning
3293 * behind this being that lock cancellation cannot be delayed indefinitely).
3294 */
3295int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
3296 pgoff_t start, pgoff_t end, enum cl_lock_mode mode)
3297{
3298 struct osc_thread_info *info = osc_env_info(env);
3299 struct cl_io *io = &info->oti_io;
3300 osc_page_gang_cbt cb;
3301 int res;
3302 int result;
3303
3304 io->ci_obj = cl_object_top(osc2cl(osc));
3305 io->ci_ignore_layout = 1;
3306 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
3307 if (result != 0)
3308 goto out;
3309
3310 cb = mode == CLM_READ ? check_and_discard_cb : discard_cb;
3311 info->oti_fn_index = info->oti_next_index = start;
3312 do {
3313 res = osc_page_gang_lookup(env, io, osc,
3314 info->oti_next_index, end, cb, osc);
3315 if (info->oti_next_index > end)
3316 break;
3317
3318 if (res == CLP_GANG_RESCHED)
3319 cond_resched();
3320 } while (res != CLP_GANG_OKAY);
3321out:
3322 cl_io_fini(env, io);
2992 return result; 3323 return result;
2993} 3324}
2994 3325
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index d55d04d0428b..ae19d396b537 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -51,7 +51,6 @@
51#include "../include/obd.h" 51#include "../include/obd.h"
52/* osc_build_res_name() */ 52/* osc_build_res_name() */
53#include "../include/cl_object.h" 53#include "../include/cl_object.h"
54#include "../include/lclient.h"
55#include "osc_internal.h" 54#include "osc_internal.h"
56 55
57/** \defgroup osc osc 56/** \defgroup osc osc
@@ -68,6 +67,9 @@ struct osc_io {
68 struct cl_io_slice oi_cl; 67 struct cl_io_slice oi_cl;
69 /** true if this io is lockless. */ 68 /** true if this io is lockless. */
70 int oi_lockless; 69 int oi_lockless;
70 /** how many LRU pages are reserved for this IO */
71 int oi_lru_reserved;
72
71 /** active extents, we know how many bytes is going to be written, 73 /** active extents, we know how many bytes is going to be written,
72 * so having an active extent will prevent it from being fragmented 74 * so having an active extent will prevent it from being fragmented
73 */ 75 */
@@ -77,6 +79,8 @@ struct osc_io {
77 */ 79 */
78 struct osc_extent *oi_trunc; 80 struct osc_extent *oi_trunc;
79 81
82 /** write osc_lock for this IO, used by osc_extent_find(). */
83 struct osc_lock *oi_write_osclock;
80 struct obd_info oi_info; 84 struct obd_info oi_info;
81 struct obdo oi_oa; 85 struct obdo oi_oa;
82 struct osc_async_cbargs { 86 struct osc_async_cbargs {
@@ -100,7 +104,7 @@ struct osc_session {
100 struct osc_io os_io; 104 struct osc_io os_io;
101}; 105};
102 106
103#define OTI_PVEC_SIZE 64 107#define OTI_PVEC_SIZE 256
104struct osc_thread_info { 108struct osc_thread_info {
105 struct ldlm_res_id oti_resname; 109 struct ldlm_res_id oti_resname;
106 ldlm_policy_data_t oti_policy; 110 ldlm_policy_data_t oti_policy;
@@ -109,7 +113,13 @@ struct osc_thread_info {
109 struct lustre_handle oti_handle; 113 struct lustre_handle oti_handle;
110 struct cl_page_list oti_plist; 114 struct cl_page_list oti_plist;
111 struct cl_io oti_io; 115 struct cl_io oti_io;
112 struct cl_page *oti_pvec[OTI_PVEC_SIZE]; 116 void *oti_pvec[OTI_PVEC_SIZE];
117 /**
118 * Fields used by cl_lock_discard_pages().
119 */
120 pgoff_t oti_next_index;
121 pgoff_t oti_fn_index; /* first non-overlapped index */
122 struct cl_sync_io oti_anchor;
113}; 123};
114 124
115struct osc_object { 125struct osc_object {
@@ -125,7 +135,7 @@ struct osc_object {
125 */ 135 */
126 struct list_head oo_inflight[CRT_NR]; 136 struct list_head oo_inflight[CRT_NR];
127 /** 137 /**
128 * Lock, protecting ccc_object::cob_inflight, because a seat-belt is 138 * Lock, protecting osc_page::ops_inflight, because a seat-belt is
129 * locked during take-off and landing. 139 * locked during take-off and landing.
130 */ 140 */
131 spinlock_t oo_seatbelt; 141 spinlock_t oo_seatbelt;
@@ -159,6 +169,17 @@ struct osc_object {
159 * oo_{read|write}_pages soon. 169 * oo_{read|write}_pages soon.
160 */ 170 */
161 spinlock_t oo_lock; 171 spinlock_t oo_lock;
172
173 /**
174 * Radix tree for caching pages
175 */
176 struct radix_tree_root oo_tree;
177 spinlock_t oo_tree_lock;
178 unsigned long oo_npages;
179
180 /* Protect osc_lock this osc_object has */
181 spinlock_t oo_ol_spin;
182 struct list_head oo_ol_list;
162}; 183};
163 184
164static inline void osc_object_lock(struct osc_object *obj) 185static inline void osc_object_lock(struct osc_object *obj)
@@ -198,8 +219,6 @@ enum osc_lock_state {
198 OLS_ENQUEUED, 219 OLS_ENQUEUED,
199 OLS_UPCALL_RECEIVED, 220 OLS_UPCALL_RECEIVED,
200 OLS_GRANTED, 221 OLS_GRANTED,
201 OLS_RELEASED,
202 OLS_BLOCKED,
203 OLS_CANCELLED 222 OLS_CANCELLED
204}; 223};
205 224
@@ -208,10 +227,8 @@ enum osc_lock_state {
208 * 227 *
209 * Interaction with DLM. 228 * Interaction with DLM.
210 * 229 *
211 * CLIO enqueues all DLM locks through ptlrpcd (that is, in "async" mode).
212 *
213 * Once receive upcall is invoked, osc_lock remembers a handle of DLM lock in 230 * Once receive upcall is invoked, osc_lock remembers a handle of DLM lock in
214 * osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_lock. 231 * osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_dlmlock.
215 * 232 *
216 * This pointer is protected through a reference, acquired by 233 * This pointer is protected through a reference, acquired by
217 * osc_lock_upcall0(). Also, an additional reference is acquired by 234 * osc_lock_upcall0(). Also, an additional reference is acquired by
@@ -249,26 +266,27 @@ enum osc_lock_state {
249 */ 266 */
250struct osc_lock { 267struct osc_lock {
251 struct cl_lock_slice ols_cl; 268 struct cl_lock_slice ols_cl;
269 /** Internal lock to protect states, etc. */
270 spinlock_t ols_lock;
271 /** Owner sleeps on this channel for state change */
272 struct cl_sync_io *ols_owner;
273 /** waiting list for this lock to be cancelled */
274 struct list_head ols_waiting_list;
275 /** wait entry of ols_waiting_list */
276 struct list_head ols_wait_entry;
277 /** list entry for osc_object::oo_ol_list */
278 struct list_head ols_nextlock_oscobj;
279
252 /** underlying DLM lock */ 280 /** underlying DLM lock */
253 struct ldlm_lock *ols_lock; 281 struct ldlm_lock *ols_dlmlock;
254 /** lock value block */
255 struct ost_lvb ols_lvb;
256 /** DLM flags with which osc_lock::ols_lock was enqueued */ 282 /** DLM flags with which osc_lock::ols_lock was enqueued */
257 __u64 ols_flags; 283 __u64 ols_flags;
258 /** osc_lock::ols_lock handle */ 284 /** osc_lock::ols_lock handle */
259 struct lustre_handle ols_handle; 285 struct lustre_handle ols_handle;
260 struct ldlm_enqueue_info ols_einfo; 286 struct ldlm_enqueue_info ols_einfo;
261 enum osc_lock_state ols_state; 287 enum osc_lock_state ols_state;
262 288 /** lock value block */
263 /** 289 struct ost_lvb ols_lvb;
264 * How many pages are using this lock for io, currently only used by
265 * read-ahead. If non-zero, the underlying dlm lock won't be cancelled
266 * during recovery to avoid deadlock. see bz16774.
267 *
268 * \see osc_page::ops_lock
269 * \see osc_page_addref_lock(), osc_page_putref_lock()
270 */
271 atomic_t ols_pageref;
272 290
273 /** 291 /**
274 * true, if ldlm_lock_addref() was called against 292 * true, if ldlm_lock_addref() was called against
@@ -299,16 +317,6 @@ struct osc_lock {
299 */ 317 */
300 ols_locklessable:1, 318 ols_locklessable:1,
301 /** 319 /**
302 * set by osc_lock_use() to wait until blocking AST enters into
303 * osc_ldlm_blocking_ast0(), so that cl_lock mutex can be used for
304 * further synchronization.
305 */
306 ols_ast_wait:1,
307 /**
308 * If the data of this lock has been flushed to server side.
309 */
310 ols_flush:1,
311 /**
312 * if set, the osc_lock is a glimpse lock. For glimpse locks, we treat 320 * if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
313 * the EVAVAIL error as tolerable, this will make upper logic happy 321 * the EVAVAIL error as tolerable, this will make upper logic happy
314 * to wait all glimpse locks to each OSTs to be completed. 322 * to wait all glimpse locks to each OSTs to be completed.
@@ -321,15 +329,6 @@ struct osc_lock {
321 * For async glimpse lock. 329 * For async glimpse lock.
322 */ 330 */
323 ols_agl:1; 331 ols_agl:1;
324 /**
325 * IO that owns this lock. This field is used for a dead-lock
326 * avoidance by osc_lock_enqueue_wait().
327 *
328 * XXX: unfortunately, the owner of a osc_lock is not unique,
329 * the lock may have multiple users, if the lock is granted and
330 * then matched.
331 */
332 struct osc_io *ols_owner;
333}; 332};
334 333
335/** 334/**
@@ -369,18 +368,15 @@ struct osc_page {
369 * Set if the page must be transferred with OBD_BRW_SRVLOCK. 368 * Set if the page must be transferred with OBD_BRW_SRVLOCK.
370 */ 369 */
371 ops_srvlock:1; 370 ops_srvlock:1;
372 union { 371 /**
373 /** 372 * lru page list. See osc_lru_{del|use}() in osc_page.c for usage.
374 * lru page list. ops_inflight and ops_lru are exclusive so 373 */
375 * that they can share the same data. 374 struct list_head ops_lru;
376 */ 375 /**
377 struct list_head ops_lru; 376 * Linkage into a per-osc_object list of pages in flight. For
378 /** 377 * debugging.
379 * Linkage into a per-osc_object list of pages in flight. For 378 */
380 * debugging. 379 struct list_head ops_inflight;
381 */
382 struct list_head ops_inflight;
383 };
384 /** 380 /**
385 * Thread that submitted this page for transfer. For debugging. 381 * Thread that submitted this page for transfer. For debugging.
386 */ 382 */
@@ -389,16 +385,6 @@ struct osc_page {
389 * Submit time - the time when the page is starting RPC. For debugging. 385 * Submit time - the time when the page is starting RPC. For debugging.
390 */ 386 */
391 unsigned long ops_submit_time; 387 unsigned long ops_submit_time;
392
393 /**
394 * A lock of which we hold a reference covers this page. Only used by
395 * read-ahead: for a readahead page, we hold it's covering lock to
396 * prevent it from being canceled during recovery.
397 *
398 * \see osc_lock::ols_pageref
399 * \see osc_page_addref_lock(), osc_page_putref_lock().
400 */
401 struct cl_lock *ops_lock;
402}; 388};
403 389
404extern struct kmem_cache *osc_lock_kmem; 390extern struct kmem_cache *osc_lock_kmem;
@@ -417,21 +403,22 @@ extern struct lu_context_key osc_session_key;
417int osc_lock_init(const struct lu_env *env, 403int osc_lock_init(const struct lu_env *env,
418 struct cl_object *obj, struct cl_lock *lock, 404 struct cl_object *obj, struct cl_lock *lock,
419 const struct cl_io *io); 405 const struct cl_io *io);
420int osc_io_init (const struct lu_env *env, 406int osc_io_init(const struct lu_env *env,
421 struct cl_object *obj, struct cl_io *io); 407 struct cl_object *obj, struct cl_io *io);
422int osc_req_init (const struct lu_env *env, struct cl_device *dev, 408int osc_req_init(const struct lu_env *env, struct cl_device *dev,
423 struct cl_req *req); 409 struct cl_req *req);
424struct lu_object *osc_object_alloc(const struct lu_env *env, 410struct lu_object *osc_object_alloc(const struct lu_env *env,
425 const struct lu_object_header *hdr, 411 const struct lu_object_header *hdr,
426 struct lu_device *dev); 412 struct lu_device *dev);
427int osc_page_init(const struct lu_env *env, struct cl_object *obj, 413int osc_page_init(const struct lu_env *env, struct cl_object *obj,
428 struct cl_page *page, struct page *vmpage); 414 struct cl_page *page, pgoff_t ind);
429 415
430void osc_index2policy (ldlm_policy_data_t *policy, const struct cl_object *obj, 416void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
431 pgoff_t start, pgoff_t end); 417 pgoff_t start, pgoff_t end);
432int osc_lvb_print (const struct lu_env *env, void *cookie, 418int osc_lvb_print(const struct lu_env *env, void *cookie,
433 lu_printer_t p, const struct ost_lvb *lvb); 419 lu_printer_t p, const struct ost_lvb *lvb);
434 420
421void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
435void osc_page_submit(const struct lu_env *env, struct osc_page *opg, 422void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
436 enum cl_req_type crt, int brw_flags); 423 enum cl_req_type crt, int brw_flags);
437int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops); 424int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops);
@@ -441,6 +428,8 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
441 struct page *page, loff_t offset); 428 struct page *page, loff_t offset);
442int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, 429int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
443 struct osc_page *ops); 430 struct osc_page *ops);
431int osc_page_cache_add(const struct lu_env *env,
432 const struct cl_page_slice *slice, struct cl_io *io);
444int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj, 433int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
445 struct osc_page *ops); 434 struct osc_page *ops);
446int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, 435int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
@@ -457,12 +446,13 @@ int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
457 pgoff_t start, pgoff_t end); 446 pgoff_t start, pgoff_t end);
458void osc_io_unplug(const struct lu_env *env, struct client_obd *cli, 447void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
459 struct osc_object *osc); 448 struct osc_object *osc);
449int lru_queue_work(const struct lu_env *env, void *data);
460 450
461void osc_object_set_contended (struct osc_object *obj); 451void osc_object_set_contended(struct osc_object *obj);
462void osc_object_clear_contended(struct osc_object *obj); 452void osc_object_clear_contended(struct osc_object *obj);
463int osc_object_is_contended (struct osc_object *obj); 453int osc_object_is_contended(struct osc_object *obj);
464 454
465int osc_lock_is_lockless (const struct osc_lock *olck); 455int osc_lock_is_lockless(const struct osc_lock *olck);
466 456
467/***************************************************************************** 457/*****************************************************************************
468 * 458 *
@@ -558,6 +548,11 @@ static inline struct osc_page *oap2osc(struct osc_async_page *oap)
558 return container_of0(oap, struct osc_page, ops_oap); 548 return container_of0(oap, struct osc_page, ops_oap);
559} 549}
560 550
551static inline pgoff_t osc_index(struct osc_page *opg)
552{
553 return opg->ops_cl.cpl_index;
554}
555
561static inline struct cl_page *oap2cl_page(struct osc_async_page *oap) 556static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
562{ 557{
563 return oap2osc(oap)->ops_cl.cpl_page; 558 return oap2osc(oap)->ops_cl.cpl_page;
@@ -608,7 +603,7 @@ enum osc_extent_state {
608 * 603 *
609 * LOCKING ORDER 604 * LOCKING ORDER
610 * ============= 605 * =============
611 * page lock -> client_obd_list_lock -> object lock(osc_object::oo_lock) 606 * page lock -> cl_loi_list_lock -> object lock(osc_object::oo_lock)
612 */ 607 */
613struct osc_extent { 608struct osc_extent {
614 /** red-black tree node */ 609 /** red-black tree node */
@@ -627,6 +622,8 @@ struct osc_extent {
627 unsigned int oe_intree:1, 622 unsigned int oe_intree:1,
628 /** 0 is write, 1 is read */ 623 /** 0 is write, 1 is read */
629 oe_rw:1, 624 oe_rw:1,
625 /** sync extent, queued by osc_queue_sync_pages() */
626 oe_sync:1,
630 oe_srvlock:1, 627 oe_srvlock:1,
631 oe_memalloc:1, 628 oe_memalloc:1,
632 /** an ACTIVE extent is going to be truncated, so when this extent 629 /** an ACTIVE extent is going to be truncated, so when this extent
@@ -675,7 +672,7 @@ struct osc_extent {
675 */ 672 */
676 wait_queue_head_t oe_waitq; 673 wait_queue_head_t oe_waitq;
677 /** lock covering this extent */ 674 /** lock covering this extent */
678 struct cl_lock *oe_osclock; 675 struct ldlm_lock *oe_dlmlock;
679 /** terminator of this extent. Must be true if this extent is in IO. */ 676 /** terminator of this extent. Must be true if this extent is in IO. */
680 struct task_struct *oe_owner; 677 struct task_struct *oe_owner;
681 /** return value of writeback. If somebody is waiting for this extent, 678 /** return value of writeback. If somebody is waiting for this extent,
@@ -690,6 +687,14 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
690 int sent, int rc); 687 int sent, int rc);
691void osc_extent_release(const struct lu_env *env, struct osc_extent *ext); 688void osc_extent_release(const struct lu_env *env, struct osc_extent *ext);
692 689
690int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
691 pgoff_t start, pgoff_t end, enum cl_lock_mode mode);
692
693typedef int (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
694 struct osc_page *, void *);
695int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
696 struct osc_object *osc, pgoff_t start, pgoff_t end,
697 osc_page_gang_cbt cb, void *cbdata);
693/** @} osc */ 698/** @} osc */
694 699
695#endif /* OSC_CL_INTERNAL_H */ 700#endif /* OSC_CL_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index ea695c2099ee..7fad8278150f 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -83,6 +83,12 @@ struct osc_async_page {
83#define oap_count oap_brw_page.count 83#define oap_count oap_brw_page.count
84#define oap_brw_flags oap_brw_page.flag 84#define oap_brw_flags oap_brw_page.flag
85 85
86static inline struct osc_async_page *brw_page2oap(struct brw_page *pga)
87{
88 return (struct osc_async_page *)container_of(pga, struct osc_async_page,
89 oap_brw_page);
90}
91
86struct osc_cache_waiter { 92struct osc_cache_waiter {
87 struct list_head ocw_entry; 93 struct list_head ocw_entry;
88 wait_queue_head_t ocw_waitq; 94 wait_queue_head_t ocw_waitq;
@@ -102,12 +108,14 @@ void osc_update_next_shrink(struct client_obd *cli);
102 108
103extern struct ptlrpc_request_set *PTLRPCD_SET; 109extern struct ptlrpc_request_set *PTLRPCD_SET;
104 110
111typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
112 int rc);
113
105int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, 114int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
106 __u64 *flags, ldlm_policy_data_t *policy, 115 __u64 *flags, ldlm_policy_data_t *policy,
107 struct ost_lvb *lvb, int kms_valid, 116 struct ost_lvb *lvb, int kms_valid,
108 obd_enqueue_update_f upcall, 117 osc_enqueue_upcall_f upcall,
109 void *cookie, struct ldlm_enqueue_info *einfo, 118 void *cookie, struct ldlm_enqueue_info *einfo,
110 struct lustre_handle *lockh,
111 struct ptlrpc_request_set *rqset, int async, int agl); 119 struct ptlrpc_request_set *rqset, int async, int agl);
112int osc_cancel_base(struct lustre_handle *lockh, __u32 mode); 120int osc_cancel_base(struct lustre_handle *lockh, __u32 mode);
113 121
@@ -130,9 +138,11 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
130int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *cfg); 138int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *cfg);
131int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, 139int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
132 struct list_head *ext_list, int cmd); 140 struct list_head *ext_list, int cmd);
133int osc_lru_shrink(struct client_obd *cli, int target); 141int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
142 int target, bool force);
143int osc_lru_reclaim(struct client_obd *cli);
134 144
135extern spinlock_t osc_ast_guard; 145unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
136 146
137int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg); 147int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
138 148
@@ -173,8 +183,6 @@ static inline struct osc_device *obd2osc_dev(const struct obd_device *d)
173 return container_of0(d->obd_lu_dev, struct osc_device, od_cl.cd_lu_dev); 183 return container_of0(d->obd_lu_dev, struct osc_device, od_cl.cd_lu_dev);
174} 184}
175 185
176int osc_dlm_lock_pageref(struct ldlm_lock *dlm);
177
178extern struct kmem_cache *osc_quota_kmem; 186extern struct kmem_cache *osc_quota_kmem;
179struct osc_quota_info { 187struct osc_quota_info {
180 /** linkage for quota hash table */ 188 /** linkage for quota hash table */
@@ -192,5 +200,12 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
192int osc_quotacheck(struct obd_device *unused, struct obd_export *exp, 200int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
193 struct obd_quotactl *oqctl); 201 struct obd_quotactl *oqctl);
194int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk); 202int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk);
203void osc_inc_unstable_pages(struct ptlrpc_request *req);
204void osc_dec_unstable_pages(struct ptlrpc_request *req);
205int osc_over_unstable_soft_limit(struct client_obd *cli);
206
207struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
208 struct osc_object *obj, pgoff_t index,
209 int pending, int canceling);
195 210
196#endif /* OSC_INTERNAL_H */ 211#endif /* OSC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 6bd0a45d8b06..d534b0e0edf6 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -68,11 +68,15 @@ static struct osc_io *cl2osc_io(const struct lu_env *env,
68 return oio; 68 return oio;
69} 69}
70 70
71static struct osc_page *osc_cl_page_osc(struct cl_page *page) 71static struct osc_page *osc_cl_page_osc(struct cl_page *page,
72 struct osc_object *osc)
72{ 73{
73 const struct cl_page_slice *slice; 74 const struct cl_page_slice *slice;
74 75
75 slice = cl_page_at(page, &osc_device_type); 76 if (osc)
77 slice = cl_object_page_slice(&osc->oo_cl, page);
78 else
79 slice = cl_page_at(page, &osc_device_type);
76 LASSERT(slice); 80 LASSERT(slice);
77 81
78 return cl2osc_page(slice); 82 return cl2osc_page(slice);
@@ -137,7 +141,7 @@ static int osc_io_submit(const struct lu_env *env,
137 io = page->cp_owner; 141 io = page->cp_owner;
138 LASSERT(io); 142 LASSERT(io);
139 143
140 opg = osc_cl_page_osc(page); 144 opg = osc_cl_page_osc(page, osc);
141 oap = &opg->ops_oap; 145 oap = &opg->ops_oap;
142 LASSERT(osc == oap->oap_obj); 146 LASSERT(osc == oap->oap_obj);
143 147
@@ -164,8 +168,10 @@ static int osc_io_submit(const struct lu_env *env,
164 } 168 }
165 169
166 cl_page_list_move(qout, qin, page); 170 cl_page_list_move(qout, qin, page);
171 spin_lock(&oap->oap_lock);
167 oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY; 172 oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
168 oap->oap_async_flags |= ASYNC_COUNT_STABLE; 173 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
174 spin_unlock(&oap->oap_lock);
169 175
170 osc_page_submit(env, opg, crt, brw_flags); 176 osc_page_submit(env, opg, crt, brw_flags);
171 list_add_tail(&oap->oap_pending_item, &list); 177 list_add_tail(&oap->oap_pending_item, &list);
@@ -185,6 +191,13 @@ static int osc_io_submit(const struct lu_env *env,
185 return qout->pl_nr > 0 ? 0 : result; 191 return qout->pl_nr > 0 ? 0 : result;
186} 192}
187 193
194/**
195 * This is called when a page is accessed within file in a way that creates
196 * new page, if one were missing (i.e., if there were a hole at that place in
197 * the file, or accessed page is beyond the current file size).
198 *
199 * Expand stripe KMS if necessary.
200 */
188static void osc_page_touch_at(const struct lu_env *env, 201static void osc_page_touch_at(const struct lu_env *env,
189 struct cl_object *obj, pgoff_t idx, unsigned to) 202 struct cl_object *obj, pgoff_t idx, unsigned to)
190{ 203{
@@ -208,7 +221,8 @@ static void osc_page_touch_at(const struct lu_env *env,
208 kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms, 221 kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
209 loi->loi_lvb.lvb_size); 222 loi->loi_lvb.lvb_size);
210 223
211 valid = 0; 224 attr->cat_mtime = attr->cat_ctime = LTIME_S(CURRENT_TIME);
225 valid = CAT_MTIME | CAT_CTIME;
212 if (kms > loi->loi_kms) { 226 if (kms > loi->loi_kms) {
213 attr->cat_kms = kms; 227 attr->cat_kms = kms;
214 valid |= CAT_KMS; 228 valid |= CAT_KMS;
@@ -221,91 +235,128 @@ static void osc_page_touch_at(const struct lu_env *env,
221 cl_object_attr_unlock(obj); 235 cl_object_attr_unlock(obj);
222} 236}
223 237
224/** 238static int osc_io_commit_async(const struct lu_env *env,
225 * This is called when a page is accessed within file in a way that creates 239 const struct cl_io_slice *ios,
226 * new page, if one were missing (i.e., if there were a hole at that place in 240 struct cl_page_list *qin, int from, int to,
227 * the file, or accessed page is beyond the current file size). Examples: 241 cl_commit_cbt cb)
228 * ->commit_write() and ->nopage() methods.
229 *
230 * Expand stripe KMS if necessary.
231 */
232static void osc_page_touch(const struct lu_env *env,
233 struct osc_page *opage, unsigned to)
234{
235 struct cl_page *page = opage->ops_cl.cpl_page;
236 struct cl_object *obj = opage->ops_cl.cpl_obj;
237
238 osc_page_touch_at(env, obj, page->cp_index, to);
239}
240
241/**
242 * Implements cl_io_operations::cio_prepare_write() method for osc layer.
243 *
244 * \retval -EIO transfer initiated against this osc will most likely fail
245 * \retval 0 transfer initiated against this osc will most likely succeed.
246 *
247 * The reason for this check is to immediately return an error to the caller
248 * in the case of a deactivated import. Note, that import can be deactivated
249 * later, while pages, dirtied by this IO, are still in the cache, but this is
250 * irrelevant, because that would still return an error to the application (if
251 * it does fsync), but many applications don't do fsync because of performance
252 * issues, and we wanted to return an -EIO at write time to notify the
253 * application.
254 */
255static int osc_io_prepare_write(const struct lu_env *env,
256 const struct cl_io_slice *ios,
257 const struct cl_page_slice *slice,
258 unsigned from, unsigned to)
259{ 242{
260 struct osc_device *dev = lu2osc_dev(slice->cpl_obj->co_lu.lo_dev); 243 struct cl_io *io = ios->cis_io;
261 struct obd_import *imp = class_exp2cliimp(dev->od_exp);
262 struct osc_io *oio = cl2osc_io(env, ios); 244 struct osc_io *oio = cl2osc_io(env, ios);
245 struct osc_object *osc = cl2osc(ios->cis_obj);
246 struct cl_page *page;
247 struct cl_page *last_page;
248 struct osc_page *opg;
263 int result = 0; 249 int result = 0;
264 250
265 /* 251 LASSERT(qin->pl_nr > 0);
266 * This implements OBD_BRW_CHECK logic from old client. 252
267 */ 253 /* Handle partial page cases */
254 last_page = cl_page_list_last(qin);
255 if (oio->oi_lockless) {
256 page = cl_page_list_first(qin);
257 if (page == last_page) {
258 cl_page_clip(env, page, from, to);
259 } else {
260 if (from != 0)
261 cl_page_clip(env, page, from, PAGE_SIZE);
262 if (to != PAGE_SIZE)
263 cl_page_clip(env, last_page, 0, to);
264 }
265 }
266
267 while (qin->pl_nr > 0) {
268 struct osc_async_page *oap;
269
270 page = cl_page_list_first(qin);
271 opg = osc_cl_page_osc(page, osc);
272 oap = &opg->ops_oap;
273
274 if (!list_empty(&oap->oap_rpc_item)) {
275 CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
276 oap, opg);
277 result = -EBUSY;
278 break;
279 }
280
281 /* The page may be already in dirty cache. */
282 if (list_empty(&oap->oap_pending_item)) {
283 result = osc_page_cache_add(env, &opg->ops_cl, io);
284 if (result != 0)
285 break;
286 }
287
288 osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
289 page == last_page ? to : PAGE_SIZE);
290
291 cl_page_list_del(env, qin, page);
268 292
269 if (!imp || imp->imp_invalid) 293 (*cb)(env, io, page);
270 result = -EIO; 294 /* Can't access page any more. Page can be in transfer and
271 if (result == 0 && oio->oi_lockless) 295 * complete at any time.
272 /* this page contains `invalid' data, but who cares?
273 * nobody can access the invalid data.
274 * in osc_io_commit_write(), we're going to write exact
275 * [from, to) bytes of this page to OST. -jay
276 */ 296 */
277 cl_page_export(env, slice->cpl_page, 1); 297 }
278 298
299 /* for sync write, kernel will wait for this page to be flushed before
300 * osc_io_end() is called, so release it earlier.
301 * for mkwrite(), it's known there is no further pages.
302 */
303 if (cl_io_is_sync_write(io) && oio->oi_active) {
304 osc_extent_release(env, oio->oi_active);
305 oio->oi_active = NULL;
306 }
307
308 CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
279 return result; 309 return result;
280} 310}
281 311
282static int osc_io_commit_write(const struct lu_env *env, 312static int osc_io_rw_iter_init(const struct lu_env *env,
283 const struct cl_io_slice *ios, 313 const struct cl_io_slice *ios)
284 const struct cl_page_slice *slice,
285 unsigned from, unsigned to)
286{ 314{
287 struct osc_io *oio = cl2osc_io(env, ios); 315 struct cl_io *io = ios->cis_io;
288 struct osc_page *opg = cl2osc_page(slice); 316 struct osc_io *oio = osc_env_io(env);
289 struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj); 317 struct osc_object *osc = cl2osc(ios->cis_obj);
290 struct osc_async_page *oap = &opg->ops_oap; 318 struct client_obd *cli = osc_cli(osc);
319 unsigned long c;
320 unsigned int npages;
321 unsigned int max_pages;
322
323 if (cl_io_is_append(io))
324 return 0;
325
326 npages = io->u.ci_rw.crw_count >> PAGE_SHIFT;
327 if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
328 ++npages;
329
330 max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
331 if (npages > max_pages)
332 npages = max_pages;
333
334 c = atomic_read(cli->cl_lru_left);
335 if (c < npages && osc_lru_reclaim(cli) > 0)
336 c = atomic_read(cli->cl_lru_left);
337 while (c >= npages) {
338 if (c == atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
339 oio->oi_lru_reserved = npages;
340 break;
341 }
342 c = atomic_read(cli->cl_lru_left);
343 }
291 344
292 LASSERT(to > 0); 345 return 0;
293 /* 346}
294 * XXX instead of calling osc_page_touch() here and in
295 * osc_io_fault_start() it might be more logical to introduce
296 * cl_page_touch() method, that generic cl_io_commit_write() and page
297 * fault code calls.
298 */
299 osc_page_touch(env, cl2osc_page(slice), to);
300 if (!client_is_remote(osc_export(obj)) &&
301 capable(CFS_CAP_SYS_RESOURCE))
302 oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
303 347
304 if (oio->oi_lockless) 348static void osc_io_rw_iter_fini(const struct lu_env *env,
305 /* see osc_io_prepare_write() for lockless io handling. */ 349 const struct cl_io_slice *ios)
306 cl_page_clip(env, slice->cpl_page, from, to); 350{
351 struct osc_io *oio = osc_env_io(env);
352 struct osc_object *osc = cl2osc(ios->cis_obj);
353 struct client_obd *cli = osc_cli(osc);
307 354
308 return 0; 355 if (oio->oi_lru_reserved > 0) {
356 atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
357 oio->oi_lru_reserved = 0;
358 }
359 oio->oi_write_osclock = NULL;
309} 360}
310 361
311static int osc_io_fault_start(const struct lu_env *env, 362static int osc_io_fault_start(const struct lu_env *env,
@@ -342,31 +393,21 @@ static int osc_async_upcall(void *a, int rc)
342 * Checks that there are no pages being written in the extent being truncated. 393 * Checks that there are no pages being written in the extent being truncated.
343 */ 394 */
344static int trunc_check_cb(const struct lu_env *env, struct cl_io *io, 395static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
345 struct cl_page *page, void *cbdata) 396 struct osc_page *ops, void *cbdata)
346{ 397{
347 const struct cl_page_slice *slice; 398 struct cl_page *page = ops->ops_cl.cpl_page;
348 struct osc_page *ops;
349 struct osc_async_page *oap; 399 struct osc_async_page *oap;
350 __u64 start = *(__u64 *)cbdata; 400 __u64 start = *(__u64 *)cbdata;
351 401
352 slice = cl_page_at(page, &osc_device_type);
353 LASSERT(slice);
354 ops = cl2osc_page(slice);
355 oap = &ops->ops_oap; 402 oap = &ops->ops_oap;
356
357 if (oap->oap_cmd & OBD_BRW_WRITE && 403 if (oap->oap_cmd & OBD_BRW_WRITE &&
358 !list_empty(&oap->oap_pending_item)) 404 !list_empty(&oap->oap_pending_item))
359 CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n", 405 CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
360 start, current->comm); 406 start, current->comm);
361 407
362 { 408 if (PageLocked(page->cp_vmpage))
363 struct page *vmpage = cl_page_vmpage(env, page); 409 CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
364 410 ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK);
365 if (PageLocked(vmpage))
366 CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
367 ops, page->cp_index,
368 (oap->oap_cmd & OBD_BRW_RWMASK));
369 }
370 411
371 return CLP_GANG_OKAY; 412 return CLP_GANG_OKAY;
372} 413}
@@ -385,8 +426,9 @@ static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
385 /* 426 /*
386 * Complain if there are pages in the truncated region. 427 * Complain if there are pages in the truncated region.
387 */ 428 */
388 cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF, 429 osc_page_gang_lookup(env, io, cl2osc(clob),
389 trunc_check_cb, (void *)&size); 430 start + partial, CL_PAGE_EOF,
431 trunc_check_cb, (void *)&size);
390} 432}
391 433
392static int osc_io_setattr_start(const struct lu_env *env, 434static int osc_io_setattr_start(const struct lu_env *env,
@@ -650,6 +692,8 @@ static const struct cl_io_operations osc_io_ops = {
650 .cio_fini = osc_io_fini 692 .cio_fini = osc_io_fini
651 }, 693 },
652 [CIT_WRITE] = { 694 [CIT_WRITE] = {
695 .cio_iter_init = osc_io_rw_iter_init,
696 .cio_iter_fini = osc_io_rw_iter_fini,
653 .cio_start = osc_io_write_start, 697 .cio_start = osc_io_write_start,
654 .cio_end = osc_io_end, 698 .cio_end = osc_io_end,
655 .cio_fini = osc_io_fini 699 .cio_fini = osc_io_fini
@@ -672,16 +716,8 @@ static const struct cl_io_operations osc_io_ops = {
672 .cio_fini = osc_io_fini 716 .cio_fini = osc_io_fini
673 } 717 }
674 }, 718 },
675 .req_op = { 719 .cio_submit = osc_io_submit,
676 [CRT_READ] = { 720 .cio_commit_async = osc_io_commit_async
677 .cio_submit = osc_io_submit
678 },
679 [CRT_WRITE] = {
680 .cio_submit = osc_io_submit
681 }
682 },
683 .cio_prepare_write = osc_io_prepare_write,
684 .cio_commit_write = osc_io_commit_write
685}; 721};
686 722
687/***************************************************************************** 723/*****************************************************************************
@@ -718,8 +754,7 @@ static void osc_req_attr_set(const struct lu_env *env,
718 struct lov_oinfo *oinfo; 754 struct lov_oinfo *oinfo;
719 struct cl_req *clerq; 755 struct cl_req *clerq;
720 struct cl_page *apage; /* _some_ page in @clerq */ 756 struct cl_page *apage; /* _some_ page in @clerq */
721 struct cl_lock *lock; /* _some_ lock protecting @apage */ 757 struct ldlm_lock *lock; /* _some_ lock protecting @apage */
722 struct osc_lock *olck;
723 struct osc_page *opg; 758 struct osc_page *opg;
724 struct obdo *oa; 759 struct obdo *oa;
725 struct ost_lvb *lvb; 760 struct ost_lvb *lvb;
@@ -753,31 +788,32 @@ static void osc_req_attr_set(const struct lu_env *env,
753 LASSERT(!list_empty(&clerq->crq_pages)); 788 LASSERT(!list_empty(&clerq->crq_pages));
754 apage = container_of(clerq->crq_pages.next, 789 apage = container_of(clerq->crq_pages.next,
755 struct cl_page, cp_flight); 790 struct cl_page, cp_flight);
756 opg = osc_cl_page_osc(apage); 791 opg = osc_cl_page_osc(apage, NULL);
757 apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */ 792 lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
758 lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1); 793 1, 1);
759 if (!lock) { 794 if (!lock && !opg->ops_srvlock) {
760 struct cl_object_header *head; 795 struct ldlm_resource *res;
761 struct cl_lock *scan; 796 struct ldlm_res_id *resname;
762 797
763 head = cl_object_header(apage->cp_obj); 798 CL_PAGE_DEBUG(D_ERROR, env, apage, "uncovered page!\n");
764 list_for_each_entry(scan, &head->coh_locks, cll_linkage) 799
765 CL_LOCK_DEBUG(D_ERROR, env, scan, 800 resname = &osc_env_info(env)->oti_resname;
766 "no cover page!\n"); 801 ostid_build_res_name(&oinfo->loi_oi, resname);
767 CL_PAGE_DEBUG(D_ERROR, env, apage, 802 res = ldlm_resource_get(
768 "dump uncover page!\n"); 803 osc_export(cl2osc(obj))->exp_obd->obd_namespace,
804 NULL, resname, LDLM_EXTENT, 0);
805 ldlm_resource_dump(D_ERROR, res);
806
769 dump_stack(); 807 dump_stack();
770 LBUG(); 808 LBUG();
771 } 809 }
772 810
773 olck = osc_lock_at(lock);
774 LASSERT(ergo(opg->ops_srvlock, !olck->ols_lock));
775 /* check for lockless io. */ 811 /* check for lockless io. */
776 if (olck->ols_lock) { 812 if (lock) {
777 oa->o_handle = olck->ols_lock->l_remote_handle; 813 oa->o_handle = lock->l_remote_handle;
778 oa->o_valid |= OBD_MD_FLHANDLE; 814 oa->o_valid |= OBD_MD_FLHANDLE;
815 LDLM_LOCK_PUT(lock);
779 } 816 }
780 cl_lock_put(env, lock);
781 } 817 }
782} 818}
783 819
@@ -807,8 +843,9 @@ int osc_req_init(const struct lu_env *env, struct cl_device *dev,
807 if (or) { 843 if (or) {
808 cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops); 844 cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
809 result = 0; 845 result = 0;
810 } else 846 } else {
811 result = -ENOMEM; 847 result = -ENOMEM;
848 }
812 return result; 849 return result;
813} 850}
814 851
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 013df9787f3e..16f9cd9d3b12 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -36,6 +36,7 @@
36 * Implementation of cl_lock for OSC layer. 36 * Implementation of cl_lock for OSC layer.
37 * 37 *
38 * Author: Nikita Danilov <nikita.danilov@sun.com> 38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
39 */ 40 */
40 41
41#define DEBUG_SUBSYSTEM S_OSC 42#define DEBUG_SUBSYSTEM S_OSC
@@ -50,8 +51,6 @@
50 * @{ 51 * @{
51 */ 52 */
52 53
53#define _PAGEREF_MAGIC (-10000000)
54
55/***************************************************************************** 54/*****************************************************************************
56 * 55 *
57 * Type conversions. 56 * Type conversions.
@@ -62,7 +61,6 @@ static const struct cl_lock_operations osc_lock_ops;
62static const struct cl_lock_operations osc_lock_lockless_ops; 61static const struct cl_lock_operations osc_lock_lockless_ops;
63static void osc_lock_to_lockless(const struct lu_env *env, 62static void osc_lock_to_lockless(const struct lu_env *env,
64 struct osc_lock *ols, int force); 63 struct osc_lock *ols, int force);
65static int osc_lock_has_pages(struct osc_lock *olck);
66 64
67int osc_lock_is_lockless(const struct osc_lock *olck) 65int osc_lock_is_lockless(const struct osc_lock *olck)
68{ 66{
@@ -90,11 +88,11 @@ static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
90static int osc_lock_invariant(struct osc_lock *ols) 88static int osc_lock_invariant(struct osc_lock *ols)
91{ 89{
92 struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle); 90 struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
93 struct ldlm_lock *olock = ols->ols_lock; 91 struct ldlm_lock *olock = ols->ols_dlmlock;
94 int handle_used = lustre_handle_is_used(&ols->ols_handle); 92 int handle_used = lustre_handle_is_used(&ols->ols_handle);
95 93
96 if (ergo(osc_lock_is_lockless(ols), 94 if (ergo(osc_lock_is_lockless(ols),
97 ols->ols_locklessable && !ols->ols_lock)) 95 ols->ols_locklessable && !ols->ols_dlmlock))
98 return 1; 96 return 1;
99 97
100 /* 98 /*
@@ -111,7 +109,7 @@ static int osc_lock_invariant(struct osc_lock *ols)
111 ergo(!lock, !olock))) 109 ergo(!lock, !olock)))
112 return 0; 110 return 0;
113 /* 111 /*
114 * Check that ->ols_handle and ->ols_lock are consistent, but 112 * Check that ->ols_handle and ->ols_dlmlock are consistent, but
115 * take into account that they are set at the different time. 113 * take into account that they are set at the different time.
116 */ 114 */
117 if (!ergo(ols->ols_state == OLS_CANCELLED, 115 if (!ergo(ols->ols_state == OLS_CANCELLED,
@@ -122,7 +120,7 @@ static int osc_lock_invariant(struct osc_lock *ols)
122 * ast. 120 * ast.
123 */ 121 */
124 if (!ergo(olock && ols->ols_state < OLS_CANCELLED, 122 if (!ergo(olock && ols->ols_state < OLS_CANCELLED,
125 ((olock->l_flags & LDLM_FL_DESTROYED) == 0))) 123 !ldlm_is_destroyed(olock)))
126 return 0; 124 return 0;
127 125
128 if (!ergo(ols->ols_state == OLS_GRANTED, 126 if (!ergo(ols->ols_state == OLS_GRANTED,
@@ -138,117 +136,13 @@ static int osc_lock_invariant(struct osc_lock *ols)
138 * 136 *
139 */ 137 */
140 138
141/**
142 * Breaks a link between osc_lock and dlm_lock.
143 */
144static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
145{
146 struct ldlm_lock *dlmlock;
147
148 spin_lock(&osc_ast_guard);
149 dlmlock = olck->ols_lock;
150 if (!dlmlock) {
151 spin_unlock(&osc_ast_guard);
152 return;
153 }
154
155 olck->ols_lock = NULL;
156 /* wb(); --- for all who checks (ols->ols_lock != NULL) before
157 * call to osc_lock_detach()
158 */
159 dlmlock->l_ast_data = NULL;
160 olck->ols_handle.cookie = 0ULL;
161 spin_unlock(&osc_ast_guard);
162
163 lock_res_and_lock(dlmlock);
164 if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
165 struct cl_object *obj = olck->ols_cl.cls_obj;
166 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
167 __u64 old_kms;
168
169 cl_object_attr_lock(obj);
170 /* Must get the value under the lock to avoid possible races. */
171 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
172 /* Update the kms. Need to loop all granted locks.
173 * Not a problem for the client
174 */
175 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
176
177 cl_object_attr_set(env, obj, attr, CAT_KMS);
178 cl_object_attr_unlock(obj);
179 }
180 unlock_res_and_lock(dlmlock);
181
182 /* release a reference taken in osc_lock_upcall0(). */
183 LASSERT(olck->ols_has_ref);
184 lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
185 LDLM_LOCK_RELEASE(dlmlock);
186 olck->ols_has_ref = 0;
187}
188
189static int osc_lock_unhold(struct osc_lock *ols)
190{
191 int result = 0;
192
193 if (ols->ols_hold) {
194 ols->ols_hold = 0;
195 result = osc_cancel_base(&ols->ols_handle,
196 ols->ols_einfo.ei_mode);
197 }
198 return result;
199}
200
201static int osc_lock_unuse(const struct lu_env *env,
202 const struct cl_lock_slice *slice)
203{
204 struct osc_lock *ols = cl2osc_lock(slice);
205
206 LINVRNT(osc_lock_invariant(ols));
207
208 switch (ols->ols_state) {
209 case OLS_NEW:
210 LASSERT(!ols->ols_hold);
211 LASSERT(ols->ols_agl);
212 return 0;
213 case OLS_UPCALL_RECEIVED:
214 osc_lock_unhold(ols);
215 case OLS_ENQUEUED:
216 LASSERT(!ols->ols_hold);
217 osc_lock_detach(env, ols);
218 ols->ols_state = OLS_NEW;
219 return 0;
220 case OLS_GRANTED:
221 LASSERT(!ols->ols_glimpse);
222 LASSERT(ols->ols_hold);
223 /*
224 * Move lock into OLS_RELEASED state before calling
225 * osc_cancel_base() so that possible synchronous cancellation
226 * sees that lock is released.
227 */
228 ols->ols_state = OLS_RELEASED;
229 return osc_lock_unhold(ols);
230 default:
231 CERROR("Impossible state: %d\n", ols->ols_state);
232 LBUG();
233 }
234}
235
236static void osc_lock_fini(const struct lu_env *env, 139static void osc_lock_fini(const struct lu_env *env,
237 struct cl_lock_slice *slice) 140 struct cl_lock_slice *slice)
238{ 141{
239 struct osc_lock *ols = cl2osc_lock(slice); 142 struct osc_lock *ols = cl2osc_lock(slice);
240 143
241 LINVRNT(osc_lock_invariant(ols)); 144 LINVRNT(osc_lock_invariant(ols));
242 /* 145 LASSERT(!ols->ols_dlmlock);
243 * ->ols_hold can still be true at this point if, for example, a
244 * thread that requested a lock was killed (and released a reference
245 * to the lock), before reply from a server was received. In this case
246 * lock is destroyed immediately after upcall.
247 */
248 osc_lock_unhold(ols);
249 LASSERT(!ols->ols_lock);
250 LASSERT(atomic_read(&ols->ols_pageref) == 0 ||
251 atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
252 146
253 kmem_cache_free(osc_lock_kmem, ols); 147 kmem_cache_free(osc_lock_kmem, ols);
254} 148}
@@ -275,55 +169,12 @@ static __u64 osc_enq2ldlm_flags(__u32 enqflags)
275 result |= LDLM_FL_HAS_INTENT; 169 result |= LDLM_FL_HAS_INTENT;
276 if (enqflags & CEF_DISCARD_DATA) 170 if (enqflags & CEF_DISCARD_DATA)
277 result |= LDLM_FL_AST_DISCARD_DATA; 171 result |= LDLM_FL_AST_DISCARD_DATA;
172 if (enqflags & CEF_PEEK)
173 result |= LDLM_FL_TEST_LOCK;
278 return result; 174 return result;
279} 175}
280 176
281/** 177/**
282 * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
283 * pointers. Initialized in osc_init().
284 */
285spinlock_t osc_ast_guard;
286
287static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
288{
289 struct osc_lock *olck;
290
291 lock_res_and_lock(dlm_lock);
292 spin_lock(&osc_ast_guard);
293 olck = dlm_lock->l_ast_data;
294 if (olck) {
295 struct cl_lock *lock = olck->ols_cl.cls_lock;
296 /*
297 * If osc_lock holds a reference on ldlm lock, return it even
298 * when cl_lock is in CLS_FREEING state. This way
299 *
300 * osc_ast_data_get(dlmlock) == NULL
301 *
302 * guarantees that all osc references on dlmlock were
303 * released. osc_dlm_blocking_ast0() relies on that.
304 */
305 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
306 cl_lock_get_trust(lock);
307 lu_ref_add_atomic(&lock->cll_reference,
308 "ast", current);
309 } else
310 olck = NULL;
311 }
312 spin_unlock(&osc_ast_guard);
313 unlock_res_and_lock(dlm_lock);
314 return olck;
315}
316
317static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
318{
319 struct cl_lock *lock;
320
321 lock = olck->ols_cl.cls_lock;
322 lu_ref_del(&lock->cll_reference, "ast", current);
323 cl_lock_put(env, lock);
324}
325
326/**
327 * Updates object attributes from a lock value block (lvb) received together 178 * Updates object attributes from a lock value block (lvb) received together
328 * with the DLM lock reply from the server. Copy of osc_update_enqueue() 179 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
329 * logic. 180 * logic.
@@ -333,35 +184,30 @@ static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
333 * 184 *
334 * Called under lock and resource spin-locks. 185 * Called under lock and resource spin-locks.
335 */ 186 */
336static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck, 187static void osc_lock_lvb_update(const struct lu_env *env,
337 int rc) 188 struct osc_object *osc,
189 struct ldlm_lock *dlmlock,
190 struct ost_lvb *lvb)
338{ 191{
339 struct ost_lvb *lvb; 192 struct cl_object *obj = osc2cl(osc);
340 struct cl_object *obj; 193 struct lov_oinfo *oinfo = osc->oo_oinfo;
341 struct lov_oinfo *oinfo; 194 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
342 struct cl_attr *attr;
343 unsigned valid; 195 unsigned valid;
344 196
345 if (!(olck->ols_flags & LDLM_FL_LVB_READY))
346 return;
347
348 lvb = &olck->ols_lvb;
349 obj = olck->ols_cl.cls_obj;
350 oinfo = cl2osc(obj)->oo_oinfo;
351 attr = &osc_env_info(env)->oti_attr;
352 valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE; 197 valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
198 if (!lvb)
199 lvb = dlmlock->l_lvb_data;
200
353 cl_lvb2attr(attr, lvb); 201 cl_lvb2attr(attr, lvb);
354 202
355 cl_object_attr_lock(obj); 203 cl_object_attr_lock(obj);
356 if (rc == 0) { 204 if (dlmlock) {
357 struct ldlm_lock *dlmlock;
358 __u64 size; 205 __u64 size;
359 206
360 dlmlock = olck->ols_lock; 207 check_res_locked(dlmlock->l_resource);
361 208 LASSERT(lvb == dlmlock->l_lvb_data);
362 /* re-grab LVB from a dlm lock under DLM spin-locks. */
363 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
364 size = lvb->lvb_size; 209 size = lvb->lvb_size;
210
365 /* Extend KMS up to the end of this lock and no further 211 /* Extend KMS up to the end of this lock and no further
366 * A lock on [x,y] means a KMS of up to y + 1 bytes! 212 * A lock on [x,y] means a KMS of up to y + 1 bytes!
367 */ 213 */
@@ -378,102 +224,67 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
378 dlmlock->l_policy_data.l_extent.end); 224 dlmlock->l_policy_data.l_extent.end);
379 } 225 }
380 ldlm_lock_allow_match_locked(dlmlock); 226 ldlm_lock_allow_match_locked(dlmlock);
381 } else if (rc == -ENAVAIL && olck->ols_glimpse) { 227 }
382 CDEBUG(D_INODE, "glimpsed, setting rss=%llu; leaving kms=%llu\n",
383 lvb->lvb_size, oinfo->loi_kms);
384 } else
385 valid = 0;
386
387 if (valid != 0)
388 cl_object_attr_set(env, obj, attr, valid);
389 228
229 cl_object_attr_set(env, obj, attr, valid);
390 cl_object_attr_unlock(obj); 230 cl_object_attr_unlock(obj);
391} 231}
392 232
393/** 233static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
394 * Called when a lock is granted, from an upcall (when server returned a 234 struct lustre_handle *lockh, bool lvb_update)
395 * granted lock), or from completion AST, when server returned a blocked lock.
396 *
397 * Called under lock and resource spin-locks, that are released temporarily
398 * here.
399 */
400static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
401 struct ldlm_lock *dlmlock, int rc)
402{ 235{
403 struct ldlm_extent *ext; 236 struct ldlm_lock *dlmlock;
404 struct cl_lock *lock;
405 struct cl_lock_descr *descr;
406 237
407 LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode); 238 dlmlock = ldlm_handle2lock_long(lockh, 0);
239 LASSERT(dlmlock);
408 240
409 if (olck->ols_state < OLS_GRANTED) { 241 /* lock reference taken by ldlm_handle2lock_long() is
410 lock = olck->ols_cl.cls_lock; 242 * owned by osc_lock and released in osc_lock_detach()
411 ext = &dlmlock->l_policy_data.l_extent; 243 */
412 descr = &osc_env_info(env)->oti_descr; 244 lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
413 descr->cld_obj = lock->cll_descr.cld_obj; 245 oscl->ols_has_ref = 1;
414 246
415 /* XXX check that ->l_granted_mode is valid. */ 247 LASSERT(!oscl->ols_dlmlock);
416 descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode); 248 oscl->ols_dlmlock = dlmlock;
417 descr->cld_start = cl_index(descr->cld_obj, ext->start); 249
418 descr->cld_end = cl_index(descr->cld_obj, ext->end); 250 /* This may be a matched lock for glimpse request, do not hold
419 descr->cld_gid = ext->gid; 251 * lock reference in that case.
420 /* 252 */
421 * tell upper layers the extent of the lock that was actually 253 if (!oscl->ols_glimpse) {
422 * granted 254 /* hold a refc for non glimpse lock which will
423 */ 255 * be released in osc_lock_cancel()
424 olck->ols_state = OLS_GRANTED;
425 osc_lock_lvb_update(env, olck, rc);
426
427 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
428 * to take a semaphore on a parent lock. This is safe, because
429 * spin-locks are needed to protect consistency of
430 * dlmlock->l_*_mode and LVB, and we have finished processing
431 * them.
432 */ 256 */
433 unlock_res_and_lock(dlmlock); 257 lustre_handle_copy(&oscl->ols_handle, lockh);
434 cl_lock_modify(env, lock, descr); 258 ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
435 cl_lock_signal(env, lock); 259 oscl->ols_hold = 1;
436 LINVRNT(osc_lock_invariant(olck));
437 lock_res_and_lock(dlmlock);
438 } 260 }
439}
440
441static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
442
443{
444 struct ldlm_lock *dlmlock;
445
446 dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
447 LASSERT(dlmlock);
448 261
262 /* Lock must have been granted. */
449 lock_res_and_lock(dlmlock); 263 lock_res_and_lock(dlmlock);
450 spin_lock(&osc_ast_guard); 264 if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
451 LASSERT(dlmlock->l_ast_data == olck); 265 struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
452 LASSERT(!olck->ols_lock); 266 struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
453 olck->ols_lock = dlmlock;
454 spin_unlock(&osc_ast_guard);
455 267
456 /* 268 /* extend the lock extent, otherwise it will have problem when
457 * Lock might be not yet granted. In this case, completion ast 269 * we decide whether to grant a lockless lock.
458 * (osc_ldlm_completion_ast()) comes later and finishes lock 270 */
459 * granting. 271 descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
460 */ 272 descr->cld_start = cl_index(descr->cld_obj, ext->start);
461 if (dlmlock->l_granted_mode == dlmlock->l_req_mode) 273 descr->cld_end = cl_index(descr->cld_obj, ext->end);
462 osc_lock_granted(env, olck, dlmlock, 0); 274 descr->cld_gid = ext->gid;
463 unlock_res_and_lock(dlmlock);
464 275
465 /* 276 /* no lvb update for matched lock */
466 * osc_enqueue_interpret() decrefs asynchronous locks, counter 277 if (lvb_update) {
467 * this. 278 LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
468 */ 279 osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
469 ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode); 280 dlmlock, NULL);
470 olck->ols_hold = 1; 281 }
282 LINVRNT(osc_lock_invariant(oscl));
283 }
284 unlock_res_and_lock(dlmlock);
471 285
472 /* lock reference taken by ldlm_handle2lock_long() is owned by 286 LASSERT(oscl->ols_state != OLS_GRANTED);
473 * osc_lock and released in osc_lock_detach() 287 oscl->ols_state = OLS_GRANTED;
474 */
475 lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
476 olck->ols_has_ref = 1;
477} 288}
478 289
479/** 290/**
@@ -481,143 +292,124 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
481 * received from a server, or after osc_enqueue_base() matched a local DLM 292 * received from a server, or after osc_enqueue_base() matched a local DLM
482 * lock. 293 * lock.
483 */ 294 */
484static int osc_lock_upcall(void *cookie, int errcode) 295static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
296 int errcode)
485{ 297{
486 struct osc_lock *olck = cookie; 298 struct osc_lock *oscl = cookie;
487 struct cl_lock_slice *slice = &olck->ols_cl; 299 struct cl_lock_slice *slice = &oscl->ols_cl;
488 struct cl_lock *lock = slice->cls_lock;
489 struct lu_env *env; 300 struct lu_env *env;
490 struct cl_env_nest nest; 301 struct cl_env_nest nest;
302 int rc;
491 303
492 env = cl_env_nested_get(&nest); 304 env = cl_env_nested_get(&nest);
493 if (!IS_ERR(env)) { 305 /* should never happen, similar to osc_ldlm_blocking_ast(). */
494 int rc; 306 LASSERT(!IS_ERR(env));
307
308 rc = ldlm_error2errno(errcode);
309 if (oscl->ols_state == OLS_ENQUEUED) {
310 oscl->ols_state = OLS_UPCALL_RECEIVED;
311 } else if (oscl->ols_state == OLS_CANCELLED) {
312 rc = -EIO;
313 } else {
314 CERROR("Impossible state: %d\n", oscl->ols_state);
315 LBUG();
316 }
495 317
496 cl_lock_mutex_get(env, lock); 318 if (rc == 0)
319 osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);
497 320
498 LASSERT(lock->cll_state >= CLS_QUEUING); 321 /* Error handling, some errors are tolerable. */
499 if (olck->ols_state == OLS_ENQUEUED) { 322 if (oscl->ols_locklessable && rc == -EUSERS) {
500 olck->ols_state = OLS_UPCALL_RECEIVED; 323 /* This is a tolerable error, turn this lock into
501 rc = ldlm_error2errno(errcode); 324 * lockless lock.
502 } else if (olck->ols_state == OLS_CANCELLED) { 325 */
503 rc = -EIO; 326 osc_object_set_contended(cl2osc(slice->cls_obj));
504 } else { 327 LASSERT(slice->cls_ops == &osc_lock_ops);
505 CERROR("Impossible state: %d\n", olck->ols_state); 328
506 LBUG(); 329 /* Change this lock to ldlmlock-less lock. */
507 } 330 osc_lock_to_lockless(env, oscl, 1);
508 if (rc) { 331 oscl->ols_state = OLS_GRANTED;
509 struct ldlm_lock *dlmlock; 332 rc = 0;
510 333 } else if (oscl->ols_glimpse && rc == -ENAVAIL) {
511 dlmlock = ldlm_handle2lock(&olck->ols_handle); 334 LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
512 if (dlmlock) { 335 osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
513 lock_res_and_lock(dlmlock); 336 NULL, &oscl->ols_lvb);
514 spin_lock(&osc_ast_guard); 337 /* Hide the error. */
515 LASSERT(!olck->ols_lock); 338 rc = 0;
516 dlmlock->l_ast_data = NULL; 339 }
517 olck->ols_handle.cookie = 0ULL;
518 spin_unlock(&osc_ast_guard);
519 ldlm_lock_fail_match_locked(dlmlock);
520 unlock_res_and_lock(dlmlock);
521 LDLM_LOCK_PUT(dlmlock);
522 }
523 } else {
524 if (olck->ols_glimpse)
525 olck->ols_glimpse = 0;
526 osc_lock_upcall0(env, olck);
527 }
528 340
529 /* Error handling, some errors are tolerable. */ 341 if (oscl->ols_owner)
530 if (olck->ols_locklessable && rc == -EUSERS) { 342 cl_sync_io_note(env, oscl->ols_owner, rc);
531 /* This is a tolerable error, turn this lock into 343 cl_env_nested_put(&nest, env);
532 * lockless lock.
533 */
534 osc_object_set_contended(cl2osc(slice->cls_obj));
535 LASSERT(slice->cls_ops == &osc_lock_ops);
536 344
537 /* Change this lock to ldlmlock-less lock. */ 345 return rc;
538 osc_lock_to_lockless(env, olck, 1); 346}
539 olck->ols_state = OLS_GRANTED;
540 rc = 0;
541 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
542 osc_lock_lvb_update(env, olck, rc);
543 cl_lock_delete(env, lock);
544 /* Hide the error. */
545 rc = 0;
546 }
547
548 if (rc == 0) {
549 /* For AGL case, the RPC sponsor may exits the cl_lock
550 * processing without wait() called before related OSC
551 * lock upcall(). So update the lock status according
552 * to the enqueue result inside AGL upcall().
553 */
554 if (olck->ols_agl) {
555 lock->cll_flags |= CLF_FROM_UPCALL;
556 cl_wait_try(env, lock);
557 lock->cll_flags &= ~CLF_FROM_UPCALL;
558 if (!olck->ols_glimpse)
559 olck->ols_agl = 0;
560 }
561 cl_lock_signal(env, lock);
562 /* del user for lock upcall cookie */
563 cl_unuse_try(env, lock);
564 } else {
565 /* del user for lock upcall cookie */
566 cl_lock_user_del(env, lock);
567 cl_lock_error(env, lock, rc);
568 }
569 347
570 /* release cookie reference, acquired by osc_lock_enqueue() */ 348static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
571 cl_lock_hold_release(env, lock, "upcall", lock); 349 int errcode)
572 cl_lock_mutex_put(env, lock); 350{
351 struct osc_object *osc = cookie;
352 struct ldlm_lock *dlmlock;
353 struct lu_env *env;
354 struct cl_env_nest nest;
573 355
574 lu_ref_del(&lock->cll_reference, "upcall", lock); 356 env = cl_env_nested_get(&nest);
575 /* This maybe the last reference, so must be called after 357 LASSERT(!IS_ERR(env));
576 * cl_lock_mutex_put().
577 */
578 cl_lock_put(env, lock);
579 358
580 cl_env_nested_put(&nest, env); 359 if (errcode == ELDLM_LOCK_MATCHED) {
581 } else { 360 errcode = ELDLM_OK;
582 /* should never happen, similar to osc_ldlm_blocking_ast(). */ 361 goto out;
583 LBUG();
584 } 362 }
585 return errcode; 363
364 if (errcode != ELDLM_OK)
365 goto out;
366
367 dlmlock = ldlm_handle2lock(lockh);
368 LASSERT(dlmlock);
369
370 lock_res_and_lock(dlmlock);
371 LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
372
373 /* there is no osc_lock associated with AGL lock */
374 osc_lock_lvb_update(env, osc, dlmlock, NULL);
375
376 unlock_res_and_lock(dlmlock);
377 LDLM_LOCK_PUT(dlmlock);
378
379out:
380 cl_object_put(env, osc2cl(osc));
381 cl_env_nested_put(&nest, env);
382 return ldlm_error2errno(errcode);
586} 383}
587 384
588/** 385static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
589 * Core of osc_dlm_blocking_ast() logic. 386 enum cl_lock_mode mode, int discard)
590 */
591static void osc_lock_blocking(const struct lu_env *env,
592 struct ldlm_lock *dlmlock,
593 struct osc_lock *olck, int blocking)
594{ 387{
595 struct cl_lock *lock = olck->ols_cl.cls_lock; 388 struct lu_env *env;
389 struct cl_env_nest nest;
390 int rc = 0;
391 int rc2 = 0;
596 392
597 LASSERT(olck->ols_lock == dlmlock); 393 env = cl_env_nested_get(&nest);
598 CLASSERT(OLS_BLOCKED < OLS_CANCELLED); 394 if (IS_ERR(env))
599 LASSERT(!osc_lock_is_lockless(olck)); 395 return PTR_ERR(env);
396
397 if (mode == CLM_WRITE) {
398 rc = osc_cache_writeback_range(env, obj, start, end, 1,
399 discard);
400 CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n",
401 obj, start, end, rc,
402 discard ? "discarded" : "written back");
403 if (rc > 0)
404 rc = 0;
405 }
600 406
601 /* 407 rc2 = osc_lock_discard_pages(env, obj, start, end, mode);
602 * Lock might be still addref-ed here, if e.g., blocking ast 408 if (rc == 0 && rc2 < 0)
603 * is sent for a failed lock. 409 rc = rc2;
604 */
605 osc_lock_unhold(olck);
606 410
607 if (blocking && olck->ols_state < OLS_BLOCKED) 411 cl_env_nested_put(&nest, env);
608 /* 412 return rc;
609 * Move osc_lock into OLS_BLOCKED before canceling the lock,
610 * because it recursively re-enters osc_lock_blocking(), with
611 * the state set to OLS_CANCELLED.
612 */
613 olck->ols_state = OLS_BLOCKED;
614 /*
615 * cancel and destroy lock at least once no matter how blocking ast is
616 * entered (see comment above osc_ldlm_blocking_ast() for use
617 * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
618 */
619 cl_lock_cancel(env, lock);
620 cl_lock_delete(env, lock);
621} 413}
622 414
623/** 415/**
@@ -628,65 +420,63 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env,
628 struct ldlm_lock *dlmlock, 420 struct ldlm_lock *dlmlock,
629 void *data, int flag) 421 void *data, int flag)
630{ 422{
631 struct osc_lock *olck; 423 struct cl_object *obj = NULL;
632 struct cl_lock *lock; 424 int result = 0;
633 int result; 425 int discard;
634 int cancel; 426 enum cl_lock_mode mode = CLM_READ;
635 427
636 LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING); 428 LASSERT(flag == LDLM_CB_CANCELING);
637 429
638 cancel = 0; 430 lock_res_and_lock(dlmlock);
639 olck = osc_ast_data_get(dlmlock); 431 if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
640 if (olck) { 432 dlmlock->l_ast_data = NULL;
641 lock = olck->ols_cl.cls_lock; 433 unlock_res_and_lock(dlmlock);
642 cl_lock_mutex_get(env, lock); 434 return 0;
643 LINVRNT(osc_lock_invariant(olck)); 435 }
644 if (olck->ols_ast_wait) { 436
645 /* wake up osc_lock_use() */ 437 discard = ldlm_is_discard_data(dlmlock);
646 cl_lock_signal(env, lock); 438 if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
647 olck->ols_ast_wait = 0; 439 mode = CLM_WRITE;
648 } 440
649 /* 441 if (dlmlock->l_ast_data) {
650 * Lock might have been canceled while this thread was 442 obj = osc2cl(dlmlock->l_ast_data);
651 * sleeping for lock mutex, but olck is pinned in memory. 443 dlmlock->l_ast_data = NULL;
652 */ 444
653 if (olck == dlmlock->l_ast_data) { 445 cl_object_get(obj);
654 /* 446 }
655 * NOTE: DLM sends blocking AST's for failed locks 447
656 * (that are still in pre-OLS_GRANTED state) 448 unlock_res_and_lock(dlmlock);
657 * too, and they have to be canceled otherwise 449
658 * DLM lock is never destroyed and stuck in 450 /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
659 * the memory. 451 * the object has been destroyed.
660 * 452 */
661 * Alternatively, ldlm_cli_cancel() can be 453 if (obj) {
662 * called here directly for osc_locks with 454 struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent;
663 * ols_state < OLS_GRANTED to maintain an 455 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
664 * invariant that ->clo_cancel() is only called 456 __u64 old_kms;
665 * for locks that were granted. 457
666 */ 458 /* Destroy pages covered by the extent of the DLM lock */
667 LASSERT(data == olck); 459 result = osc_lock_flush(cl2osc(obj),
668 osc_lock_blocking(env, dlmlock, 460 cl_index(obj, extent->start),
669 olck, flag == LDLM_CB_BLOCKING); 461 cl_index(obj, extent->end),
670 } else 462 mode, discard);
671 cancel = 1; 463
672 cl_lock_mutex_put(env, lock); 464 /* losing a lock, update kms */
673 osc_ast_data_put(env, olck); 465 lock_res_and_lock(dlmlock);
674 } else 466 cl_object_attr_lock(obj);
675 /* 467 /* Must get the value under the lock to avoid race. */
676 * DLM lock exists, but there is no cl_lock attached to it. 468 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
677 * This is a `normal' race. cl_object and its cl_lock's can be 469 /* Update the kms. Need to loop all granted locks.
678 * removed by memory pressure, together with all pages. 470 * Not a problem for the client
679 */ 471 */
680 cancel = (flag == LDLM_CB_BLOCKING); 472 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
681 473
682 if (cancel) { 474 cl_object_attr_set(env, obj, attr, CAT_KMS);
683 struct lustre_handle *lockh; 475 cl_object_attr_unlock(obj);
476 unlock_res_and_lock(dlmlock);
684 477
685 lockh = &osc_env_info(env)->oti_handle; 478 cl_object_put(env, obj);
686 ldlm_lock2handle(dlmlock, lockh); 479 }
687 result = ldlm_cli_cancel(lockh, LCF_ASYNC);
688 } else
689 result = 0;
690 return result; 480 return result;
691} 481}
692 482
@@ -736,107 +526,52 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
736 struct ldlm_lock_desc *new, void *data, 526 struct ldlm_lock_desc *new, void *data,
737 int flag) 527 int flag)
738{ 528{
739 struct lu_env *env; 529 int result = 0;
740 struct cl_env_nest nest;
741 int result;
742 530
743 /* 531 switch (flag) {
744 * This can be called in the context of outer IO, e.g., 532 case LDLM_CB_BLOCKING: {
745 * 533 struct lustre_handle lockh;
746 * cl_enqueue()->... 534
747 * ->osc_enqueue_base()->... 535 ldlm_lock2handle(dlmlock, &lockh);
748 * ->ldlm_prep_elc_req()->... 536 result = ldlm_cli_cancel(&lockh, LCF_ASYNC);
749 * ->ldlm_cancel_callback()->...
750 * ->osc_ldlm_blocking_ast()
751 *
752 * new environment has to be created to not corrupt outer context.
753 */
754 env = cl_env_nested_get(&nest);
755 if (!IS_ERR(env)) {
756 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
757 cl_env_nested_put(&nest, env);
758 } else {
759 result = PTR_ERR(env);
760 /*
761 * XXX This should never happen, as cl_lock is
762 * stuck. Pre-allocated environment a la vvp_inode_fini_env
763 * should be used.
764 */
765 LBUG();
766 }
767 if (result != 0) {
768 if (result == -ENODATA) 537 if (result == -ENODATA)
769 result = 0; 538 result = 0;
770 else 539 break;
771 CERROR("BAST failed: %d\n", result);
772 } 540 }
773 return result; 541 case LDLM_CB_CANCELING: {
774} 542 struct lu_env *env;
543 struct cl_env_nest nest;
775 544
776static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock, 545 /*
777 __u64 flags, void *data) 546 * This can be called in the context of outer IO, e.g.,
778{ 547 *
779 struct cl_env_nest nest; 548 * osc_enqueue_base()->...
780 struct lu_env *env; 549 * ->ldlm_prep_elc_req()->...
781 struct osc_lock *olck; 550 * ->ldlm_cancel_callback()->...
782 struct cl_lock *lock; 551 * ->osc_ldlm_blocking_ast()
783 int result; 552 *
784 int dlmrc; 553 * new environment has to be created to not corrupt outer
785 554 * context.
786 /* first, do dlm part of the work */ 555 */
787 dlmrc = ldlm_completion_ast_async(dlmlock, flags, data); 556 env = cl_env_nested_get(&nest);
788 /* then, notify cl_lock */ 557 if (IS_ERR(env)) {
789 env = cl_env_nested_get(&nest); 558 result = PTR_ERR(env);
790 if (!IS_ERR(env)) { 559 break;
791 olck = osc_ast_data_get(dlmlock); 560 }
792 if (olck) {
793 lock = olck->ols_cl.cls_lock;
794 cl_lock_mutex_get(env, lock);
795 /*
796 * ldlm_handle_cp_callback() copied LVB from request
797 * to lock->l_lvb_data, store it in osc_lock.
798 */
799 LASSERT(dlmlock->l_lvb_data);
800 lock_res_and_lock(dlmlock);
801 olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
802 if (!olck->ols_lock) {
803 /*
804 * upcall (osc_lock_upcall()) hasn't yet been
805 * called. Do nothing now, upcall will bind
806 * olck to dlmlock and signal the waiters.
807 *
808 * This maintains an invariant that osc_lock
809 * and ldlm_lock are always bound when
810 * osc_lock is in OLS_GRANTED state.
811 */
812 } else if (dlmlock->l_granted_mode ==
813 dlmlock->l_req_mode) {
814 osc_lock_granted(env, olck, dlmlock, dlmrc);
815 }
816 unlock_res_and_lock(dlmlock);
817 561
818 if (dlmrc != 0) { 562 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
819 CL_LOCK_DEBUG(D_ERROR, env, lock,
820 "dlmlock returned %d\n", dlmrc);
821 cl_lock_error(env, lock, dlmrc);
822 }
823 cl_lock_mutex_put(env, lock);
824 osc_ast_data_put(env, olck);
825 result = 0;
826 } else
827 result = -ELDLM_NO_LOCK_DATA;
828 cl_env_nested_put(&nest, env); 563 cl_env_nested_put(&nest, env);
829 } else 564 break;
830 result = PTR_ERR(env); 565 }
831 return dlmrc ?: result; 566 default:
567 LBUG();
568 }
569 return result;
832} 570}
833 571
834static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) 572static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
835{ 573{
836 struct ptlrpc_request *req = data; 574 struct ptlrpc_request *req = data;
837 struct osc_lock *olck;
838 struct cl_lock *lock;
839 struct cl_object *obj;
840 struct cl_env_nest nest; 575 struct cl_env_nest nest;
841 struct lu_env *env; 576 struct lu_env *env;
842 struct ost_lvb *lvb; 577 struct ost_lvb *lvb;
@@ -847,14 +582,16 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
847 582
848 env = cl_env_nested_get(&nest); 583 env = cl_env_nested_get(&nest);
849 if (!IS_ERR(env)) { 584 if (!IS_ERR(env)) {
850 /* osc_ast_data_get() has to go after environment is 585 struct cl_object *obj = NULL;
851 * allocated, because osc_ast_data() acquires a 586
852 * reference to a lock, and it can only be released in 587 lock_res_and_lock(dlmlock);
853 * environment. 588 if (dlmlock->l_ast_data) {
854 */ 589 obj = osc2cl(dlmlock->l_ast_data);
855 olck = osc_ast_data_get(dlmlock); 590 cl_object_get(obj);
856 if (olck) { 591 }
857 lock = olck->ols_cl.cls_lock; 592 unlock_res_and_lock(dlmlock);
593
594 if (obj) {
858 /* Do not grab the mutex of cl_lock for glimpse. 595 /* Do not grab the mutex of cl_lock for glimpse.
859 * See LU-1274 for details. 596 * See LU-1274 for details.
860 * BTW, it's okay for cl_lock to be cancelled during 597 * BTW, it's okay for cl_lock to be cancelled during
@@ -869,7 +606,6 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
869 result = req_capsule_server_pack(cap); 606 result = req_capsule_server_pack(cap);
870 if (result == 0) { 607 if (result == 0) {
871 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB); 608 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
872 obj = lock->cll_descr.cld_obj;
873 result = cl_object_glimpse(env, obj, lvb); 609 result = cl_object_glimpse(env, obj, lvb);
874 } 610 }
875 if (!exp_connect_lvb_type(req->rq_export)) 611 if (!exp_connect_lvb_type(req->rq_export))
@@ -877,7 +613,7 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
877 &RMF_DLM_LVB, 613 &RMF_DLM_LVB,
878 sizeof(struct ost_lvb_v1), 614 sizeof(struct ost_lvb_v1),
879 RCL_SERVER); 615 RCL_SERVER);
880 osc_ast_data_put(env, olck); 616 cl_object_put(env, obj);
881 } else { 617 } else {
882 /* 618 /*
883 * These errors are normal races, so we don't want to 619 * These errors are normal races, so we don't want to
@@ -888,44 +624,123 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
888 result = -ELDLM_NO_LOCK_DATA; 624 result = -ELDLM_NO_LOCK_DATA;
889 } 625 }
890 cl_env_nested_put(&nest, env); 626 cl_env_nested_put(&nest, env);
891 } else 627 } else {
892 result = PTR_ERR(env); 628 result = PTR_ERR(env);
629 }
893 req->rq_status = result; 630 req->rq_status = result;
894 return result; 631 return result;
895} 632}
896 633
897static unsigned long osc_lock_weigh(const struct lu_env *env, 634static int weigh_cb(const struct lu_env *env, struct cl_io *io,
898 const struct cl_lock_slice *slice) 635 struct osc_page *ops, void *cbdata)
899{ 636{
900 /* 637 struct cl_page *page = ops->ops_cl.cpl_page;
901 * don't need to grab coh_page_guard since we don't care the exact # 638
902 * of pages.. 639 if (cl_page_is_vmlocked(env, page) ||
903 */ 640 PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage)
904 return cl_object_header(slice->cls_obj)->coh_pages; 641 ) {
642 (*(unsigned long *)cbdata)++;
643 return CLP_GANG_ABORT;
644 }
645
646 return CLP_GANG_OKAY;
905} 647}
906 648
907static void osc_lock_build_einfo(const struct lu_env *env, 649static unsigned long osc_lock_weight(const struct lu_env *env,
908 const struct cl_lock *clock, 650 struct osc_object *oscobj,
909 struct osc_lock *lock, 651 struct ldlm_extent *extent)
910 struct ldlm_enqueue_info *einfo) 652{
653 struct cl_io *io = &osc_env_info(env)->oti_io;
654 struct cl_object *obj = cl_object_top(&oscobj->oo_cl);
655 unsigned long npages = 0;
656 int result;
657
658 io->ci_obj = obj;
659 io->ci_ignore_layout = 1;
660 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
661 if (result != 0)
662 return result;
663
664 do {
665 result = osc_page_gang_lookup(env, io, oscobj,
666 cl_index(obj, extent->start),
667 cl_index(obj, extent->end),
668 weigh_cb, (void *)&npages);
669 if (result == CLP_GANG_ABORT)
670 break;
671 if (result == CLP_GANG_RESCHED)
672 cond_resched();
673 } while (result != CLP_GANG_OKAY);
674 cl_io_fini(env, io);
675
676 return npages;
677}
678
679/**
680 * Get the weight of dlm lock for early cancellation.
681 */
682unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
911{ 683{
912 enum cl_lock_mode mode; 684 struct cl_env_nest nest;
685 struct lu_env *env;
686 struct osc_object *obj;
687 struct osc_lock *oscl;
688 unsigned long weight;
689 bool found = false;
690
691 might_sleep();
692 /*
693 * osc_ldlm_weigh_ast has a complex context since it might be called
694 * because of lock canceling, or from user's input. We have to make
695 * a new environment for it. Probably it is implementation safe to use
696 * the upper context because cl_lock_put don't modify environment
697 * variables. But just in case ..
698 */
699 env = cl_env_nested_get(&nest);
700 if (IS_ERR(env))
701 /* Mostly because lack of memory, do not eliminate this lock */
702 return 1;
913 703
914 mode = clock->cll_descr.cld_mode; 704 LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
915 if (mode == CLM_PHANTOM) 705 obj = dlmlock->l_ast_data;
706 if (obj) {
707 weight = 1;
708 goto out;
709 }
710
711 spin_lock(&obj->oo_ol_spin);
712 list_for_each_entry(oscl, &obj->oo_ol_list, ols_nextlock_oscobj) {
713 if (oscl->ols_dlmlock && oscl->ols_dlmlock != dlmlock)
714 continue;
715 found = true;
716 }
717 spin_unlock(&obj->oo_ol_spin);
718 if (found) {
916 /* 719 /*
917 * For now, enqueue all glimpse locks in read mode. In the 720 * If the lock is being used by an IO, definitely not cancel it.
918 * future, client might choose to enqueue LCK_PW lock for
919 * glimpse on a file opened for write.
920 */ 721 */
921 mode = CLM_READ; 722 weight = 1;
723 goto out;
724 }
725
726 weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent);
727
728out:
729 cl_env_nested_put(&nest, env);
730 return weight;
731}
922 732
733static void osc_lock_build_einfo(const struct lu_env *env,
734 const struct cl_lock *lock,
735 struct osc_object *osc,
736 struct ldlm_enqueue_info *einfo)
737{
923 einfo->ei_type = LDLM_EXTENT; 738 einfo->ei_type = LDLM_EXTENT;
924 einfo->ei_mode = osc_cl_lock2ldlm(mode); 739 einfo->ei_mode = osc_cl_lock2ldlm(lock->cll_descr.cld_mode);
925 einfo->ei_cb_bl = osc_ldlm_blocking_ast; 740 einfo->ei_cb_bl = osc_ldlm_blocking_ast;
926 einfo->ei_cb_cp = osc_ldlm_completion_ast; 741 einfo->ei_cb_cp = ldlm_completion_ast;
927 einfo->ei_cb_gl = osc_ldlm_glimpse_ast; 742 einfo->ei_cb_gl = osc_ldlm_glimpse_ast;
928 einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */ 743 einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */
929} 744}
930 745
931/** 746/**
@@ -981,113 +796,100 @@ static void osc_lock_to_lockless(const struct lu_env *env,
981 LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols))); 796 LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
982} 797}
983 798
984static int osc_lock_compatible(const struct osc_lock *qing, 799static bool osc_lock_compatible(const struct osc_lock *qing,
985 const struct osc_lock *qed) 800 const struct osc_lock *qed)
986{ 801{
987 enum cl_lock_mode qing_mode; 802 struct cl_lock_descr *qed_descr = &qed->ols_cl.cls_lock->cll_descr;
988 enum cl_lock_mode qed_mode; 803 struct cl_lock_descr *qing_descr = &qing->ols_cl.cls_lock->cll_descr;
989 804
990 qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode; 805 if (qed->ols_glimpse)
991 if (qed->ols_glimpse && 806 return true;
992 (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ)) 807
993 return 1; 808 if (qing_descr->cld_mode == CLM_READ && qed_descr->cld_mode == CLM_READ)
809 return true;
810
811 if (qed->ols_state < OLS_GRANTED)
812 return true;
813
814 if (qed_descr->cld_mode >= qing_descr->cld_mode &&
815 qed_descr->cld_start <= qing_descr->cld_start &&
816 qed_descr->cld_end >= qing_descr->cld_end)
817 return true;
994 818
995 qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode; 819 return false;
996 return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
997} 820}
998 821
999/** 822static void osc_lock_wake_waiters(const struct lu_env *env,
1000 * Cancel all conflicting locks and wait for them to be destroyed. 823 struct osc_object *osc,
1001 * 824 struct osc_lock *oscl)
1002 * This function is used for two purposes:
1003 *
1004 * - early cancel all conflicting locks before starting IO, and
1005 *
1006 * - guarantee that pages added to the page cache by lockless IO are never
1007 * covered by locks other than lockless IO lock, and, hence, are not
1008 * visible to other threads.
1009 */
1010static int osc_lock_enqueue_wait(const struct lu_env *env,
1011 const struct osc_lock *olck)
1012{ 825{
1013 struct cl_lock *lock = olck->ols_cl.cls_lock; 826 spin_lock(&osc->oo_ol_spin);
1014 struct cl_lock_descr *descr = &lock->cll_descr; 827 list_del_init(&oscl->ols_nextlock_oscobj);
1015 struct cl_object_header *hdr = cl_object_header(descr->cld_obj); 828 spin_unlock(&osc->oo_ol_spin);
1016 struct cl_lock *scan;
1017 struct cl_lock *conflict = NULL;
1018 int lockless = osc_lock_is_lockless(olck);
1019 int rc = 0;
1020 829
1021 LASSERT(cl_lock_is_mutexed(lock)); 830 spin_lock(&oscl->ols_lock);
831 while (!list_empty(&oscl->ols_waiting_list)) {
832 struct osc_lock *scan;
1022 833
1023 /* make it enqueue anyway for glimpse lock, because we actually 834 scan = list_entry(oscl->ols_waiting_list.next, struct osc_lock,
1024 * don't need to cancel any conflicting locks. 835 ols_wait_entry);
1025 */ 836 list_del_init(&scan->ols_wait_entry);
1026 if (olck->ols_glimpse)
1027 return 0;
1028 837
1029 spin_lock(&hdr->coh_lock_guard); 838 cl_sync_io_note(env, scan->ols_owner, 0);
1030 list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) { 839 }
1031 struct cl_lock_descr *cld = &scan->cll_descr; 840 spin_unlock(&oscl->ols_lock);
1032 const struct osc_lock *scan_ols; 841}
842
843static void osc_lock_enqueue_wait(const struct lu_env *env,
844 struct osc_object *obj,
845 struct osc_lock *oscl)
846{
847 struct osc_lock *tmp_oscl;
848 struct cl_lock_descr *need = &oscl->ols_cl.cls_lock->cll_descr;
849 struct cl_sync_io *waiter = &osc_env_info(env)->oti_anchor;
1033 850
1034 if (scan == lock) 851 spin_lock(&obj->oo_ol_spin);
852 list_add_tail(&oscl->ols_nextlock_oscobj, &obj->oo_ol_list);
853
854restart:
855 list_for_each_entry(tmp_oscl, &obj->oo_ol_list,
856 ols_nextlock_oscobj) {
857 struct cl_lock_descr *descr;
858
859 if (tmp_oscl == oscl)
1035 break; 860 break;
1036 861
1037 if (scan->cll_state < CLS_QUEUING || 862 descr = &tmp_oscl->ols_cl.cls_lock->cll_descr;
1038 scan->cll_state == CLS_FREEING || 863 if (descr->cld_start > need->cld_end ||
1039 cld->cld_start > descr->cld_end || 864 descr->cld_end < need->cld_start)
1040 cld->cld_end < descr->cld_start)
1041 continue; 865 continue;
1042 866
1043 /* overlapped and living locks. */ 867 /* We're not supposed to give up group lock */
868 if (descr->cld_mode == CLM_GROUP)
869 break;
1044 870
1045 /* We're not supposed to give up group lock. */ 871 if (!osc_lock_is_lockless(oscl) &&
1046 if (scan->cll_descr.cld_mode == CLM_GROUP) { 872 osc_lock_compatible(oscl, tmp_oscl))
1047 LASSERT(descr->cld_mode != CLM_GROUP ||
1048 descr->cld_gid != scan->cll_descr.cld_gid);
1049 continue; 873 continue;
1050 }
1051 874
1052 scan_ols = osc_lock_at(scan); 875 /* wait for conflicting lock to be canceled */
876 cl_sync_io_init(waiter, 1, cl_sync_io_end);
877 oscl->ols_owner = waiter;
1053 878
1054 /* We need to cancel the compatible locks if we're enqueuing 879 spin_lock(&tmp_oscl->ols_lock);
1055 * a lockless lock, for example: 880 /* add oscl into tmp's ols_waiting list */
1056 * imagine that client has PR lock on [0, 1000], and thread T0 881 list_add_tail(&oscl->ols_wait_entry,
1057 * is doing lockless IO in [500, 1500] region. Concurrent 882 &tmp_oscl->ols_waiting_list);
1058 * thread T1 can see lockless data in [500, 1000], which is 883 spin_unlock(&tmp_oscl->ols_lock);
1059 * wrong, because these data are possibly stale.
1060 */
1061 if (!lockless && osc_lock_compatible(olck, scan_ols))
1062 continue;
1063 884
1064 cl_lock_get_trust(scan); 885 spin_unlock(&obj->oo_ol_spin);
1065 conflict = scan; 886 (void)cl_sync_io_wait(env, waiter, 0);
1066 break;
1067 }
1068 spin_unlock(&hdr->coh_lock_guard);
1069 887
1070 if (conflict) { 888 spin_lock(&obj->oo_ol_spin);
1071 if (lock->cll_descr.cld_mode == CLM_GROUP) { 889 oscl->ols_owner = NULL;
1072 /* we want a group lock but a previous lock request 890 goto restart;
1073 * conflicts, we do not wait but return 0 so the
1074 * request is send to the server
1075 */
1076 CDEBUG(D_DLMTRACE, "group lock %p is conflicted with %p, no wait, send to server\n",
1077 lock, conflict);
1078 cl_lock_put(env, conflict);
1079 rc = 0;
1080 } else {
1081 CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n",
1082 lock, conflict);
1083 LASSERT(!lock->cll_conflict);
1084 lu_ref_add(&conflict->cll_reference, "cancel-wait",
1085 lock);
1086 lock->cll_conflict = conflict;
1087 rc = CLO_WAIT;
1088 }
1089 } 891 }
1090 return rc; 892 spin_unlock(&obj->oo_ol_spin);
1091} 893}
1092 894
1093/** 895/**
@@ -1106,188 +908,122 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
1106 */ 908 */
1107static int osc_lock_enqueue(const struct lu_env *env, 909static int osc_lock_enqueue(const struct lu_env *env,
1108 const struct cl_lock_slice *slice, 910 const struct cl_lock_slice *slice,
1109 struct cl_io *unused, __u32 enqflags) 911 struct cl_io *unused, struct cl_sync_io *anchor)
1110{ 912{
1111 struct osc_lock *ols = cl2osc_lock(slice); 913 struct osc_thread_info *info = osc_env_info(env);
1112 struct cl_lock *lock = ols->ols_cl.cls_lock; 914 struct osc_io *oio = osc_env_io(env);
915 struct osc_object *osc = cl2osc(slice->cls_obj);
916 struct osc_lock *oscl = cl2osc_lock(slice);
917 struct cl_lock *lock = slice->cls_lock;
918 struct ldlm_res_id *resname = &info->oti_resname;
919 ldlm_policy_data_t *policy = &info->oti_policy;
920 osc_enqueue_upcall_f upcall = osc_lock_upcall;
921 void *cookie = oscl;
922 bool async = false;
1113 int result; 923 int result;
1114 924
1115 LASSERT(cl_lock_is_mutexed(lock)); 925 LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
1116 LASSERTF(ols->ols_state == OLS_NEW, 926 "lock = %p, ols = %p\n", lock, oscl);
1117 "Impossible state: %d\n", ols->ols_state);
1118
1119 LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
1120 "lock = %p, ols = %p\n", lock, ols);
1121 927
1122 result = osc_lock_enqueue_wait(env, ols); 928 if (oscl->ols_state == OLS_GRANTED)
1123 if (result == 0) { 929 return 0;
1124 if (!osc_lock_is_lockless(ols)) {
1125 struct osc_object *obj = cl2osc(slice->cls_obj);
1126 struct osc_thread_info *info = osc_env_info(env);
1127 struct ldlm_res_id *resname = &info->oti_resname;
1128 ldlm_policy_data_t *policy = &info->oti_policy;
1129 struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
1130 930
1131 /* lock will be passed as upcall cookie, 931 if (oscl->ols_flags & LDLM_FL_TEST_LOCK)
1132 * hold ref to prevent to be released. 932 goto enqueue_base;
1133 */
1134 cl_lock_hold_add(env, lock, "upcall", lock);
1135 /* a user for lock also */
1136 cl_lock_user_add(env, lock);
1137 ols->ols_state = OLS_ENQUEUED;
1138 933
1139 /* 934 if (oscl->ols_glimpse) {
1140 * XXX: this is possible blocking point as 935 LASSERT(equi(oscl->ols_agl, !anchor));
1141 * ldlm_lock_match(LDLM_FL_LVB_READY) waits for 936 async = true;
1142 * LDLM_CP_CALLBACK. 937 goto enqueue_base;
1143 */
1144 ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
1145 osc_lock_build_policy(env, lock, policy);
1146 result = osc_enqueue_base(osc_export(obj), resname,
1147 &ols->ols_flags, policy,
1148 &ols->ols_lvb,
1149 obj->oo_oinfo->loi_kms_valid,
1150 osc_lock_upcall,
1151 ols, einfo, &ols->ols_handle,
1152 PTLRPCD_SET, 1, ols->ols_agl);
1153 if (result != 0) {
1154 cl_lock_user_del(env, lock);
1155 cl_lock_unhold(env, lock, "upcall", lock);
1156 if (unlikely(result == -ECANCELED)) {
1157 ols->ols_state = OLS_NEW;
1158 result = 0;
1159 }
1160 }
1161 } else {
1162 ols->ols_state = OLS_GRANTED;
1163 ols->ols_owner = osc_env_io(env);
1164 }
1165 } 938 }
1166 LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1167 return result;
1168}
1169 939
1170static int osc_lock_wait(const struct lu_env *env, 940 osc_lock_enqueue_wait(env, osc, oscl);
1171 const struct cl_lock_slice *slice) 941
1172{ 942 /* we can grant lockless lock right after all conflicting locks
1173 struct osc_lock *olck = cl2osc_lock(slice); 943 * are canceled.
1174 struct cl_lock *lock = olck->ols_cl.cls_lock; 944 */
1175 945 if (osc_lock_is_lockless(oscl)) {
1176 LINVRNT(osc_lock_invariant(olck)); 946 oscl->ols_state = OLS_GRANTED;
1177 947 oio->oi_lockless = 1;
1178 if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) { 948 return 0;
1179 if (olck->ols_flags & LDLM_FL_LVB_READY) {
1180 return 0;
1181 } else if (olck->ols_agl) {
1182 if (lock->cll_flags & CLF_FROM_UPCALL)
1183 /* It is from enqueue RPC reply upcall for
1184 * updating state. Do not re-enqueue.
1185 */
1186 return -ENAVAIL;
1187 olck->ols_state = OLS_NEW;
1188 } else {
1189 LASSERT(lock->cll_error);
1190 return lock->cll_error;
1191 }
1192 } 949 }
1193 950
1194 if (olck->ols_state == OLS_NEW) { 951enqueue_base:
1195 int rc; 952 oscl->ols_state = OLS_ENQUEUED;
1196 953 if (anchor) {
1197 LASSERT(olck->ols_agl); 954 atomic_inc(&anchor->csi_sync_nr);
1198 olck->ols_agl = 0; 955 oscl->ols_owner = anchor;
1199 olck->ols_flags &= ~LDLM_FL_BLOCK_NOWAIT;
1200 rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
1201 if (rc != 0)
1202 return rc;
1203 else
1204 return CLO_REENQUEUED;
1205 } 956 }
1206 957
1207 LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED && 958 /**
1208 lock->cll_error == 0, olck->ols_lock)); 959 * DLM lock's ast data must be osc_object;
960 * if glimpse or AGL lock, async of osc_enqueue_base() must be true,
961 * DLM's enqueue callback set to osc_lock_upcall() with cookie as
962 * osc_lock.
963 */
964 ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
965 osc_lock_build_einfo(env, lock, osc, &oscl->ols_einfo);
966 osc_lock_build_policy(env, lock, policy);
967 if (oscl->ols_agl) {
968 oscl->ols_einfo.ei_cbdata = NULL;
969 /* hold a reference for callback */
970 cl_object_get(osc2cl(osc));
971 upcall = osc_lock_upcall_agl;
972 cookie = osc;
973 }
974 result = osc_enqueue_base(osc_export(osc), resname, &oscl->ols_flags,
975 policy, &oscl->ols_lvb,
976 osc->oo_oinfo->loi_kms_valid,
977 upcall, cookie,
978 &oscl->ols_einfo, PTLRPCD_SET, async,
979 oscl->ols_agl);
980 if (result != 0) {
981 oscl->ols_state = OLS_CANCELLED;
982 osc_lock_wake_waiters(env, osc, oscl);
1209 983
1210 return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT; 984 /* hide error for AGL lock. */
985 if (oscl->ols_agl) {
986 cl_object_put(env, osc2cl(osc));
987 result = 0;
988 }
989 if (anchor)
990 cl_sync_io_note(env, anchor, result);
991 } else {
992 if (osc_lock_is_lockless(oscl)) {
993 oio->oi_lockless = 1;
994 } else if (!async) {
995 LASSERT(oscl->ols_state == OLS_GRANTED);
996 LASSERT(oscl->ols_hold);
997 LASSERT(oscl->ols_dlmlock);
998 }
999 }
1000 return result;
1211} 1001}
1212 1002
1213/** 1003/**
1214 * An implementation of cl_lock_operations::clo_use() method that pins cached 1004 * Breaks a link between osc_lock and dlm_lock.
1215 * lock.
1216 */ 1005 */
1217static int osc_lock_use(const struct lu_env *env, 1006static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
1218 const struct cl_lock_slice *slice)
1219{ 1007{
1220 struct osc_lock *olck = cl2osc_lock(slice); 1008 struct ldlm_lock *dlmlock;
1221 int rc;
1222
1223 LASSERT(!olck->ols_hold);
1224 1009
1225 /* 1010 dlmlock = olck->ols_dlmlock;
1226 * Atomically check for LDLM_FL_CBPENDING and addref a lock if this 1011 if (!dlmlock)
1227 * flag is not set. This protects us from a concurrent blocking ast. 1012 return;
1228 */
1229 rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1230 if (rc == 0) {
1231 olck->ols_hold = 1;
1232 olck->ols_state = OLS_GRANTED;
1233 } else {
1234 struct cl_lock *lock;
1235 1013
1236 /* 1014 if (olck->ols_hold) {
1237 * Lock is being cancelled somewhere within 1015 olck->ols_hold = 0;
1238 * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already 1016 osc_cancel_base(&olck->ols_handle, olck->ols_einfo.ei_mode);
1239 * set, but osc_ldlm_blocking_ast() hasn't yet acquired 1017 olck->ols_handle.cookie = 0ULL;
1240 * cl_lock mutex.
1241 */
1242 lock = slice->cls_lock;
1243 LASSERT(lock->cll_state == CLS_INTRANSIT);
1244 LASSERT(lock->cll_users > 0);
1245 /* set a flag for osc_dlm_blocking_ast0() to signal the
1246 * lock.
1247 */
1248 olck->ols_ast_wait = 1;
1249 rc = CLO_WAIT;
1250 } 1018 }
1251 return rc;
1252}
1253 1019
1254static int osc_lock_flush(struct osc_lock *ols, int discard) 1020 olck->ols_dlmlock = NULL;
1255{
1256 struct cl_lock *lock = ols->ols_cl.cls_lock;
1257 struct cl_env_nest nest;
1258 struct lu_env *env;
1259 int result = 0;
1260
1261 env = cl_env_nested_get(&nest);
1262 if (!IS_ERR(env)) {
1263 struct osc_object *obj = cl2osc(ols->ols_cl.cls_obj);
1264 struct cl_lock_descr *descr = &lock->cll_descr;
1265 int rc = 0;
1266
1267 if (descr->cld_mode >= CLM_WRITE) {
1268 result = osc_cache_writeback_range(env, obj,
1269 descr->cld_start,
1270 descr->cld_end,
1271 1, discard);
1272 LDLM_DEBUG(ols->ols_lock,
1273 "lock %p: %d pages were %s.\n", lock, result,
1274 discard ? "discarded" : "written");
1275 if (result > 0)
1276 result = 0;
1277 }
1278 1021
1279 rc = cl_lock_discard_pages(env, lock); 1022 /* release a reference taken in osc_lock_upcall(). */
1280 if (result == 0 && rc < 0) 1023 LASSERT(olck->ols_has_ref);
1281 result = rc; 1024 lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
1282 1025 LDLM_LOCK_RELEASE(dlmlock);
1283 cl_env_nested_put(&nest, env); 1026 olck->ols_has_ref = 0;
1284 } else
1285 result = PTR_ERR(env);
1286 if (result == 0) {
1287 ols->ols_flush = 1;
1288 LINVRNT(!osc_lock_has_pages(ols));
1289 }
1290 return result;
1291} 1027}
1292 1028
1293/** 1029/**
@@ -1307,96 +1043,16 @@ static int osc_lock_flush(struct osc_lock *ols, int discard)
1307static void osc_lock_cancel(const struct lu_env *env, 1043static void osc_lock_cancel(const struct lu_env *env,
1308 const struct cl_lock_slice *slice) 1044 const struct cl_lock_slice *slice)
1309{ 1045{
1310 struct cl_lock *lock = slice->cls_lock; 1046 struct osc_object *obj = cl2osc(slice->cls_obj);
1311 struct osc_lock *olck = cl2osc_lock(slice); 1047 struct osc_lock *oscl = cl2osc_lock(slice);
1312 struct ldlm_lock *dlmlock = olck->ols_lock;
1313 int result = 0;
1314 int discard;
1315
1316 LASSERT(cl_lock_is_mutexed(lock));
1317 LINVRNT(osc_lock_invariant(olck));
1318
1319 if (dlmlock) {
1320 int do_cancel;
1321
1322 discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
1323 if (olck->ols_state >= OLS_GRANTED)
1324 result = osc_lock_flush(olck, discard);
1325 osc_lock_unhold(olck);
1326
1327 lock_res_and_lock(dlmlock);
1328 /* Now that we're the only user of dlm read/write reference,
1329 * mostly the ->l_readers + ->l_writers should be zero.
1330 * However, there is a corner case.
1331 * See bug 18829 for details.
1332 */
1333 do_cancel = (dlmlock->l_readers == 0 &&
1334 dlmlock->l_writers == 0);
1335 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1336 unlock_res_and_lock(dlmlock);
1337 if (do_cancel)
1338 result = ldlm_cli_cancel(&olck->ols_handle, LCF_ASYNC);
1339 if (result < 0)
1340 CL_LOCK_DEBUG(D_ERROR, env, lock,
1341 "lock %p cancel failure with error(%d)\n",
1342 lock, result);
1343 }
1344 olck->ols_state = OLS_CANCELLED;
1345 olck->ols_flags &= ~LDLM_FL_LVB_READY;
1346 osc_lock_detach(env, olck);
1347}
1348
1349static int osc_lock_has_pages(struct osc_lock *olck)
1350{
1351 return 0;
1352}
1353
1354static void osc_lock_delete(const struct lu_env *env,
1355 const struct cl_lock_slice *slice)
1356{
1357 struct osc_lock *olck;
1358 1048
1359 olck = cl2osc_lock(slice); 1049 LINVRNT(osc_lock_invariant(oscl));
1360 if (olck->ols_glimpse) {
1361 LASSERT(!olck->ols_hold);
1362 LASSERT(!olck->ols_lock);
1363 return;
1364 }
1365 1050
1366 LINVRNT(osc_lock_invariant(olck)); 1051 osc_lock_detach(env, oscl);
1367 LINVRNT(!osc_lock_has_pages(olck)); 1052 oscl->ols_state = OLS_CANCELLED;
1053 oscl->ols_flags &= ~LDLM_FL_LVB_READY;
1368 1054
1369 osc_lock_unhold(olck); 1055 osc_lock_wake_waiters(env, obj, oscl);
1370 osc_lock_detach(env, olck);
1371}
1372
1373/**
1374 * Implements cl_lock_operations::clo_state() method for osc layer.
1375 *
1376 * Maintains osc_lock::ols_owner field.
1377 *
1378 * This assumes that lock always enters CLS_HELD (from some other state) in
1379 * the same IO context as one that requested the lock. This should not be a
1380 * problem, because context is by definition shared by all activity pertaining
1381 * to the same high-level IO.
1382 */
1383static void osc_lock_state(const struct lu_env *env,
1384 const struct cl_lock_slice *slice,
1385 enum cl_lock_state state)
1386{
1387 struct osc_lock *lock = cl2osc_lock(slice);
1388
1389 /*
1390 * XXX multiple io contexts can use the lock at the same time.
1391 */
1392 LINVRNT(osc_lock_invariant(lock));
1393 if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1394 struct osc_io *oio = osc_env_io(env);
1395
1396 LASSERT(!lock->ols_owner);
1397 lock->ols_owner = oio;
1398 } else if (state != CLS_HELD)
1399 lock->ols_owner = NULL;
1400} 1056}
1401 1057
1402static int osc_lock_print(const struct lu_env *env, void *cookie, 1058static int osc_lock_print(const struct lu_env *env, void *cookie,
@@ -1404,221 +1060,161 @@ static int osc_lock_print(const struct lu_env *env, void *cookie,
1404{ 1060{
1405 struct osc_lock *lock = cl2osc_lock(slice); 1061 struct osc_lock *lock = cl2osc_lock(slice);
1406 1062
1407 /*
1408 * XXX print ldlm lock and einfo properly.
1409 */
1410 (*p)(env, cookie, "%p %#16llx %#llx %d %p ", 1063 (*p)(env, cookie, "%p %#16llx %#llx %d %p ",
1411 lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie, 1064 lock->ols_dlmlock, lock->ols_flags, lock->ols_handle.cookie,
1412 lock->ols_state, lock->ols_owner); 1065 lock->ols_state, lock->ols_owner);
1413 osc_lvb_print(env, cookie, p, &lock->ols_lvb); 1066 osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1414 return 0; 1067 return 0;
1415} 1068}
1416 1069
1417static int osc_lock_fits_into(const struct lu_env *env,
1418 const struct cl_lock_slice *slice,
1419 const struct cl_lock_descr *need,
1420 const struct cl_io *io)
1421{
1422 struct osc_lock *ols = cl2osc_lock(slice);
1423
1424 if (need->cld_enq_flags & CEF_NEVER)
1425 return 0;
1426
1427 if (ols->ols_state >= OLS_CANCELLED)
1428 return 0;
1429
1430 if (need->cld_mode == CLM_PHANTOM) {
1431 if (ols->ols_agl)
1432 return !(ols->ols_state > OLS_RELEASED);
1433
1434 /*
1435 * Note: the QUEUED lock can't be matched here, otherwise
1436 * it might cause the deadlocks.
1437 * In read_process,
1438 * P1: enqueued read lock, create sublock1
1439 * P2: enqueued write lock, create sublock2(conflicted
1440 * with sublock1).
1441 * P1: Grant read lock.
1442 * P1: enqueued glimpse lock(with holding sublock1_read),
1443 * matched with sublock2, waiting sublock2 to be granted.
1444 * But sublock2 can not be granted, because P1
1445 * will not release sublock1. Bang!
1446 */
1447 if (ols->ols_state < OLS_GRANTED ||
1448 ols->ols_state > OLS_RELEASED)
1449 return 0;
1450 } else if (need->cld_enq_flags & CEF_MUST) {
1451 /*
1452 * If the lock hasn't ever enqueued, it can't be matched
1453 * because enqueue process brings in many information
1454 * which can be used to determine things such as lockless,
1455 * CEF_MUST, etc.
1456 */
1457 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1458 ols->ols_locklessable)
1459 return 0;
1460 }
1461 return 1;
1462}
1463
1464static const struct cl_lock_operations osc_lock_ops = { 1070static const struct cl_lock_operations osc_lock_ops = {
1465 .clo_fini = osc_lock_fini, 1071 .clo_fini = osc_lock_fini,
1466 .clo_enqueue = osc_lock_enqueue, 1072 .clo_enqueue = osc_lock_enqueue,
1467 .clo_wait = osc_lock_wait,
1468 .clo_unuse = osc_lock_unuse,
1469 .clo_use = osc_lock_use,
1470 .clo_delete = osc_lock_delete,
1471 .clo_state = osc_lock_state,
1472 .clo_cancel = osc_lock_cancel, 1073 .clo_cancel = osc_lock_cancel,
1473 .clo_weigh = osc_lock_weigh,
1474 .clo_print = osc_lock_print, 1074 .clo_print = osc_lock_print,
1475 .clo_fits_into = osc_lock_fits_into,
1476}; 1075};
1477 1076
1478static int osc_lock_lockless_unuse(const struct lu_env *env,
1479 const struct cl_lock_slice *slice)
1480{
1481 struct osc_lock *ols = cl2osc_lock(slice);
1482 struct cl_lock *lock = slice->cls_lock;
1483
1484 LASSERT(ols->ols_state == OLS_GRANTED);
1485 LINVRNT(osc_lock_invariant(ols));
1486
1487 cl_lock_cancel(env, lock);
1488 cl_lock_delete(env, lock);
1489 return 0;
1490}
1491
1492static void osc_lock_lockless_cancel(const struct lu_env *env, 1077static void osc_lock_lockless_cancel(const struct lu_env *env,
1493 const struct cl_lock_slice *slice) 1078 const struct cl_lock_slice *slice)
1494{ 1079{
1495 struct osc_lock *ols = cl2osc_lock(slice); 1080 struct osc_lock *ols = cl2osc_lock(slice);
1081 struct osc_object *osc = cl2osc(slice->cls_obj);
1082 struct cl_lock_descr *descr = &slice->cls_lock->cll_descr;
1496 int result; 1083 int result;
1497 1084
1498 result = osc_lock_flush(ols, 0); 1085 LASSERT(!ols->ols_dlmlock);
1086 result = osc_lock_flush(osc, descr->cld_start, descr->cld_end,
1087 descr->cld_mode, 0);
1499 if (result) 1088 if (result)
1500 CERROR("Pages for lockless lock %p were not purged(%d)\n", 1089 CERROR("Pages for lockless lock %p were not purged(%d)\n",
1501 ols, result); 1090 ols, result);
1502 ols->ols_state = OLS_CANCELLED;
1503}
1504
1505static int osc_lock_lockless_wait(const struct lu_env *env,
1506 const struct cl_lock_slice *slice)
1507{
1508 struct osc_lock *olck = cl2osc_lock(slice);
1509 struct cl_lock *lock = olck->ols_cl.cls_lock;
1510 1091
1511 LINVRNT(osc_lock_invariant(olck)); 1092 osc_lock_wake_waiters(env, osc, ols);
1512 LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1513
1514 return lock->cll_error;
1515} 1093}
1516 1094
1517static void osc_lock_lockless_state(const struct lu_env *env, 1095static const struct cl_lock_operations osc_lock_lockless_ops = {
1518 const struct cl_lock_slice *slice, 1096 .clo_fini = osc_lock_fini,
1519 enum cl_lock_state state) 1097 .clo_enqueue = osc_lock_enqueue,
1520{ 1098 .clo_cancel = osc_lock_lockless_cancel,
1521 struct osc_lock *lock = cl2osc_lock(slice); 1099 .clo_print = osc_lock_print
1100};
1522 1101
1523 LINVRNT(osc_lock_invariant(lock)); 1102static void osc_lock_set_writer(const struct lu_env *env,
1524 if (state == CLS_HELD) { 1103 const struct cl_io *io,
1525 struct osc_io *oio = osc_env_io(env); 1104 struct cl_object *obj, struct osc_lock *oscl)
1105{
1106 struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
1107 pgoff_t io_start;
1108 pgoff_t io_end;
1526 1109
1527 LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio)); 1110 if (!cl_object_same(io->ci_obj, obj))
1528 lock->ols_owner = oio; 1111 return;
1529 1112
1530 /* set the io to be lockless if this lock is for io's 1113 if (likely(io->ci_type == CIT_WRITE)) {
1531 * host object 1114 io_start = cl_index(obj, io->u.ci_rw.crw_pos);
1532 */ 1115 io_end = cl_index(obj, io->u.ci_rw.crw_pos +
1533 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj)) 1116 io->u.ci_rw.crw_count - 1);
1534 oio->oi_lockless = 1; 1117 if (cl_io_is_append(io)) {
1118 io_start = 0;
1119 io_end = CL_PAGE_EOF;
1120 }
1121 } else {
1122 LASSERT(cl_io_is_mkwrite(io));
1123 io_start = io_end = io->u.ci_fault.ft_index;
1535 } 1124 }
1536}
1537 1125
1538static int osc_lock_lockless_fits_into(const struct lu_env *env, 1126 if (descr->cld_mode >= CLM_WRITE &&
1539 const struct cl_lock_slice *slice, 1127 descr->cld_start <= io_start && descr->cld_end >= io_end) {
1540 const struct cl_lock_descr *need, 1128 struct osc_io *oio = osc_env_io(env);
1541 const struct cl_io *io)
1542{
1543 struct osc_lock *lock = cl2osc_lock(slice);
1544
1545 if (!(need->cld_enq_flags & CEF_NEVER))
1546 return 0;
1547 1129
1548 /* lockless lock should only be used by its owning io. b22147 */ 1130 /* There must be only one lock to match the write region */
1549 return (lock->ols_owner == osc_env_io(env)); 1131 LASSERT(!oio->oi_write_osclock);
1132 oio->oi_write_osclock = oscl;
1133 }
1550} 1134}
1551 1135
1552static const struct cl_lock_operations osc_lock_lockless_ops = {
1553 .clo_fini = osc_lock_fini,
1554 .clo_enqueue = osc_lock_enqueue,
1555 .clo_wait = osc_lock_lockless_wait,
1556 .clo_unuse = osc_lock_lockless_unuse,
1557 .clo_state = osc_lock_lockless_state,
1558 .clo_fits_into = osc_lock_lockless_fits_into,
1559 .clo_cancel = osc_lock_lockless_cancel,
1560 .clo_print = osc_lock_print
1561};
1562
1563int osc_lock_init(const struct lu_env *env, 1136int osc_lock_init(const struct lu_env *env,
1564 struct cl_object *obj, struct cl_lock *lock, 1137 struct cl_object *obj, struct cl_lock *lock,
1565 const struct cl_io *unused) 1138 const struct cl_io *io)
1566{ 1139{
1567 struct osc_lock *clk; 1140 struct osc_lock *oscl;
1568 int result; 1141 __u32 enqflags = lock->cll_descr.cld_enq_flags;
1569 1142
1570 clk = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS); 1143 oscl = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS);
1571 if (clk) { 1144 if (!oscl)
1572 __u32 enqflags = lock->cll_descr.cld_enq_flags; 1145 return -ENOMEM;
1146
1147 oscl->ols_state = OLS_NEW;
1148 spin_lock_init(&oscl->ols_lock);
1149 INIT_LIST_HEAD(&oscl->ols_waiting_list);
1150 INIT_LIST_HEAD(&oscl->ols_wait_entry);
1151 INIT_LIST_HEAD(&oscl->ols_nextlock_oscobj);
1152
1153 oscl->ols_flags = osc_enq2ldlm_flags(enqflags);
1154 oscl->ols_agl = !!(enqflags & CEF_AGL);
1155 if (oscl->ols_agl)
1156 oscl->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
1157 if (oscl->ols_flags & LDLM_FL_HAS_INTENT) {
1158 oscl->ols_flags |= LDLM_FL_BLOCK_GRANTED;
1159 oscl->ols_glimpse = 1;
1160 }
1573 1161
1574 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo); 1162 cl_lock_slice_add(lock, &oscl->ols_cl, obj, &osc_lock_ops);
1575 atomic_set(&clk->ols_pageref, 0);
1576 clk->ols_state = OLS_NEW;
1577 1163
1578 clk->ols_flags = osc_enq2ldlm_flags(enqflags); 1164 if (!(enqflags & CEF_MUST))
1579 clk->ols_agl = !!(enqflags & CEF_AGL); 1165 /* try to convert this lock to a lockless lock */
1580 if (clk->ols_agl) 1166 osc_lock_to_lockless(env, oscl, (enqflags & CEF_NEVER));
1581 clk->ols_flags |= LDLM_FL_BLOCK_NOWAIT; 1167 if (oscl->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
1582 if (clk->ols_flags & LDLM_FL_HAS_INTENT) 1168 oscl->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1583 clk->ols_glimpse = 1;
1584 1169
1585 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops); 1170 if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io))
1171 osc_lock_set_writer(env, io, obj, oscl);
1586 1172
1587 if (!(enqflags & CEF_MUST))
1588 /* try to convert this lock to a lockless lock */
1589 osc_lock_to_lockless(env, clk, (enqflags & CEF_NEVER));
1590 if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
1591 clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1592 1173
1593 LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx", 1174 LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
1594 lock, clk, clk->ols_flags); 1175 lock, oscl, oscl->ols_flags);
1595 1176
1596 result = 0; 1177 return 0;
1597 } else
1598 result = -ENOMEM;
1599 return result;
1600} 1178}
1601 1179
1602int osc_dlm_lock_pageref(struct ldlm_lock *dlm) 1180/**
1181 * Finds an existing lock covering given index and optionally different from a
1182 * given \a except lock.
1183 */
1184struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
1185 struct osc_object *obj, pgoff_t index,
1186 int pending, int canceling)
1603{ 1187{
1604 struct osc_lock *olock; 1188 struct osc_thread_info *info = osc_env_info(env);
1605 int rc = 0; 1189 struct ldlm_res_id *resname = &info->oti_resname;
1606 1190 ldlm_policy_data_t *policy = &info->oti_policy;
1607 spin_lock(&osc_ast_guard); 1191 struct lustre_handle lockh;
1608 olock = dlm->l_ast_data; 1192 struct ldlm_lock *lock = NULL;
1193 enum ldlm_mode mode;
1194 __u64 flags;
1195
1196 ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
1197 osc_index2policy(policy, osc2cl(obj), index, index);
1198 policy->l_extent.gid = LDLM_GID_ANY;
1199
1200 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
1201 if (pending)
1202 flags |= LDLM_FL_CBPENDING;
1609 /* 1203 /*
1610 * there's a very rare race with osc_page_addref_lock(), but that 1204 * It is fine to match any group lock since there could be only one
1611 * doesn't matter because in the worst case we don't cancel a lock 1205 * with a uniq gid and it conflicts with all other lock modes too
1612 * which we actually can, that's no harm.
1613 */ 1206 */
1614 if (olock && 1207again:
1615 atomic_add_return(_PAGEREF_MAGIC, 1208 mode = ldlm_lock_match(osc_export(obj)->exp_obd->obd_namespace,
1616 &olock->ols_pageref) != _PAGEREF_MAGIC) { 1209 flags, resname, LDLM_EXTENT, policy,
1617 atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref); 1210 LCK_PR | LCK_PW | LCK_GROUP, &lockh, canceling);
1618 rc = 1; 1211 if (mode != 0) {
1212 lock = ldlm_handle2lock(&lockh);
1213 /* RACE: the lock is cancelled so let's try again */
1214 if (unlikely(!lock))
1215 goto again;
1619 } 1216 }
1620 spin_unlock(&osc_ast_guard); 1217 return lock;
1621 return rc;
1622} 1218}
1623 1219
1624/** @} osc */ 1220/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index 9d474fcdd9a7..738ab10ab274 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -36,6 +36,7 @@
36 * Implementation of cl_object for OSC layer. 36 * Implementation of cl_object for OSC layer.
37 * 37 *
38 * Author: Nikita Danilov <nikita.danilov@sun.com> 38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
39 */ 40 */
40 41
41#define DEBUG_SUBSYSTEM S_OSC 42#define DEBUG_SUBSYSTEM S_OSC
@@ -94,6 +95,9 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
94 atomic_set(&osc->oo_nr_reads, 0); 95 atomic_set(&osc->oo_nr_reads, 0);
95 atomic_set(&osc->oo_nr_writes, 0); 96 atomic_set(&osc->oo_nr_writes, 0);
96 spin_lock_init(&osc->oo_lock); 97 spin_lock_init(&osc->oo_lock);
98 spin_lock_init(&osc->oo_tree_lock);
99 spin_lock_init(&osc->oo_ol_spin);
100 INIT_LIST_HEAD(&osc->oo_ol_list);
97 101
98 cl_object_page_init(lu2cl(obj), sizeof(struct osc_page)); 102 cl_object_page_init(lu2cl(obj), sizeof(struct osc_page));
99 103
@@ -120,6 +124,7 @@ static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
120 LASSERT(list_empty(&osc->oo_reading_exts)); 124 LASSERT(list_empty(&osc->oo_reading_exts));
121 LASSERT(atomic_read(&osc->oo_nr_reads) == 0); 125 LASSERT(atomic_read(&osc->oo_nr_reads) == 0);
122 LASSERT(atomic_read(&osc->oo_nr_writes) == 0); 126 LASSERT(atomic_read(&osc->oo_nr_writes) == 0);
127 LASSERT(list_empty(&osc->oo_ol_list));
123 128
124 lu_object_fini(obj); 129 lu_object_fini(obj);
125 kmem_cache_free(osc_object_kmem, osc); 130 kmem_cache_free(osc_object_kmem, osc);
@@ -192,6 +197,32 @@ static int osc_object_glimpse(const struct lu_env *env,
192 return 0; 197 return 0;
193} 198}
194 199
200static int osc_object_ast_clear(struct ldlm_lock *lock, void *data)
201{
202 LASSERT(lock->l_granted_mode == lock->l_req_mode);
203 if (lock->l_ast_data == data)
204 lock->l_ast_data = NULL;
205 return LDLM_ITER_CONTINUE;
206}
207
208static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
209{
210 struct osc_object *osc = cl2osc(obj);
211 struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;
212
213 LASSERTF(osc->oo_npages == 0,
214 DFID "still have %lu pages, obj: %p, osc: %p\n",
215 PFID(lu_object_fid(&obj->co_lu)), osc->oo_npages, obj, osc);
216
217 /* DLM locks don't hold a reference of osc_object so we have to
218 * clear it before the object is being destroyed.
219 */
220 ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
221 ldlm_resource_iterate(osc_export(osc)->exp_obd->obd_namespace, resname,
222 osc_object_ast_clear, osc);
223 return 0;
224}
225
195void osc_object_set_contended(struct osc_object *obj) 226void osc_object_set_contended(struct osc_object *obj)
196{ 227{
197 obj->oo_contention_time = cfs_time_current(); 228 obj->oo_contention_time = cfs_time_current();
@@ -236,12 +267,12 @@ static const struct cl_object_operations osc_ops = {
236 .coo_io_init = osc_io_init, 267 .coo_io_init = osc_io_init,
237 .coo_attr_get = osc_attr_get, 268 .coo_attr_get = osc_attr_get,
238 .coo_attr_set = osc_attr_set, 269 .coo_attr_set = osc_attr_set,
239 .coo_glimpse = osc_object_glimpse 270 .coo_glimpse = osc_object_glimpse,
271 .coo_prune = osc_object_prune
240}; 272};
241 273
242static const struct lu_object_operations osc_lu_obj_ops = { 274static const struct lu_object_operations osc_lu_obj_ops = {
243 .loo_object_init = osc_object_init, 275 .loo_object_init = osc_object_init,
244 .loo_object_delete = NULL,
245 .loo_object_release = NULL, 276 .loo_object_release = NULL,
246 .loo_object_free = osc_object_free, 277 .loo_object_free = osc_object_free,
247 .loo_object_print = osc_object_print, 278 .loo_object_print = osc_object_print,
@@ -261,8 +292,9 @@ struct lu_object *osc_object_alloc(const struct lu_env *env,
261 lu_object_init(obj, NULL, dev); 292 lu_object_init(obj, NULL, dev);
262 osc->oo_cl.co_ops = &osc_ops; 293 osc->oo_cl.co_ops = &osc_ops;
263 obj->lo_ops = &osc_lu_obj_ops; 294 obj->lo_ops = &osc_lu_obj_ops;
264 } else 295 } else {
265 obj = NULL; 296 obj = NULL;
297 }
266 return obj; 298 return obj;
267} 299}
268 300
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index ce9ddd515f64..c29c2eabe39c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -36,14 +36,15 @@
36 * Implementation of cl_page for OSC layer. 36 * Implementation of cl_page for OSC layer.
37 * 37 *
38 * Author: Nikita Danilov <nikita.danilov@sun.com> 38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
39 */ 40 */
40 41
41#define DEBUG_SUBSYSTEM S_OSC 42#define DEBUG_SUBSYSTEM S_OSC
42 43
43#include "osc_cl_internal.h" 44#include "osc_cl_internal.h"
44 45
45static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del); 46static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
46static void osc_lru_add(struct client_obd *cli, struct osc_page *opg); 47static void osc_lru_use(struct client_obd *cli, struct osc_page *opg);
47static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, 48static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
48 struct osc_page *opg); 49 struct osc_page *opg);
49 50
@@ -63,18 +64,9 @@ static int osc_page_protected(const struct lu_env *env,
63 * Page operations. 64 * Page operations.
64 * 65 *
65 */ 66 */
66static void osc_page_fini(const struct lu_env *env,
67 struct cl_page_slice *slice)
68{
69 struct osc_page *opg = cl2osc_page(slice);
70
71 CDEBUG(D_TRACE, "%p\n", opg);
72 LASSERT(!opg->ops_lock);
73}
74
75static void osc_page_transfer_get(struct osc_page *opg, const char *label) 67static void osc_page_transfer_get(struct osc_page *opg, const char *label)
76{ 68{
77 struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page); 69 struct cl_page *page = opg->ops_cl.cpl_page;
78 70
79 LASSERT(!opg->ops_transfer_pinned); 71 LASSERT(!opg->ops_transfer_pinned);
80 cl_page_get(page); 72 cl_page_get(page);
@@ -85,11 +77,11 @@ static void osc_page_transfer_get(struct osc_page *opg, const char *label)
85static void osc_page_transfer_put(const struct lu_env *env, 77static void osc_page_transfer_put(const struct lu_env *env,
86 struct osc_page *opg) 78 struct osc_page *opg)
87{ 79{
88 struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page); 80 struct cl_page *page = opg->ops_cl.cpl_page;
89 81
90 if (opg->ops_transfer_pinned) { 82 if (opg->ops_transfer_pinned) {
91 lu_ref_del(&page->cp_reference, "transfer", page);
92 opg->ops_transfer_pinned = 0; 83 opg->ops_transfer_pinned = 0;
84 lu_ref_del(&page->cp_reference, "transfer", page);
93 cl_page_put(env, page); 85 cl_page_put(env, page);
94 } 86 }
95} 87}
@@ -104,10 +96,7 @@ static void osc_page_transfer_add(const struct lu_env *env,
104{ 96{
105 struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj); 97 struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
106 98
107 /* ops_lru and ops_inflight share the same field, so take it from LRU 99 osc_lru_use(osc_cli(obj), opg);
108 * first and then use it as inflight.
109 */
110 osc_lru_del(osc_cli(obj), opg, false);
111 100
112 spin_lock(&obj->oo_seatbelt); 101 spin_lock(&obj->oo_seatbelt);
113 list_add(&opg->ops_inflight, &obj->oo_inflight[crt]); 102 list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
@@ -115,11 +104,9 @@ static void osc_page_transfer_add(const struct lu_env *env,
115 spin_unlock(&obj->oo_seatbelt); 104 spin_unlock(&obj->oo_seatbelt);
116} 105}
117 106
118static int osc_page_cache_add(const struct lu_env *env, 107int osc_page_cache_add(const struct lu_env *env,
119 const struct cl_page_slice *slice, 108 const struct cl_page_slice *slice, struct cl_io *io)
120 struct cl_io *io)
121{ 109{
122 struct osc_io *oio = osc_env_io(env);
123 struct osc_page *opg = cl2osc_page(slice); 110 struct osc_page *opg = cl2osc_page(slice);
124 int result; 111 int result;
125 112
@@ -132,17 +119,6 @@ static int osc_page_cache_add(const struct lu_env *env,
132 else 119 else
133 osc_page_transfer_add(env, opg, CRT_WRITE); 120 osc_page_transfer_add(env, opg, CRT_WRITE);
134 121
135 /* for sync write, kernel will wait for this page to be flushed before
136 * osc_io_end() is called, so release it earlier.
137 * for mkwrite(), it's known there is no further pages.
138 */
139 if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
140 if (oio->oi_active) {
141 osc_extent_release(env, oio->oi_active);
142 oio->oi_active = NULL;
143 }
144 }
145
146 return result; 122 return result;
147} 123}
148 124
@@ -154,102 +130,25 @@ void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
154 policy->l_extent.end = cl_offset(obj, end + 1) - 1; 130 policy->l_extent.end = cl_offset(obj, end + 1) - 1;
155} 131}
156 132
157static int osc_page_addref_lock(const struct lu_env *env,
158 struct osc_page *opg,
159 struct cl_lock *lock)
160{
161 struct osc_lock *olock;
162 int rc;
163
164 LASSERT(!opg->ops_lock);
165
166 olock = osc_lock_at(lock);
167 if (atomic_inc_return(&olock->ols_pageref) <= 0) {
168 atomic_dec(&olock->ols_pageref);
169 rc = -ENODATA;
170 } else {
171 cl_lock_get(lock);
172 opg->ops_lock = lock;
173 rc = 0;
174 }
175 return rc;
176}
177
178static void osc_page_putref_lock(const struct lu_env *env,
179 struct osc_page *opg)
180{
181 struct cl_lock *lock = opg->ops_lock;
182 struct osc_lock *olock;
183
184 LASSERT(lock);
185 olock = osc_lock_at(lock);
186
187 atomic_dec(&olock->ols_pageref);
188 opg->ops_lock = NULL;
189
190 cl_lock_put(env, lock);
191}
192
193static int osc_page_is_under_lock(const struct lu_env *env, 133static int osc_page_is_under_lock(const struct lu_env *env,
194 const struct cl_page_slice *slice, 134 const struct cl_page_slice *slice,
195 struct cl_io *unused) 135 struct cl_io *unused, pgoff_t *max_index)
196{ 136{
197 struct cl_lock *lock; 137 struct osc_page *opg = cl2osc_page(slice);
138 struct ldlm_lock *dlmlock;
198 int result = -ENODATA; 139 int result = -ENODATA;
199 140
200 lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page, 141 dlmlock = osc_dlmlock_at_pgoff(env, cl2osc(slice->cpl_obj),
201 NULL, 1, 0); 142 osc_index(opg), 1, 0);
202 if (lock) { 143 if (dlmlock) {
203 if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0) 144 *max_index = cl_index(slice->cpl_obj,
204 result = -EBUSY; 145 dlmlock->l_policy_data.l_extent.end);
205 cl_lock_put(env, lock); 146 LDLM_LOCK_PUT(dlmlock);
147 result = 0;
206 } 148 }
207 return result; 149 return result;
208} 150}
209 151
210static void osc_page_disown(const struct lu_env *env,
211 const struct cl_page_slice *slice,
212 struct cl_io *io)
213{
214 struct osc_page *opg = cl2osc_page(slice);
215
216 if (unlikely(opg->ops_lock))
217 osc_page_putref_lock(env, opg);
218}
219
220static void osc_page_completion_read(const struct lu_env *env,
221 const struct cl_page_slice *slice,
222 int ioret)
223{
224 struct osc_page *opg = cl2osc_page(slice);
225 struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
226
227 if (likely(opg->ops_lock))
228 osc_page_putref_lock(env, opg);
229 osc_lru_add(osc_cli(obj), opg);
230}
231
232static void osc_page_completion_write(const struct lu_env *env,
233 const struct cl_page_slice *slice,
234 int ioret)
235{
236 struct osc_page *opg = cl2osc_page(slice);
237 struct osc_object *obj = cl2osc(slice->cpl_obj);
238
239 osc_lru_add(osc_cli(obj), opg);
240}
241
242static int osc_page_fail(const struct lu_env *env,
243 const struct cl_page_slice *slice,
244 struct cl_io *unused)
245{
246 /*
247 * Cached read?
248 */
249 LBUG();
250 return 0;
251}
252
253static const char *osc_list(struct list_head *head) 152static const char *osc_list(struct list_head *head)
254{ 153{
255 return list_empty(head) ? "-" : "+"; 154 return list_empty(head) ? "-" : "+";
@@ -272,8 +171,8 @@ static int osc_page_print(const struct lu_env *env,
272 struct osc_object *obj = cl2osc(slice->cpl_obj); 171 struct osc_object *obj = cl2osc(slice->cpl_obj);
273 struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli; 172 struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
274 173
275 return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n", 174 return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
276 opg, 175 opg, osc_index(opg),
277 /* 1 */ 176 /* 1 */
278 oap->oap_magic, oap->oap_cmd, 177 oap->oap_magic, oap->oap_cmd,
279 oap->oap_interrupted, 178 oap->oap_interrupted,
@@ -321,7 +220,7 @@ static void osc_page_delete(const struct lu_env *env,
321 osc_page_transfer_put(env, opg); 220 osc_page_transfer_put(env, opg);
322 rc = osc_teardown_async_page(env, obj, opg); 221 rc = osc_teardown_async_page(env, obj, opg);
323 if (rc) { 222 if (rc) {
324 CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page), 223 CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page,
325 "Trying to teardown failed: %d\n", rc); 224 "Trying to teardown failed: %d\n", rc);
326 LASSERT(0); 225 LASSERT(0);
327 } 226 }
@@ -334,7 +233,19 @@ static void osc_page_delete(const struct lu_env *env,
334 } 233 }
335 spin_unlock(&obj->oo_seatbelt); 234 spin_unlock(&obj->oo_seatbelt);
336 235
337 osc_lru_del(osc_cli(obj), opg, true); 236 osc_lru_del(osc_cli(obj), opg);
237
238 if (slice->cpl_page->cp_type == CPT_CACHEABLE) {
239 void *value;
240
241 spin_lock(&obj->oo_tree_lock);
242 value = radix_tree_delete(&obj->oo_tree, osc_index(opg));
243 if (value)
244 --obj->oo_npages;
245 spin_unlock(&obj->oo_tree_lock);
246
247 LASSERT(ergo(value, value == opg));
248 }
338} 249}
339 250
340static void osc_page_clip(const struct lu_env *env, 251static void osc_page_clip(const struct lu_env *env,
@@ -382,28 +293,16 @@ static int osc_page_flush(const struct lu_env *env,
382} 293}
383 294
384static const struct cl_page_operations osc_page_ops = { 295static const struct cl_page_operations osc_page_ops = {
385 .cpo_fini = osc_page_fini,
386 .cpo_print = osc_page_print, 296 .cpo_print = osc_page_print,
387 .cpo_delete = osc_page_delete, 297 .cpo_delete = osc_page_delete,
388 .cpo_is_under_lock = osc_page_is_under_lock, 298 .cpo_is_under_lock = osc_page_is_under_lock,
389 .cpo_disown = osc_page_disown,
390 .io = {
391 [CRT_READ] = {
392 .cpo_cache_add = osc_page_fail,
393 .cpo_completion = osc_page_completion_read
394 },
395 [CRT_WRITE] = {
396 .cpo_cache_add = osc_page_cache_add,
397 .cpo_completion = osc_page_completion_write
398 }
399 },
400 .cpo_clip = osc_page_clip, 299 .cpo_clip = osc_page_clip,
401 .cpo_cancel = osc_page_cancel, 300 .cpo_cancel = osc_page_cancel,
402 .cpo_flush = osc_page_flush 301 .cpo_flush = osc_page_flush
403}; 302};
404 303
405int osc_page_init(const struct lu_env *env, struct cl_object *obj, 304int osc_page_init(const struct lu_env *env, struct cl_object *obj,
406 struct cl_page *page, struct page *vmpage) 305 struct cl_page *page, pgoff_t index)
407{ 306{
408 struct osc_object *osc = cl2osc(obj); 307 struct osc_object *osc = cl2osc(obj);
409 struct osc_page *opg = cl_object_page_slice(obj, page); 308 struct osc_page *opg = cl_object_page_slice(obj, page);
@@ -412,13 +311,14 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
412 opg->ops_from = 0; 311 opg->ops_from = 0;
413 opg->ops_to = PAGE_SIZE; 312 opg->ops_to = PAGE_SIZE;
414 313
415 result = osc_prep_async_page(osc, opg, vmpage, 314 result = osc_prep_async_page(osc, opg, page->cp_vmpage,
416 cl_offset(obj, page->cp_index)); 315 cl_offset(obj, index));
417 if (result == 0) { 316 if (result == 0) {
418 struct osc_io *oio = osc_env_io(env); 317 struct osc_io *oio = osc_env_io(env);
419 318
420 opg->ops_srvlock = osc_io_srvlock(oio); 319 opg->ops_srvlock = osc_io_srvlock(oio);
421 cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops); 320 cl_page_slice_add(page, &opg->ops_cl, obj, index,
321 &osc_page_ops);
422 } 322 }
423 /* 323 /*
424 * Cannot assert osc_page_protected() here as read-ahead 324 * Cannot assert osc_page_protected() here as read-ahead
@@ -431,12 +331,47 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
431 INIT_LIST_HEAD(&opg->ops_lru); 331 INIT_LIST_HEAD(&opg->ops_lru);
432 332
433 /* reserve an LRU space for this page */ 333 /* reserve an LRU space for this page */
434 if (page->cp_type == CPT_CACHEABLE && result == 0) 334 if (page->cp_type == CPT_CACHEABLE && result == 0) {
435 result = osc_lru_reserve(env, osc, opg); 335 result = osc_lru_reserve(env, osc, opg);
336 if (result == 0) {
337 spin_lock(&osc->oo_tree_lock);
338 result = radix_tree_insert(&osc->oo_tree, index, opg);
339 if (result == 0)
340 ++osc->oo_npages;
341 spin_unlock(&osc->oo_tree_lock);
342 LASSERT(result == 0);
343 }
344 }
436 345
437 return result; 346 return result;
438} 347}
439 348
349int osc_over_unstable_soft_limit(struct client_obd *cli)
350{
351 long obd_upages, obd_dpages, osc_upages;
352
353 /* Can't check cli->cl_unstable_count, therefore, no soft limit */
354 if (!cli)
355 return 0;
356
357 obd_upages = atomic_read(&obd_unstable_pages);
358 obd_dpages = atomic_read(&obd_dirty_pages);
359
360 osc_upages = atomic_read(&cli->cl_unstable_count);
361
362 /*
363 * obd_max_dirty_pages is the max number of (dirty + unstable)
364 * pages allowed at any given time. To simulate an unstable page
365 * only limit, we subtract the current number of dirty pages
366 * from this max. This difference is roughly the amount of pages
367 * currently available for unstable pages. Thus, the soft limit
368 * is half of that difference. Check osc_upages to ensure we don't
369 * set SOFT_SYNC for OSCs without any outstanding unstable pages.
370 */
371 return osc_upages &&
372 obd_upages >= (obd_max_dirty_pages - obd_dpages) / 2;
373}
374
440/** 375/**
441 * Helper function called by osc_io_submit() for every page in an immediate 376 * Helper function called by osc_io_submit() for every page in an immediate
442 * transfer (i.e., transferred synchronously). 377 * transfer (i.e., transferred synchronously).
@@ -460,6 +395,9 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
460 oap->oap_count = opg->ops_to - opg->ops_from; 395 oap->oap_count = opg->ops_to - opg->ops_from;
461 oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC; 396 oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC;
462 397
398 if (osc_over_unstable_soft_limit(oap->oap_cli))
399 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
400
463 if (!client_is_remote(osc_export(obj)) && 401 if (!client_is_remote(osc_export(obj)) &&
464 capable(CFS_CAP_SYS_RESOURCE)) { 402 capable(CFS_CAP_SYS_RESOURCE)) {
465 oap->oap_brw_flags |= OBD_BRW_NOQUOTA; 403 oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
@@ -483,13 +421,12 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
483 */ 421 */
484 422
485static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq); 423static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
486static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
487/* LRU pages are freed in batch mode. OSC should at least free this 424/* LRU pages are freed in batch mode. OSC should at least free this
488 * number of pages to avoid running out of LRU budget, and.. 425 * number of pages to avoid running out of LRU budget, and..
489 */ 426 */
490static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */ 427static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
491/* free this number at most otherwise it will take too long time to finish. */ 428/* free this number at most otherwise it will take too long time to finish. */
492static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */ 429static const int lru_shrink_max = 8 << (20 - PAGE_SHIFT); /* 8M */
493 430
494/* Check if we can free LRU slots from this OSC. If there exists LRU waiters, 431/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
495 * we should free slots aggressively. In this way, slots are freed in a steady 432 * we should free slots aggressively. In this way, slots are freed in a steady
@@ -500,65 +437,142 @@ static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
500static int osc_cache_too_much(struct client_obd *cli) 437static int osc_cache_too_much(struct client_obd *cli)
501{ 438{
502 struct cl_client_cache *cache = cli->cl_cache; 439 struct cl_client_cache *cache = cli->cl_cache;
503 int pages = atomic_read(&cli->cl_lru_in_list) >> 1; 440 int pages = atomic_read(&cli->cl_lru_in_list);
441 unsigned long budget;
504 442
505 if (atomic_read(&osc_lru_waiters) > 0 && 443 budget = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
506 atomic_read(cli->cl_lru_left) < lru_shrink_max)
507 /* drop lru pages aggressively */
508 return min(pages, lru_shrink_max);
509 444
510 /* if it's going to run out LRU slots, we should free some, but not 445 /* if it's going to run out LRU slots, we should free some, but not
511 * too much to maintain fairness among OSCs. 446 * too much to maintain fairness among OSCs.
512 */ 447 */
513 if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) { 448 if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
514 unsigned long tmp; 449 if (pages >= budget)
450 return lru_shrink_max;
451 else if (pages >= budget / 2)
452 return lru_shrink_min;
453 } else if (pages >= budget * 2) {
454 return lru_shrink_min;
455 }
456 return 0;
457}
515 458
516 tmp = cache->ccc_lru_max / atomic_read(&cache->ccc_users); 459int lru_queue_work(const struct lu_env *env, void *data)
517 if (pages > tmp) 460{
518 return min(pages, lru_shrink_max); 461 struct client_obd *cli = data;
519 462
520 return pages > lru_shrink_min ? lru_shrink_min : 0; 463 CDEBUG(D_CACHE, "Run LRU work for client obd %p.\n", cli);
521 } 464
465 if (osc_cache_too_much(cli))
466 osc_lru_shrink(env, cli, lru_shrink_max, true);
522 467
523 return 0; 468 return 0;
524} 469}
525 470
526/* Return how many pages are not discarded in @pvec. */ 471void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
527static int discard_pagevec(const struct lu_env *env, struct cl_io *io, 472{
528 struct cl_page **pvec, int max_index) 473 LIST_HEAD(lru);
474 struct osc_async_page *oap;
475 int npages = 0;
476
477 list_for_each_entry(oap, plist, oap_pending_item) {
478 struct osc_page *opg = oap2osc_page(oap);
479
480 if (!opg->ops_in_lru)
481 continue;
482
483 ++npages;
484 LASSERT(list_empty(&opg->ops_lru));
485 list_add(&opg->ops_lru, &lru);
486 }
487
488 if (npages > 0) {
489 spin_lock(&cli->cl_lru_list_lock);
490 list_splice_tail(&lru, &cli->cl_lru_list);
491 atomic_sub(npages, &cli->cl_lru_busy);
492 atomic_add(npages, &cli->cl_lru_in_list);
493 spin_unlock(&cli->cl_lru_list_lock);
494
495 /* XXX: May set force to be true for better performance */
496 if (osc_cache_too_much(cli))
497 (void)ptlrpcd_queue_work(cli->cl_lru_work);
498 }
499}
500
501static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
502{
503 LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
504 list_del_init(&opg->ops_lru);
505 atomic_dec(&cli->cl_lru_in_list);
506}
507
508/**
509 * Page is being destroyed. The page may be not in LRU list, if the transfer
510 * has never finished(error occurred).
511 */
512static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
513{
514 if (opg->ops_in_lru) {
515 spin_lock(&cli->cl_lru_list_lock);
516 if (!list_empty(&opg->ops_lru)) {
517 __osc_lru_del(cli, opg);
518 } else {
519 LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
520 atomic_dec(&cli->cl_lru_busy);
521 }
522 spin_unlock(&cli->cl_lru_list_lock);
523
524 atomic_inc(cli->cl_lru_left);
525 /* this is a great place to release more LRU pages if
526 * this osc occupies too many LRU pages and kernel is
527 * stealing one of them.
528 */
529 if (!memory_pressure_get())
530 (void)ptlrpcd_queue_work(cli->cl_lru_work);
531 wake_up(&osc_lru_waitq);
532 } else {
533 LASSERT(list_empty(&opg->ops_lru));
534 }
535}
536
537/**
538 * Delete page from LRUlist for redirty.
539 */
540static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
541{
542 /* If page is being transferred for the first time,
543 * ops_lru should be empty
544 */
545 if (opg->ops_in_lru && !list_empty(&opg->ops_lru)) {
546 spin_lock(&cli->cl_lru_list_lock);
547 __osc_lru_del(cli, opg);
548 spin_unlock(&cli->cl_lru_list_lock);
549 atomic_inc(&cli->cl_lru_busy);
550 }
551}
552
553static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
554 struct cl_page **pvec, int max_index)
529{ 555{
530 int count;
531 int i; 556 int i;
532 557
533 for (count = 0, i = 0; i < max_index; i++) { 558 for (i = 0; i < max_index; i++) {
534 struct cl_page *page = pvec[i]; 559 struct cl_page *page = pvec[i];
535 560
536 if (cl_page_own_try(env, io, page) == 0) { 561 LASSERT(cl_page_is_owned(page, io));
537 /* free LRU page only if nobody is using it. 562 cl_page_discard(env, io, page);
538 * This check is necessary to avoid freeing the pages 563 cl_page_disown(env, io, page);
539 * having already been removed from LRU and pinned
540 * for IO.
541 */
542 if (!cl_page_in_use(page)) {
543 cl_page_unmap(env, io, page);
544 cl_page_discard(env, io, page);
545 ++count;
546 }
547 cl_page_disown(env, io, page);
548 }
549 cl_page_put(env, page); 564 cl_page_put(env, page);
565
550 pvec[i] = NULL; 566 pvec[i] = NULL;
551 } 567 }
552 return max_index - count;
553} 568}
554 569
555/** 570/**
556 * Drop @target of pages from LRU at most. 571 * Drop @target of pages from LRU at most.
557 */ 572 */
558int osc_lru_shrink(struct client_obd *cli, int target) 573int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
574 int target, bool force)
559{ 575{
560 struct cl_env_nest nest;
561 struct lu_env *env;
562 struct cl_io *io; 576 struct cl_io *io;
563 struct cl_object *clobj = NULL; 577 struct cl_object *clobj = NULL;
564 struct cl_page **pvec; 578 struct cl_page **pvec;
@@ -573,23 +587,31 @@ int osc_lru_shrink(struct client_obd *cli, int target)
573 if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0) 587 if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
574 return 0; 588 return 0;
575 589
576 env = cl_env_nested_get(&nest); 590 if (!force) {
577 if (IS_ERR(env)) 591 if (atomic_read(&cli->cl_lru_shrinkers) > 0)
578 return PTR_ERR(env); 592 return -EBUSY;
579 593
580 pvec = osc_env_info(env)->oti_pvec; 594 if (atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
595 atomic_dec(&cli->cl_lru_shrinkers);
596 return -EBUSY;
597 }
598 } else {
599 atomic_inc(&cli->cl_lru_shrinkers);
600 }
601
602 pvec = (struct cl_page **)osc_env_info(env)->oti_pvec;
581 io = &osc_env_info(env)->oti_io; 603 io = &osc_env_info(env)->oti_io;
582 604
583 client_obd_list_lock(&cli->cl_lru_list_lock); 605 spin_lock(&cli->cl_lru_list_lock);
584 atomic_inc(&cli->cl_lru_shrinkers);
585 maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list)); 606 maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
586 list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) { 607 list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) {
587 struct cl_page *page; 608 struct cl_page *page;
609 bool will_free = false;
588 610
589 if (--maxscan < 0) 611 if (--maxscan < 0)
590 break; 612 break;
591 613
592 page = cl_page_top(opg->ops_cl.cpl_page); 614 page = opg->ops_cl.cpl_page;
593 if (cl_page_in_use_noref(page)) { 615 if (cl_page_in_use_noref(page)) {
594 list_move_tail(&opg->ops_lru, &cli->cl_lru_list); 616 list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
595 continue; 617 continue;
@@ -600,10 +622,10 @@ int osc_lru_shrink(struct client_obd *cli, int target)
600 struct cl_object *tmp = page->cp_obj; 622 struct cl_object *tmp = page->cp_obj;
601 623
602 cl_object_get(tmp); 624 cl_object_get(tmp);
603 client_obd_list_unlock(&cli->cl_lru_list_lock); 625 spin_unlock(&cli->cl_lru_list_lock);
604 626
605 if (clobj) { 627 if (clobj) {
606 count -= discard_pagevec(env, io, pvec, index); 628 discard_pagevec(env, io, pvec, index);
607 index = 0; 629 index = 0;
608 630
609 cl_io_fini(env, io); 631 cl_io_fini(env, io);
@@ -616,7 +638,7 @@ int osc_lru_shrink(struct client_obd *cli, int target)
616 io->ci_ignore_layout = 1; 638 io->ci_ignore_layout = 1;
617 rc = cl_io_init(env, io, CIT_MISC, clobj); 639 rc = cl_io_init(env, io, CIT_MISC, clobj);
618 640
619 client_obd_list_lock(&cli->cl_lru_list_lock); 641 spin_lock(&cli->cl_lru_list_lock);
620 642
621 if (rc != 0) 643 if (rc != 0)
622 break; 644 break;
@@ -625,98 +647,54 @@ int osc_lru_shrink(struct client_obd *cli, int target)
625 continue; 647 continue;
626 } 648 }
627 649
628 /* move this page to the end of list as it will be discarded 650 if (cl_page_own_try(env, io, page) == 0) {
629 * soon. The page will be finally removed from LRU list in 651 if (!cl_page_in_use_noref(page)) {
630 * osc_page_delete(). 652 /* remove it from lru list earlier to avoid
631 */ 653 * lock contention
632 list_move_tail(&opg->ops_lru, &cli->cl_lru_list); 654 */
655 __osc_lru_del(cli, opg);
656 opg->ops_in_lru = 0; /* will be discarded */
657
658 cl_page_get(page);
659 will_free = true;
660 } else {
661 cl_page_disown(env, io, page);
662 }
663 }
633 664
634 /* it's okay to grab a refcount here w/o holding lock because 665 if (!will_free) {
635 * it has to grab cl_lru_list_lock to delete the page. 666 list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
636 */ 667 continue;
637 cl_page_get(page); 668 }
638 pvec[index++] = page;
639 if (++count >= target)
640 break;
641 669
670 /* Don't discard and free the page with cl_lru_list held */
671 pvec[index++] = page;
642 if (unlikely(index == OTI_PVEC_SIZE)) { 672 if (unlikely(index == OTI_PVEC_SIZE)) {
643 client_obd_list_unlock(&cli->cl_lru_list_lock); 673 spin_unlock(&cli->cl_lru_list_lock);
644 count -= discard_pagevec(env, io, pvec, index); 674 discard_pagevec(env, io, pvec, index);
645 index = 0; 675 index = 0;
646 676
647 client_obd_list_lock(&cli->cl_lru_list_lock); 677 spin_lock(&cli->cl_lru_list_lock);
648 } 678 }
679
680 if (++count >= target)
681 break;
649 } 682 }
650 client_obd_list_unlock(&cli->cl_lru_list_lock); 683 spin_unlock(&cli->cl_lru_list_lock);
651 684
652 if (clobj) { 685 if (clobj) {
653 count -= discard_pagevec(env, io, pvec, index); 686 discard_pagevec(env, io, pvec, index);
654 687
655 cl_io_fini(env, io); 688 cl_io_fini(env, io);
656 cl_object_put(env, clobj); 689 cl_object_put(env, clobj);
657 } 690 }
658 cl_env_nested_put(&nest, env);
659 691
660 atomic_dec(&cli->cl_lru_shrinkers); 692 atomic_dec(&cli->cl_lru_shrinkers);
661 return count > 0 ? count : rc; 693 if (count > 0) {
662} 694 atomic_add(count, cli->cl_lru_left);
663
664static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
665{
666 bool wakeup = false;
667
668 if (!opg->ops_in_lru)
669 return;
670
671 atomic_dec(&cli->cl_lru_busy);
672 client_obd_list_lock(&cli->cl_lru_list_lock);
673 if (list_empty(&opg->ops_lru)) {
674 list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
675 atomic_inc_return(&cli->cl_lru_in_list);
676 wakeup = atomic_read(&osc_lru_waiters) > 0;
677 }
678 client_obd_list_unlock(&cli->cl_lru_list_lock);
679
680 if (wakeup) {
681 osc_lru_shrink(cli, osc_cache_too_much(cli));
682 wake_up_all(&osc_lru_waitq); 695 wake_up_all(&osc_lru_waitq);
683 } 696 }
684} 697 return count > 0 ? count : rc;
685
686/* delete page from LRUlist. The page can be deleted from LRUlist for two
687 * reasons: redirtied or deleted from page cache.
688 */
689static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
690{
691 if (opg->ops_in_lru) {
692 client_obd_list_lock(&cli->cl_lru_list_lock);
693 if (!list_empty(&opg->ops_lru)) {
694 LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
695 list_del_init(&opg->ops_lru);
696 atomic_dec(&cli->cl_lru_in_list);
697 if (!del)
698 atomic_inc(&cli->cl_lru_busy);
699 } else if (del) {
700 LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
701 atomic_dec(&cli->cl_lru_busy);
702 }
703 client_obd_list_unlock(&cli->cl_lru_list_lock);
704 if (del) {
705 atomic_inc(cli->cl_lru_left);
706 /* this is a great place to release more LRU pages if
707 * this osc occupies too many LRU pages and kernel is
708 * stealing one of them.
709 * cl_lru_shrinkers is to avoid recursive call in case
710 * we're already in the context of osc_lru_shrink().
711 */
712 if (atomic_read(&cli->cl_lru_shrinkers) == 0 &&
713 !memory_pressure_get())
714 osc_lru_shrink(cli, osc_cache_too_much(cli));
715 wake_up(&osc_lru_waitq);
716 }
717 } else {
718 LASSERT(list_empty(&opg->ops_lru));
719 }
720} 698}
721 699
722static inline int max_to_shrink(struct client_obd *cli) 700static inline int max_to_shrink(struct client_obd *cli)
@@ -724,19 +702,28 @@ static inline int max_to_shrink(struct client_obd *cli)
724 return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max); 702 return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
725} 703}
726 704
727static int osc_lru_reclaim(struct client_obd *cli) 705int osc_lru_reclaim(struct client_obd *cli)
728{ 706{
707 struct cl_env_nest nest;
708 struct lu_env *env;
729 struct cl_client_cache *cache = cli->cl_cache; 709 struct cl_client_cache *cache = cli->cl_cache;
730 int max_scans; 710 int max_scans;
731 int rc; 711 int rc = 0;
732 712
733 LASSERT(cache); 713 LASSERT(cache);
734 714
735 rc = osc_lru_shrink(cli, lru_shrink_min); 715 env = cl_env_nested_get(&nest);
716 if (IS_ERR(env))
717 return 0;
718
719 rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli), false);
736 if (rc != 0) { 720 if (rc != 0) {
721 if (rc == -EBUSY)
722 rc = 0;
723
737 CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n", 724 CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
738 cli->cl_import->imp_obd->obd_name, rc, cli); 725 cli->cl_import->imp_obd->obd_name, rc, cli);
739 return rc; 726 goto out;
740 } 727 }
741 728
742 CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n", 729 CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
@@ -764,10 +751,11 @@ static int osc_lru_reclaim(struct client_obd *cli)
764 atomic_read(&cli->cl_lru_busy)); 751 atomic_read(&cli->cl_lru_busy));
765 752
766 list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru); 753 list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
767 if (atomic_read(&cli->cl_lru_in_list) > 0) { 754 if (osc_cache_too_much(cli) > 0) {
768 spin_unlock(&cache->ccc_lru_lock); 755 spin_unlock(&cache->ccc_lru_lock);
769 756
770 rc = osc_lru_shrink(cli, max_to_shrink(cli)); 757 rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli),
758 true);
771 spin_lock(&cache->ccc_lru_lock); 759 spin_lock(&cache->ccc_lru_lock);
772 if (rc != 0) 760 if (rc != 0)
773 break; 761 break;
@@ -775,6 +763,8 @@ static int osc_lru_reclaim(struct client_obd *cli)
775 } 763 }
776 spin_unlock(&cache->ccc_lru_lock); 764 spin_unlock(&cache->ccc_lru_lock);
777 765
766out:
767 cl_env_nested_put(&nest, env);
778 CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n", 768 CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
779 cli->cl_import->imp_obd->obd_name, cli, rc); 769 cli->cl_import->imp_obd->obd_name, cli, rc);
780 return rc; 770 return rc;
@@ -784,16 +774,20 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
784 struct osc_page *opg) 774 struct osc_page *opg)
785{ 775{
786 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); 776 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
777 struct osc_io *oio = osc_env_io(env);
787 struct client_obd *cli = osc_cli(obj); 778 struct client_obd *cli = osc_cli(obj);
788 int rc = 0; 779 int rc = 0;
789 780
790 if (!cli->cl_cache) /* shall not be in LRU */ 781 if (!cli->cl_cache) /* shall not be in LRU */
791 return 0; 782 return 0;
792 783
784 if (oio->oi_lru_reserved > 0) {
785 --oio->oi_lru_reserved;
786 goto out;
787 }
788
793 LASSERT(atomic_read(cli->cl_lru_left) >= 0); 789 LASSERT(atomic_read(cli->cl_lru_left) >= 0);
794 while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) { 790 while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
795 int gen;
796
797 /* run out of LRU spaces, try to drop some by itself */ 791 /* run out of LRU spaces, try to drop some by itself */
798 rc = osc_lru_reclaim(cli); 792 rc = osc_lru_reclaim(cli);
799 if (rc < 0) 793 if (rc < 0)
@@ -803,23 +797,15 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
803 797
804 cond_resched(); 798 cond_resched();
805 799
806 /* slowest case, all of caching pages are busy, notifying
807 * other OSCs that we're lack of LRU slots.
808 */
809 atomic_inc(&osc_lru_waiters);
810
811 gen = atomic_read(&cli->cl_lru_in_list);
812 rc = l_wait_event(osc_lru_waitq, 800 rc = l_wait_event(osc_lru_waitq,
813 atomic_read(cli->cl_lru_left) > 0 || 801 atomic_read(cli->cl_lru_left) > 0,
814 (atomic_read(&cli->cl_lru_in_list) > 0 &&
815 gen != atomic_read(&cli->cl_lru_in_list)),
816 &lwi); 802 &lwi);
817 803
818 atomic_dec(&osc_lru_waiters);
819 if (rc < 0) 804 if (rc < 0)
820 break; 805 break;
821 } 806 }
822 807
808out:
823 if (rc >= 0) { 809 if (rc >= 0) {
824 atomic_inc(&cli->cl_lru_busy); 810 atomic_inc(&cli->cl_lru_busy);
825 opg->ops_in_lru = 1; 811 opg->ops_in_lru = 1;
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 30526ebcad04..47417f88fe3c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -92,12 +92,13 @@ struct osc_fsync_args {
92 92
93struct osc_enqueue_args { 93struct osc_enqueue_args {
94 struct obd_export *oa_exp; 94 struct obd_export *oa_exp;
95 enum ldlm_type oa_type;
96 enum ldlm_mode oa_mode;
95 __u64 *oa_flags; 97 __u64 *oa_flags;
96 obd_enqueue_update_f oa_upcall; 98 osc_enqueue_upcall_f oa_upcall;
97 void *oa_cookie; 99 void *oa_cookie;
98 struct ost_lvb *oa_lvb; 100 struct ost_lvb *oa_lvb;
99 struct lustre_handle *oa_lockh; 101 struct lustre_handle oa_lockh;
100 struct ldlm_enqueue_info *oa_ei;
101 unsigned int oa_agl:1; 102 unsigned int oa_agl:1;
102}; 103};
103 104
@@ -801,21 +802,24 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
801 LASSERT(!(oa->o_valid & bits)); 802 LASSERT(!(oa->o_valid & bits));
802 803
803 oa->o_valid |= bits; 804 oa->o_valid |= bits;
804 client_obd_list_lock(&cli->cl_loi_list_lock); 805 spin_lock(&cli->cl_loi_list_lock);
805 oa->o_dirty = cli->cl_dirty; 806 oa->o_dirty = cli->cl_dirty;
806 if (unlikely(cli->cl_dirty - cli->cl_dirty_transit > 807 if (unlikely(cli->cl_dirty - cli->cl_dirty_transit >
807 cli->cl_dirty_max)) { 808 cli->cl_dirty_max)) {
808 CERROR("dirty %lu - %lu > dirty_max %lu\n", 809 CERROR("dirty %lu - %lu > dirty_max %lu\n",
809 cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max); 810 cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
810 oa->o_undirty = 0; 811 oa->o_undirty = 0;
811 } else if (unlikely(atomic_read(&obd_dirty_pages) - 812 } else if (unlikely(atomic_read(&obd_unstable_pages) +
813 atomic_read(&obd_dirty_pages) -
812 atomic_read(&obd_dirty_transit_pages) > 814 atomic_read(&obd_dirty_transit_pages) >
813 (long)(obd_max_dirty_pages + 1))) { 815 (long)(obd_max_dirty_pages + 1))) {
814 /* The atomic_read() allowing the atomic_inc() are 816 /* The atomic_read() allowing the atomic_inc() are
815 * not covered by a lock thus they may safely race and trip 817 * not covered by a lock thus they may safely race and trip
816 * this CERROR() unless we add in a small fudge factor (+1). 818 * this CERROR() unless we add in a small fudge factor (+1).
817 */ 819 */
818 CERROR("dirty %d - %d > system dirty_max %d\n", 820 CERROR("%s: dirty %d + %d - %d > system dirty_max %d\n",
821 cli->cl_import->imp_obd->obd_name,
822 atomic_read(&obd_unstable_pages),
819 atomic_read(&obd_dirty_pages), 823 atomic_read(&obd_dirty_pages),
820 atomic_read(&obd_dirty_transit_pages), 824 atomic_read(&obd_dirty_transit_pages),
821 obd_max_dirty_pages); 825 obd_max_dirty_pages);
@@ -833,10 +837,9 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
833 oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant; 837 oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
834 oa->o_dropped = cli->cl_lost_grant; 838 oa->o_dropped = cli->cl_lost_grant;
835 cli->cl_lost_grant = 0; 839 cli->cl_lost_grant = 0;
836 client_obd_list_unlock(&cli->cl_loi_list_lock); 840 spin_unlock(&cli->cl_loi_list_lock);
837 CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n", 841 CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n",
838 oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant); 842 oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
839
840} 843}
841 844
842void osc_update_next_shrink(struct client_obd *cli) 845void osc_update_next_shrink(struct client_obd *cli)
@@ -849,9 +852,9 @@ void osc_update_next_shrink(struct client_obd *cli)
849 852
850static void __osc_update_grant(struct client_obd *cli, u64 grant) 853static void __osc_update_grant(struct client_obd *cli, u64 grant)
851{ 854{
852 client_obd_list_lock(&cli->cl_loi_list_lock); 855 spin_lock(&cli->cl_loi_list_lock);
853 cli->cl_avail_grant += grant; 856 cli->cl_avail_grant += grant;
854 client_obd_list_unlock(&cli->cl_loi_list_lock); 857 spin_unlock(&cli->cl_loi_list_lock);
855} 858}
856 859
857static void osc_update_grant(struct client_obd *cli, struct ost_body *body) 860static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
@@ -889,10 +892,10 @@ out:
889 892
890static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa) 893static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
891{ 894{
892 client_obd_list_lock(&cli->cl_loi_list_lock); 895 spin_lock(&cli->cl_loi_list_lock);
893 oa->o_grant = cli->cl_avail_grant / 4; 896 oa->o_grant = cli->cl_avail_grant / 4;
894 cli->cl_avail_grant -= oa->o_grant; 897 cli->cl_avail_grant -= oa->o_grant;
895 client_obd_list_unlock(&cli->cl_loi_list_lock); 898 spin_unlock(&cli->cl_loi_list_lock);
896 if (!(oa->o_valid & OBD_MD_FLFLAGS)) { 899 if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
897 oa->o_valid |= OBD_MD_FLFLAGS; 900 oa->o_valid |= OBD_MD_FLFLAGS;
898 oa->o_flags = 0; 901 oa->o_flags = 0;
@@ -911,10 +914,10 @@ static int osc_shrink_grant(struct client_obd *cli)
911 __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) * 914 __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
912 (cli->cl_max_pages_per_rpc << PAGE_SHIFT); 915 (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
913 916
914 client_obd_list_lock(&cli->cl_loi_list_lock); 917 spin_lock(&cli->cl_loi_list_lock);
915 if (cli->cl_avail_grant <= target_bytes) 918 if (cli->cl_avail_grant <= target_bytes)
916 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT; 919 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
917 client_obd_list_unlock(&cli->cl_loi_list_lock); 920 spin_unlock(&cli->cl_loi_list_lock);
918 921
919 return osc_shrink_grant_to_target(cli, target_bytes); 922 return osc_shrink_grant_to_target(cli, target_bytes);
920} 923}
@@ -924,7 +927,7 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
924 int rc = 0; 927 int rc = 0;
925 struct ost_body *body; 928 struct ost_body *body;
926 929
927 client_obd_list_lock(&cli->cl_loi_list_lock); 930 spin_lock(&cli->cl_loi_list_lock);
928 /* Don't shrink if we are already above or below the desired limit 931 /* Don't shrink if we are already above or below the desired limit
929 * We don't want to shrink below a single RPC, as that will negatively 932 * We don't want to shrink below a single RPC, as that will negatively
930 * impact block allocation and long-term performance. 933 * impact block allocation and long-term performance.
@@ -933,10 +936,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
933 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT; 936 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
934 937
935 if (target_bytes >= cli->cl_avail_grant) { 938 if (target_bytes >= cli->cl_avail_grant) {
936 client_obd_list_unlock(&cli->cl_loi_list_lock); 939 spin_unlock(&cli->cl_loi_list_lock);
937 return 0; 940 return 0;
938 } 941 }
939 client_obd_list_unlock(&cli->cl_loi_list_lock); 942 spin_unlock(&cli->cl_loi_list_lock);
940 943
941 body = kzalloc(sizeof(*body), GFP_NOFS); 944 body = kzalloc(sizeof(*body), GFP_NOFS);
942 if (!body) 945 if (!body)
@@ -944,10 +947,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
944 947
945 osc_announce_cached(cli, &body->oa, 0); 948 osc_announce_cached(cli, &body->oa, 0);
946 949
947 client_obd_list_lock(&cli->cl_loi_list_lock); 950 spin_lock(&cli->cl_loi_list_lock);
948 body->oa.o_grant = cli->cl_avail_grant - target_bytes; 951 body->oa.o_grant = cli->cl_avail_grant - target_bytes;
949 cli->cl_avail_grant = target_bytes; 952 cli->cl_avail_grant = target_bytes;
950 client_obd_list_unlock(&cli->cl_loi_list_lock); 953 spin_unlock(&cli->cl_loi_list_lock);
951 if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) { 954 if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
952 body->oa.o_valid |= OBD_MD_FLFLAGS; 955 body->oa.o_valid |= OBD_MD_FLFLAGS;
953 body->oa.o_flags = 0; 956 body->oa.o_flags = 0;
@@ -1035,7 +1038,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
1035 * race is tolerable here: if we're evicted, but imp_state already 1038 * race is tolerable here: if we're evicted, but imp_state already
1036 * left EVICTED state, then cl_dirty must be 0 already. 1039 * left EVICTED state, then cl_dirty must be 0 already.
1037 */ 1040 */
1038 client_obd_list_lock(&cli->cl_loi_list_lock); 1041 spin_lock(&cli->cl_loi_list_lock);
1039 if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED) 1042 if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
1040 cli->cl_avail_grant = ocd->ocd_grant; 1043 cli->cl_avail_grant = ocd->ocd_grant;
1041 else 1044 else
@@ -1053,7 +1056,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
1053 1056
1054 /* determine the appropriate chunk size used by osc_extent. */ 1057 /* determine the appropriate chunk size used by osc_extent. */
1055 cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize); 1058 cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
1056 client_obd_list_unlock(&cli->cl_loi_list_lock); 1059 spin_unlock(&cli->cl_loi_list_lock);
1057 1060
1058 CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n", 1061 CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
1059 cli->cl_import->imp_obd->obd_name, 1062 cli->cl_import->imp_obd->obd_name,
@@ -1082,7 +1085,7 @@ static void handle_short_read(int nob_read, u32 page_count,
1082 if (pga[i]->count > nob_read) { 1085 if (pga[i]->count > nob_read) {
1083 /* EOF inside this page */ 1086 /* EOF inside this page */
1084 ptr = kmap(pga[i]->pg) + 1087 ptr = kmap(pga[i]->pg) +
1085 (pga[i]->off & ~CFS_PAGE_MASK); 1088 (pga[i]->off & ~PAGE_MASK);
1086 memset(ptr + nob_read, 0, pga[i]->count - nob_read); 1089 memset(ptr + nob_read, 0, pga[i]->count - nob_read);
1087 kunmap(pga[i]->pg); 1090 kunmap(pga[i]->pg);
1088 page_count--; 1091 page_count--;
@@ -1097,7 +1100,7 @@ static void handle_short_read(int nob_read, u32 page_count,
1097 1100
1098 /* zero remaining pages */ 1101 /* zero remaining pages */
1099 while (page_count-- > 0) { 1102 while (page_count-- > 0) {
1100 ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK); 1103 ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
1101 memset(ptr, 0, pga[i]->count); 1104 memset(ptr, 0, pga[i]->count);
1102 kunmap(pga[i]->pg); 1105 kunmap(pga[i]->pg);
1103 i++; 1106 i++;
@@ -1144,7 +1147,8 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
1144{ 1147{
1145 if (p1->flag != p2->flag) { 1148 if (p1->flag != p2->flag) {
1146 unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE | 1149 unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
1147 OBD_BRW_SYNC | OBD_BRW_ASYNC|OBD_BRW_NOQUOTA); 1150 OBD_BRW_SYNC | OBD_BRW_ASYNC |
1151 OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
1148 1152
1149 /* warn if we try to combine flags that we don't know to be 1153 /* warn if we try to combine flags that we don't know to be
1150 * safe to combine 1154 * safe to combine
@@ -1188,32 +1192,29 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
1188 if (i == 0 && opc == OST_READ && 1192 if (i == 0 && opc == OST_READ &&
1189 OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) { 1193 OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
1190 unsigned char *ptr = kmap(pga[i]->pg); 1194 unsigned char *ptr = kmap(pga[i]->pg);
1191 int off = pga[i]->off & ~CFS_PAGE_MASK; 1195 int off = pga[i]->off & ~PAGE_MASK;
1192 1196
1193 memcpy(ptr + off, "bad1", min(4, nob)); 1197 memcpy(ptr + off, "bad1", min(4, nob));
1194 kunmap(pga[i]->pg); 1198 kunmap(pga[i]->pg);
1195 } 1199 }
1196 cfs_crypto_hash_update_page(hdesc, pga[i]->pg, 1200 cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
1197 pga[i]->off & ~CFS_PAGE_MASK, 1201 pga[i]->off & ~PAGE_MASK,
1198 count); 1202 count);
1199 CDEBUG(D_PAGE, 1203 CDEBUG(D_PAGE,
1200 "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n", 1204 "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n",
1201 pga[i]->pg, pga[i]->pg->mapping, pga[i]->pg->index, 1205 pga[i]->pg, pga[i]->pg->mapping, pga[i]->pg->index,
1202 (long)pga[i]->pg->flags, page_count(pga[i]->pg), 1206 (long)pga[i]->pg->flags, page_count(pga[i]->pg),
1203 page_private(pga[i]->pg), 1207 page_private(pga[i]->pg),
1204 (int)(pga[i]->off & ~CFS_PAGE_MASK)); 1208 (int)(pga[i]->off & ~PAGE_MASK));
1205 1209
1206 nob -= pga[i]->count; 1210 nob -= pga[i]->count;
1207 pg_count--; 1211 pg_count--;
1208 i++; 1212 i++;
1209 } 1213 }
1210 1214
1211 bufsize = 4; 1215 bufsize = sizeof(cksum);
1212 err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize); 1216 err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
1213 1217
1214 if (err)
1215 cfs_crypto_hash_final(hdesc, NULL, NULL);
1216
1217 /* For sending we only compute the wrong checksum instead 1218 /* For sending we only compute the wrong checksum instead
1218 * of corrupting the data so it is still correct on a redo 1219 * of corrupting the data so it is still correct on a redo
1219 */ 1220 */
@@ -1312,7 +1313,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
1312 pg_prev = pga[0]; 1313 pg_prev = pga[0];
1313 for (requested_nob = i = 0; i < page_count; i++, niobuf++) { 1314 for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1314 struct brw_page *pg = pga[i]; 1315 struct brw_page *pg = pga[i];
1315 int poff = pg->off & ~CFS_PAGE_MASK; 1316 int poff = pg->off & ~PAGE_MASK;
1316 1317
1317 LASSERT(pg->count > 0); 1318 LASSERT(pg->count > 0);
1318 /* make sure there is no gap in the middle of page array */ 1319 /* make sure there is no gap in the middle of page array */
@@ -1658,6 +1659,7 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
1658 aa->aa_resends++; 1659 aa->aa_resends++;
1659 new_req->rq_interpret_reply = request->rq_interpret_reply; 1660 new_req->rq_interpret_reply = request->rq_interpret_reply;
1660 new_req->rq_async_args = request->rq_async_args; 1661 new_req->rq_async_args = request->rq_async_args;
1662 new_req->rq_commit_cb = request->rq_commit_cb;
1661 /* cap resend delay to the current request timeout, this is similar to 1663 /* cap resend delay to the current request timeout, this is similar to
1662 * what ptlrpc does (see after_reply()) 1664 * what ptlrpc does (see after_reply())
1663 */ 1665 */
@@ -1737,7 +1739,6 @@ static int brw_interpret(const struct lu_env *env,
1737 struct osc_brw_async_args *aa = data; 1739 struct osc_brw_async_args *aa = data;
1738 struct osc_extent *ext; 1740 struct osc_extent *ext;
1739 struct osc_extent *tmp; 1741 struct osc_extent *tmp;
1740 struct cl_object *obj = NULL;
1741 struct client_obd *cli = aa->aa_cli; 1742 struct client_obd *cli = aa->aa_cli;
1742 1743
1743 rc = osc_brw_fini_request(req, rc); 1744 rc = osc_brw_fini_request(req, rc);
@@ -1766,24 +1767,17 @@ static int brw_interpret(const struct lu_env *env,
1766 rc = -EIO; 1767 rc = -EIO;
1767 } 1768 }
1768 1769
1769 list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) { 1770 if (rc == 0) {
1770 if (!obj && rc == 0) {
1771 obj = osc2cl(ext->oe_obj);
1772 cl_object_get(obj);
1773 }
1774
1775 list_del_init(&ext->oe_link);
1776 osc_extent_finish(env, ext, 1, rc);
1777 }
1778 LASSERT(list_empty(&aa->aa_exts));
1779 LASSERT(list_empty(&aa->aa_oaps));
1780
1781 if (obj) {
1782 struct obdo *oa = aa->aa_oa; 1771 struct obdo *oa = aa->aa_oa;
1783 struct cl_attr *attr = &osc_env_info(env)->oti_attr; 1772 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
1784 unsigned long valid = 0; 1773 unsigned long valid = 0;
1774 struct cl_object *obj;
1775 struct osc_async_page *last;
1776
1777 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
1778 obj = osc2cl(last->oap_obj);
1785 1779
1786 LASSERT(rc == 0); 1780 cl_object_attr_lock(obj);
1787 if (oa->o_valid & OBD_MD_FLBLOCKS) { 1781 if (oa->o_valid & OBD_MD_FLBLOCKS) {
1788 attr->cat_blocks = oa->o_blocks; 1782 attr->cat_blocks = oa->o_blocks;
1789 valid |= CAT_BLOCKS; 1783 valid |= CAT_BLOCKS;
@@ -1800,21 +1794,45 @@ static int brw_interpret(const struct lu_env *env,
1800 attr->cat_ctime = oa->o_ctime; 1794 attr->cat_ctime = oa->o_ctime;
1801 valid |= CAT_CTIME; 1795 valid |= CAT_CTIME;
1802 } 1796 }
1803 if (valid != 0) { 1797
1804 cl_object_attr_lock(obj); 1798 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1805 cl_object_attr_set(env, obj, attr, valid); 1799 struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
1806 cl_object_attr_unlock(obj); 1800 loff_t last_off = last->oap_count + last->oap_obj_off;
1801
1802 /* Change file size if this is an out of quota or
1803 * direct IO write and it extends the file size
1804 */
1805 if (loi->loi_lvb.lvb_size < last_off) {
1806 attr->cat_size = last_off;
1807 valid |= CAT_SIZE;
1808 }
1809 /* Extend KMS if it's not a lockless write */
1810 if (loi->loi_kms < last_off &&
1811 oap2osc_page(last)->ops_srvlock == 0) {
1812 attr->cat_kms = last_off;
1813 valid |= CAT_KMS;
1814 }
1807 } 1815 }
1808 cl_object_put(env, obj); 1816
1817 if (valid != 0)
1818 cl_object_attr_set(env, obj, attr, valid);
1819 cl_object_attr_unlock(obj);
1809 } 1820 }
1810 kmem_cache_free(obdo_cachep, aa->aa_oa); 1821 kmem_cache_free(obdo_cachep, aa->aa_oa);
1811 1822
1823 list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
1824 list_del_init(&ext->oe_link);
1825 osc_extent_finish(env, ext, 1, rc);
1826 }
1827 LASSERT(list_empty(&aa->aa_exts));
1828 LASSERT(list_empty(&aa->aa_oaps));
1829
1812 cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc : 1830 cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
1813 req->rq_bulk->bd_nob_transferred); 1831 req->rq_bulk->bd_nob_transferred);
1814 osc_release_ppga(aa->aa_ppga, aa->aa_page_count); 1832 osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
1815 ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred); 1833 ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
1816 1834
1817 client_obd_list_lock(&cli->cl_loi_list_lock); 1835 spin_lock(&cli->cl_loi_list_lock);
1818 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters 1836 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
1819 * is called so we know whether to go to sync BRWs or wait for more 1837 * is called so we know whether to go to sync BRWs or wait for more
1820 * RPCs to complete 1838 * RPCs to complete
@@ -1824,12 +1842,31 @@ static int brw_interpret(const struct lu_env *env,
1824 else 1842 else
1825 cli->cl_r_in_flight--; 1843 cli->cl_r_in_flight--;
1826 osc_wake_cache_waiters(cli); 1844 osc_wake_cache_waiters(cli);
1827 client_obd_list_unlock(&cli->cl_loi_list_lock); 1845 spin_unlock(&cli->cl_loi_list_lock);
1828 1846
1829 osc_io_unplug(env, cli, NULL); 1847 osc_io_unplug(env, cli, NULL);
1830 return rc; 1848 return rc;
1831} 1849}
1832 1850
1851static void brw_commit(struct ptlrpc_request *req)
1852{
1853 spin_lock(&req->rq_lock);
1854 /*
1855 * If osc_inc_unstable_pages (via osc_extent_finish) races with
1856 * this called via the rq_commit_cb, I need to ensure
1857 * osc_dec_unstable_pages is still called. Otherwise unstable
1858 * pages may be leaked.
1859 */
1860 if (req->rq_unstable) {
1861 spin_unlock(&req->rq_lock);
1862 osc_dec_unstable_pages(req);
1863 spin_lock(&req->rq_lock);
1864 } else {
1865 req->rq_committed = 1;
1866 }
1867 spin_unlock(&req->rq_lock);
1868}
1869
1833/** 1870/**
1834 * Build an RPC by the list of extent @ext_list. The caller must ensure 1871 * Build an RPC by the list of extent @ext_list. The caller must ensure
1835 * that the total pages in this list are NOT over max pages per RPC. 1872 * that the total pages in this list are NOT over max pages per RPC.
@@ -1920,7 +1957,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1920 pga[i] = &oap->oap_brw_page; 1957 pga[i] = &oap->oap_brw_page;
1921 pga[i]->off = oap->oap_obj_off + oap->oap_page_off; 1958 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
1922 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n", 1959 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
1923 pga[i]->pg, page_index(oap->oap_page), oap, 1960 pga[i]->pg, oap->oap_page->index, oap,
1924 pga[i]->flag); 1961 pga[i]->flag);
1925 i++; 1962 i++;
1926 cl_req_page_add(env, clerq, page); 1963 cl_req_page_add(env, clerq, page);
@@ -1949,6 +1986,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1949 goto out; 1986 goto out;
1950 } 1987 }
1951 1988
1989 req->rq_commit_cb = brw_commit;
1952 req->rq_interpret_reply = brw_interpret; 1990 req->rq_interpret_reply = brw_interpret;
1953 1991
1954 if (mem_tight != 0) 1992 if (mem_tight != 0)
@@ -1992,7 +2030,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1992 if (tmp) 2030 if (tmp)
1993 tmp->oap_request = ptlrpc_request_addref(req); 2031 tmp->oap_request = ptlrpc_request_addref(req);
1994 2032
1995 client_obd_list_lock(&cli->cl_loi_list_lock); 2033 spin_lock(&cli->cl_loi_list_lock);
1996 starting_offset >>= PAGE_SHIFT; 2034 starting_offset >>= PAGE_SHIFT;
1997 if (cmd == OBD_BRW_READ) { 2035 if (cmd == OBD_BRW_READ) {
1998 cli->cl_r_in_flight++; 2036 cli->cl_r_in_flight++;
@@ -2007,7 +2045,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2007 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist, 2045 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2008 starting_offset + 1); 2046 starting_offset + 1);
2009 } 2047 }
2010 client_obd_list_unlock(&cli->cl_loi_list_lock); 2048 spin_unlock(&cli->cl_loi_list_lock);
2011 2049
2012 DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight", 2050 DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
2013 page_count, aa, cli->cl_r_in_flight, 2051 page_count, aa, cli->cl_r_in_flight,
@@ -2055,14 +2093,12 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
2055 LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl); 2093 LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
2056 2094
2057 lock_res_and_lock(lock); 2095 lock_res_and_lock(lock);
2058 spin_lock(&osc_ast_guard);
2059 2096
2060 if (!lock->l_ast_data) 2097 if (!lock->l_ast_data)
2061 lock->l_ast_data = data; 2098 lock->l_ast_data = data;
2062 if (lock->l_ast_data == data) 2099 if (lock->l_ast_data == data)
2063 set = 1; 2100 set = 1;
2064 2101
2065 spin_unlock(&osc_ast_guard);
2066 unlock_res_and_lock(lock); 2102 unlock_res_and_lock(lock);
2067 2103
2068 return set; 2104 return set;
@@ -2104,36 +2140,38 @@ static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2104 return rc; 2140 return rc;
2105} 2141}
2106 2142
2107static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb, 2143static int osc_enqueue_fini(struct ptlrpc_request *req,
2108 obd_enqueue_update_f upcall, void *cookie, 2144 osc_enqueue_upcall_f upcall, void *cookie,
2109 __u64 *flags, int agl, int rc) 2145 struct lustre_handle *lockh, enum ldlm_mode mode,
2146 __u64 *flags, int agl, int errcode)
2110{ 2147{
2111 int intent = *flags & LDLM_FL_HAS_INTENT; 2148 bool intent = *flags & LDLM_FL_HAS_INTENT;
2112 2149 int rc;
2113 if (intent) {
2114 /* The request was created before ldlm_cli_enqueue call. */
2115 if (rc == ELDLM_LOCK_ABORTED) {
2116 struct ldlm_reply *rep;
2117 2150
2118 rep = req_capsule_server_get(&req->rq_pill, 2151 /* The request was created before ldlm_cli_enqueue call. */
2119 &RMF_DLM_REP); 2152 if (intent && errcode == ELDLM_LOCK_ABORTED) {
2153 struct ldlm_reply *rep;
2120 2154
2121 rep->lock_policy_res1 = 2155 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2122 ptlrpc_status_ntoh(rep->lock_policy_res1);
2123 if (rep->lock_policy_res1)
2124 rc = rep->lock_policy_res1;
2125 }
2126 }
2127 2156
2128 if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) || 2157 rep->lock_policy_res1 =
2129 (rc == 0)) { 2158 ptlrpc_status_ntoh(rep->lock_policy_res1);
2159 if (rep->lock_policy_res1)
2160 errcode = rep->lock_policy_res1;
2161 if (!agl)
2162 *flags |= LDLM_FL_LVB_READY;
2163 } else if (errcode == ELDLM_OK) {
2130 *flags |= LDLM_FL_LVB_READY; 2164 *flags |= LDLM_FL_LVB_READY;
2131 CDEBUG(D_INODE, "got kms %llu blocks %llu mtime %llu\n",
2132 lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
2133 } 2165 }
2134 2166
2135 /* Call the update callback. */ 2167 /* Call the update callback. */
2136 rc = (*upcall)(cookie, rc); 2168 rc = (*upcall)(cookie, lockh, errcode);
2169 /* release the reference taken in ldlm_cli_enqueue() */
2170 if (errcode == ELDLM_LOCK_MATCHED)
2171 errcode = ELDLM_OK;
2172 if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2173 ldlm_lock_decref(lockh, mode);
2174
2137 return rc; 2175 return rc;
2138} 2176}
2139 2177
@@ -2142,62 +2180,50 @@ static int osc_enqueue_interpret(const struct lu_env *env,
2142 struct osc_enqueue_args *aa, int rc) 2180 struct osc_enqueue_args *aa, int rc)
2143{ 2181{
2144 struct ldlm_lock *lock; 2182 struct ldlm_lock *lock;
2145 struct lustre_handle handle; 2183 struct lustre_handle *lockh = &aa->oa_lockh;
2146 __u32 mode; 2184 enum ldlm_mode mode = aa->oa_mode;
2147 struct ost_lvb *lvb; 2185 struct ost_lvb *lvb = aa->oa_lvb;
2148 __u32 lvb_len; 2186 __u32 lvb_len = sizeof(*lvb);
2149 __u64 *flags = aa->oa_flags; 2187 __u64 flags = 0;
2150 2188
2151 /* Make a local copy of a lock handle and a mode, because aa->oa_*
2152 * might be freed anytime after lock upcall has been called.
2153 */
2154 lustre_handle_copy(&handle, aa->oa_lockh);
2155 mode = aa->oa_ei->ei_mode;
2156 2189
2157 /* ldlm_cli_enqueue is holding a reference on the lock, so it must 2190 /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2158 * be valid. 2191 * be valid.
2159 */ 2192 */
2160 lock = ldlm_handle2lock(&handle); 2193 lock = ldlm_handle2lock(lockh);
2194 LASSERTF(lock, "lockh %llx, req %p, aa %p - client evicted?\n",
2195 lockh->cookie, req, aa);
2161 2196
2162 /* Take an additional reference so that a blocking AST that 2197 /* Take an additional reference so that a blocking AST that
2163 * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed 2198 * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
2164 * to arrive after an upcall has been executed by 2199 * to arrive after an upcall has been executed by
2165 * osc_enqueue_fini(). 2200 * osc_enqueue_fini().
2166 */ 2201 */
2167 ldlm_lock_addref(&handle, mode); 2202 ldlm_lock_addref(lockh, mode);
2203
2204 /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2205 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2168 2206
2169 /* Let CP AST to grant the lock first. */ 2207 /* Let CP AST to grant the lock first. */
2170 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1); 2208 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2171 2209
2172 if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) { 2210 if (aa->oa_agl) {
2173 lvb = NULL; 2211 LASSERT(!aa->oa_lvb);
2174 lvb_len = 0; 2212 LASSERT(!aa->oa_flags);
2175 } else { 2213 aa->oa_flags = &flags;
2176 lvb = aa->oa_lvb;
2177 lvb_len = sizeof(*aa->oa_lvb);
2178 } 2214 }
2179 2215
2180 /* Complete obtaining the lock procedure. */ 2216 /* Complete obtaining the lock procedure. */
2181 rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1, 2217 rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_type, 1,
2182 mode, flags, lvb, lvb_len, &handle, rc); 2218 aa->oa_mode, aa->oa_flags, lvb, lvb_len,
2219 lockh, rc);
2183 /* Complete osc stuff. */ 2220 /* Complete osc stuff. */
2184 rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie, 2221 rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2185 flags, aa->oa_agl, rc); 2222 aa->oa_flags, aa->oa_agl, rc);
2186 2223
2187 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10); 2224 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2188 2225
2189 /* Release the lock for async request. */ 2226 ldlm_lock_decref(lockh, mode);
2190 if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
2191 /*
2192 * Releases a reference taken by ldlm_cli_enqueue(), if it is
2193 * not already released by
2194 * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
2195 */
2196 ldlm_lock_decref(&handle, mode);
2197
2198 LASSERTF(lock, "lockh %p, req %p, aa %p - client evicted?\n",
2199 aa->oa_lockh, req, aa);
2200 ldlm_lock_decref(&handle, mode);
2201 LDLM_LOCK_PUT(lock); 2227 LDLM_LOCK_PUT(lock);
2202 return rc; 2228 return rc;
2203} 2229}
@@ -2209,29 +2235,29 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
2209 * other synchronous requests, however keeping some locks and trying to obtain 2235 * other synchronous requests, however keeping some locks and trying to obtain
2210 * others may take a considerable amount of time in a case of ost failure; and 2236 * others may take a considerable amount of time in a case of ost failure; and
2211 * when other sync requests do not get released lock from a client, the client 2237 * when other sync requests do not get released lock from a client, the client
2212 * is excluded from the cluster -- such scenarious make the life difficult, so 2238 * is evicted from the cluster -- such scenaries make the life difficult, so
2213 * release locks just after they are obtained. 2239 * release locks just after they are obtained.
2214 */ 2240 */
2215int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, 2241int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2216 __u64 *flags, ldlm_policy_data_t *policy, 2242 __u64 *flags, ldlm_policy_data_t *policy,
2217 struct ost_lvb *lvb, int kms_valid, 2243 struct ost_lvb *lvb, int kms_valid,
2218 obd_enqueue_update_f upcall, void *cookie, 2244 osc_enqueue_upcall_f upcall, void *cookie,
2219 struct ldlm_enqueue_info *einfo, 2245 struct ldlm_enqueue_info *einfo,
2220 struct lustre_handle *lockh,
2221 struct ptlrpc_request_set *rqset, int async, int agl) 2246 struct ptlrpc_request_set *rqset, int async, int agl)
2222{ 2247{
2223 struct obd_device *obd = exp->exp_obd; 2248 struct obd_device *obd = exp->exp_obd;
2249 struct lustre_handle lockh = { 0 };
2224 struct ptlrpc_request *req = NULL; 2250 struct ptlrpc_request *req = NULL;
2225 int intent = *flags & LDLM_FL_HAS_INTENT; 2251 int intent = *flags & LDLM_FL_HAS_INTENT;
2226 __u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY); 2252 __u64 match_lvb = agl ? 0 : LDLM_FL_LVB_READY;
2227 enum ldlm_mode mode; 2253 enum ldlm_mode mode;
2228 int rc; 2254 int rc;
2229 2255
2230 /* Filesystem lock extents are extended to page boundaries so that 2256 /* Filesystem lock extents are extended to page boundaries so that
2231 * dealing with the page cache is a little smoother. 2257 * dealing with the page cache is a little smoother.
2232 */ 2258 */
2233 policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK; 2259 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2234 policy->l_extent.end |= ~CFS_PAGE_MASK; 2260 policy->l_extent.end |= ~PAGE_MASK;
2235 2261
2236 /* 2262 /*
2237 * kms is not valid when either object is completely fresh (so that no 2263 * kms is not valid when either object is completely fresh (so that no
@@ -2259,64 +2285,46 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2259 if (einfo->ei_mode == LCK_PR) 2285 if (einfo->ei_mode == LCK_PR)
2260 mode |= LCK_PW; 2286 mode |= LCK_PW;
2261 mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id, 2287 mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
2262 einfo->ei_type, policy, mode, lockh, 0); 2288 einfo->ei_type, policy, mode, &lockh, 0);
2263 if (mode) { 2289 if (mode) {
2264 struct ldlm_lock *matched = ldlm_handle2lock(lockh); 2290 struct ldlm_lock *matched;
2265 2291
2266 if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) { 2292 if (*flags & LDLM_FL_TEST_LOCK)
2267 /* For AGL, if enqueue RPC is sent but the lock is not 2293 return ELDLM_OK;
2268 * granted, then skip to process this strpe. 2294
2269 * Return -ECANCELED to tell the caller. 2295 matched = ldlm_handle2lock(&lockh);
2296 if (agl) {
2297 /* AGL enqueues DLM locks speculatively. Therefore if
2298 * it already exists a DLM lock, it wll just inform the
2299 * caller to cancel the AGL process for this stripe.
2270 */ 2300 */
2271 ldlm_lock_decref(lockh, mode); 2301 ldlm_lock_decref(&lockh, mode);
2272 LDLM_LOCK_PUT(matched); 2302 LDLM_LOCK_PUT(matched);
2273 return -ECANCELED; 2303 return -ECANCELED;
2274 } 2304 } else if (osc_set_lock_data_with_check(matched, einfo)) {
2275
2276 if (osc_set_lock_data_with_check(matched, einfo)) {
2277 *flags |= LDLM_FL_LVB_READY; 2305 *flags |= LDLM_FL_LVB_READY;
2278 /* addref the lock only if not async requests and PW 2306 /* We already have a lock, and it's referenced. */
2279 * lock is matched whereas we asked for PR. 2307 (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
2280 */
2281 if (!rqset && einfo->ei_mode != mode)
2282 ldlm_lock_addref(lockh, LCK_PR);
2283 if (intent) {
2284 /* I would like to be able to ASSERT here that
2285 * rss <= kms, but I can't, for reasons which
2286 * are explained in lov_enqueue()
2287 */
2288 }
2289
2290 /* We already have a lock, and it's referenced.
2291 *
2292 * At this point, the cl_lock::cll_state is CLS_QUEUING,
2293 * AGL upcall may change it to CLS_HELD directly.
2294 */
2295 (*upcall)(cookie, ELDLM_OK);
2296 2308
2297 if (einfo->ei_mode != mode) 2309 ldlm_lock_decref(&lockh, mode);
2298 ldlm_lock_decref(lockh, LCK_PW);
2299 else if (rqset)
2300 /* For async requests, decref the lock. */
2301 ldlm_lock_decref(lockh, einfo->ei_mode);
2302 LDLM_LOCK_PUT(matched); 2310 LDLM_LOCK_PUT(matched);
2303 return ELDLM_OK; 2311 return ELDLM_OK;
2312 } else {
2313 ldlm_lock_decref(&lockh, mode);
2314 LDLM_LOCK_PUT(matched);
2304 } 2315 }
2305
2306 ldlm_lock_decref(lockh, mode);
2307 LDLM_LOCK_PUT(matched);
2308 } 2316 }
2309 2317
2310 no_match: 2318no_match:
2319 if (*flags & LDLM_FL_TEST_LOCK)
2320 return -ENOLCK;
2311 if (intent) { 2321 if (intent) {
2312 LIST_HEAD(cancels);
2313
2314 req = ptlrpc_request_alloc(class_exp2cliimp(exp), 2322 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2315 &RQF_LDLM_ENQUEUE_LVB); 2323 &RQF_LDLM_ENQUEUE_LVB);
2316 if (!req) 2324 if (!req)
2317 return -ENOMEM; 2325 return -ENOMEM;
2318 2326
2319 rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0); 2327 rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
2320 if (rc) { 2328 if (rc) {
2321 ptlrpc_request_free(req); 2329 ptlrpc_request_free(req);
2322 return rc; 2330 return rc;
@@ -2331,21 +2339,31 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2331 *flags &= ~LDLM_FL_BLOCK_GRANTED; 2339 *flags &= ~LDLM_FL_BLOCK_GRANTED;
2332 2340
2333 rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb, 2341 rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2334 sizeof(*lvb), LVB_T_OST, lockh, async); 2342 sizeof(*lvb), LVB_T_OST, &lockh, async);
2335 if (rqset) { 2343 if (async) {
2336 if (!rc) { 2344 if (!rc) {
2337 struct osc_enqueue_args *aa; 2345 struct osc_enqueue_args *aa;
2338 2346
2339 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args)); 2347 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2340 aa = ptlrpc_req_async_args(req); 2348 aa = ptlrpc_req_async_args(req);
2341 aa->oa_ei = einfo;
2342 aa->oa_exp = exp; 2349 aa->oa_exp = exp;
2343 aa->oa_flags = flags; 2350 aa->oa_mode = einfo->ei_mode;
2351 aa->oa_type = einfo->ei_type;
2352 lustre_handle_copy(&aa->oa_lockh, &lockh);
2344 aa->oa_upcall = upcall; 2353 aa->oa_upcall = upcall;
2345 aa->oa_cookie = cookie; 2354 aa->oa_cookie = cookie;
2346 aa->oa_lvb = lvb;
2347 aa->oa_lockh = lockh;
2348 aa->oa_agl = !!agl; 2355 aa->oa_agl = !!agl;
2356 if (!agl) {
2357 aa->oa_flags = flags;
2358 aa->oa_lvb = lvb;
2359 } else {
2360 /* AGL is essentially to enqueue an DLM lock
2361 * in advance, so we don't care about the
2362 * result of AGL enqueue.
2363 */
2364 aa->oa_lvb = NULL;
2365 aa->oa_flags = NULL;
2366 }
2349 2367
2350 req->rq_interpret_reply = 2368 req->rq_interpret_reply =
2351 (ptlrpc_interpterer_t)osc_enqueue_interpret; 2369 (ptlrpc_interpterer_t)osc_enqueue_interpret;
@@ -2359,7 +2377,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2359 return rc; 2377 return rc;
2360 } 2378 }
2361 2379
2362 rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc); 2380 rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
2381 flags, agl, rc);
2363 if (intent) 2382 if (intent)
2364 ptlrpc_req_finished(req); 2383 ptlrpc_req_finished(req);
2365 2384
@@ -2381,8 +2400,8 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2381 /* Filesystem lock extents are extended to page boundaries so that 2400 /* Filesystem lock extents are extended to page boundaries so that
2382 * dealing with the page cache is a little smoother 2401 * dealing with the page cache is a little smoother
2383 */ 2402 */
2384 policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK; 2403 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2385 policy->l_extent.end |= ~CFS_PAGE_MASK; 2404 policy->l_extent.end |= ~PAGE_MASK;
2386 2405
2387 /* Next, search for already existing extent locks that will cover us */ 2406 /* Next, search for already existing extent locks that will cover us */
2388 /* If we're trying to read, we also search for an existing PW lock. The 2407 /* If we're trying to read, we also search for an existing PW lock. The
@@ -2493,7 +2512,7 @@ static int osc_statfs_async(struct obd_export *exp,
2493 } 2512 }
2494 2513
2495 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret; 2514 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
2496 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args)); 2515 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2497 aa = ptlrpc_req_async_args(req); 2516 aa = ptlrpc_req_async_args(req);
2498 aa->aa_oi = oinfo; 2517 aa->aa_oi = oinfo;
2499 2518
@@ -2787,7 +2806,7 @@ out:
2787 goto skip_locking; 2806 goto skip_locking;
2788 2807
2789 policy.l_extent.start = fm_key->fiemap.fm_start & 2808 policy.l_extent.start = fm_key->fiemap.fm_start &
2790 CFS_PAGE_MASK; 2809 PAGE_MASK;
2791 2810
2792 if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <= 2811 if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
2793 fm_key->fiemap.fm_start + PAGE_SIZE - 1) 2812 fm_key->fiemap.fm_start + PAGE_SIZE - 1)
@@ -2795,7 +2814,7 @@ out:
2795 else 2814 else
2796 policy.l_extent.end = (fm_key->fiemap.fm_start + 2815 policy.l_extent.end = (fm_key->fiemap.fm_start +
2797 fm_key->fiemap.fm_length + 2816 fm_key->fiemap.fm_length +
2798 PAGE_SIZE - 1) & CFS_PAGE_MASK; 2817 PAGE_SIZE - 1) & PAGE_MASK;
2799 2818
2800 ostid_build_res_name(&fm_key->oa.o_oi, &res_id); 2819 ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
2801 mode = ldlm_lock_match(exp->exp_obd->obd_namespace, 2820 mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
@@ -2913,7 +2932,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
2913 int nr = atomic_read(&cli->cl_lru_in_list) >> 1; 2932 int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
2914 int target = *(int *)val; 2933 int target = *(int *)val;
2915 2934
2916 nr = osc_lru_shrink(cli, min(nr, target)); 2935 nr = osc_lru_shrink(env, cli, min(nr, target), true);
2917 *(int *)val -= nr; 2936 *(int *)val -= nr;
2918 return 0; 2937 return 0;
2919 } 2938 }
@@ -2992,12 +3011,12 @@ static int osc_reconnect(const struct lu_env *env,
2992 if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) { 3011 if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
2993 long lost_grant; 3012 long lost_grant;
2994 3013
2995 client_obd_list_lock(&cli->cl_loi_list_lock); 3014 spin_lock(&cli->cl_loi_list_lock);
2996 data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?: 3015 data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
2997 2 * cli_brw_size(obd); 3016 2 * cli_brw_size(obd);
2998 lost_grant = cli->cl_lost_grant; 3017 lost_grant = cli->cl_lost_grant;
2999 cli->cl_lost_grant = 0; 3018 cli->cl_lost_grant = 0;
3000 client_obd_list_unlock(&cli->cl_loi_list_lock); 3019 spin_unlock(&cli->cl_loi_list_lock);
3001 3020
3002 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n", 3021 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n",
3003 data->ocd_connect_flags, 3022 data->ocd_connect_flags,
@@ -3047,10 +3066,10 @@ static int osc_import_event(struct obd_device *obd,
3047 switch (event) { 3066 switch (event) {
3048 case IMP_EVENT_DISCON: { 3067 case IMP_EVENT_DISCON: {
3049 cli = &obd->u.cli; 3068 cli = &obd->u.cli;
3050 client_obd_list_lock(&cli->cl_loi_list_lock); 3069 spin_lock(&cli->cl_loi_list_lock);
3051 cli->cl_avail_grant = 0; 3070 cli->cl_avail_grant = 0;
3052 cli->cl_lost_grant = 0; 3071 cli->cl_lost_grant = 0;
3053 client_obd_list_unlock(&cli->cl_loi_list_lock); 3072 spin_unlock(&cli->cl_loi_list_lock);
3054 break; 3073 break;
3055 } 3074 }
3056 case IMP_EVENT_INACTIVE: { 3075 case IMP_EVENT_INACTIVE: {
@@ -3073,8 +3092,9 @@ static int osc_import_event(struct obd_device *obd,
3073 3092
3074 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY); 3093 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3075 cl_env_put(env, &refcheck); 3094 cl_env_put(env, &refcheck);
3076 } else 3095 } else {
3077 rc = PTR_ERR(env); 3096 rc = PTR_ERR(env);
3097 }
3078 break; 3098 break;
3079 } 3099 }
3080 case IMP_EVENT_ACTIVE: { 3100 case IMP_EVENT_ACTIVE: {
@@ -3116,20 +3136,14 @@ static int osc_import_event(struct obd_device *obd,
3116 * \retval zero the lock can't be canceled 3136 * \retval zero the lock can't be canceled
3117 * \retval other ok to cancel 3137 * \retval other ok to cancel
3118 */ 3138 */
3119static int osc_cancel_for_recovery(struct ldlm_lock *lock) 3139static int osc_cancel_weight(struct ldlm_lock *lock)
3120{ 3140{
3121 check_res_locked(lock->l_resource);
3122
3123 /* 3141 /*
3124 * Cancel all unused extent lock in granted mode LCK_PR or LCK_CR. 3142 * Cancel all unused and granted extent lock.
3125 *
3126 * XXX as a future improvement, we can also cancel unused write lock
3127 * if it doesn't have dirty data and active mmaps.
3128 */ 3143 */
3129 if (lock->l_resource->lr_type == LDLM_EXTENT && 3144 if (lock->l_resource->lr_type == LDLM_EXTENT &&
3130 (lock->l_granted_mode == LCK_PR || 3145 lock->l_granted_mode == lock->l_req_mode &&
3131 lock->l_granted_mode == LCK_CR) && 3146 osc_ldlm_weigh_ast(lock) == 0)
3132 (osc_dlm_lock_pageref(lock) == 0))
3133 return 1; 3147 return 1;
3134 3148
3135 return 0; 3149 return 0;
@@ -3170,6 +3184,14 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3170 } 3184 }
3171 cli->cl_writeback_work = handler; 3185 cli->cl_writeback_work = handler;
3172 3186
3187 handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3188 if (IS_ERR(handler)) {
3189 rc = PTR_ERR(handler);
3190 goto out_ptlrpcd_work;
3191 }
3192
3193 cli->cl_lru_work = handler;
3194
3173 rc = osc_quota_setup(obd); 3195 rc = osc_quota_setup(obd);
3174 if (rc) 3196 if (rc)
3175 goto out_ptlrpcd_work; 3197 goto out_ptlrpcd_work;
@@ -3198,11 +3220,18 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3198 } 3220 }
3199 3221
3200 INIT_LIST_HEAD(&cli->cl_grant_shrink_list); 3222 INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
3201 ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery); 3223 ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3202 return rc; 3224 return rc;
3203 3225
3204out_ptlrpcd_work: 3226out_ptlrpcd_work:
3205 ptlrpcd_destroy_work(handler); 3227 if (cli->cl_writeback_work) {
3228 ptlrpcd_destroy_work(cli->cl_writeback_work);
3229 cli->cl_writeback_work = NULL;
3230 }
3231 if (cli->cl_lru_work) {
3232 ptlrpcd_destroy_work(cli->cl_lru_work);
3233 cli->cl_lru_work = NULL;
3234 }
3206out_client_setup: 3235out_client_setup:
3207 client_obd_cleanup(obd); 3236 client_obd_cleanup(obd);
3208out_ptlrpcd: 3237out_ptlrpcd:
@@ -3241,6 +3270,10 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
3241 ptlrpcd_destroy_work(cli->cl_writeback_work); 3270 ptlrpcd_destroy_work(cli->cl_writeback_work);
3242 cli->cl_writeback_work = NULL; 3271 cli->cl_writeback_work = NULL;
3243 } 3272 }
3273 if (cli->cl_lru_work) {
3274 ptlrpcd_destroy_work(cli->cl_lru_work);
3275 cli->cl_lru_work = NULL;
3276 }
3244 obd_cleanup_client_import(obd); 3277 obd_cleanup_client_import(obd);
3245 ptlrpc_lprocfs_unregister_obd(obd); 3278 ptlrpc_lprocfs_unregister_obd(obd);
3246 lprocfs_obd_cleanup(obd); 3279 lprocfs_obd_cleanup(obd);
@@ -3330,7 +3363,6 @@ static struct obd_ops osc_obd_ops = {
3330}; 3363};
3331 3364
3332extern struct lu_kmem_descr osc_caches[]; 3365extern struct lu_kmem_descr osc_caches[];
3333extern spinlock_t osc_ast_guard;
3334extern struct lock_class_key osc_ast_guard_class; 3366extern struct lock_class_key osc_ast_guard_class;
3335 3367
3336static int __init osc_init(void) 3368static int __init osc_init(void)
@@ -3357,9 +3389,6 @@ static int __init osc_init(void)
3357 if (rc) 3389 if (rc)
3358 goto out_kmem; 3390 goto out_kmem;
3359 3391
3360 spin_lock_init(&osc_ast_guard);
3361 lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
3362
3363 /* This is obviously too much memory, only prevent overflow here */ 3392 /* This is obviously too much memory, only prevent overflow here */
3364 if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0) { 3393 if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0) {
3365 rc = -EINVAL; 3394 rc = -EINVAL;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index cf3ac8eee9ee..4b7912a2cb52 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -595,9 +595,9 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
595 struct obd_import *imp = request->rq_import; 595 struct obd_import *imp = request->rq_import;
596 int rc; 596 int rc;
597 597
598 if (unlikely(ctx)) 598 if (unlikely(ctx)) {
599 request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx); 599 request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
600 else { 600 } else {
601 rc = sptlrpc_req_get_ctx(request); 601 rc = sptlrpc_req_get_ctx(request);
602 if (rc) 602 if (rc)
603 goto out_free; 603 goto out_free;
@@ -1082,7 +1082,6 @@ static int ptlrpc_console_allow(struct ptlrpc_request *req)
1082 */ 1082 */
1083 if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) && 1083 if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
1084 (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) { 1084 (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {
1085
1086 /* Suppress timed out reconnect requests */ 1085 /* Suppress timed out reconnect requests */
1087 if (req->rq_timedout) 1086 if (req->rq_timedout)
1088 return 0; 1087 return 0;
@@ -2087,7 +2086,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
2087 CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n", 2086 CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
2088 set, timeout); 2087 set, timeout);
2089 2088
2090 if (timeout == 0 && !cfs_signal_pending()) 2089 if (timeout == 0 && !signal_pending(current))
2091 /* 2090 /*
2092 * No requests are in-flight (ether timed out 2091 * No requests are in-flight (ether timed out
2093 * or delayed), so we can allow interrupts. 2092 * or delayed), so we can allow interrupts.
@@ -2114,7 +2113,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
2114 * it being ignored forever 2113 * it being ignored forever
2115 */ 2114 */
2116 if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr && 2115 if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
2117 cfs_signal_pending()) { 2116 signal_pending(current)) {
2118 sigset_t blocked_sigs = 2117 sigset_t blocked_sigs =
2119 cfs_block_sigsinv(LUSTRE_FATAL_SIGS); 2118 cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
2120 2119
@@ -2124,7 +2123,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
2124 * important signals since ptlrpc set is not easily 2123 * important signals since ptlrpc set is not easily
2125 * reentrant from userspace again 2124 * reentrant from userspace again
2126 */ 2125 */
2127 if (cfs_signal_pending()) 2126 if (signal_pending(current))
2128 ptlrpc_interrupted_set(set); 2127 ptlrpc_interrupted_set(set);
2129 cfs_restore_sigs(blocked_sigs); 2128 cfs_restore_sigs(blocked_sigs);
2130 } 2129 }
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 47be21ac9f10..fdcde9bbd788 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -69,7 +69,6 @@ void request_out_callback(lnet_event_t *ev)
69 req->rq_req_unlink = 0; 69 req->rq_req_unlink = 0;
70 70
71 if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) { 71 if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
72
73 /* Failed send: make it seem like the reply timed out, just 72 /* Failed send: make it seem like the reply timed out, just
74 * like failing sends in client.c does currently... 73 * like failing sends in client.c does currently...
75 */ 74 */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index cd94fed0ffdf..a4f7544f46b8 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1001,6 +1001,7 @@ finish:
1001 return 0; 1001 return 0;
1002 } 1002 }
1003 } else { 1003 } else {
1004 static bool warned;
1004 1005
1005 spin_lock(&imp->imp_lock); 1006 spin_lock(&imp->imp_lock);
1006 list_del(&imp->imp_conn_current->oic_item); 1007 list_del(&imp->imp_conn_current->oic_item);
@@ -1021,7 +1022,7 @@ finish:
1021 goto out; 1022 goto out;
1022 } 1023 }
1023 1024
1024 if ((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) && 1025 if (!warned && (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
1025 (ocd->ocd_version > LUSTRE_VERSION_CODE + 1026 (ocd->ocd_version > LUSTRE_VERSION_CODE +
1026 LUSTRE_VERSION_OFFSET_WARN || 1027 LUSTRE_VERSION_OFFSET_WARN ||
1027 ocd->ocd_version < LUSTRE_VERSION_CODE - 1028 ocd->ocd_version < LUSTRE_VERSION_CODE -
@@ -1029,10 +1030,8 @@ finish:
1029 /* Sigh, some compilers do not like #ifdef in the middle 1030 /* Sigh, some compilers do not like #ifdef in the middle
1030 * of macro arguments 1031 * of macro arguments
1031 */ 1032 */
1032 const char *older = "older. Consider upgrading server or downgrading client" 1033 const char *older = "older than client. Consider upgrading server";
1033 ; 1034 const char *newer = "newer than client. Consider recompiling application";
1034 const char *newer = "newer than client version. Consider upgrading client"
1035 ;
1036 1035
1037 LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) is much %s (%s)\n", 1036 LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) is much %s (%s)\n",
1038 obd2cli_tgt(imp->imp_obd), 1037 obd2cli_tgt(imp->imp_obd),
@@ -1042,6 +1041,7 @@ finish:
1042 OBD_OCD_VERSION_FIX(ocd->ocd_version), 1041 OBD_OCD_VERSION_FIX(ocd->ocd_version),
1043 ocd->ocd_version > LUSTRE_VERSION_CODE ? 1042 ocd->ocd_version > LUSTRE_VERSION_CODE ?
1044 newer : older, LUSTRE_VERSION_STRING); 1043 newer : older, LUSTRE_VERSION_STRING);
1044 warned = true;
1045 } 1045 }
1046 1046
1047#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0) 1047#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
@@ -1370,7 +1370,6 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
1370 if (rc) 1370 if (rc)
1371 goto out; 1371 goto out;
1372 } 1372 }
1373
1374 } 1373 }
1375 1374
1376 if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) { 1375 if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
@@ -1453,7 +1452,6 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
1453 back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL); 1452 back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL);
1454 rc = l_wait_event(imp->imp_recovery_waitq, 1453 rc = l_wait_event(imp->imp_recovery_waitq,
1455 !ptlrpc_import_in_recovery(imp), &lwi); 1454 !ptlrpc_import_in_recovery(imp), &lwi);
1456
1457 } 1455 }
1458 1456
1459 spin_lock(&imp->imp_lock); 1457 spin_lock(&imp->imp_lock);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 5b06901e5729..c0ecd1625dc4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -160,6 +160,16 @@ static const struct req_msg_field *fld_query_server[] = {
160 &RMF_FLD_MDFLD 160 &RMF_FLD_MDFLD
161}; 161};
162 162
163static const struct req_msg_field *fld_read_client[] = {
164 &RMF_PTLRPC_BODY,
165 &RMF_FLD_MDFLD
166};
167
168static const struct req_msg_field *fld_read_server[] = {
169 &RMF_PTLRPC_BODY,
170 &RMF_GENERIC_DATA
171};
172
163static const struct req_msg_field *mds_getattr_name_client[] = { 173static const struct req_msg_field *mds_getattr_name_client[] = {
164 &RMF_PTLRPC_BODY, 174 &RMF_PTLRPC_BODY,
165 &RMF_MDT_BODY, 175 &RMF_MDT_BODY,
@@ -566,7 +576,7 @@ static const struct req_msg_field *ost_get_info_generic_server[] = {
566 576
567static const struct req_msg_field *ost_get_info_generic_client[] = { 577static const struct req_msg_field *ost_get_info_generic_client[] = {
568 &RMF_PTLRPC_BODY, 578 &RMF_PTLRPC_BODY,
569 &RMF_SETINFO_KEY 579 &RMF_GETINFO_KEY
570}; 580};
571 581
572static const struct req_msg_field *ost_get_last_id_server[] = { 582static const struct req_msg_field *ost_get_last_id_server[] = {
@@ -574,6 +584,12 @@ static const struct req_msg_field *ost_get_last_id_server[] = {
574 &RMF_OBD_ID 584 &RMF_OBD_ID
575}; 585};
576 586
587static const struct req_msg_field *ost_get_last_fid_client[] = {
588 &RMF_PTLRPC_BODY,
589 &RMF_GETINFO_KEY,
590 &RMF_FID,
591};
592
577static const struct req_msg_field *ost_get_last_fid_server[] = { 593static const struct req_msg_field *ost_get_last_fid_server[] = {
578 &RMF_PTLRPC_BODY, 594 &RMF_PTLRPC_BODY,
579 &RMF_FID, 595 &RMF_FID,
@@ -643,6 +659,7 @@ static struct req_format *req_formats[] = {
643 &RQF_MGS_CONFIG_READ, 659 &RQF_MGS_CONFIG_READ,
644 &RQF_SEQ_QUERY, 660 &RQF_SEQ_QUERY,
645 &RQF_FLD_QUERY, 661 &RQF_FLD_QUERY,
662 &RQF_FLD_READ,
646 &RQF_MDS_CONNECT, 663 &RQF_MDS_CONNECT,
647 &RQF_MDS_DISCONNECT, 664 &RQF_MDS_DISCONNECT,
648 &RQF_MDS_GET_INFO, 665 &RQF_MDS_GET_INFO,
@@ -696,7 +713,7 @@ static struct req_format *req_formats[] = {
696 &RQF_OST_BRW_WRITE, 713 &RQF_OST_BRW_WRITE,
697 &RQF_OST_STATFS, 714 &RQF_OST_STATFS,
698 &RQF_OST_SET_GRANT_INFO, 715 &RQF_OST_SET_GRANT_INFO,
699 &RQF_OST_GET_INFO_GENERIC, 716 &RQF_OST_GET_INFO,
700 &RQF_OST_GET_INFO_LAST_ID, 717 &RQF_OST_GET_INFO_LAST_ID,
701 &RQF_OST_GET_INFO_LAST_FID, 718 &RQF_OST_GET_INFO_LAST_FID,
702 &RQF_OST_SET_INFO_LAST_FID, 719 &RQF_OST_SET_INFO_LAST_FID,
@@ -1162,6 +1179,10 @@ struct req_format RQF_FLD_QUERY =
1162 DEFINE_REQ_FMT0("FLD_QUERY", fld_query_client, fld_query_server); 1179 DEFINE_REQ_FMT0("FLD_QUERY", fld_query_client, fld_query_server);
1163EXPORT_SYMBOL(RQF_FLD_QUERY); 1180EXPORT_SYMBOL(RQF_FLD_QUERY);
1164 1181
1182struct req_format RQF_FLD_READ =
1183 DEFINE_REQ_FMT0("FLD_READ", fld_read_client, fld_read_server);
1184EXPORT_SYMBOL(RQF_FLD_READ);
1185
1165struct req_format RQF_LOG_CANCEL = 1186struct req_format RQF_LOG_CANCEL =
1166 DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty); 1187 DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty);
1167EXPORT_SYMBOL(RQF_LOG_CANCEL); 1188EXPORT_SYMBOL(RQF_LOG_CANCEL);
@@ -1519,10 +1540,10 @@ struct req_format RQF_OST_SET_GRANT_INFO =
1519 ost_body_only); 1540 ost_body_only);
1520EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO); 1541EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO);
1521 1542
1522struct req_format RQF_OST_GET_INFO_GENERIC = 1543struct req_format RQF_OST_GET_INFO =
1523 DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client, 1544 DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client,
1524 ost_get_info_generic_server); 1545 ost_get_info_generic_server);
1525EXPORT_SYMBOL(RQF_OST_GET_INFO_GENERIC); 1546EXPORT_SYMBOL(RQF_OST_GET_INFO);
1526 1547
1527struct req_format RQF_OST_GET_INFO_LAST_ID = 1548struct req_format RQF_OST_GET_INFO_LAST_ID =
1528 DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client, 1549 DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client,
@@ -1530,7 +1551,7 @@ struct req_format RQF_OST_GET_INFO_LAST_ID =
1530EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID); 1551EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID);
1531 1552
1532struct req_format RQF_OST_GET_INFO_LAST_FID = 1553struct req_format RQF_OST_GET_INFO_LAST_FID =
1533 DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", obd_set_info_client, 1554 DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", ost_get_last_fid_client,
1534 ost_get_last_fid_server); 1555 ost_get_last_fid_server);
1535EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID); 1556EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID);
1536 1557
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index c95a91ce26c9..64c0f1e17f36 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -131,6 +131,7 @@ static struct ll_rpc_opcode {
131 { SEC_CTX_INIT_CONT, "sec_ctx_init_cont" }, 131 { SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
132 { SEC_CTX_FINI, "sec_ctx_fini" }, 132 { SEC_CTX_FINI, "sec_ctx_fini" },
133 { FLD_QUERY, "fld_query" }, 133 { FLD_QUERY, "fld_query" },
134 { FLD_READ, "fld_read" },
134}; 135};
135 136
136static struct ll_eopcode { 137static struct ll_eopcode {
@@ -679,11 +680,11 @@ static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file,
679 /** 680 /**
680 * The second token is either NULL, or an optional [reg|hp] string 681 * The second token is either NULL, or an optional [reg|hp] string
681 */ 682 */
682 if (strcmp(cmd, "reg") == 0) 683 if (strcmp(cmd, "reg") == 0) {
683 queue = PTLRPC_NRS_QUEUE_REG; 684 queue = PTLRPC_NRS_QUEUE_REG;
684 else if (strcmp(cmd, "hp") == 0) 685 } else if (strcmp(cmd, "hp") == 0) {
685 queue = PTLRPC_NRS_QUEUE_HP; 686 queue = PTLRPC_NRS_QUEUE_HP;
686 else { 687 } else {
687 rc = -EINVAL; 688 rc = -EINVAL;
688 goto out; 689 goto out;
689 } 690 }
@@ -693,8 +694,9 @@ default_queue:
693 if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) { 694 if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) {
694 rc = -ENODEV; 695 rc = -ENODEV;
695 goto out; 696 goto out;
696 } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc)) 697 } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc)) {
697 queue = PTLRPC_NRS_QUEUE_REG; 698 queue = PTLRPC_NRS_QUEUE_REG;
699 }
698 700
699 /** 701 /**
700 * Serialize NRS core lprocfs operations with policy registration/ 702 * Serialize NRS core lprocfs operations with policy registration/
@@ -1320,6 +1322,5 @@ int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
1320 up_read(&obd->u.cli.cl_sem); 1322 up_read(&obd->u.cli.cl_sem);
1321 1323
1322 return count; 1324 return count;
1323
1324} 1325}
1325EXPORT_SYMBOL(lprocfs_wr_pinger_recov); 1326EXPORT_SYMBOL(lprocfs_wr_pinger_recov);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 710fb806f122..c444f516856f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -975,7 +975,11 @@ static void nrs_svcpt_cleanup_locked(struct ptlrpc_service_part *svcpt)
975 LASSERT(mutex_is_locked(&nrs_core.nrs_mutex)); 975 LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
976 976
977again: 977again:
978 nrs = nrs_svcpt2nrs(svcpt, hp); 978 /* scp_nrs_hp could be NULL due to short of memory. */
979 nrs = hp ? svcpt->scp_nrs_hp : &svcpt->scp_nrs_reg;
980 /* check the nrs_svcpt to see if nrs is initialized. */
981 if (!nrs || !nrs->nrs_svcpt)
982 return;
979 nrs->nrs_stopping = 1; 983 nrs->nrs_stopping = 1;
980 984
981 list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, pol_list) { 985 list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, pol_list) {
@@ -1038,7 +1042,6 @@ static int nrs_policy_unregister_locked(struct ptlrpc_nrs_pol_desc *desc)
1038 LASSERT(mutex_is_locked(&ptlrpc_all_services_mutex)); 1042 LASSERT(mutex_is_locked(&ptlrpc_all_services_mutex));
1039 1043
1040 list_for_each_entry(svc, &ptlrpc_all_services, srv_list) { 1044 list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
1041
1042 if (!nrs_policy_compatible(svc, desc) || 1045 if (!nrs_policy_compatible(svc, desc) ||
1043 unlikely(svc->srv_is_stopping)) 1046 unlikely(svc->srv_is_stopping))
1044 continue; 1047 continue;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 492d63fad6f9..811acf6fc786 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -1160,7 +1160,6 @@ __u32 lustre_msg_get_timeout(struct lustre_msg *msg)
1160 if (!pb) { 1160 if (!pb) {
1161 CERROR("invalid msg %p: no ptlrpc body!\n", msg); 1161 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1162 return 0; 1162 return 0;
1163
1164 } 1163 }
1165 return pb->pb_timeout; 1164 return pb->pb_timeout;
1166 } 1165 }
@@ -1179,7 +1178,6 @@ __u32 lustre_msg_get_service_time(struct lustre_msg *msg)
1179 if (!pb) { 1178 if (!pb) {
1180 CERROR("invalid msg %p: no ptlrpc body!\n", msg); 1179 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1181 return 0; 1180 return 0;
1182
1183 } 1181 }
1184 return pb->pb_service_time; 1182 return pb->pb_service_time;
1185 } 1183 }
@@ -1572,7 +1570,6 @@ static void lustre_swab_obdo(struct obdo *o)
1572 CLASSERT(offsetof(typeof(*o), o_padding_4) != 0); 1570 CLASSERT(offsetof(typeof(*o), o_padding_4) != 0);
1573 CLASSERT(offsetof(typeof(*o), o_padding_5) != 0); 1571 CLASSERT(offsetof(typeof(*o), o_padding_5) != 0);
1574 CLASSERT(offsetof(typeof(*o), o_padding_6) != 0); 1572 CLASSERT(offsetof(typeof(*o), o_padding_6) != 0);
1575
1576} 1573}
1577 1574
1578void lustre_swab_obd_statfs(struct obd_statfs *os) 1575void lustre_swab_obd_statfs(struct obd_statfs *os)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index db003f5da09e..76a355a9db8b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -387,7 +387,8 @@ static int ptlrpcd(void *arg)
387{ 387{
388 struct ptlrpcd_ctl *pc = arg; 388 struct ptlrpcd_ctl *pc = arg;
389 struct ptlrpc_request_set *set; 389 struct ptlrpc_request_set *set;
390 struct lu_env env = { .le_ses = NULL }; 390 struct lu_context ses = { 0 };
391 struct lu_env env = { .le_ses = &ses };
391 int rc = 0; 392 int rc = 0;
392 int exit = 0; 393 int exit = 0;
393 394
@@ -416,6 +417,13 @@ static int ptlrpcd(void *arg)
416 */ 417 */
417 rc = lu_context_init(&env.le_ctx, 418 rc = lu_context_init(&env.le_ctx,
418 LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF); 419 LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
420 if (rc == 0) {
421 rc = lu_context_init(env.le_ses,
422 LCT_SESSION | LCT_REMEMBER | LCT_NOREF);
423 if (rc != 0)
424 lu_context_fini(&env.le_ctx);
425 }
426
419 if (rc != 0) 427 if (rc != 0)
420 goto failed; 428 goto failed;
421 429
@@ -436,9 +444,10 @@ static int ptlrpcd(void *arg)
436 ptlrpc_expired_set, set); 444 ptlrpc_expired_set, set);
437 445
438 lu_context_enter(&env.le_ctx); 446 lu_context_enter(&env.le_ctx);
439 l_wait_event(set->set_waitq, 447 lu_context_enter(env.le_ses);
440 ptlrpcd_check(&env, pc), &lwi); 448 l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
441 lu_context_exit(&env.le_ctx); 449 lu_context_exit(&env.le_ctx);
450 lu_context_exit(env.le_ses);
442 451
443 /* 452 /*
444 * Abort inflight rpcs for forced stop case. 453 * Abort inflight rpcs for forced stop case.
@@ -461,6 +470,7 @@ static int ptlrpcd(void *arg)
461 if (!list_empty(&set->set_requests)) 470 if (!list_empty(&set->set_requests))
462 ptlrpc_set_wait(set); 471 ptlrpc_set_wait(set);
463 lu_context_fini(&env.le_ctx); 472 lu_context_fini(&env.le_ctx);
473 lu_context_fini(env.le_ses);
464 474
465 complete(&pc->pc_finishing); 475 complete(&pc->pc_finishing);
466 476
@@ -899,8 +909,11 @@ int ptlrpcd_addref(void)
899 int rc = 0; 909 int rc = 0;
900 910
901 mutex_lock(&ptlrpcd_mutex); 911 mutex_lock(&ptlrpcd_mutex);
902 if (++ptlrpcd_users == 1) 912 if (++ptlrpcd_users == 1) {
903 rc = ptlrpcd_init(); 913 rc = ptlrpcd_init();
914 if (rc < 0)
915 ptlrpcd_users--;
916 }
904 mutex_unlock(&ptlrpcd_mutex); 917 mutex_unlock(&ptlrpcd_mutex);
905 return rc; 918 return rc;
906} 919}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index d3872b8c9a6e..02e6cda4c995 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -41,7 +41,6 @@
41#define DEBUG_SUBSYSTEM S_SEC 41#define DEBUG_SUBSYSTEM S_SEC
42 42
43#include "../../include/linux/libcfs/libcfs.h" 43#include "../../include/linux/libcfs/libcfs.h"
44#include <linux/crypto.h>
45 44
46#include "../include/obd.h" 45#include "../include/obd.h"
47#include "../include/obd_cksum.h" 46#include "../include/obd_cksum.h"
@@ -511,7 +510,6 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
511{ 510{
512 struct cfs_crypto_hash_desc *hdesc; 511 struct cfs_crypto_hash_desc *hdesc;
513 int hashsize; 512 int hashsize;
514 char hashbuf[64];
515 unsigned int bufsize; 513 unsigned int bufsize;
516 int i, err; 514 int i, err;
517 515
@@ -529,21 +527,23 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
529 527
530 for (i = 0; i < desc->bd_iov_count; i++) { 528 for (i = 0; i < desc->bd_iov_count; i++) {
531 cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page, 529 cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
532 desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK, 530 desc->bd_iov[i].kiov_offset & ~PAGE_MASK,
533 desc->bd_iov[i].kiov_len); 531 desc->bd_iov[i].kiov_len);
534 } 532 }
533
535 if (hashsize > buflen) { 534 if (hashsize > buflen) {
535 unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
536
536 bufsize = sizeof(hashbuf); 537 bufsize = sizeof(hashbuf);
537 err = cfs_crypto_hash_final(hdesc, (unsigned char *)hashbuf, 538 LASSERTF(bufsize >= hashsize, "bufsize = %u < hashsize %u\n",
538 &bufsize); 539 bufsize, hashsize);
540 err = cfs_crypto_hash_final(hdesc, hashbuf, &bufsize);
539 memcpy(buf, hashbuf, buflen); 541 memcpy(buf, hashbuf, buflen);
540 } else { 542 } else {
541 bufsize = buflen; 543 bufsize = buflen;
542 err = cfs_crypto_hash_final(hdesc, buf, &bufsize); 544 err = cfs_crypto_hash_final(hdesc, buf, &bufsize);
543 } 545 }
544 546
545 if (err)
546 cfs_crypto_hash_final(hdesc, NULL, NULL);
547 return err; 547 return err;
548} 548}
549EXPORT_SYMBOL(sptlrpc_get_bulk_checksum); 549EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 6276bf59c3aa..37c9f4c453de 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -162,7 +162,7 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
162 continue; 162 continue;
163 163
164 ptr = kmap(desc->bd_iov[i].kiov_page); 164 ptr = kmap(desc->bd_iov[i].kiov_page);
165 off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK; 165 off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
166 ptr[off] ^= 0x1; 166 ptr[off] ^= 0x1;
167 kunmap(desc->bd_iov[i].kiov_page); 167 kunmap(desc->bd_iov[i].kiov_page);
168 return; 168 return;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 1bbd1d39ccf8..17c7b9749f67 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -838,6 +838,11 @@ static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
838{ 838{
839 ptlrpc_server_hpreq_fini(req); 839 ptlrpc_server_hpreq_fini(req);
840 840
841 if (req->rq_session.lc_thread) {
842 lu_context_exit(&req->rq_session);
843 lu_context_fini(&req->rq_session);
844 }
845
841 ptlrpc_server_drop_request(req); 846 ptlrpc_server_drop_request(req);
842} 847}
843 848
@@ -1579,6 +1584,21 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
1579 } 1584 }
1580 1585
1581 req->rq_svc_thread = thread; 1586 req->rq_svc_thread = thread;
1587 if (thread) {
1588 /* initialize request session, it is needed for request
1589 * processing by target
1590 */
1591 rc = lu_context_init(&req->rq_session,
1592 LCT_SERVER_SESSION | LCT_NOREF);
1593 if (rc) {
1594 CERROR("%s: failure to initialize session: rc = %d\n",
1595 thread->t_name, rc);
1596 goto err_req;
1597 }
1598 req->rq_session.lc_thread = thread;
1599 lu_context_enter(&req->rq_session);
1600 req->rq_svc_thread->t_env->le_ses = &req->rq_session;
1601 }
1582 1602
1583 ptlrpc_at_add_timed(req); 1603 ptlrpc_at_add_timed(req);
1584 1604
@@ -1612,7 +1632,6 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
1612 struct timespec64 arrived; 1632 struct timespec64 arrived;
1613 unsigned long timediff_usecs; 1633 unsigned long timediff_usecs;
1614 unsigned long arrived_usecs; 1634 unsigned long arrived_usecs;
1615 int rc;
1616 int fail_opc = 0; 1635 int fail_opc = 0;
1617 1636
1618 request = ptlrpc_server_request_get(svcpt, false); 1637 request = ptlrpc_server_request_get(svcpt, false);
@@ -1649,21 +1668,6 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
1649 at_get(&svcpt->scp_at_estimate)); 1668 at_get(&svcpt->scp_at_estimate));
1650 } 1669 }
1651 1670
1652 rc = lu_context_init(&request->rq_session, LCT_SESSION | LCT_NOREF);
1653 if (rc) {
1654 CERROR("Failure to initialize session: %d\n", rc);
1655 goto out_req;
1656 }
1657 request->rq_session.lc_thread = thread;
1658 request->rq_session.lc_cookie = 0x5;
1659 lu_context_enter(&request->rq_session);
1660
1661 CDEBUG(D_NET, "got req %llu\n", request->rq_xid);
1662
1663 request->rq_svc_thread = thread;
1664 if (thread)
1665 request->rq_svc_thread->t_env->le_ses = &request->rq_session;
1666
1667 if (likely(request->rq_export)) { 1671 if (likely(request->rq_export)) {
1668 if (unlikely(ptlrpc_check_req(request))) 1672 if (unlikely(ptlrpc_check_req(request)))
1669 goto put_conn; 1673 goto put_conn;
@@ -1695,14 +1699,21 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
1695 if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING) 1699 if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
1696 CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val); 1700 CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
1697 1701
1698 rc = svc->srv_ops.so_req_handler(request); 1702 CDEBUG(D_NET, "got req %llu\n", request->rq_xid);
1703
1704 /* re-assign request and sesson thread to the current one */
1705 request->rq_svc_thread = thread;
1706 if (thread) {
1707 LASSERT(request->rq_session.lc_thread);
1708 request->rq_session.lc_thread = thread;
1709 request->rq_session.lc_cookie = 0x55;
1710 thread->t_env->le_ses = &request->rq_session;
1711 }
1712 svc->srv_ops.so_req_handler(request);
1699 1713
1700 ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE); 1714 ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
1701 1715
1702put_conn: 1716put_conn:
1703 lu_context_exit(&request->rq_session);
1704 lu_context_fini(&request->rq_session);
1705
1706 if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) { 1717 if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) {
1707 DEBUG_REQ(D_WARNING, request, 1718 DEBUG_REQ(D_WARNING, request,
1708 "Request took longer than estimated (%lld:%llds); " 1719 "Request took longer than estimated (%lld:%llds); "
@@ -1756,7 +1767,6 @@ put_conn:
1756 request->rq_arrival_time.tv_sec); 1767 request->rq_arrival_time.tv_sec);
1757 } 1768 }
1758 1769
1759out_req:
1760 ptlrpc_server_finish_active_request(svcpt, request); 1770 ptlrpc_server_finish_active_request(svcpt, request);
1761 1771
1762 return 1; 1772 return 1;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index 3ffd2d91f274..aacc8108391d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -276,7 +276,9 @@ void lustre_assert_wire_constants(void)
276 (long long)FLD_QUERY); 276 (long long)FLD_QUERY);
277 LASSERTF(FLD_FIRST_OPC == 900, "found %lld\n", 277 LASSERTF(FLD_FIRST_OPC == 900, "found %lld\n",
278 (long long)FLD_FIRST_OPC); 278 (long long)FLD_FIRST_OPC);
279 LASSERTF(FLD_LAST_OPC == 901, "found %lld\n", 279 LASSERTF(FLD_READ == 901, "found %lld\n",
280 (long long)FLD_READ);
281 LASSERTF(FLD_LAST_OPC == 902, "found %lld\n",
280 (long long)FLD_LAST_OPC); 282 (long long)FLD_LAST_OPC);
281 LASSERTF(SEQ_QUERY == 700, "found %lld\n", 283 LASSERTF(SEQ_QUERY == 700, "found %lld\n",
282 (long long)SEQ_QUERY); 284 (long long)SEQ_QUERY);
@@ -1069,6 +1071,8 @@ void lustre_assert_wire_constants(void)
1069 OBD_CONNECT_PINGLESS); 1071 OBD_CONNECT_PINGLESS);
1070 LASSERTF(OBD_CONNECT_FLOCK_DEAD == 0x8000000000000ULL, 1072 LASSERTF(OBD_CONNECT_FLOCK_DEAD == 0x8000000000000ULL,
1071 "found 0x%.16llxULL\n", OBD_CONNECT_FLOCK_DEAD); 1073 "found 0x%.16llxULL\n", OBD_CONNECT_FLOCK_DEAD);
1074 LASSERTF(OBD_CONNECT_OPEN_BY_FID == 0x20000000000000ULL,
1075 "found 0x%.16llxULL\n", OBD_CONNECT_OPEN_BY_FID);
1072 LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n", 1076 LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
1073 (unsigned)OBD_CKSUM_CRC32); 1077 (unsigned)OBD_CKSUM_CRC32);
1074 LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n", 1078 LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
@@ -1639,6 +1643,12 @@ void lustre_assert_wire_constants(void)
1639 OBD_BRW_ASYNC); 1643 OBD_BRW_ASYNC);
1640 LASSERTF(OBD_BRW_MEMALLOC == 0x800, "found 0x%.8x\n", 1644 LASSERTF(OBD_BRW_MEMALLOC == 0x800, "found 0x%.8x\n",
1641 OBD_BRW_MEMALLOC); 1645 OBD_BRW_MEMALLOC);
1646 LASSERTF(OBD_BRW_OVER_USRQUOTA == 0x1000, "found 0x%.8x\n",
1647 OBD_BRW_OVER_USRQUOTA);
1648 LASSERTF(OBD_BRW_OVER_GRPQUOTA == 0x2000, "found 0x%.8x\n",
1649 OBD_BRW_OVER_GRPQUOTA);
1650 LASSERTF(OBD_BRW_SOFT_SYNC == 0x4000, "found 0x%.8x\n",
1651 OBD_BRW_SOFT_SYNC);
1642 1652
1643 /* Checks for struct ost_body */ 1653 /* Checks for struct ost_body */
1644 LASSERTF((int)sizeof(struct ost_body) == 208, "found %lld\n", 1654 LASSERTF((int)sizeof(struct ost_body) == 208, "found %lld\n",
diff --git a/drivers/staging/media/omap1/omap1_camera.c b/drivers/staging/media/omap1/omap1_camera.c
index bd721e35474a..54b8dd2d2bba 100644
--- a/drivers/staging/media/omap1/omap1_camera.c
+++ b/drivers/staging/media/omap1/omap1_camera.c
@@ -1569,27 +1569,21 @@ static int omap1_cam_probe(struct platform_device *pdev)
1569 unsigned int irq; 1569 unsigned int irq;
1570 int err = 0; 1570 int err = 0;
1571 1571
1572 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1573 irq = platform_get_irq(pdev, 0); 1572 irq = platform_get_irq(pdev, 0);
1574 if (!res || (int)irq <= 0) { 1573 if ((int)irq <= 0) {
1575 err = -ENODEV; 1574 err = -ENODEV;
1576 goto exit; 1575 goto exit;
1577 } 1576 }
1578 1577
1579 clk = clk_get(&pdev->dev, "armper_ck"); 1578 clk = devm_clk_get(&pdev->dev, "armper_ck");
1580 if (IS_ERR(clk)) { 1579 if (IS_ERR(clk))
1581 err = PTR_ERR(clk); 1580 return PTR_ERR(clk);
1582 goto exit;
1583 }
1584 1581
1585 pcdev = kzalloc(sizeof(*pcdev) + resource_size(res), GFP_KERNEL); 1582 pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev) + resource_size(res),
1586 if (!pcdev) { 1583 GFP_KERNEL);
1587 dev_err(&pdev->dev, "Could not allocate pcdev\n"); 1584 if (!pcdev)
1588 err = -ENOMEM; 1585 return -ENOMEM;
1589 goto exit_put_clk;
1590 }
1591 1586
1592 pcdev->res = res;
1593 pcdev->clk = clk; 1587 pcdev->clk = clk;
1594 1588
1595 pcdev->pdata = pdev->dev.platform_data; 1589 pcdev->pdata = pdev->dev.platform_data;
@@ -1620,19 +1614,11 @@ static int omap1_cam_probe(struct platform_device *pdev)
1620 INIT_LIST_HEAD(&pcdev->capture); 1614 INIT_LIST_HEAD(&pcdev->capture);
1621 spin_lock_init(&pcdev->lock); 1615 spin_lock_init(&pcdev->lock);
1622 1616
1623 /* 1617 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1624 * Request the region. 1618 base = devm_ioremap_resource(&pdev->dev, res);
1625 */ 1619 if (IS_ERR(base))
1626 if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) { 1620 return PTR_ERR(base);
1627 err = -EBUSY;
1628 goto exit_kfree;
1629 }
1630 1621
1631 base = ioremap(res->start, resource_size(res));
1632 if (!base) {
1633 err = -ENOMEM;
1634 goto exit_release;
1635 }
1636 pcdev->irq = irq; 1622 pcdev->irq = irq;
1637 pcdev->base = base; 1623 pcdev->base = base;
1638 1624
@@ -1642,8 +1628,7 @@ static int omap1_cam_probe(struct platform_device *pdev)
1642 dma_isr, (void *)pcdev, &pcdev->dma_ch); 1628 dma_isr, (void *)pcdev, &pcdev->dma_ch);
1643 if (err < 0) { 1629 if (err < 0) {
1644 dev_err(&pdev->dev, "Can't request DMA for OMAP1 Camera\n"); 1630 dev_err(&pdev->dev, "Can't request DMA for OMAP1 Camera\n");
1645 err = -EBUSY; 1631 return -EBUSY;
1646 goto exit_iounmap;
1647 } 1632 }
1648 dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_ch); 1633 dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_ch);
1649 1634
@@ -1655,7 +1640,8 @@ static int omap1_cam_probe(struct platform_device *pdev)
1655 /* setup DMA autoinitialization */ 1640 /* setup DMA autoinitialization */
1656 omap_dma_link_lch(pcdev->dma_ch, pcdev->dma_ch); 1641 omap_dma_link_lch(pcdev->dma_ch, pcdev->dma_ch);
1657 1642
1658 err = request_irq(pcdev->irq, cam_isr, 0, DRIVER_NAME, pcdev); 1643 err = devm_request_irq(&pdev->dev, pcdev->irq, cam_isr, 0, DRIVER_NAME,
1644 pcdev);
1659 if (err) { 1645 if (err) {
1660 dev_err(&pdev->dev, "Camera interrupt register failed\n"); 1646 dev_err(&pdev->dev, "Camera interrupt register failed\n");
1661 goto exit_free_dma; 1647 goto exit_free_dma;
@@ -1669,24 +1655,14 @@ static int omap1_cam_probe(struct platform_device *pdev)
1669 1655
1670 err = soc_camera_host_register(&pcdev->soc_host); 1656 err = soc_camera_host_register(&pcdev->soc_host);
1671 if (err) 1657 if (err)
1672 goto exit_free_irq; 1658 return err;
1673 1659
1674 dev_info(&pdev->dev, "OMAP1 Camera Interface driver loaded\n"); 1660 dev_info(&pdev->dev, "OMAP1 Camera Interface driver loaded\n");
1675 1661
1676 return 0; 1662 return 0;
1677 1663
1678exit_free_irq:
1679 free_irq(pcdev->irq, pcdev);
1680exit_free_dma: 1664exit_free_dma:
1681 omap_free_dma(pcdev->dma_ch); 1665 omap_free_dma(pcdev->dma_ch);
1682exit_iounmap:
1683 iounmap(base);
1684exit_release:
1685 release_mem_region(res->start, resource_size(res));
1686exit_kfree:
1687 kfree(pcdev);
1688exit_put_clk:
1689 clk_put(clk);
1690exit: 1666exit:
1691 return err; 1667 return err;
1692} 1668}
@@ -1696,23 +1672,11 @@ static int omap1_cam_remove(struct platform_device *pdev)
1696 struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); 1672 struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
1697 struct omap1_cam_dev *pcdev = container_of(soc_host, 1673 struct omap1_cam_dev *pcdev = container_of(soc_host,
1698 struct omap1_cam_dev, soc_host); 1674 struct omap1_cam_dev, soc_host);
1699 struct resource *res;
1700
1701 free_irq(pcdev->irq, pcdev);
1702 1675
1703 omap_free_dma(pcdev->dma_ch); 1676 omap_free_dma(pcdev->dma_ch);
1704 1677
1705 soc_camera_host_unregister(soc_host); 1678 soc_camera_host_unregister(soc_host);
1706 1679
1707 iounmap(pcdev->base);
1708
1709 res = pcdev->res;
1710 release_mem_region(res->start, resource_size(res));
1711
1712 clk_put(pcdev->clk);
1713
1714 kfree(pcdev);
1715
1716 dev_info(&pdev->dev, "OMAP1 Camera Interface driver unloaded\n"); 1680 dev_info(&pdev->dev, "OMAP1 Camera Interface driver unloaded\n");
1717 1681
1718 return 0; 1682 return 0;
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index c5a5138b3d3b..6ceb4eb00493 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -1065,7 +1065,7 @@ static int iss_register_entities(struct iss_device *iss)
1065 } 1065 }
1066 1066
1067 ret = media_create_pad_link(&sensor->entity, 0, input, pad, 1067 ret = media_create_pad_link(&sensor->entity, 0, input, pad,
1068 flags); 1068 flags);
1069 if (ret < 0) 1069 if (ret < 0)
1070 goto done; 1070 goto done;
1071 } 1071 }
diff --git a/drivers/staging/most/hdm-dim2/dim2_errors.h b/drivers/staging/most/hdm-dim2/dim2_errors.h
index 5a713df1d1d4..66343ba426c1 100644
--- a/drivers/staging/most/hdm-dim2/dim2_errors.h
+++ b/drivers/staging/most/hdm-dim2/dim2_errors.h
@@ -15,10 +15,6 @@
15#ifndef _MOST_DIM_ERRORS_H 15#ifndef _MOST_DIM_ERRORS_H
16#define _MOST_DIM_ERRORS_H 16#define _MOST_DIM_ERRORS_H
17 17
18#ifdef __cplusplus
19extern "C" {
20#endif
21
22/** 18/**
23 * MOST DIM errors. 19 * MOST DIM errors.
24 */ 20 */
@@ -58,8 +54,4 @@ enum dim_errors_t {
58 DIM_ERR_OVERFLOW, 54 DIM_ERR_OVERFLOW,
59}; 55};
60 56
61#ifdef __cplusplus
62}
63#endif
64
65#endif /* _MOST_DIM_ERRORS_H */ 57#endif /* _MOST_DIM_ERRORS_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/hdm-dim2/dim2_hal.h
index fc73d4f97734..1c924e869de7 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.h
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.h
@@ -18,10 +18,6 @@
18#include <linux/types.h> 18#include <linux/types.h>
19#include "dim2_reg.h" 19#include "dim2_reg.h"
20 20
21#ifdef __cplusplus
22extern "C" {
23#endif
24
25/* 21/*
26 * The values below are specified in the hardware specification. 22 * The values below are specified in the hardware specification.
27 * So, they should not be changed until the hardware specification changes. 23 * So, they should not be changed until the hardware specification changes.
@@ -42,14 +38,12 @@ struct dim_ch_state_t {
42 u16 done_buffers; /* Number of completed buffers */ 38 u16 done_buffers; /* Number of completed buffers */
43}; 39};
44 40
45typedef int atomic_counter_t;
46
47struct int_ch_state { 41struct int_ch_state {
48 /* changed only in interrupt context */ 42 /* changed only in interrupt context */
49 volatile atomic_counter_t request_counter; 43 volatile int request_counter;
50 44
51 /* changed only in task context */ 45 /* changed only in task context */
52 volatile atomic_counter_t service_counter; 46 volatile int service_counter;
53 47
54 u8 idx1; 48 u8 idx1;
55 u8 idx2; 49 u8 idx2;
@@ -110,8 +104,4 @@ void dimcb_io_write(u32 __iomem *ptr32, u32 value);
110 104
111void dimcb_on_error(u8 error_id, const char *error_message); 105void dimcb_on_error(u8 error_id, const char *error_message);
112 106
113#ifdef __cplusplus
114}
115#endif
116
117#endif /* _DIM2_HAL_H */ 107#endif /* _DIM2_HAL_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_reg.h b/drivers/staging/most/hdm-dim2/dim2_reg.h
index bcf6a79f6744..e0837b6b9ae1 100644
--- a/drivers/staging/most/hdm-dim2/dim2_reg.h
+++ b/drivers/staging/most/hdm-dim2/dim2_reg.h
@@ -17,10 +17,6 @@
17 17
18#include <linux/types.h> 18#include <linux/types.h>
19 19
20#ifdef __cplusplus
21extern "C" {
22#endif
23
24struct dim2_regs { 20struct dim2_regs {
25 /* 0x00 */ u32 MLBC0; 21 /* 0x00 */ u32 MLBC0;
26 /* 0x01 */ u32 rsvd0[1]; 22 /* 0x01 */ u32 rsvd0[1];
@@ -166,8 +162,4 @@ enum {
166 CAT_CL_MASK = DIM2_MASK(6) 162 CAT_CL_MASK = DIM2_MASK(6)
167}; 163};
168 164
169#ifdef __cplusplus
170}
171#endif
172
173#endif /* DIM2_OS62420_H */ 165#endif /* DIM2_OS62420_H */
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index aa1cdf602cf6..99445d0fcf9c 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -850,7 +850,7 @@ static int xlr_mii_probe(struct xlr_net_priv *priv)
850 850
851 /* Attach MAC to PHY */ 851 /* Attach MAC to PHY */
852 phydev = phy_connect(priv->ndev, phydev_name(phydev), 852 phydev = phy_connect(priv->ndev, phydev_name(phydev),
853 &xlr_gmac_link_adjust, priv->nd->phy_interface); 853 xlr_gmac_link_adjust, priv->nd->phy_interface);
854 854
855 if (IS_ERR(phydev)) { 855 if (IS_ERR(phydev)) {
856 pr_err("could not attach PHY\n"); 856 pr_err("could not attach PHY\n");
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 9fda136b8e05..c1feccf8d94a 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -264,7 +264,7 @@ int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
264 264
265 msg = nvec_msg_alloc(nvec, NVEC_MSG_TX); 265 msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
266 266
267 if (msg == NULL) 267 if (!msg)
268 return -ENOMEM; 268 return -ENOMEM;
269 269
270 msg->data[0] = size; 270 msg->data[0] = size;
@@ -620,7 +620,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
620 } else { 620 } else {
621 nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX); 621 nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
622 /* Should not happen in a normal world */ 622 /* Should not happen in a normal world */
623 if (unlikely(nvec->rx == NULL)) { 623 if (unlikely(!nvec->rx)) {
624 nvec->state = 0; 624 nvec->state = 0;
625 break; 625 break;
626 } 626 }
@@ -659,10 +659,11 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
659 } else if (nvec->tx && nvec->tx->pos < nvec->tx->size) { 659 } else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
660 to_send = nvec->tx->data[nvec->tx->pos++]; 660 to_send = nvec->tx->data[nvec->tx->pos++];
661 } else { 661 } else {
662 dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n", 662 dev_err(nvec->dev,
663 "tx buffer underflow on %p (%u > %u)\n",
663 nvec->tx, 664 nvec->tx,
664 (uint) (nvec->tx ? nvec->tx->pos : 0), 665 (uint)(nvec->tx ? nvec->tx->pos : 0),
665 (uint) (nvec->tx ? nvec->tx->size : 0)); 666 (uint)(nvec->tx ? nvec->tx->size : 0));
666 nvec->state = 0; 667 nvec->state = 0;
667 } 668 }
668 break; 669 break;
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
index b4a0545e8806..fcbb0fa03765 100644
--- a/drivers/staging/nvec/nvec_power.c
+++ b/drivers/staging/nvec/nvec_power.c
@@ -90,7 +90,7 @@ static int nvec_power_notifier(struct notifier_block *nb,
90{ 90{
91 struct nvec_power *power = 91 struct nvec_power *power =
92 container_of(nb, struct nvec_power, notifier); 92 container_of(nb, struct nvec_power, notifier);
93 struct bat_response *res = (struct bat_response *)data; 93 struct bat_response *res = data;
94 94
95 if (event_type != NVEC_SYS) 95 if (event_type != NVEC_SYS)
96 return NOTIFY_DONE; 96 return NOTIFY_DONE;
@@ -126,7 +126,7 @@ static int nvec_power_bat_notifier(struct notifier_block *nb,
126{ 126{
127 struct nvec_power *power = 127 struct nvec_power *power =
128 container_of(nb, struct nvec_power, notifier); 128 container_of(nb, struct nvec_power, notifier);
129 struct bat_response *res = (struct bat_response *)data; 129 struct bat_response *res = data;
130 int status_changed = 0; 130 int status_changed = 0;
131 131
132 if (event_type != NVEC_BAT) 132 if (event_type != NVEC_BAT)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index b6993b0b8170..a10fe3af9a9c 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -172,12 +172,13 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
172 if (OCTEON_IS_MODEL(OCTEON_CN68XX)) { 172 if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
173 old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); 173 old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
174 cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), 174 cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
175 1ull << pow_receive_group); 175 1ull << pow_receive_group);
176 cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */ 176 cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
177 } else { 177 } else {
178 old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid)); 178 old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
179 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), 179 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
180 (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group); 180 (old_group_mask & ~0xFFFFull) |
181 1 << pow_receive_group);
181 } 182 }
182 183
183 if (USE_ASYNC_IOBDMA) { 184 if (USE_ASYNC_IOBDMA) {
@@ -374,7 +375,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
374 * doesn't exist. 375 * doesn't exist.
375 */ 376 */
376 printk_ratelimited("Port %d not controlled by Linux, packet dropped\n", 377 printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
377 port); 378 port);
378 dev_kfree_skb_irq(skb); 379 dev_kfree_skb_irq(skb);
379 } 380 }
380 /* 381 /*
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
index a5973fd015fc..315a63d7094f 100644
--- a/drivers/staging/octeon/ethernet-rx.h
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -30,7 +30,7 @@ static inline void cvm_oct_rx_refill_pool(int fill_threshold)
30 number_to_free); 30 number_to_free);
31 if (num_freed != number_to_free) { 31 if (num_freed != number_to_free) {
32 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 32 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
33 number_to_free - num_freed); 33 number_to_free - num_freed);
34 } 34 }
35 } 35 }
36} 36}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index ffe9bd77a7bb..6b4c20872323 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -58,9 +58,9 @@ static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
58/* Maximum number of SKBs to try to free per xmit packet. */ 58/* Maximum number of SKBs to try to free per xmit packet. */
59#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2) 59#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
60 60
61static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau) 61static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau)
62{ 62{
63 int32_t undo; 63 int undo;
64 64
65 undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + 65 undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
66 MAX_SKB_TO_FREE; 66 MAX_SKB_TO_FREE;
@@ -83,7 +83,7 @@ static void cvm_oct_kick_tx_poll_watchdog(void)
83 83
84static void cvm_oct_free_tx_skbs(struct net_device *dev) 84static void cvm_oct_free_tx_skbs(struct net_device *dev)
85{ 85{
86 int32_t skb_to_free; 86 int skb_to_free;
87 int qos, queues_per_port; 87 int qos, queues_per_port;
88 int total_freed = 0; 88 int total_freed = 0;
89 int total_remaining = 0; 89 int total_remaining = 0;
@@ -148,8 +148,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
148 enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type; 148 enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
149 struct octeon_ethernet *priv = netdev_priv(dev); 149 struct octeon_ethernet *priv = netdev_priv(dev);
150 struct sk_buff *to_free_list; 150 struct sk_buff *to_free_list;
151 int32_t skb_to_free; 151 int skb_to_free;
152 int32_t buffers_to_free; 152 int buffers_to_free;
153 u32 total_to_clean; 153 u32 total_to_clean;
154 unsigned long flags; 154 unsigned long flags;
155#if REUSE_SKBUFFS_WITHOUT_FREE 155#if REUSE_SKBUFFS_WITHOUT_FREE
@@ -220,7 +220,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
220 priv->fau + qos * 4, MAX_SKB_TO_FREE); 220 priv->fau + qos * 4, MAX_SKB_TO_FREE);
221 } 221 }
222 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, 222 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
223 priv->fau + qos * 4); 223 priv->fau +
224 qos * 4);
224 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); 225 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
225 goto skip_xmit; 226 goto skip_xmit;
226 } 227 }
@@ -402,7 +403,7 @@ dont_put_skbuff_in_hw:
402 } 403 }
403 404
404 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, 405 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
405 priv->fau + qos * 4); 406 priv->fau + qos * 4);
406 407
407 /* 408 /*
408 * If we're sending faster than the receive can free them then 409 * If we're sending faster than the receive can free them then
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 271e1b8d8506..e9cd5f242921 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -635,7 +635,7 @@ static struct device_node *cvm_oct_of_get_child(
635} 635}
636 636
637static struct device_node *cvm_oct_node_for_port(struct device_node *pip, 637static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
638 int interface, int port) 638 int interface, int port)
639{ 639{
640 struct device_node *ni, *np; 640 struct device_node *ni, *np;
641 641
@@ -815,7 +815,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
815 free_netdev(dev); 815 free_netdev(dev);
816 } else if (register_netdev(dev) < 0) { 816 } else if (register_netdev(dev) < 0) {
817 pr_err("Failed to register ethernet device for interface %d, port %d\n", 817 pr_err("Failed to register ethernet device for interface %d, port %d\n",
818 interface, priv->port); 818 interface, priv->port);
819 free_netdev(dev); 819 free_netdev(dev);
820 } else { 820 } else {
821 cvm_oct_device[priv->port] = dev; 821 cvm_oct_device[priv->port] = dev;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 012860b34651..a5755358cc5d 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_AP_C_ 15#define _RTW_AP_C_
21 16
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index e5a6b7a70df7..77485235c615 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_CMD_C_ 15#define _RTW_CMD_C_
21 16
@@ -263,11 +258,11 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
263 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1); 258 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
264 259
265 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); 260 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
266 if (ph2c == NULL) 261 if (!ph2c)
267 return _FAIL; 262 return _FAIL;
268 263
269 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC); 264 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
270 if (psurveyPara == NULL) { 265 if (!psurveyPara) {
271 kfree(ph2c); 266 kfree(ph2c);
272 return _FAIL; 267 return _FAIL;
273 } 268 }
@@ -350,7 +345,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
350 RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid)); 345 RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
351 346
352 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 347 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
353 if (pcmd == NULL) { 348 if (!pcmd) {
354 res = _FAIL; 349 res = _FAIL;
355 goto exit; 350 goto exit;
356 } 351 }
@@ -521,7 +516,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
521 516
522 /* prepare cmd parameter */ 517 /* prepare cmd parameter */
523 param = kzalloc(sizeof(*param), GFP_KERNEL); 518 param = kzalloc(sizeof(*param), GFP_KERNEL);
524 if (param == NULL) { 519 if (!param) {
525 res = _FAIL; 520 res = _FAIL;
526 goto exit; 521 goto exit;
527 } 522 }
@@ -530,7 +525,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
530 if (enqueue) { 525 if (enqueue) {
531 /* need enqueue, prepare cmd_obj and enqueue */ 526 /* need enqueue, prepare cmd_obj and enqueue */
532 cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL); 527 cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
533 if (cmdobj == NULL) { 528 if (!cmdobj) {
534 res = _FAIL; 529 res = _FAIL;
535 kfree(param); 530 kfree(param);
536 goto exit; 531 goto exit;
@@ -629,20 +624,20 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
629 clear_cam_entry(padapter, entry); 624 clear_cam_entry(padapter, entry);
630 } else { 625 } else {
631 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); 626 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
632 if (ph2c == NULL) { 627 if (!ph2c) {
633 res = _FAIL; 628 res = _FAIL;
634 goto exit; 629 goto exit;
635 } 630 }
636 631
637 psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_ATOMIC); 632 psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_ATOMIC);
638 if (psetstakey_para == NULL) { 633 if (!psetstakey_para) {
639 kfree(ph2c); 634 kfree(ph2c);
640 res = _FAIL; 635 res = _FAIL;
641 goto exit; 636 goto exit;
642 } 637 }
643 638
644 psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp), GFP_ATOMIC); 639 psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp), GFP_ATOMIC);
645 if (psetstakey_rsp == NULL) { 640 if (!psetstakey_rsp) {
646 kfree(ph2c); 641 kfree(ph2c);
647 kfree(psetstakey_para); 642 kfree(psetstakey_para);
648 res = _FAIL; 643 res = _FAIL;
@@ -676,13 +671,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
676 671
677 672
678 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 673 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
679 if (ph2c == NULL) { 674 if (!ph2c) {
680 res = _FAIL; 675 res = _FAIL;
681 goto exit; 676 goto exit;
682 } 677 }
683 678
684 paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_KERNEL); 679 paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_KERNEL);
685 if (paddbareq_parm == NULL) { 680 if (!paddbareq_parm) {
686 kfree(ph2c); 681 kfree(ph2c);
687 res = _FAIL; 682 res = _FAIL;
688 goto exit; 683 goto exit;
@@ -713,13 +708,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
713 708
714 709
715 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); 710 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
716 if (ph2c == NULL) { 711 if (!ph2c) {
717 res = _FAIL; 712 res = _FAIL;
718 goto exit; 713 goto exit;
719 } 714 }
720 715
721 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC); 716 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
722 if (pdrvextra_cmd_parm == NULL) { 717 if (!pdrvextra_cmd_parm) {
723 kfree(ph2c); 718 kfree(ph2c);
724 res = _FAIL; 719 res = _FAIL;
725 goto exit; 720 goto exit;
@@ -757,7 +752,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
757 752
758 /* prepare cmd parameter */ 753 /* prepare cmd parameter */
759 setChannelPlan_param = kzalloc(sizeof(struct SetChannelPlan_param), GFP_KERNEL); 754 setChannelPlan_param = kzalloc(sizeof(struct SetChannelPlan_param), GFP_KERNEL);
760 if (setChannelPlan_param == NULL) { 755 if (!setChannelPlan_param) {
761 res = _FAIL; 756 res = _FAIL;
762 goto exit; 757 goto exit;
763 } 758 }
@@ -766,7 +761,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
766 if (enqueue) { 761 if (enqueue) {
767 /* need enqueue, prepare cmd_obj and enqueue */ 762 /* need enqueue, prepare cmd_obj and enqueue */
768 pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 763 pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
769 if (pcmdobj == NULL) { 764 if (!pcmdobj) {
770 kfree(setChannelPlan_param); 765 kfree(setChannelPlan_param);
771 res = _FAIL; 766 res = _FAIL;
772 goto exit; 767 goto exit;
@@ -925,13 +920,13 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
925 920
926 if (enqueue) { 921 if (enqueue) {
927 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); 922 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
928 if (ph2c == NULL) { 923 if (!ph2c) {
929 res = _FAIL; 924 res = _FAIL;
930 goto exit; 925 goto exit;
931 } 926 }
932 927
933 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC); 928 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
934 if (pdrvextra_cmd_parm == NULL) { 929 if (!pdrvextra_cmd_parm) {
935 kfree(ph2c); 930 kfree(ph2c);
936 res = _FAIL; 931 res = _FAIL;
937 goto exit; 932 goto exit;
@@ -968,13 +963,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
968 u8 res = _SUCCESS; 963 u8 res = _SUCCESS;
969 964
970 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); 965 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
971 if (ph2c == NULL) { 966 if (!ph2c) {
972 res = _FAIL; 967 res = _FAIL;
973 goto exit; 968 goto exit;
974 } 969 }
975 970
976 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC); 971 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
977 if (pdrvextra_cmd_parm == NULL) { 972 if (!pdrvextra_cmd_parm) {
978 kfree(ph2c); 973 kfree(ph2c);
979 res = _FAIL; 974 res = _FAIL;
980 goto exit; 975 goto exit;
@@ -1010,13 +1005,13 @@ u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
1010 1005
1011 if (enqueue) { 1006 if (enqueue) {
1012 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 1007 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
1013 if (ph2c == NULL) { 1008 if (!ph2c) {
1014 res = _FAIL; 1009 res = _FAIL;
1015 goto exit; 1010 goto exit;
1016 } 1011 }
1017 1012
1018 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 1013 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
1019 if (pdrvextra_cmd_parm == NULL) { 1014 if (!pdrvextra_cmd_parm) {
1020 kfree(ph2c); 1015 kfree(ph2c);
1021 res = _FAIL; 1016 res = _FAIL;
1022 goto exit; 1017 goto exit;
@@ -1108,13 +1103,13 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
1108 u8 res = _SUCCESS; 1103 u8 res = _SUCCESS;
1109 1104
1110 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 1105 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
1111 if (ph2c == NULL) { 1106 if (!ph2c) {
1112 res = _FAIL; 1107 res = _FAIL;
1113 goto exit; 1108 goto exit;
1114 } 1109 }
1115 1110
1116 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 1111 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
1117 if (pdrvextra_cmd_parm == NULL) { 1112 if (!pdrvextra_cmd_parm) {
1118 kfree(ph2c); 1113 kfree(ph2c);
1119 res = _FAIL; 1114 res = _FAIL;
1120 goto exit; 1115 goto exit;
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index 93e898d598fe..db5c952ac852 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_DEBUG_C_ 15#define _RTW_DEBUG_C_
21 16
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 19f11d04d152..c17870cddb5b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_EFUSE_C_ 15#define _RTW_EFUSE_C_
21 16
@@ -107,7 +102,7 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
107 if (!efuseTbl) 102 if (!efuseTbl)
108 return; 103 return;
109 104
110 eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16)); 105 eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(*eFuseWord));
111 if (!eFuseWord) { 106 if (!eFuseWord) {
112 DBG_88E("%s: alloc eFuseWord fail!\n", __func__); 107 DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
113 goto eFuseWord_failed; 108 goto eFuseWord_failed;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index f4e4baf6054a..0b0d78fe83ed 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _IEEE80211_C 15#define _IEEE80211_C
21 16
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index cf60717a6c19..f85a6abec3a3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_IOCTL_SET_C_ 15#define _RTW_IOCTL_SET_C_
21 16
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index a645a620ebe2..1456499b84bf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_MLME_C_ 15#define _RTW_MLME_C_
21 16
@@ -1584,13 +1579,13 @@ int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
1584 int res = _SUCCESS; 1579 int res = _SUCCESS;
1585 1580
1586 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 1581 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
1587 if (pcmd == NULL) { 1582 if (!pcmd) {
1588 res = _FAIL; /* try again */ 1583 res = _FAIL; /* try again */
1589 goto exit; 1584 goto exit;
1590 } 1585 }
1591 1586
1592 psetauthparm = kzalloc(sizeof(struct setauth_parm), GFP_KERNEL); 1587 psetauthparm = kzalloc(sizeof(struct setauth_parm), GFP_KERNEL);
1593 if (psetauthparm == NULL) { 1588 if (!psetauthparm) {
1594 kfree(pcmd); 1589 kfree(pcmd);
1595 res = _FAIL; 1590 res = _FAIL;
1596 goto exit; 1591 goto exit;
@@ -1621,11 +1616,11 @@ int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, in
1621 int res = _SUCCESS; 1616 int res = _SUCCESS;
1622 1617
1623 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 1618 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
1624 if (pcmd == NULL) 1619 if (!pcmd)
1625 return _FAIL; /* try again */ 1620 return _FAIL; /* try again */
1626 1621
1627 psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL); 1622 psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL);
1628 if (psetkeyparm == NULL) { 1623 if (!psetkeyparm) {
1629 res = _FAIL; 1624 res = _FAIL;
1630 goto err_free_cmd; 1625 goto err_free_cmd;
1631 } 1626 }
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 591a9127b573..7f32b39e5869 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_MLME_EXT_C_ 15#define _RTW_MLME_EXT_C_
21 16
@@ -606,8 +601,6 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
606 pattrib->last_txcmdsz = pattrib->pktlen; 601 pattrib->last_txcmdsz = pattrib->pktlen;
607 602
608 dump_mgntframe(padapter, pmgntframe); 603 dump_mgntframe(padapter, pmgntframe);
609
610 return;
611} 604}
612 605
613static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, bool wait_ack) 606static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, bool wait_ack)
@@ -888,8 +881,6 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
888 rtw_wep_encrypt(padapter, (u8 *)pmgntframe); 881 rtw_wep_encrypt(padapter, (u8 *)pmgntframe);
889 DBG_88E("%s\n", __func__); 882 DBG_88E("%s\n", __func__);
890 dump_mgntframe(padapter, pmgntframe); 883 dump_mgntframe(padapter, pmgntframe);
891
892 return;
893} 884}
894 885
895 886
@@ -1212,8 +1203,6 @@ exit:
1212 rtw_buf_update(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len, (u8 *)pwlanhdr, pattrib->pktlen); 1203 rtw_buf_update(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len, (u8 *)pwlanhdr, pattrib->pktlen);
1213 else 1204 else
1214 rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len); 1205 rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
1215
1216 return;
1217} 1206}
1218 1207
1219/* when wait_ack is true, this function should be called at process context */ 1208/* when wait_ack is true, this function should be called at process context */
@@ -2105,7 +2094,6 @@ static void site_survey(struct adapter *padapter)
2105 issue_action_BSSCoexistPacket(padapter); 2094 issue_action_BSSCoexistPacket(padapter);
2106 issue_action_BSSCoexistPacket(padapter); 2095 issue_action_BSSCoexistPacket(padapter);
2107 } 2096 }
2108 return;
2109} 2097}
2110 2098
2111/* collect bss info from Beacon and Probe request/response frames. */ 2099/* collect bss info from Beacon and Probe request/response frames. */
@@ -4295,12 +4283,12 @@ void report_survey_event(struct adapter *padapter,
4295 4283
4296 4284
4297 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); 4285 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
4298 if (pcmd_obj == NULL) 4286 if (!pcmd_obj)
4299 return; 4287 return;
4300 4288
4301 cmdsz = sizeof(struct survey_event) + sizeof(struct C2HEvent_Header); 4289 cmdsz = sizeof(struct survey_event) + sizeof(struct C2HEvent_Header);
4302 pevtcmd = kzalloc(cmdsz, GFP_ATOMIC); 4290 pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
4303 if (pevtcmd == NULL) { 4291 if (!pevtcmd) {
4304 kfree(pcmd_obj); 4292 kfree(pcmd_obj);
4305 return; 4293 return;
4306 } 4294 }
@@ -4332,8 +4320,6 @@ void report_survey_event(struct adapter *padapter,
4332 rtw_enqueue_cmd(pcmdpriv, pcmd_obj); 4320 rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
4333 4321
4334 pmlmeext->sitesurvey_res.bss_cnt++; 4322 pmlmeext->sitesurvey_res.bss_cnt++;
4335
4336 return;
4337} 4323}
4338 4324
4339void report_surveydone_event(struct adapter *padapter) 4325void report_surveydone_event(struct adapter *padapter)
@@ -4347,12 +4333,12 @@ void report_surveydone_event(struct adapter *padapter)
4347 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 4333 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
4348 4334
4349 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 4335 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
4350 if (pcmd_obj == NULL) 4336 if (!pcmd_obj)
4351 return; 4337 return;
4352 4338
4353 cmdsz = sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header); 4339 cmdsz = sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header);
4354 pevtcmd = kzalloc(cmdsz, GFP_KERNEL); 4340 pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
4355 if (pevtcmd == NULL) { 4341 if (!pevtcmd) {
4356 kfree(pcmd_obj); 4342 kfree(pcmd_obj);
4357 return; 4343 return;
4358 } 4344 }
@@ -4377,8 +4363,6 @@ void report_surveydone_event(struct adapter *padapter)
4377 DBG_88E("survey done event(%x)\n", psurveydone_evt->bss_cnt); 4363 DBG_88E("survey done event(%x)\n", psurveydone_evt->bss_cnt);
4378 4364
4379 rtw_enqueue_cmd(pcmdpriv, pcmd_obj); 4365 rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
4380
4381 return;
4382} 4366}
4383 4367
4384void report_join_res(struct adapter *padapter, int res) 4368void report_join_res(struct adapter *padapter, int res)
@@ -4393,12 +4377,12 @@ void report_join_res(struct adapter *padapter, int res)
4393 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 4377 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
4394 4378
4395 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); 4379 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
4396 if (pcmd_obj == NULL) 4380 if (!pcmd_obj)
4397 return; 4381 return;
4398 4382
4399 cmdsz = sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header); 4383 cmdsz = sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header);
4400 pevtcmd = kzalloc(cmdsz, GFP_ATOMIC); 4384 pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
4401 if (pevtcmd == NULL) { 4385 if (!pevtcmd) {
4402 kfree(pcmd_obj); 4386 kfree(pcmd_obj);
4403 return; 4387 return;
4404 } 4388 }
@@ -4429,8 +4413,6 @@ void report_join_res(struct adapter *padapter, int res)
4429 4413
4430 4414
4431 rtw_enqueue_cmd(pcmdpriv, pcmd_obj); 4415 rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
4432
4433 return;
4434} 4416}
4435 4417
4436void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason) 4418void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
@@ -4446,12 +4428,12 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
4446 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 4428 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
4447 4429
4448 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 4430 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
4449 if (pcmd_obj == NULL) 4431 if (!pcmd_obj)
4450 return; 4432 return;
4451 4433
4452 cmdsz = sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header); 4434 cmdsz = sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header);
4453 pevtcmd = kzalloc(cmdsz, GFP_KERNEL); 4435 pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
4454 if (pevtcmd == NULL) { 4436 if (!pevtcmd) {
4455 kfree(pcmd_obj); 4437 kfree(pcmd_obj);
4456 return; 4438 return;
4457 } 4439 }
@@ -4486,8 +4468,6 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
4486 DBG_88E("report_del_sta_event: delete STA, mac_id =%d\n", mac_id); 4468 DBG_88E("report_del_sta_event: delete STA, mac_id =%d\n", mac_id);
4487 4469
4488 rtw_enqueue_cmd(pcmdpriv, pcmd_obj); 4470 rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
4489
4490 return;
4491} 4471}
4492 4472
4493void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx) 4473void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
@@ -4501,12 +4481,12 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
4501 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 4481 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
4502 4482
4503 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 4483 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
4504 if (pcmd_obj == NULL) 4484 if (!pcmd_obj)
4505 return; 4485 return;
4506 4486
4507 cmdsz = sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header); 4487 cmdsz = sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header);
4508 pevtcmd = kzalloc(cmdsz, GFP_KERNEL); 4488 pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
4509 if (pevtcmd == NULL) { 4489 if (!pevtcmd) {
4510 kfree(pcmd_obj); 4490 kfree(pcmd_obj);
4511 return; 4491 return;
4512 } 4492 }
@@ -4532,8 +4512,6 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
4532 DBG_88E("report_add_sta_event: add STA\n"); 4512 DBG_88E("report_add_sta_event: add STA\n");
4533 4513
4534 rtw_enqueue_cmd(pcmdpriv, pcmd_obj); 4514 rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
4535
4536 return;
4537} 4515}
4538 4516
4539 4517
@@ -4917,11 +4895,11 @@ void survey_timer_hdl(unsigned long data)
4917 } 4895 }
4918 4896
4919 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); 4897 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
4920 if (ph2c == NULL) 4898 if (!ph2c)
4921 goto exit_survey_timer_hdl; 4899 goto exit_survey_timer_hdl;
4922 4900
4923 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC); 4901 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
4924 if (psurveyPara == NULL) { 4902 if (!psurveyPara) {
4925 kfree(ph2c); 4903 kfree(ph2c);
4926 goto exit_survey_timer_hdl; 4904 goto exit_survey_timer_hdl;
4927 } 4905 }
@@ -4969,7 +4947,6 @@ void link_timer_hdl(unsigned long data)
4969 issue_assocreq(padapter); 4947 issue_assocreq(padapter);
4970 set_link_timer(pmlmeext, REASSOC_TO); 4948 set_link_timer(pmlmeext, REASSOC_TO);
4971 } 4949 }
4972 return;
4973} 4950}
4974 4951
4975void addba_timer_hdl(unsigned long data) 4952void addba_timer_hdl(unsigned long data)
@@ -5485,7 +5462,7 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
5485 5462
5486 5463
5487 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 5464 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
5488 if (ph2c == NULL) { 5465 if (!ph2c) {
5489 res = _FAIL; 5466 res = _FAIL;
5490 goto exit; 5467 goto exit;
5491 } 5468 }
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 5e1ef9fdcf47..59c6d8ab60f6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_PWRCTRL_C_ 15#define _RTW_PWRCTRL_C_
21 16
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 5f53aa1cfd8a..977bb2532c3e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_RECV_C_ 15#define _RTW_RECV_C_
21 16
diff --git a/drivers/staging/rtl8188eu/core/rtw_rf.c b/drivers/staging/rtl8188eu/core/rtw_rf.c
index 4ad2d8f63acf..3fc1a8fd367c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_rf.c
+++ b/drivers/staging/rtl8188eu/core/rtw_rf.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_RF_C_ 15#define _RTW_RF_C_
21 16
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index b781ccf45bc0..442a614a3726 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_SECURITY_C_ 15#define _RTW_SECURITY_C_
21 16
diff --git a/drivers/staging/rtl8188eu/core/rtw_sreset.c b/drivers/staging/rtl8188eu/core/rtw_sreset.c
index e725a4708775..13a5bf4730ab 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sreset.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sreset.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21#include <rtw_sreset.h> 16#include <rtw_sreset.h>
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 78a9b9bf3b32..a71e25294add 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_STA_MGT_C_ 15#define _RTW_STA_MGT_C_
21 16
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 83096696cd5b..4410fe8d7c68 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_WLAN_UTIL_C_ 15#define _RTW_WLAN_UTIL_C_
21 16
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index f2dd7a60f67c..e0a5567f5942 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTW_XMIT_C_ 15#define _RTW_XMIT_C_
21 16
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
index a108e8032327..201c15b07f9e 100644
--- a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -557,7 +557,7 @@ int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 macid)
557 u8 WirelessMode = 0xFF; /* invalid value */ 557 u8 WirelessMode = 0xFF; /* invalid value */
558 u8 max_rate_idx = 0x13; /* MCS7 */ 558 u8 max_rate_idx = 0x13; /* MCS7 */
559 559
560 if (dm_odm->pWirelessMode != NULL) 560 if (dm_odm->pWirelessMode)
561 WirelessMode = *(dm_odm->pWirelessMode); 561 WirelessMode = *(dm_odm->pWirelessMode);
562 562
563 if (WirelessMode != 0xFF) { 563 if (WirelessMode != 0xFF) {
diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c
index c2ad6a3b99da..cce1ea259b76 100644
--- a/drivers/staging/rtl8188eu/hal/bb_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c
@@ -11,11 +11,6 @@
11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12* more details. 12* more details.
13* 13*
14* You should have received a copy of the GNU General Public License along with
15* this program; if not, write to the Free Software Foundation, Inc.,
16* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17*
18*
19******************************************************************************/ 14******************************************************************************/
20 15
21#include "odm_precomp.h" 16#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/fw.c b/drivers/staging/rtl8188eu/hal/fw.c
index 656133c47426..03d091bad13a 100644
--- a/drivers/staging/rtl8188eu/hal/fw.c
+++ b/drivers/staging/rtl8188eu/hal/fw.c
@@ -11,10 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the 14 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE. 15 * file called LICENSE.
20 * 16 *
diff --git a/drivers/staging/rtl8188eu/hal/hal_com.c b/drivers/staging/rtl8188eu/hal/hal_com.c
index 3871cda2eec2..960cc406d238 100644
--- a/drivers/staging/rtl8188eu/hal/hal_com.c
+++ b/drivers/staging/rtl8188eu/hal/hal_com.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#include <osdep_service.h> 15#include <osdep_service.h>
21#include <drv_types.h> 16#include <drv_types.h>
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index 85c17ef942f3..085f0fbd0c43 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21#define _HAL_INTF_C_ 16#define _HAL_INTF_C_
@@ -186,7 +181,7 @@ s32 rtw_hal_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
186 181
187s32 rtw_hal_init_xmit_priv(struct adapter *adapt) 182s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
188{ 183{
189 if (adapt->HalFunc.init_xmit_priv != NULL) 184 if (adapt->HalFunc.init_xmit_priv)
190 return adapt->HalFunc.init_xmit_priv(adapt); 185 return adapt->HalFunc.init_xmit_priv(adapt);
191 return _FAIL; 186 return _FAIL;
192} 187}
diff --git a/drivers/staging/rtl8188eu/hal/mac_cfg.c b/drivers/staging/rtl8188eu/hal/mac_cfg.c
index 0bc1b215219a..6ed5e15ce661 100644
--- a/drivers/staging/rtl8188eu/hal/mac_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/mac_cfg.c
@@ -11,11 +11,6 @@
11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12* more details. 12* more details.
13* 13*
14* You should have received a copy of the GNU General Public License along with
15* this program; if not, write to the Free Software Foundation, Inc.,
16* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17*
18*
19******************************************************************************/ 14******************************************************************************/
20 15
21#include "odm_precomp.h" 16#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 8d2316b9e6e5..57a127501694 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21/* include files */ 16/* include files */
diff --git a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
index 28b9f7f591c0..0555e42a3787 100644
--- a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21/* include files */ 16/* include files */
diff --git a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
index c0242a095c19..dd9b902c8ae3 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21#include "odm_precomp.h" 16#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index ae42b4492c77..a83bbea9be93 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTL8188E_PHYCFG_C_ 15#define _RTL8188E_PHYCFG_C_
21 16
diff --git a/drivers/staging/rtl8188eu/hal/pwrseq.c b/drivers/staging/rtl8188eu/hal/pwrseq.c
index 20dce42cee1d..d92a34ea8d60 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseq.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseq.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21#include "pwrseq.h" 16#include "pwrseq.h"
diff --git a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
index b76b0f5d6220..2867864bbfbe 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
@@ -11,10 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 ******************************************************************************/ 14 ******************************************************************************/
19 15
20#include <pwrseqcmd.h> 16#include <pwrseqcmd.h>
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 38845d17d593..1596274eefc5 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -11,10 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 ******************************************************************************/ 14 ******************************************************************************/
19 15
20#include <osdep_service.h> 16#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index 44945427cc34..453f9e729067 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -11,11 +11,6 @@
11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12* more details. 12* more details.
13* 13*
14* You should have received a copy of the GNU General Public License along with
15* this program; if not, write to the Free Software Foundation, Inc.,
16* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17*
18*
19******************************************************************************/ 14******************************************************************************/
20 15
21#include "odm_precomp.h" 16#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 580876313e98..2422c0297a50 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTL8188E_CMD_C_ 15#define _RTL8188E_CMD_C_
21 16
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index f9919a94a77e..81f2931876f8 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20/* */ 15/* */
21/* Description: */ 16/* Description: */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 2592bc298f84..0b444fd3e550 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _HAL_INIT_C_ 15#define _HAL_INIT_C_
21 16
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
index 53cf3baf46e0..f110c961df70 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTL8188E_REDESC_C_ 15#define _RTL8188E_REDESC_C_
21 16
@@ -45,7 +40,7 @@ static void process_link_qual(struct adapter *padapter,
45 struct rx_pkt_attrib *pattrib; 40 struct rx_pkt_attrib *pattrib;
46 struct signal_stat *signal_stat; 41 struct signal_stat *signal_stat;
47 42
48 if (prframe == NULL || padapter == NULL) 43 if (!prframe || !padapter)
49 return; 44 return;
50 45
51 pattrib = &prframe->attrib; 46 pattrib = &prframe->attrib;
@@ -64,7 +59,7 @@ static void process_link_qual(struct adapter *padapter,
64 59
65void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe) 60void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe)
66{ 61{
67 struct recv_frame *precvframe = (struct recv_frame *)prframe; 62 struct recv_frame *precvframe = prframe;
68 63
69 /* Check RSSI */ 64 /* Check RSSI */
70 process_rssi(padapter, precvframe); 65 process_rssi(padapter, precvframe);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
index a6ba53b488e3..460a20558bc0 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTL8188E_XMIT_C_ 15#define _RTL8188E_XMIT_C_
21 16
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
index 564cf53bff1b..d9e677ef8f84 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21#include <osdep_service.h> 16#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index d6d009aafcf0..255d6f215091 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTL8188EU_RECV_C_ 15#define _RTL8188EU_RECV_C_
21#include <osdep_service.h> 16#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index c96d80487a56..ec21d8c82eba 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _RTL8188E_XMIT_C_ 15#define _RTL8188E_XMIT_C_
21#include <osdep_service.h> 16#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 07a61b8271f0..87ea3b844951 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _HCI_HAL_INIT_C_ 15#define _HCI_HAL_INIT_C_
21 16
@@ -62,8 +57,8 @@ static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumInPip
62 _ConfigNormalChipOutEP_8188E(adapt, NumOutPipe); 57 _ConfigNormalChipOutEP_8188E(adapt, NumOutPipe);
63 58
64 /* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */ 59 /* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
65 if (1 == haldata->OutEpNumber) { 60 if (haldata->OutEpNumber == 1) {
66 if (1 != NumInPipe) 61 if (NumInPipe != 1)
67 return result; 62 return result;
68 } 63 }
69 64
@@ -179,7 +174,7 @@ static void _InitQueueReservedPage(struct adapter *Adapter)
179 if (haldata->OutEpQueueSel & TX_SELE_LQ) 174 if (haldata->OutEpQueueSel & TX_SELE_LQ)
180 numLQ = 0x1C; 175 numLQ = 0x1C;
181 176
182 /* NOTE: This step shall be proceed before writting REG_RQPN. */ 177 /* NOTE: This step shall be proceed before writing REG_RQPN. */
183 if (haldata->OutEpQueueSel & TX_SELE_NQ) 178 if (haldata->OutEpQueueSel & TX_SELE_NQ)
184 numNQ = 0x1C; 179 numNQ = 0x1C;
185 value8 = (u8)_NPQ(numNQ); 180 value8 = (u8)_NPQ(numNQ);
@@ -457,7 +452,8 @@ static void _InitRetryFunction(struct adapter *Adapter)
457 * When Who Remark 452 * When Who Remark
458 * 12/10/2010 MHC Separate to smaller function. 453 * 12/10/2010 MHC Separate to smaller function.
459 * 454 *
460 *---------------------------------------------------------------------------*/ 455 *---------------------------------------------------------------------------
456 */
461static void usb_AggSettingTxUpdate(struct adapter *Adapter) 457static void usb_AggSettingTxUpdate(struct adapter *Adapter)
462{ 458{
463 struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter); 459 struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
@@ -489,7 +485,8 @@ static void usb_AggSettingTxUpdate(struct adapter *Adapter)
489 * When Who Remark 485 * When Who Remark
490 * 12/10/2010 MHC Separate to smaller function. 486 * 12/10/2010 MHC Separate to smaller function.
491 * 487 *
492 *---------------------------------------------------------------------------*/ 488 *---------------------------------------------------------------------------
489 */
493static void 490static void
494usb_AggSettingRxUpdate( 491usb_AggSettingRxUpdate(
495 struct adapter *Adapter 492 struct adapter *Adapter
@@ -655,7 +652,8 @@ static void _InitAntenna_Selection(struct adapter *Adapter)
655 * Revised History: 652 * Revised History:
656 * When Who Remark 653 * When Who Remark
657 * 08/23/2010 MHC HW suspend mode switch test.. 654 * 08/23/2010 MHC HW suspend mode switch test..
658 *---------------------------------------------------------------------------*/ 655 *---------------------------------------------------------------------------
656 */
659enum rt_rf_power_state RfOnOffDetect(struct adapter *adapt) 657enum rt_rf_power_state RfOnOffDetect(struct adapter *adapt)
660{ 658{
661 u8 val8; 659 u8 val8;
@@ -687,11 +685,9 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
687 685
688 #define HAL_INIT_PROFILE_TAG(stage) do {} while (0) 686 #define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
689 687
690
691 HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BEGIN); 688 HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BEGIN);
692 689
693 if (Adapter->pwrctrlpriv.bkeepfwalive) { 690 if (Adapter->pwrctrlpriv.bkeepfwalive) {
694
695 if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) { 691 if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
696 rtl88eu_phy_iq_calibrate(Adapter, true); 692 rtl88eu_phy_iq_calibrate(Adapter, true);
697 } else { 693 } else {
@@ -715,9 +711,8 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
715 /* Save target channel */ 711 /* Save target channel */
716 haldata->CurrentChannel = 6;/* default set to 6 */ 712 haldata->CurrentChannel = 6;/* default set to 6 */
717 713
718 if (pwrctrlpriv->reg_rfoff) { 714 if (pwrctrlpriv->reg_rfoff)
719 pwrctrlpriv->rf_pwrstate = rf_off; 715 pwrctrlpriv->rf_pwrstate = rf_off;
720 }
721 716
722 /* 2010/08/09 MH We need to check if we need to turnon or off RF after detecting */ 717 /* 2010/08/09 MH We need to check if we need to turnon or off RF after detecting */
723 /* HW GPIO pin. Before PHY_RFConfig8192C. */ 718 /* HW GPIO pin. Before PHY_RFConfig8192C. */
@@ -749,10 +744,9 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
749 DBG_88E("%s: Download Firmware failed!!\n", __func__); 744 DBG_88E("%s: Download Firmware failed!!\n", __func__);
750 Adapter->bFWReady = false; 745 Adapter->bFWReady = false;
751 return status; 746 return status;
752 } else {
753 RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializeadapt8192CSdio(): Download Firmware Success!!\n"));
754 Adapter->bFWReady = true;
755 } 747 }
748 RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializeadapt8192CSdio(): Download Firmware Success!!\n"));
749 Adapter->bFWReady = true;
756 } 750 }
757 rtl8188e_InitializeFirmwareVars(Adapter); 751 rtl8188e_InitializeFirmwareVars(Adapter);
758 752
@@ -878,7 +872,7 @@ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_IQK);
878 /* 2010/08/26 MH Merge from 8192CE. */ 872 /* 2010/08/26 MH Merge from 8192CE. */
879 if (pwrctrlpriv->rf_pwrstate == rf_on) { 873 if (pwrctrlpriv->rf_pwrstate == rf_on) {
880 if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) { 874 if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
881 rtl88eu_phy_iq_calibrate(Adapter, true); 875 rtl88eu_phy_iq_calibrate(Adapter, true);
882 } else { 876 } else {
883 rtl88eu_phy_iq_calibrate(Adapter, false); 877 rtl88eu_phy_iq_calibrate(Adapter, false);
884 haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true; 878 haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
@@ -905,7 +899,6 @@ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_END);
905 DBG_88E("%s in %dms\n", __func__, 899 DBG_88E("%s in %dms\n", __func__,
906 jiffies_to_msecs(jiffies - init_start_time)); 900 jiffies_to_msecs(jiffies - init_start_time));
907 901
908
909 return status; 902 return status;
910} 903}
911 904
@@ -968,6 +961,7 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
968 haldata->bMacPwrCtrlOn = false; 961 haldata->bMacPwrCtrlOn = false;
969 Adapter->bFWReady = false; 962 Adapter->bFWReady = false;
970} 963}
964
971static void rtl8192cu_hw_power_down(struct adapter *adapt) 965static void rtl8192cu_hw_power_down(struct adapter *adapt)
972{ 966{
973 /* 2010/-8/09 MH For power down module, we need to enable register block contrl reg at 0x1c. */ 967 /* 2010/-8/09 MH For power down module, we need to enable register block contrl reg at 0x1c. */
@@ -980,7 +974,6 @@ static void rtl8192cu_hw_power_down(struct adapter *adapt)
980 974
981static u32 rtl8188eu_hal_deinit(struct adapter *Adapter) 975static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
982{ 976{
983
984 DBG_88E("==> %s\n", __func__); 977 DBG_88E("==> %s\n", __func__);
985 978
986 usb_write32(Adapter, REG_HIMR_88E, IMR_DISABLED_88E); 979 usb_write32(Adapter, REG_HIMR_88E, IMR_DISABLED_88E);
@@ -999,14 +992,14 @@ static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
999 } 992 }
1000 } 993 }
1001 return _SUCCESS; 994 return _SUCCESS;
1002 } 995}
1003 996
1004static unsigned int rtl8188eu_inirp_init(struct adapter *Adapter) 997static unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
1005{ 998{
1006 u8 i; 999 u8 i;
1007 struct recv_buf *precvbuf; 1000 struct recv_buf *precvbuf;
1008 uint status; 1001 uint status;
1009 struct recv_priv *precvpriv = &(Adapter->recvpriv); 1002 struct recv_priv *precvpriv = &Adapter->recvpriv;
1010 1003
1011 status = _SUCCESS; 1004 status = _SUCCESS;
1012 1005
@@ -1116,7 +1109,6 @@ readAdapterInfo_8188EU(
1116 Hal_ReadAntennaDiversity88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag); 1109 Hal_ReadAntennaDiversity88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
1117 Hal_EfuseParseBoardType88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag); 1110 Hal_EfuseParseBoardType88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
1118 Hal_ReadThermalMeter_88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag); 1111 Hal_ReadThermalMeter_88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
1119
1120} 1112}
1121 1113
1122static void _ReadPROMContent( 1114static void _ReadPROMContent(
@@ -1212,7 +1204,7 @@ static void hw_var_set_opmode(struct adapter *Adapter, u8 variable, u8 *val)
1212 StopTxBeacon(Adapter); 1204 StopTxBeacon(Adapter);
1213 1205
1214 usb_write8(Adapter, REG_BCN_CTRL, 0x19);/* disable atim wnd */ 1206 usb_write8(Adapter, REG_BCN_CTRL, 0x19);/* disable atim wnd */
1215 } else if ((mode == _HW_STATE_ADHOC_)) { 1207 } else if (mode == _HW_STATE_ADHOC_) {
1216 ResumeTxBeacon(Adapter); 1208 ResumeTxBeacon(Adapter);
1217 usb_write8(Adapter, REG_BCN_CTRL, 0x1a); 1209 usb_write8(Adapter, REG_BCN_CTRL, 0x1a);
1218 } else if (mode == _HW_STATE_AP_) { 1210 } else if (mode == _HW_STATE_AP_) {
@@ -1363,7 +1355,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
1363 { 1355 {
1364 u64 tsf; 1356 u64 tsf;
1365 struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv; 1357 struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
1366 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 1358 struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
1367 1359
1368 tsf = pmlmeext->TSFValue - rtw_modular64(pmlmeext->TSFValue, (pmlmeinfo->bcn_interval*1024)) - 1024; /* us */ 1360 tsf = pmlmeext->TSFValue - rtw_modular64(pmlmeext->TSFValue, (pmlmeinfo->bcn_interval*1024)) - 1024; /* us */
1369 1361
@@ -1420,7 +1412,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
1420 usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL) | BIT(4)); 1412 usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL) | BIT(4));
1421 } else { /* sitesurvey done */ 1413 } else { /* sitesurvey done */
1422 struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv; 1414 struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
1423 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 1415 struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
1424 1416
1425 if ((is_client_associated_to_ap(Adapter)) || 1417 if ((is_client_associated_to_ap(Adapter)) ||
1426 ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE)) { 1418 ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE)) {
@@ -1490,7 +1482,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
1490 { 1482 {
1491 u8 u1bAIFS, aSifsTime; 1483 u8 u1bAIFS, aSifsTime;
1492 struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv; 1484 struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
1493 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 1485 struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
1494 1486
1495 usb_write8(Adapter, REG_SLOT, val[0]); 1487 usb_write8(Adapter, REG_SLOT, val[0]);
1496 1488
@@ -1790,7 +1782,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
1790 } 1782 }
1791 break; 1783 break;
1792 case HW_VAR_H2C_MEDIA_STATUS_RPT: 1784 case HW_VAR_H2C_MEDIA_STATUS_RPT:
1793 rtl8188e_set_FwMediaStatus_cmd(Adapter , (*(__le16 *)val)); 1785 rtl8188e_set_FwMediaStatus_cmd(Adapter, (*(__le16 *)val));
1794 break; 1786 break;
1795 case HW_VAR_BCN_VALID: 1787 case HW_VAR_BCN_VALID:
1796 /* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2, write 1 to clear, Clear by sw */ 1788 /* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2, write 1 to clear, Clear by sw */
@@ -1855,7 +1847,6 @@ static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
1855 default: 1847 default:
1856 break; 1848 break;
1857 } 1849 }
1858
1859} 1850}
1860 1851
1861/* */ 1852/* */
@@ -1904,19 +1895,19 @@ GetHalDefVar8188EUsb(
1904 case HAL_DEF_RA_DECISION_RATE: 1895 case HAL_DEF_RA_DECISION_RATE:
1905 { 1896 {
1906 u8 MacID = *((u8 *)pValue); 1897 u8 MacID = *((u8 *)pValue);
1907 *((u8 *)pValue) = ODM_RA_GetDecisionRate_8188E(&(haldata->odmpriv), MacID); 1898 *((u8 *)pValue) = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, MacID);
1908 } 1899 }
1909 break; 1900 break;
1910 case HAL_DEF_RA_SGI: 1901 case HAL_DEF_RA_SGI:
1911 { 1902 {
1912 u8 MacID = *((u8 *)pValue); 1903 u8 MacID = *((u8 *)pValue);
1913 *((u8 *)pValue) = ODM_RA_GetShortGI_8188E(&(haldata->odmpriv), MacID); 1904 *((u8 *)pValue) = ODM_RA_GetShortGI_8188E(&haldata->odmpriv, MacID);
1914 } 1905 }
1915 break; 1906 break;
1916 case HAL_DEF_PT_PWR_STATUS: 1907 case HAL_DEF_PT_PWR_STATUS:
1917 { 1908 {
1918 u8 MacID = *((u8 *)pValue); 1909 u8 MacID = *((u8 *)pValue);
1919 *((u8 *)pValue) = ODM_RA_GetHwPwrStatus_8188E(&(haldata->odmpriv), MacID); 1910 *((u8 *)pValue) = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, MacID);
1920 } 1911 }
1921 break; 1912 break;
1922 case HW_VAR_MAX_RX_AMPDU_FACTOR: 1913 case HW_VAR_MAX_RX_AMPDU_FACTOR:
@@ -1939,7 +1930,7 @@ GetHalDefVar8188EUsb(
1939 break; 1930 break;
1940 case HW_DEF_ODM_DBG_FLAG: 1931 case HW_DEF_ODM_DBG_FLAG:
1941 { 1932 {
1942 struct odm_dm_struct *dm_ocm = &(haldata->odmpriv); 1933 struct odm_dm_struct *dm_ocm = &haldata->odmpriv;
1943 pr_info("dm_ocm->DebugComponents = 0x%llx\n", dm_ocm->DebugComponents); 1934 pr_info("dm_ocm->DebugComponents = 0x%llx\n", dm_ocm->DebugComponents);
1944 } 1935 }
1945 break; 1936 break;
@@ -1967,8 +1958,8 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
1967 struct sta_info *psta; 1958 struct sta_info *psta;
1968 struct hal_data_8188e *haldata = GET_HAL_DATA(adapt); 1959 struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
1969 struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv; 1960 struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
1970 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 1961 struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
1971 struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network); 1962 struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
1972 1963
1973 if (mac_id >= NUM_STA) /* CAM_SIZE */ 1964 if (mac_id >= NUM_STA) /* CAM_SIZE */
1974 return; 1965 return;
@@ -1981,8 +1972,8 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
1981 networkType = judge_network_type(adapt, cur_network->SupportedRates, supportRateNum) & 0xf; 1972 networkType = judge_network_type(adapt, cur_network->SupportedRates, supportRateNum) & 0xf;
1982 raid = networktype_to_raid(networkType); 1973 raid = networktype_to_raid(networkType);
1983 mask = update_supported_rate(cur_network->SupportedRates, supportRateNum); 1974 mask = update_supported_rate(cur_network->SupportedRates, supportRateNum);
1984 mask |= (pmlmeinfo->HT_enable) ? update_MSC_rate(&(pmlmeinfo->HT_caps)) : 0; 1975 mask |= (pmlmeinfo->HT_enable) ? update_MSC_rate(&pmlmeinfo->HT_caps) : 0;
1985 if (support_short_GI(adapt, &(pmlmeinfo->HT_caps))) 1976 if (support_short_GI(adapt, &pmlmeinfo->HT_caps))
1986 shortGIrate = true; 1977 shortGIrate = true;
1987 break; 1978 break;
1988 case 1:/* for broadcast/multicast */ 1979 case 1:/* for broadcast/multicast */
@@ -2023,8 +2014,8 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
2023static void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt) 2014static void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
2024{ 2015{
2025 u32 value32; 2016 u32 value32;
2026 struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv); 2017 struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
2027 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 2018 struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
2028 u32 bcn_ctrl_reg = REG_BCN_CTRL; 2019 u32 bcn_ctrl_reg = REG_BCN_CTRL;
2029 /* reset TSF, enable update TSF, correcting TSF On Beacon */ 2020 /* reset TSF, enable update TSF, correcting TSF On Beacon */
2030 2021
@@ -2081,9 +2072,8 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt)
2081{ 2072{
2082 struct hal_ops *halfunc = &adapt->HalFunc; 2073 struct hal_ops *halfunc = &adapt->HalFunc;
2083 2074
2084 2075 adapt->HalData = kzalloc(sizeof(*adapt->HalData), GFP_KERNEL);
2085 adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL); 2076 if (!adapt->HalData)
2086 if (adapt->HalData == NULL)
2087 DBG_88E("cant not alloc memory for HAL DATA\n"); 2077 DBG_88E("cant not alloc memory for HAL DATA\n");
2088 2078
2089 halfunc->hal_power_on = rtl8188eu_InitPowerOn; 2079 halfunc->hal_power_on = rtl8188eu_InitPowerOn;
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index 2670d6b6a79e..8990748a1919 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __INC_HAL8188EPHYCFG_H__ 15#ifndef __INC_HAL8188EPHYCFG_H__
21#define __INC_HAL8188EPHYCFG_H__ 16#define __INC_HAL8188EPHYCFG_H__
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
index 9f2969bf8355..344c73d1081b 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __INC_HAL8188EPHYREG_H__ 15#ifndef __INC_HAL8188EPHYREG_H__
21#define __INC_HAL8188EPHYREG_H__ 16#define __INC_HAL8188EPHYREG_H__
diff --git a/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h b/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
index 1bf9bc70a696..dbb55247b0c6 100644
--- a/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
+++ b/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
@@ -11,11 +11,6 @@
11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12* more details. 12* more details.
13* 13*
14* You should have received a copy of the GNU General Public License along with
15* this program; if not, write to the Free Software Foundation, Inc.,
16* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17*
18*
19******************************************************************************/ 14******************************************************************************/
20 15
21#ifndef __INC_FW_8188E_HW_IMG_H 16#ifndef __INC_FW_8188E_HW_IMG_H
diff --git a/drivers/staging/rtl8188eu/include/HalVerDef.h b/drivers/staging/rtl8188eu/include/HalVerDef.h
index 6f2b2a436b04..d244efff3593 100644
--- a/drivers/staging/rtl8188eu/include/HalVerDef.h
+++ b/drivers/staging/rtl8188eu/include/HalVerDef.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __HAL_VERSION_DEF_H__ 15#ifndef __HAL_VERSION_DEF_H__
21#define __HAL_VERSION_DEF_H__ 16#define __HAL_VERSION_DEF_H__
diff --git a/drivers/staging/rtl8188eu/include/basic_types.h b/drivers/staging/rtl8188eu/include/basic_types.h
index 3fb691daa5af..2c1676d2ac6e 100644
--- a/drivers/staging/rtl8188eu/include/basic_types.h
+++ b/drivers/staging/rtl8188eu/include/basic_types.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __BASIC_TYPES_H__ 15#ifndef __BASIC_TYPES_H__
21#define __BASIC_TYPES_H__ 16#define __BASIC_TYPES_H__
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index dcb032b6c3a7..55506a7da1a4 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20/*----------------------------------------------------------------------------- 15/*-----------------------------------------------------------------------------
21 16
diff --git a/drivers/staging/rtl8188eu/include/fw.h b/drivers/staging/rtl8188eu/include/fw.h
index 7884d8f65763..b016f32a8992 100644
--- a/drivers/staging/rtl8188eu/include/fw.h
+++ b/drivers/staging/rtl8188eu/include/fw.h
@@ -11,10 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the 14 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE. 15 * file called LICENSE.
20 * 16 *
diff --git a/drivers/staging/rtl8188eu/include/hal_com.h b/drivers/staging/rtl8188eu/include/hal_com.h
index 47715d949d54..aaf444733507 100644
--- a/drivers/staging/rtl8188eu/include/hal_com.h
+++ b/drivers/staging/rtl8188eu/include/hal_com.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __HAL_COMMON_H__ 15#ifndef __HAL_COMMON_H__
21#define __HAL_COMMON_H__ 16#define __HAL_COMMON_H__
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 1b1c10292456..eaf939bd4103 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __HAL_INTF_H__ 15#ifndef __HAL_INTF_H__
21#define __HAL_INTF_H__ 16#define __HAL_INTF_H__
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index f8f5eb6b7976..d8284c84f09c 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __IEEE80211_H 15#ifndef __IEEE80211_H
21#define __IEEE80211_H 16#define __IEEE80211_H
diff --git a/drivers/staging/rtl8188eu/include/mlme_osdep.h b/drivers/staging/rtl8188eu/include/mlme_osdep.h
index ae1722c67032..5a35b0866db6 100644
--- a/drivers/staging/rtl8188eu/include/mlme_osdep.h
+++ b/drivers/staging/rtl8188eu/include/mlme_osdep.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __MLME_OSDEP_H_ 15#ifndef __MLME_OSDEP_H_
21#define __MLME_OSDEP_H_ 16#define __MLME_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/include/mp_custom_oid.h b/drivers/staging/rtl8188eu/include/mp_custom_oid.h
index 6fa52cf99c4e..1a06ee6ad460 100644
--- a/drivers/staging/rtl8188eu/include/mp_custom_oid.h
+++ b/drivers/staging/rtl8188eu/include/mp_custom_oid.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __CUSTOM_OID_H 15#ifndef __CUSTOM_OID_H
21#define __CUSTOM_OID_H 16#define __CUSTOM_OID_H
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index af781c7cd3a5..dbebf17f36d3 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21 16
diff --git a/drivers/staging/rtl8188eu/include/odm_HWConfig.h b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
index ef792bfd535e..da7325d599c6 100644
--- a/drivers/staging/rtl8188eu/include/odm_HWConfig.h
+++ b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
@@ -11,10 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * 14 *
19 ******************************************************************************/ 15 ******************************************************************************/
20 16
diff --git a/drivers/staging/rtl8188eu/include/odm_RTL8188E.h b/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
index 14dce6c4b1bc..72b4db67ac33 100644
--- a/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
+++ b/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __ODM_RTL8188E_H__ 15#ifndef __ODM_RTL8188E_H__
21#define __ODM_RTL8188E_H__ 16#define __ODM_RTL8188E_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
index 5a61f902bc1b..c82c09013487 100644
--- a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
+++ b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21#ifndef __ODM_REGDEFINE11N_H__ 16#ifndef __ODM_REGDEFINE11N_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
index e9390963d6ff..52e51f19f752 100644
--- a/drivers/staging/rtl8188eu/include/odm_debug.h
+++ b/drivers/staging/rtl8188eu/include/odm_debug.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21 16
diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h
index 0f236da09277..9e5fe1777e6c 100644
--- a/drivers/staging/rtl8188eu/include/odm_precomp.h
+++ b/drivers/staging/rtl8188eu/include/odm_precomp.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21#ifndef __ODM_PRECOMP_H__ 16#ifndef __ODM_PRECOMP_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_reg.h b/drivers/staging/rtl8188eu/include/odm_reg.h
index 7f10b695cf9d..3405a44a19ed 100644
--- a/drivers/staging/rtl8188eu/include/odm_reg.h
+++ b/drivers/staging/rtl8188eu/include/odm_reg.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20/* */ 15/* */
21/* File Name: odm_reg.h */ 16/* File Name: odm_reg.h */
diff --git a/drivers/staging/rtl8188eu/include/odm_types.h b/drivers/staging/rtl8188eu/include/odm_types.h
index c1355b959c55..3474a9c72640 100644
--- a/drivers/staging/rtl8188eu/include/odm_types.h
+++ b/drivers/staging/rtl8188eu/include/odm_types.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __ODM_TYPES_H__ 15#ifndef __ODM_TYPES_H__
21#define __ODM_TYPES_H__ 16#define __ODM_TYPES_H__
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index 1521744d626c..54fca79827e3 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21#ifndef __OSDEP_INTF_H_ 16#ifndef __OSDEP_INTF_H_
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 22de53d6539a..5475956c5ee5 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __OSDEP_SERVICE_H_ 15#ifndef __OSDEP_SERVICE_H_
21#define __OSDEP_SERVICE_H_ 16#define __OSDEP_SERVICE_H_
diff --git a/drivers/staging/rtl8188eu/include/pwrseq.h b/drivers/staging/rtl8188eu/include/pwrseq.h
index 9dbf8435f147..afd61cf4cb15 100644
--- a/drivers/staging/rtl8188eu/include/pwrseq.h
+++ b/drivers/staging/rtl8188eu/include/pwrseq.h
@@ -12,11 +12,6 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
18 *
19 *
20 ******************************************************************************/ 15 ******************************************************************************/
21 16
22#ifndef __HAL8188EPWRSEQ_H__ 17#ifndef __HAL8188EPWRSEQ_H__
diff --git a/drivers/staging/rtl8188eu/include/pwrseqcmd.h b/drivers/staging/rtl8188eu/include/pwrseqcmd.h
index 468a3fb28e00..c4a919ea17ea 100644
--- a/drivers/staging/rtl8188eu/include/pwrseqcmd.h
+++ b/drivers/staging/rtl8188eu/include/pwrseqcmd.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __HALPWRSEQCMD_H__ 15#ifndef __HALPWRSEQCMD_H__
21#define __HALPWRSEQCMD_H__ 16#define __HALPWRSEQCMD_H__
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index fdeb603b6cc1..cad31587c30a 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RECV_OSDEP_H_ 15#ifndef __RECV_OSDEP_H_
21#define __RECV_OSDEP_H_ 16#define __RECV_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
index f813ce0563f8..4d7d804658c2 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTL8188E_CMD_H__ 15#ifndef __RTL8188E_CMD_H__
21#define __RTL8188E_CMD_H__ 16#define __RTL8188E_CMD_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
index 5e0ac31ef464..4190112a50bf 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTL8188E_DM_H__ 15#ifndef __RTL8188E_DM_H__
21#define __RTL8188E_DM_H__ 16#define __RTL8188E_DM_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 9f5050e6f6ab..9dd5c293a54b 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTL8188E_HAL_H__ 15#ifndef __RTL8188E_HAL_H__
21#define __RTL8188E_HAL_H__ 16#define __RTL8188E_HAL_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_led.h b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
index c0147e73cd8c..fca6d8c81e90 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_led.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTL8188E_LED_H__ 15#ifndef __RTL8188E_LED_H__
21#define __RTL8188E_LED_H__ 16#define __RTL8188E_LED_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index 5fed30d389a2..54048bc826e5 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTL8188E_RECV_H__ 15#ifndef __RTL8188E_RECV_H__
21#define __RTL8188E_RECV_H__ 16#define __RTL8188E_RECV_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
index beeee4a6b0bc..fb82f663b1f5 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -11,10 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *******************************************************************************/ 14 *******************************************************************************/
19#ifndef __RTL8188E_SPEC_H__ 15#ifndef __RTL8188E_SPEC_H__
20#define __RTL8188E_SPEC_H__ 16#define __RTL8188E_SPEC_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
index 0b96d42e290b..65a63df2077f 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTL8188E_XMIT_H__ 15#ifndef __RTL8188E_XMIT_H__
21#define __RTL8188E_XMIT_H__ 16#define __RTL8188E_XMIT_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_android.h b/drivers/staging/rtl8188eu/include/rtw_android.h
index e85bf1ff01f8..e81ee92b0ae2 100644
--- a/drivers/staging/rtl8188eu/include/rtw_android.h
+++ b/drivers/staging/rtl8188eu/include/rtw_android.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21#ifndef __RTW_ANDROID_H__ 16#ifndef __RTW_ANDROID_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_ap.h b/drivers/staging/rtl8188eu/include/rtw_ap.h
index 6128ccce91ba..b820684bc3fe 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ap.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ap.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTW_AP_H_ 15#ifndef __RTW_AP_H_
21#define __RTW_AP_H_ 16#define __RTW_AP_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
index 9e9f5f4af8f1..08ca59217cb7 100644
--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTW_CMD_H_ 15#ifndef __RTW_CMD_H_
21#define __RTW_CMD_H_ 16#define __RTW_CMD_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
index 971bf457f32d..7ed4cada7efa 100644
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTW_DEBUG_H__ 15#ifndef __RTW_DEBUG_H__
21#define __RTW_DEBUG_H__ 16#define __RTW_DEBUG_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_eeprom.h b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
index 904fea1fad6c..5dd73841dd9e 100644
--- a/drivers/staging/rtl8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTW_EEPROM_H__ 15#ifndef __RTW_EEPROM_H__
21#define __RTW_EEPROM_H__ 16#define __RTW_EEPROM_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
index 5660eed7196b..9bfb10c302b5 100644
--- a/drivers/staging/rtl8188eu/include/rtw_efuse.h
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTW_EFUSE_H__ 15#ifndef __RTW_EFUSE_H__
21#define __RTW_EFUSE_H__ 16#define __RTW_EFUSE_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_event.h b/drivers/staging/rtl8188eu/include/rtw_event.h
index 52151dc4495a..5c34e567d341 100644
--- a/drivers/staging/rtl8188eu/include/rtw_event.h
+++ b/drivers/staging/rtl8188eu/include/rtw_event.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef _RTW_EVENT_H_ 15#ifndef _RTW_EVENT_H_
21#define _RTW_EVENT_H_ 16#define _RTW_EVENT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ht.h b/drivers/staging/rtl8188eu/include/rtw_ht.h
index beb210b37083..b45483fd069f 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ht.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ht.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef _RTW_HT_H_ 15#ifndef _RTW_HT_H_
21#define _RTW_HT_H_ 16#define _RTW_HT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
index ee2cb54a7552..3a652df4b26c 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef _RTW_IOCTL_H_ 15#ifndef _RTW_IOCTL_H_
21#define _RTW_IOCTL_H_ 16#define _RTW_IOCTL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
index 8fa3858cb776..da4949f94f4c 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef _RTW_IOCTL_RTL_H_ 15#ifndef _RTW_IOCTL_RTL_H_
21#define _RTW_IOCTL_RTL_H_ 16#define _RTW_IOCTL_RTL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
index fa9d655eaab9..b6e14a8b7a11 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTW_IOCTL_SET_H_ 15#ifndef __RTW_IOCTL_SET_H_
21#define __RTW_IOCTL_SET_H_ 16#define __RTW_IOCTL_SET_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_iol.h b/drivers/staging/rtl8188eu/include/rtw_iol.h
index 68aae7f0b02f..1f324e68d2ae 100644
--- a/drivers/staging/rtl8188eu/include/rtw_iol.h
+++ b/drivers/staging/rtl8188eu/include/rtw_iol.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTW_IOL_H_ 15#ifndef __RTW_IOL_H_
21#define __RTW_IOL_H_ 16#define __RTW_IOL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 4c992573e3ca..5d8bce0f58db 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTW_MLME_H_ 15#ifndef __RTW_MLME_H_
21#define __RTW_MLME_H_ 16#define __RTW_MLME_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 44711332b90c..27382ff24a84 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTW_MLME_EXT_H_ 15#ifndef __RTW_MLME_EXT_H_
21#define __RTW_MLME_EXT_H_ 16#define __RTW_MLME_EXT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
index 30fd17f23bf1..02b300217185 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20/***************************************************************************** 15/*****************************************************************************
21 * 16 *
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index a493d4c37ef1..9680e2eab62f 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTW_PWRCTRL_H_ 15#ifndef __RTW_PWRCTRL_H_
21#define __RTW_PWRCTRL_H_ 16#define __RTW_PWRCTRL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_qos.h b/drivers/staging/rtl8188eu/include/rtw_qos.h
index bbee1ddc00bb..45a77f6f8427 100644
--- a/drivers/staging/rtl8188eu/include/rtw_qos.h
+++ b/drivers/staging/rtl8188eu/include/rtw_qos.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef _RTW_QOS_H_ 15#ifndef _RTW_QOS_H_
21#define _RTW_QOS_H_ 16#define _RTW_QOS_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index eb1ac3d03123..b0373b6216d6 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef _RTW_RECV_H_ 15#ifndef _RTW_RECV_H_
21#define _RTW_RECV_H_ 16#define _RTW_RECV_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
index 35f61be12acd..66896af02042 100644
--- a/drivers/staging/rtl8188eu/include/rtw_rf.h
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTW_RF_H_ 15#ifndef __RTW_RF_H_
21#define __RTW_RF_H_ 16#define __RTW_RF_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
index a1aebe6c8452..ca1247bce6e3 100644
--- a/drivers/staging/rtl8188eu/include/rtw_security.h
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __RTW_SECURITY_H_ 15#ifndef __RTW_SECURITY_H_
21#define __RTW_SECURITY_H_ 16#define __RTW_SECURITY_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_sreset.h b/drivers/staging/rtl8188eu/include/rtw_sreset.h
index 3a62ed010875..ce027dfdecc5 100644
--- a/drivers/staging/rtl8188eu/include/rtw_sreset.h
+++ b/drivers/staging/rtl8188eu/include/rtw_sreset.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef _RTW_SRESET_C_ 15#ifndef _RTW_SRESET_C_
21#define _RTW_SRESET_C_ 16#define _RTW_SRESET_C_
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index b7c20883d355..a0853bab3edb 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef _RTW_XMIT_H_ 15#ifndef _RTW_XMIT_H_
21#define _RTW_XMIT_H_ 16#define _RTW_XMIT_H_
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
index d4e78326fc8d..42a035123365 100644
--- a/drivers/staging/rtl8188eu/include/sta_info.h
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __STA_INFO_H_ 15#ifndef __STA_INFO_H_
21#define __STA_INFO_H_ 16#define __STA_INFO_H_
diff --git a/drivers/staging/rtl8188eu/include/usb_hal.h b/drivers/staging/rtl8188eu/include/usb_hal.h
index 8a65995d5e48..b1bf07a9013e 100644
--- a/drivers/staging/rtl8188eu/include/usb_hal.h
+++ b/drivers/staging/rtl8188eu/include/usb_hal.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __USB_HAL_H__ 15#ifndef __USB_HAL_H__
21#define __USB_HAL_H__ 16#define __USB_HAL_H__
diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
index 4fdc536cba79..220733314f8b 100644
--- a/drivers/staging/rtl8188eu/include/usb_ops_linux.h
+++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __USB_OPS_LINUX_H__ 15#ifndef __USB_OPS_LINUX_H__
21#define __USB_OPS_LINUX_H__ 16#define __USB_OPS_LINUX_H__
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 6cb5beca1672..e7c512183619 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef _WIFI_H_ 15#ifndef _WIFI_H_
21#define _WIFI_H_ 16#define _WIFI_H_
diff --git a/drivers/staging/rtl8188eu/include/wlan_bssdef.h b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
index 85b99da49a2d..560966cd7dfe 100644
--- a/drivers/staging/rtl8188eu/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __WLAN_BSSDEF_H__ 15#ifndef __WLAN_BSSDEF_H__
21#define __WLAN_BSSDEF_H__ 16#define __WLAN_BSSDEF_H__
diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h b/drivers/staging/rtl8188eu/include/xmit_osdep.h
index 13965f2489db..f96ca6af934d 100644
--- a/drivers/staging/rtl8188eu/include/xmit_osdep.h
+++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#ifndef __XMIT_OSDEP_H_ 15#ifndef __XMIT_OSDEP_H_
21#define __XMIT_OSDEP_H_ 16#define __XMIT_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 911980495fb2..5672f014cc46 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _IOCTL_LINUX_C_ 15#define _IOCTL_LINUX_C_
21 16
@@ -2120,13 +2115,13 @@ static u8 set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
2120 u8 res = _SUCCESS; 2115 u8 res = _SUCCESS;
2121 2116
2122 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 2117 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
2123 if (ph2c == NULL) { 2118 if (!ph2c) {
2124 res = _FAIL; 2119 res = _FAIL;
2125 goto exit; 2120 goto exit;
2126 } 2121 }
2127 2122
2128 psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_KERNEL); 2123 psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_KERNEL);
2129 if (psetstakey_para == NULL) { 2124 if (!psetstakey_para) {
2130 kfree(ph2c); 2125 kfree(ph2c);
2131 res = _FAIL; 2126 res = _FAIL;
2132 goto exit; 2127 goto exit;
@@ -2158,12 +2153,12 @@ static int set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
2158 DBG_88E("%s\n", __func__); 2153 DBG_88E("%s\n", __func__);
2159 2154
2160 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 2155 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
2161 if (pcmd == NULL) { 2156 if (!pcmd) {
2162 res = _FAIL; 2157 res = _FAIL;
2163 goto exit; 2158 goto exit;
2164 } 2159 }
2165 psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL); 2160 psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL);
2166 if (psetkeyparm == NULL) { 2161 if (!psetkeyparm) {
2167 kfree(pcmd); 2162 kfree(pcmd);
2168 res = _FAIL; 2163 res = _FAIL;
2169 goto exit; 2164 goto exit;
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
index 08bfa76f4975..bc756267c7fc 100644
--- a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21 16
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 7986e678521a..ae2caff030f1 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _OS_INTFS_C_ 15#define _OS_INTFS_C_
21 16
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index f090bef59594..764250b4ba86 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21 16
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index d4734baffc8a..0c44914ea3e6 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#include <osdep_service.h> 15#include <osdep_service.h>
21#include <drv_types.h> 16#include <drv_types.h>
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index 5f3337c281ee..41e1b1d15b81 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21#include <linux/module.h> 16#include <linux/module.h>
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 794cc114348c..11d51a30170f 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20 15
21#define pr_fmt(fmt) "R8188EU: " fmt 16#define pr_fmt(fmt) "R8188EU: " fmt
@@ -65,7 +60,7 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
65 struct usb_device *pusbd; 60 struct usb_device *pusbd;
66 61
67 pdvobjpriv = kzalloc(sizeof(*pdvobjpriv), GFP_KERNEL); 62 pdvobjpriv = kzalloc(sizeof(*pdvobjpriv), GFP_KERNEL);
68 if (pdvobjpriv == NULL) 63 if (!pdvobjpriv)
69 return NULL; 64 return NULL;
70 65
71 pdvobjpriv->pusbintf = usb_intf; 66 pdvobjpriv->pusbintf = usb_intf;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index 0fea338d7313..ce1e1a135f1b 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -11,10 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 ******************************************************************************/ 14 ******************************************************************************/
19#define _USB_OPS_LINUX_C_ 15#define _USB_OPS_LINUX_C_
20 16
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 1593e280e060..221e2750652e 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -11,11 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 *
19 ******************************************************************************/ 14 ******************************************************************************/
20#define _XMIT_OSDEP_C_ 15#define _XMIT_OSDEP_C_
21 16
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index f18fc0b6775b..051c2be842d0 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -746,7 +746,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
746 746
747 // Indicate packets 747 // Indicate packets
748 if(index>REORDER_WIN_SIZE){ 748 if(index>REORDER_WIN_SIZE){
749 IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! \n"); 749 IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
750 kfree(prxbIndicateArray); 750 kfree(prxbIndicateArray);
751 return; 751 return;
752 } 752 }
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index 148d0d45547b..6033502eff3d 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -75,7 +75,7 @@ static void RxPktPendingTimeout(unsigned long data)
75 75
76 // Indicate packets 76 // Indicate packets
77 if(index > REORDER_WIN_SIZE){ 77 if(index > REORDER_WIN_SIZE){
78 IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! \n"); 78 IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
79 spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); 79 spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
80 return; 80 return;
81 } 81 }
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index 5c3bb3be2720..d733fb2ade91 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -194,7 +194,7 @@ void phy_RF8256_Config_ParaFile(struct net_device *dev)
194 break; 194 break;
195 } 195 }
196 196
197 /*----Restore RFENV control type----*/; 197 /*----Restore RFENV control type----*/
198 switch (eRFPath) { 198 switch (eRFPath) {
199 case RF90_PATH_A: 199 case RF90_PATH_A:
200 case RF90_PATH_C: 200 case RF90_PATH_C:
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 4af0140c6ead..8c1d73719147 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -235,7 +235,6 @@ static void CamResetAllEntry(struct net_device *dev)
235 */ 235 */
236 ulcommand |= BIT(31) | BIT(30); 236 ulcommand |= BIT(31) | BIT(30);
237 write_nic_dword(dev, RWCAM, ulcommand); 237 write_nic_dword(dev, RWCAM, ulcommand);
238
239} 238}
240 239
241 240
@@ -298,6 +297,7 @@ int read_nic_byte_E(struct net_device *dev, int indx, u8 *data)
298 297
299 return 0; 298 return 0;
300} 299}
300
301/* as 92U has extend page from 4 to 16, so modify functions below. */ 301/* as 92U has extend page from 4 to 16, so modify functions below. */
302void write_nic_byte(struct net_device *dev, int indx, u8 data) 302void write_nic_byte(struct net_device *dev, int indx, u8 data)
303{ 303{
@@ -319,14 +319,11 @@ void write_nic_byte(struct net_device *dev, int indx, u8 data)
319 319
320 if (status < 0) 320 if (status < 0)
321 netdev_err(dev, "write_nic_byte TimeOut! status: %d\n", status); 321 netdev_err(dev, "write_nic_byte TimeOut! status: %d\n", status);
322
323
324} 322}
325 323
326 324
327void write_nic_word(struct net_device *dev, int indx, u16 data) 325void write_nic_word(struct net_device *dev, int indx, u16 data)
328{ 326{
329
330 int status; 327 int status;
331 328
332 struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev); 329 struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -345,13 +342,11 @@ void write_nic_word(struct net_device *dev, int indx, u16 data)
345 342
346 if (status < 0) 343 if (status < 0)
347 netdev_err(dev, "write_nic_word TimeOut! status: %d\n", status); 344 netdev_err(dev, "write_nic_word TimeOut! status: %d\n", status);
348
349} 345}
350 346
351 347
352void write_nic_dword(struct net_device *dev, int indx, u32 data) 348void write_nic_dword(struct net_device *dev, int indx, u32 data)
353{ 349{
354
355 int status; 350 int status;
356 351
357 struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev); 352 struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -372,7 +367,6 @@ void write_nic_dword(struct net_device *dev, int indx, u32 data)
372 if (status < 0) 367 if (status < 0)
373 netdev_err(dev, "write_nic_dword TimeOut! status: %d\n", 368 netdev_err(dev, "write_nic_dword TimeOut! status: %d\n",
374 status); 369 status);
375
376} 370}
377 371
378 372
@@ -738,7 +732,6 @@ void rtl8192_update_msr(struct net_device *dev)
738 * master (see the create BSS/IBSS func) 732 * master (see the create BSS/IBSS func)
739 */ 733 */
740 if (priv->ieee80211->state == IEEE80211_LINKED) { 734 if (priv->ieee80211->state == IEEE80211_LINKED) {
741
742 if (priv->ieee80211->iw_mode == IW_MODE_INFRA) 735 if (priv->ieee80211->iw_mode == IW_MODE_INFRA)
743 msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT); 736 msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT);
744 else if (priv->ieee80211->iw_mode == IW_MODE_ADHOC) 737 else if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
@@ -773,11 +766,10 @@ static void rtl8192_rx_isr(struct urb *urb);
773 766
774static u32 get_rxpacket_shiftbytes_819xusb(struct ieee80211_rx_stats *pstats) 767static u32 get_rxpacket_shiftbytes_819xusb(struct ieee80211_rx_stats *pstats)
775{ 768{
776
777 return (sizeof(rx_desc_819x_usb) + pstats->RxDrvInfoSize 769 return (sizeof(rx_desc_819x_usb) + pstats->RxDrvInfoSize
778 + pstats->RxBufShift); 770 + pstats->RxBufShift);
779
780} 771}
772
781static int rtl8192_rx_initiate(struct net_device *dev) 773static int rtl8192_rx_initiate(struct net_device *dev)
782{ 774{
783 struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev); 775 struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -874,6 +866,7 @@ void rtl8192_set_rxconf(struct net_device *dev)
874 866
875 write_nic_dword(dev, RCR, rxconf); 867 write_nic_dword(dev, RCR, rxconf);
876} 868}
869
877/* wait to be removed */ 870/* wait to be removed */
878void rtl8192_rx_enable(struct net_device *dev) 871void rtl8192_rx_enable(struct net_device *dev)
879{ 872{
@@ -943,9 +936,9 @@ inline u16 ieeerate2rtlrate(int rate)
943 return 11; 936 return 11;
944 default: 937 default:
945 return 3; 938 return 3;
946
947 } 939 }
948} 940}
941
949static u16 rtl_rate[] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540}; 942static u16 rtl_rate[] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
950inline u16 rtl8192_rate2rate(short rate) 943inline u16 rtl8192_rate2rate(short rate)
951{ 944{
@@ -1050,7 +1043,7 @@ static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
1050 1043
1051 spin_lock_irqsave(&priv->tx_lock, flags); 1044 spin_lock_irqsave(&priv->tx_lock, flags);
1052 1045
1053 memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); 1046 *(struct net_device **)(skb->cb) = dev;
1054 tcb_desc->bTxEnableFwCalcDur = 1; 1047 tcb_desc->bTxEnableFwCalcDur = 1;
1055 skb_push(skb, priv->ieee80211->tx_headroom); 1048 skb_push(skb, priv->ieee80211->tx_headroom);
1056 ret = rtl8192_tx(dev, skb); 1049 ret = rtl8192_tx(dev, skb);
@@ -1100,7 +1093,7 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
1100 if (!skb) 1093 if (!skb)
1101 return; 1094 return;
1102 1095
1103 dev = (struct net_device *)(skb->cb); 1096 dev = *(struct net_device **)(skb->cb);
1104 tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); 1097 tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
1105 queue_index = tcb_desc->queue_index; 1098 queue_index = tcb_desc->queue_index;
1106 1099
@@ -1149,7 +1142,6 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
1149 return; /* avoid further processing AMSDU */ 1142 return; /* avoid further processing AMSDU */
1150 } 1143 }
1151 } 1144 }
1152
1153} 1145}
1154 1146
1155static void rtl8192_config_rate(struct net_device *dev, u16 *rate_config) 1147static void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
@@ -1272,11 +1264,10 @@ static void rtl8192_update_cap(struct net_device *dev, u16 cap)
1272 priv->slot_time = slot_time; 1264 priv->slot_time = slot_time;
1273 write_nic_byte(dev, SLOT_TIME, slot_time); 1265 write_nic_byte(dev, SLOT_TIME, slot_time);
1274 } 1266 }
1275
1276} 1267}
1268
1277static void rtl8192_net_update(struct net_device *dev) 1269static void rtl8192_net_update(struct net_device *dev)
1278{ 1270{
1279
1280 struct r8192_priv *priv = ieee80211_priv(dev); 1271 struct r8192_priv *priv = ieee80211_priv(dev);
1281 struct ieee80211_network *net; 1272 struct ieee80211_network *net;
1282 u16 BcnTimeCfg = 0, BcnCW = 6, BcnIFS = 0xf; 1273 u16 BcnTimeCfg = 0, BcnCW = 6, BcnIFS = 0xf;
@@ -1303,9 +1294,6 @@ static void rtl8192_net_update(struct net_device *dev)
1303 1294
1304 write_nic_word(dev, BCN_TCFG, BcnTimeCfg); 1295 write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
1305 } 1296 }
1306
1307
1308
1309} 1297}
1310 1298
1311/* temporary hw beacon is not used any more. 1299/* temporary hw beacon is not used any more.
@@ -1315,6 +1303,7 @@ void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate)
1315{ 1303{
1316 1304
1317} 1305}
1306
1318inline u8 rtl8192_IsWirelessBMode(u16 rate) 1307inline u8 rtl8192_IsWirelessBMode(u16 rate)
1319{ 1308{
1320 if (((rate <= 110) && (rate != 60) && (rate != 90)) || (rate == 220)) 1309 if (((rate <= 110) && (rate != 60) && (rate != 90)) || (rate == 220))
@@ -1737,7 +1726,6 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
1737 1726
1738#ifndef JACKSON_NEW_RX 1727#ifndef JACKSON_NEW_RX
1739 for (i = 0; i < (MAX_RX_URB + 1); i++) { 1728 for (i = 0; i < (MAX_RX_URB + 1); i++) {
1740
1741 priv->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL); 1729 priv->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
1742 1730
1743 priv->rx_urb[i]->transfer_buffer = 1731 priv->rx_urb[i]->transfer_buffer =
@@ -1782,8 +1770,8 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
1782 1770
1783 netdev_dbg(dev, "End of initendpoints\n"); 1771 netdev_dbg(dev, "End of initendpoints\n");
1784 return 0; 1772 return 0;
1785
1786} 1773}
1774
1787#ifdef THOMAS_BEACON 1775#ifdef THOMAS_BEACON
1788static void rtl8192_usb_deleteendpoints(struct net_device *dev) 1776static void rtl8192_usb_deleteendpoints(struct net_device *dev)
1789{ 1777{
@@ -1820,7 +1808,6 @@ void rtl8192_usb_deleteendpoints(struct net_device *dev)
1820 } 1808 }
1821 kfree(priv->rx_urb); 1809 kfree(priv->rx_urb);
1822 priv->rx_urb = NULL; 1810 priv->rx_urb = NULL;
1823
1824 } 1811 }
1825#else 1812#else
1826 kfree(priv->rx_urb); 1813 kfree(priv->rx_urb);
@@ -1888,6 +1875,7 @@ static void rtl8192_update_beacon(struct work_struct *work)
1888 net->bssht.bdRT2RTLongSlotTime; 1875 net->bssht.bdRT2RTLongSlotTime;
1889 rtl8192_update_cap(dev, net->capability); 1876 rtl8192_update_cap(dev, net->capability);
1890} 1877}
1878
1891/* 1879/*
1892* background support to run QoS activate functionality 1880* background support to run QoS activate functionality
1893*/ 1881*/
@@ -1992,7 +1980,6 @@ static int rtl8192_handle_beacon(struct net_device *dev,
1992 rtl8192_qos_handle_probe_response(priv, 1, network); 1980 rtl8192_qos_handle_probe_response(priv, 1, network);
1993 schedule_delayed_work(&priv->update_beacon_wq, 0); 1981 schedule_delayed_work(&priv->update_beacon_wq, 0);
1994 return 0; 1982 return 0;
1995
1996} 1983}
1997 1984
1998/* 1985/*
@@ -2007,7 +1994,7 @@ static int rtl8192_qos_association_resp(struct r8192_priv *priv,
2007 u32 size = sizeof(struct ieee80211_qos_parameters); 1994 u32 size = sizeof(struct ieee80211_qos_parameters);
2008 int set_qos_param = 0; 1995 int set_qos_param = 0;
2009 1996
2010 if ((priv == NULL) || (network == NULL)) 1997 if (!priv || !network)
2011 return 0; 1998 return 0;
2012 1999
2013 if (priv->ieee80211->state != IEEE80211_LINKED) 2000 if (priv->ieee80211->state != IEEE80211_LINKED)
@@ -2182,6 +2169,7 @@ static u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
2182 } 2169 }
2183 return ret; 2170 return ret;
2184} 2171}
2172
2185static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode) 2173static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
2186{ 2174{
2187 struct r8192_priv *priv = ieee80211_priv(dev); 2175 struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2223,8 +2211,8 @@ static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
2223 priv->ieee80211->pHTInfo->bEnableHT = 0; 2211 priv->ieee80211->pHTInfo->bEnableHT = 0;
2224 RT_TRACE(COMP_INIT, "Current Wireless Mode is %x\n", wireless_mode); 2212 RT_TRACE(COMP_INIT, "Current Wireless Mode is %x\n", wireless_mode);
2225 rtl8192_refresh_supportrate(priv); 2213 rtl8192_refresh_supportrate(priv);
2226
2227} 2214}
2215
2228/* init priv variables here. only non_zero value should be initialized here. */ 2216/* init priv variables here. only non_zero value should be initialized here. */
2229static void rtl8192_init_priv_variable(struct net_device *dev) 2217static void rtl8192_init_priv_variable(struct net_device *dev)
2230{ 2218{
@@ -2432,6 +2420,7 @@ static inline u16 endian_swap(u16 *data)
2432 *data = (tmp >> 8) | (tmp << 8); 2420 *data = (tmp >> 8) | (tmp << 8);
2433 return *data; 2421 return *data;
2434} 2422}
2423
2435static void rtl8192_read_eeprom_info(struct net_device *dev) 2424static void rtl8192_read_eeprom_info(struct net_device *dev)
2436{ 2425{
2437 u16 wEPROM_ID = 0; 2426 u16 wEPROM_ID = 0;
@@ -2627,7 +2616,6 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
2627 default: 2616 default:
2628 priv->CustomerID = RT_CID_DEFAULT; 2617 priv->CustomerID = RT_CID_DEFAULT;
2629 break; 2618 break;
2630
2631 } 2619 }
2632 2620
2633 switch (priv->CustomerID) { 2621 switch (priv->CustomerID) {
@@ -2642,7 +2630,6 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
2642 default: 2630 default:
2643 priv->LedStrategy = SW_LED_MODE0; 2631 priv->LedStrategy = SW_LED_MODE0;
2644 break; 2632 break;
2645
2646 } 2633 }
2647 2634
2648 2635
@@ -2676,7 +2663,6 @@ static short rtl8192_get_channel_map(struct net_device *dev)
2676 2663
2677static short rtl8192_init(struct net_device *dev) 2664static short rtl8192_init(struct net_device *dev)
2678{ 2665{
2679
2680 struct r8192_priv *priv = ieee80211_priv(dev); 2666 struct r8192_priv *priv = ieee80211_priv(dev);
2681 2667
2682 memset(&(priv->stats), 0, sizeof(struct Stats)); 2668 memset(&(priv->stats), 0, sizeof(struct Stats));
@@ -2797,8 +2783,6 @@ static void rtl8192_hwconfig(struct net_device *dev)
2797 /* Set Tx Antenna including Feedback control */ 2783 /* Set Tx Antenna including Feedback control */
2798 2784
2799 /* Set Auto Rate fallback control */ 2785 /* Set Auto Rate fallback control */
2800
2801
2802} 2786}
2803 2787
2804 2788
@@ -3027,7 +3011,6 @@ static bool rtl8192_adapter_start(struct net_device *dev)
3027 bMaskByte2); 3011 bMaskByte2);
3028 3012
3029 for (i = 0; i < CCKTxBBGainTableLength; i++) { 3013 for (i = 0; i < CCKTxBBGainTableLength; i++) {
3030
3031 if (TempCCk == priv->cck_txbbgain_table[i].ccktxbb_valuearray[0]) { 3014 if (TempCCk == priv->cck_txbbgain_table[i].ccktxbb_valuearray[0]) {
3032 priv->cck_present_attentuation_20Mdefault = (u8)i; 3015 priv->cck_present_attentuation_20Mdefault = (u8)i;
3033 break; 3016 break;
@@ -3037,7 +3020,6 @@ static bool rtl8192_adapter_start(struct net_device *dev)
3037 priv->cck_present_attentuation_difference = 0; 3020 priv->cck_present_attentuation_difference = 0;
3038 priv->cck_present_attentuation = 3021 priv->cck_present_attentuation =
3039 priv->cck_present_attentuation_20Mdefault; 3022 priv->cck_present_attentuation_20Mdefault;
3040
3041 } 3023 }
3042 } 3024 }
3043 write_nic_byte(dev, 0x87, 0x0); 3025 write_nic_byte(dev, 0x87, 0x0);
@@ -3222,7 +3204,6 @@ static RESET_TYPE rtl819x_ifcheck_resetornot(struct net_device *dev)
3222 } else { 3204 } else {
3223 return RESET_TYPE_NORESET; 3205 return RESET_TYPE_NORESET;
3224 } 3206 }
3225
3226} 3207}
3227 3208
3228static void rtl8192_cancel_deferred_work(struct r8192_priv *priv); 3209static void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
@@ -3250,7 +3231,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
3250 3231
3251 if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40) || 3232 if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40) ||
3252 (priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP104)) { 3233 (priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP104)) {
3253
3254 for (EntryId = 0; EntryId < 4; EntryId++) { 3234 for (EntryId = 0; EntryId < 4; EntryId++) {
3255 MacAddr = CAM_CONST_ADDR[EntryId]; 3235 MacAddr = CAM_CONST_ADDR[EntryId];
3256 setKey(dev, EntryId, EntryId, 3236 setKey(dev, EntryId, EntryId,
@@ -3259,7 +3239,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
3259 } 3239 }
3260 3240
3261 } else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_TKIP) { 3241 } else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_TKIP) {
3262
3263 if (priv->ieee80211->iw_mode == IW_MODE_ADHOC) 3242 if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
3264 setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type, 3243 setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
3265 (u8 *)dev->dev_addr, 0, NULL); 3244 (u8 *)dev->dev_addr, 0, NULL);
@@ -3267,7 +3246,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
3267 setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type, 3246 setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
3268 MacAddr, 0, NULL); 3247 MacAddr, 0, NULL);
3269 } else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP) { 3248 } else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP) {
3270
3271 if (priv->ieee80211->iw_mode == IW_MODE_ADHOC) 3249 if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
3272 setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type, 3250 setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
3273 (u8 *)dev->dev_addr, 0, NULL); 3251 (u8 *)dev->dev_addr, 0, NULL);
@@ -3301,6 +3279,7 @@ static void CamRestoreAllEntry(struct net_device *dev)
3301 CAM_CONST_ADDR[0], 0, NULL); 3279 CAM_CONST_ADDR[0], 0, NULL);
3302 } 3280 }
3303} 3281}
3282
3304/* This function is used to fix Tx/Rx stop bug temporarily. 3283/* This function is used to fix Tx/Rx stop bug temporarily.
3305 * This function will do "system reset" to NIC when Tx or Rx is stuck. 3284 * This function will do "system reset" to NIC when Tx or Rx is stuck.
3306 * The method checking Tx/Rx stuck of this function is supported by FW, 3285 * The method checking Tx/Rx stuck of this function is supported by FW,
@@ -3468,7 +3447,6 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
3468 /* for AP roaming */ 3447 /* for AP roaming */
3469 if (priv->ieee80211->state == IEEE80211_LINKED && 3448 if (priv->ieee80211->state == IEEE80211_LINKED &&
3470 priv->ieee80211->iw_mode == IW_MODE_INFRA) { 3449 priv->ieee80211->iw_mode == IW_MODE_INFRA) {
3471
3472 rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum); 3450 rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
3473 if ((TotalRxBcnNum + TotalRxDataNum) == 0) { 3451 if ((TotalRxBcnNum + TotalRxDataNum) == 0) {
3474#ifdef TODO 3452#ifdef TODO
@@ -3485,7 +3463,6 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
3485 priv->ieee80211->link_change(dev); 3463 priv->ieee80211->link_change(dev);
3486 queue_work(priv->ieee80211->wq, 3464 queue_work(priv->ieee80211->wq,
3487 &priv->ieee80211->associate_procedure_wq); 3465 &priv->ieee80211->associate_procedure_wq);
3488
3489 } 3466 }
3490 } 3467 }
3491 priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod = 0; 3468 priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod = 0;
@@ -3510,7 +3487,6 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
3510 priv->bForcedSilentReset = false; 3487 priv->bForcedSilentReset = false;
3511 priv->bResetInProgress = false; 3488 priv->bResetInProgress = false;
3512 RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n"); 3489 RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
3513
3514} 3490}
3515 3491
3516static void watch_dog_timer_callback(unsigned long data) 3492static void watch_dog_timer_callback(unsigned long data)
@@ -3521,6 +3497,7 @@ static void watch_dog_timer_callback(unsigned long data)
3521 mod_timer(&priv->watch_dog_timer, 3497 mod_timer(&priv->watch_dog_timer,
3522 jiffies + msecs_to_jiffies(IEEE80211_WATCH_DOG_TIME)); 3498 jiffies + msecs_to_jiffies(IEEE80211_WATCH_DOG_TIME));
3523} 3499}
3500
3524static int _rtl8192_up(struct net_device *dev) 3501static int _rtl8192_up(struct net_device *dev)
3525{ 3502{
3526 struct r8192_priv *priv = ieee80211_priv(dev); 3503 struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3560,7 +3537,6 @@ static int rtl8192_open(struct net_device *dev)
3560 ret = rtl8192_up(dev); 3537 ret = rtl8192_up(dev);
3561 up(&priv->wx_sem); 3538 up(&priv->wx_sem);
3562 return ret; 3539 return ret;
3563
3564} 3540}
3565 3541
3566 3542
@@ -3587,7 +3563,6 @@ static int rtl8192_close(struct net_device *dev)
3587 up(&priv->wx_sem); 3563 up(&priv->wx_sem);
3588 3564
3589 return ret; 3565 return ret;
3590
3591} 3566}
3592 3567
3593int rtl8192_down(struct net_device *dev) 3568int rtl8192_down(struct net_device *dev)
@@ -3649,7 +3624,6 @@ void rtl8192_commit(struct net_device *dev)
3649 3624
3650 rtl8192_rtx_disable(dev); 3625 rtl8192_rtx_disable(dev);
3651 reset_status = _rtl8192_up(dev); 3626 reset_status = _rtl8192_up(dev);
3652
3653} 3627}
3654 3628
3655static void rtl8192_restart(struct work_struct *work) 3629static void rtl8192_restart(struct work_struct *work)
@@ -4111,7 +4085,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
4111 (((priv->undecorated_smoothed_pwdb) * (Rx_Smooth_Factor - 1)) + 4085 (((priv->undecorated_smoothed_pwdb) * (Rx_Smooth_Factor - 1)) +
4112 (pprevious_stats->RxPWDBAll)) / (Rx_Smooth_Factor); 4086 (pprevious_stats->RxPWDBAll)) / (Rx_Smooth_Factor);
4113 } 4087 }
4114
4115 } 4088 }
4116 4089
4117 /* Check EVM */ 4090 /* Check EVM */
@@ -4159,8 +4132,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
4159 } 4132 }
4160 } 4133 }
4161 } 4134 }
4162
4163
4164} 4135}
4165 4136
4166/*----------------------------------------------------------------------------- 4137/*-----------------------------------------------------------------------------
@@ -4201,6 +4172,7 @@ static u8 rtl819x_evm_dbtopercentage(char value)
4201 ret_val = 100; 4172 ret_val = 100;
4202 return ret_val; 4173 return ret_val;
4203} 4174}
4175
4204/* We want good-looking for signal strength/quality */ 4176/* We want good-looking for signal strength/quality */
4205static long rtl819x_signal_scale_mapping(long currsig) 4177static long rtl819x_signal_scale_mapping(long currsig)
4206{ 4178{
@@ -4542,7 +4514,6 @@ static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
4542 bpacket_match_bssid, bpacket_toself, 4514 bpacket_match_bssid, bpacket_toself,
4543 bPacketBeacon, bToSelfBA); 4515 bPacketBeacon, bToSelfBA);
4544 rtl8192_record_rxdesc_forlateruse(pstats, &previous_stats); 4516 rtl8192_record_rxdesc_forlateruse(pstats, &previous_stats);
4545
4546} 4517}
4547 4518
4548/** 4519/**
@@ -4758,7 +4729,6 @@ static void query_rxdesc_status(struct sk_buff *skb,
4758 RT_TRACE(COMP_RXDESC, 4729 RT_TRACE(COMP_RXDESC,
4759 "driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n", 4730 "driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n",
4760 driver_info->FirstAGGR, driver_info->PartAggr); 4731 driver_info->FirstAGGR, driver_info->PartAggr);
4761
4762 } 4732 }
4763 4733
4764 skb_pull(skb, sizeof(rx_desc_819x_usb)); 4734 skb_pull(skb, sizeof(rx_desc_819x_usb));
@@ -4822,7 +4792,6 @@ static void rtl8192_rx_nomal(struct sk_buff *skb)
4822 netdev_dbg(dev, "actual_length: %d\n", skb->len); 4792 netdev_dbg(dev, "actual_length: %d\n", skb->len);
4823 dev_kfree_skb_any(skb); 4793 dev_kfree_skb_any(skb);
4824 } 4794 }
4825
4826} 4795}
4827 4796
4828static void rtl819xusb_process_received_packet( 4797static void rtl819xusb_process_received_packet(
@@ -4898,7 +4867,6 @@ static void rtl8192_rx_cmd(struct sk_buff *skb)
4898 }; 4867 };
4899 4868
4900 if ((skb->len >= (20 + sizeof(rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) { 4869 if ((skb->len >= (20 + sizeof(rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
4901
4902 query_rx_cmdpkt_desc_status(skb, &stats); 4870 query_rx_cmdpkt_desc_status(skb, &stats);
4903 /* prfd->queue_id = 1; */ 4871 /* prfd->queue_id = 1; */
4904 4872
@@ -4937,7 +4905,6 @@ static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
4937 info->out_pipe); 4905 info->out_pipe);
4938 dev_kfree_skb(skb); 4906 dev_kfree_skb(skb);
4939 break; 4907 break;
4940
4941 } 4908 }
4942 } 4909 }
4943} 4910}
@@ -4971,7 +4938,7 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
4971 RT_TRACE(COMP_INIT, "Oops: i'm coming\n"); 4938 RT_TRACE(COMP_INIT, "Oops: i'm coming\n");
4972 4939
4973 dev = alloc_ieee80211(sizeof(struct r8192_priv)); 4940 dev = alloc_ieee80211(sizeof(struct r8192_priv));
4974 if (dev == NULL) 4941 if (!dev)
4975 return -ENOMEM; 4942 return -ENOMEM;
4976 4943
4977 usb_set_intfdata(intf, dev); 4944 usb_set_intfdata(intf, dev);
@@ -5034,7 +5001,6 @@ fail:
5034 */ 5001 */
5035static void rtl8192_cancel_deferred_work(struct r8192_priv *priv) 5002static void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
5036{ 5003{
5037
5038 cancel_work_sync(&priv->reset_wq); 5004 cancel_work_sync(&priv->reset_wq);
5039 cancel_delayed_work(&priv->watch_dog_wq); 5005 cancel_delayed_work(&priv->watch_dog_wq);
5040 cancel_delayed_work(&priv->update_beacon_wq); 5006 cancel_delayed_work(&priv->update_beacon_wq);
@@ -5191,13 +5157,12 @@ void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
5191 write_nic_dword(dev, RWCAM, TargetCommand); 5157 write_nic_dword(dev, RWCAM, TargetCommand);
5192 } else { 5158 } else {
5193 /* Key Material */ 5159 /* Key Material */
5194 if (KeyContent != NULL) { 5160 if (KeyContent) {
5195 write_nic_dword(dev, WCAMI, (u32)(*(KeyContent + i - 2))); 5161 write_nic_dword(dev, WCAMI, (u32)(*(KeyContent + i - 2)));
5196 write_nic_dword(dev, RWCAM, TargetCommand); 5162 write_nic_dword(dev, RWCAM, TargetCommand);
5197 } 5163 }
5198 } 5164 }
5199 } 5165 }
5200
5201} 5166}
5202 5167
5203/*************************************************************************** 5168/***************************************************************************
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index f828e6441f2d..837704de3ea4 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -30,7 +30,6 @@
30static const u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000, 30static const u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
31 6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000}; 31 6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};
32 32
33
34#ifndef ENETDOWN 33#ifndef ENETDOWN
35#define ENETDOWN 1 34#define ENETDOWN 1
36#endif 35#endif
@@ -44,7 +43,6 @@ static int r8192_wx_get_freq(struct net_device *dev,
44 return ieee80211_wx_get_freq(priv->ieee80211, a, wrqu, b); 43 return ieee80211_wx_get_freq(priv->ieee80211, a, wrqu, b);
45} 44}
46 45
47
48static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a, 46static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
49 union iwreq_data *wrqu, char *b) 47 union iwreq_data *wrqu, char *b)
50{ 48{
@@ -53,8 +51,6 @@ static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
53 return ieee80211_wx_get_mode(priv->ieee80211, a, wrqu, b); 51 return ieee80211_wx_get_mode(priv->ieee80211, a, wrqu, b);
54} 52}
55 53
56
57
58static int r8192_wx_get_rate(struct net_device *dev, 54static int r8192_wx_get_rate(struct net_device *dev,
59 struct iw_request_info *info, 55 struct iw_request_info *info,
60 union iwreq_data *wrqu, char *extra) 56 union iwreq_data *wrqu, char *extra)
@@ -64,8 +60,6 @@ static int r8192_wx_get_rate(struct net_device *dev,
64 return ieee80211_wx_get_rate(priv->ieee80211, info, wrqu, extra); 60 return ieee80211_wx_get_rate(priv->ieee80211, info, wrqu, extra);
65} 61}
66 62
67
68
69static int r8192_wx_set_rate(struct net_device *dev, 63static int r8192_wx_set_rate(struct net_device *dev,
70 struct iw_request_info *info, 64 struct iw_request_info *info,
71 union iwreq_data *wrqu, char *extra) 65 union iwreq_data *wrqu, char *extra)
@@ -82,7 +76,6 @@ static int r8192_wx_set_rate(struct net_device *dev,
82 return ret; 76 return ret;
83} 77}
84 78
85
86static int r8192_wx_set_rts(struct net_device *dev, 79static int r8192_wx_set_rts(struct net_device *dev,
87 struct iw_request_info *info, 80 struct iw_request_info *info,
88 union iwreq_data *wrqu, char *extra) 81 union iwreq_data *wrqu, char *extra)
@@ -148,7 +141,6 @@ static int r8192_wx_force_reset(struct net_device *dev,
148 141
149} 142}
150 143
151
152static int r8192_wx_set_rawtx(struct net_device *dev, 144static int r8192_wx_set_rawtx(struct net_device *dev,
153 struct iw_request_info *info, 145 struct iw_request_info *info,
154 union iwreq_data *wrqu, char *extra) 146 union iwreq_data *wrqu, char *extra)
@@ -301,7 +293,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
301 /* range->min_r_time; */ /* Minimal retry lifetime */ 293 /* range->min_r_time; */ /* Minimal retry lifetime */
302 /* range->max_r_time; */ /* Maximal retry lifetime */ 294 /* range->max_r_time; */ /* Maximal retry lifetime */
303 295
304
305 for (i = 0, val = 0; i < 14; i++) { 296 for (i = 0, val = 0; i < 14; i++) {
306 297
307 /* Include only legal frequencies for some countries */ 298 /* Include only legal frequencies for some countries */
@@ -326,7 +317,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
326 return 0; 317 return 0;
327} 318}
328 319
329
330static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a, 320static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
331 union iwreq_data *wrqu, char *b) 321 union iwreq_data *wrqu, char *b)
332{ 322{
@@ -396,9 +386,6 @@ static int r8192_wx_set_essid(struct net_device *dev,
396 return ret; 386 return ret;
397} 387}
398 388
399
400
401
402static int r8192_wx_get_essid(struct net_device *dev, 389static int r8192_wx_get_essid(struct net_device *dev,
403 struct iw_request_info *a, 390 struct iw_request_info *a,
404 union iwreq_data *wrqu, char *b) 391 union iwreq_data *wrqu, char *b)
@@ -415,7 +402,6 @@ static int r8192_wx_get_essid(struct net_device *dev,
415 return ret; 402 return ret;
416} 403}
417 404
418
419static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a, 405static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
420 union iwreq_data *wrqu, char *b) 406 union iwreq_data *wrqu, char *b)
421{ 407{
@@ -439,7 +425,6 @@ static int r8192_wx_get_name(struct net_device *dev,
439 return ieee80211_wx_get_name(priv->ieee80211, info, wrqu, extra); 425 return ieee80211_wx_get_name(priv->ieee80211, info, wrqu, extra);
440} 426}
441 427
442
443static int r8192_wx_set_frag(struct net_device *dev, 428static int r8192_wx_set_frag(struct net_device *dev,
444 struct iw_request_info *info, 429 struct iw_request_info *info,
445 union iwreq_data *wrqu, char *extra) 430 union iwreq_data *wrqu, char *extra)
@@ -493,7 +478,6 @@ static int r8192_wx_set_wap(struct net_device *dev,
493 478
494} 479}
495 480
496
497static int r8192_wx_get_wap(struct net_device *dev, 481static int r8192_wx_get_wap(struct net_device *dev,
498 struct iw_request_info *info, 482 struct iw_request_info *info,
499 union iwreq_data *wrqu, char *extra) 483 union iwreq_data *wrqu, char *extra)
@@ -503,7 +487,6 @@ static int r8192_wx_get_wap(struct net_device *dev,
503 return ieee80211_wx_get_wap(priv->ieee80211, info, wrqu, extra); 487 return ieee80211_wx_get_wap(priv->ieee80211, info, wrqu, extra);
504} 488}
505 489
506
507static int r8192_wx_get_enc(struct net_device *dev, 490static int r8192_wx_get_enc(struct net_device *dev,
508 struct iw_request_info *info, 491 struct iw_request_info *info,
509 union iwreq_data *wrqu, char *key) 492 union iwreq_data *wrqu, char *key)
@@ -695,7 +678,6 @@ static int r8192_wx_get_retry(struct net_device *dev,
695 wrqu->retry.value = priv->retry_data; 678 wrqu->retry.value = priv->retry_data;
696 } 679 }
697 680
698
699 return 0; 681 return 0;
700} 682}
701 683
@@ -711,7 +693,6 @@ static int r8192_wx_get_sens(struct net_device *dev,
711 return 0; 693 return 0;
712} 694}
713 695
714
715static int r8192_wx_set_sens(struct net_device *dev, 696static int r8192_wx_set_sens(struct net_device *dev,
716 struct iw_request_info *info, 697 struct iw_request_info *info,
717 union iwreq_data *wrqu, char *extra) 698 union iwreq_data *wrqu, char *extra)
@@ -862,7 +843,6 @@ static int dummy(struct net_device *dev, struct iw_request_info *a,
862 return -1; 843 return -1;
863} 844}
864 845
865
866static iw_handler r8192_wx_handlers[] = { 846static iw_handler r8192_wx_handlers[] = {
867 NULL, /* SIOCSIWCOMMIT */ 847 NULL, /* SIOCSIWCOMMIT */
868 r8192_wx_get_name, /* SIOCGIWNAME */ 848 r8192_wx_get_name, /* SIOCGIWNAME */
@@ -949,7 +929,6 @@ static const struct iw_priv_args r8192_private_args[] = {
949 929
950}; 930};
951 931
952
953static iw_handler r8192_private_handler[] = { 932static iw_handler r8192_private_handler[] = {
954 r8192_wx_set_crcmon, 933 r8192_wx_set_crcmon,
955 r8192_wx_set_scan_type, 934 r8192_wx_set_scan_type,
@@ -985,7 +964,6 @@ struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev)
985 return wstats; 964 return wstats;
986} 965}
987 966
988
989struct iw_handler_def r8192_wx_handlers_def = { 967struct iw_handler_def r8192_wx_handlers_def = {
990 .standard = r8192_wx_handlers, 968 .standard = r8192_wx_handlers,
991 .num_standard = ARRAY_SIZE(r8192_wx_handlers), 969 .num_standard = ARRAY_SIZE(r8192_wx_handlers),
diff --git a/drivers/staging/rtl8712/basic_types.h b/drivers/staging/rtl8712/basic_types.h
index 7561bed5dd44..f5c0231891b1 100644
--- a/drivers/staging/rtl8712/basic_types.h
+++ b/drivers/staging/rtl8712/basic_types.h
@@ -11,10 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * Modifications for inclusion into the Linux staging tree are 14 * Modifications for inclusion into the Linux staging tree are
19 * Copyright(c) 2010 Larry Finger. All rights reserved. 15 * Copyright(c) 2010 Larry Finger. All rights reserved.
20 * 16 *
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index 29e47e1501c5..ae79047ac6dc 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -11,10 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * Modifications for inclusion into the Linux staging tree are 14 * Modifications for inclusion into the Linux staging tree are
19 * Copyright(c) 2010 Larry Finger. All rights reserved. 15 * Copyright(c) 2010 Larry Finger. All rights reserved.
20 * 16 *
diff --git a/drivers/staging/rtl8712/ethernet.h b/drivers/staging/rtl8712/ethernet.h
index fad173f4097e..039da36fad3d 100644
--- a/drivers/staging/rtl8712/ethernet.h
+++ b/drivers/staging/rtl8712/ethernet.h
@@ -11,10 +11,6 @@
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * Modifications for inclusion into the Linux staging tree are 14 * Modifications for inclusion into the Linux staging tree are
19 * Copyright(c) 2010 Larry Finger. All rights reserved. 15 * Copyright(c) 2010 Larry Finger. All rights reserved.
20 * 16 *
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index 8008efe5686d..0dd458d1402c 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -13,10 +13,6 @@
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details. 14 * more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * Modifications for inclusion into the Linux staging tree are 16 * Modifications for inclusion into the Linux staging tree are
21 * Copyright(c) 2010 Larry Finger. All rights reserved. 17 * Copyright(c) 2010 Larry Finger. All rights reserved.
22 * 18 *
@@ -201,8 +197,8 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
201 0x0000ffff); 197 0x0000ffff);
202 memcpy(ppayload, ptr, dump_imem_sz); 198 memcpy(ppayload, ptr, dump_imem_sz);
203 r8712_write_mem(padapter, RTL8712_DMA_VOQ, 199 r8712_write_mem(padapter, RTL8712_DMA_VOQ,
204 dump_imem_sz + TXDESC_SIZE, 200 dump_imem_sz + TXDESC_SIZE,
205 (u8 *)ptx_desc); 201 (u8 *)ptx_desc);
206 ptr += dump_imem_sz; 202 ptr += dump_imem_sz;
207 imem_sz -= dump_imem_sz; 203 imem_sz -= dump_imem_sz;
208 } while (imem_sz > 0); 204 } while (imem_sz > 0);
@@ -230,7 +226,8 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
230 0x0000ffff); 226 0x0000ffff);
231 memcpy(ppayload, ptr, dump_emem_sz); 227 memcpy(ppayload, ptr, dump_emem_sz);
232 r8712_write_mem(padapter, RTL8712_DMA_VOQ, 228 r8712_write_mem(padapter, RTL8712_DMA_VOQ,
233 dump_emem_sz + TXDESC_SIZE, (u8 *)ptx_desc); 229 dump_emem_sz + TXDESC_SIZE,
230 (u8 *)ptx_desc);
234 ptr += dump_emem_sz; 231 ptr += dump_emem_sz;
235 emem_sz -= dump_emem_sz; 232 emem_sz -= dump_emem_sz;
236 } while (emem_sz > 0); 233 } while (emem_sz > 0);
@@ -282,7 +279,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
282 ptx_desc->txdw0 |= cpu_to_le32(BIT(28)); 279 ptx_desc->txdw0 |= cpu_to_le32(BIT(28));
283 memcpy(ppayload, &fwhdr.fwpriv, fwhdr.fw_priv_sz); 280 memcpy(ppayload, &fwhdr.fwpriv, fwhdr.fw_priv_sz);
284 r8712_write_mem(padapter, RTL8712_DMA_VOQ, 281 r8712_write_mem(padapter, RTL8712_DMA_VOQ,
285 fwhdr.fw_priv_sz + TXDESC_SIZE, (u8 *)ptx_desc); 282 fwhdr.fw_priv_sz + TXDESC_SIZE, (u8 *)ptx_desc);
286 283
287 /* polling dmem code done */ 284 /* polling dmem code done */
288 i = 100; 285 i = 100;
@@ -297,7 +294,8 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
297 294
298 tmp8 = r8712_read8(padapter, 0x1025000A); 295 tmp8 = r8712_read8(padapter, 0x1025000A);
299 if (tmp8 & BIT(4)) /* When boot from EEPROM, 296 if (tmp8 & BIT(4)) /* When boot from EEPROM,
300 & FW need more time to read EEPROM */ 297 * & FW need more time to read EEPROM
298 */
301 i = 60; 299 i = 60;
302 else /* boot from EFUSE */ 300 else /* boot from EFUSE */
303 i = 30; 301 i = 30;
@@ -332,7 +330,8 @@ uint rtl8712_hal_init(struct _adapter *padapter)
332 r8712_read32(padapter, RCR)); 330 r8712_read32(padapter, RCR));
333 val32 = r8712_read32(padapter, RCR); 331 val32 = r8712_read32(padapter, RCR);
334 r8712_write32(padapter, RCR, (val32 | BIT(26))); /* Enable RX TCP 332 r8712_write32(padapter, RCR, (val32 | BIT(26))); /* Enable RX TCP
335 Checksum offload */ 333 * Checksum offload
334 */
336 netdev_info(padapter->pnetdev, "2 RCR=0x%x\n", 335 netdev_info(padapter->pnetdev, "2 RCR=0x%x\n",
337 r8712_read32(padapter, RCR)); 336 r8712_read32(padapter, RCR));
338 val32 = r8712_read32(padapter, RCR); 337 val32 = r8712_read32(padapter, RCR);
@@ -346,7 +345,8 @@ uint rtl8712_hal_init(struct _adapter *padapter)
346 r8712_write8(padapter, 0x102500BD, r8712_read8(padapter, 0x102500BD) | 345 r8712_write8(padapter, 0x102500BD, r8712_read8(padapter, 0x102500BD) |
347 BIT(7)); /* enable usb rx aggregation */ 346 BIT(7)); /* enable usb rx aggregation */
348 r8712_write8(padapter, 0x102500D9, 1); /* TH=1 => means that invalidate 347 r8712_write8(padapter, 0x102500D9, 1); /* TH=1 => means that invalidate
349 * usb rx aggregation */ 348 * usb rx aggregation
349 */
350 r8712_write8(padapter, 0x1025FE5B, 0x04); /* 1.7ms/4 */ 350 r8712_write8(padapter, 0x1025FE5B, 0x04); /* 1.7ms/4 */
351 /* Fix the RX FIFO issue(USB error) */ 351 /* Fix the RX FIFO issue(USB error) */
352 r8712_write8(padapter, 0x1025fe5C, r8712_read8(padapter, 0x1025fe5C) 352 r8712_write8(padapter, 0x1025fe5C, r8712_read8(padapter, 0x1025fe5C)
@@ -367,7 +367,8 @@ uint rtl8712_hal_deinit(struct _adapter *padapter)
367 r8712_write8(padapter, SYS_FUNC_EN + 1, 0x70); 367 r8712_write8(padapter, SYS_FUNC_EN + 1, 0x70);
368 r8712_write8(padapter, PMC_FSM, 0x06); /* Enable Loader Data Keep */ 368 r8712_write8(padapter, PMC_FSM, 0x06); /* Enable Loader Data Keep */
369 r8712_write8(padapter, SYS_ISO_CTRL, 0xF9); /* Isolation signals from 369 r8712_write8(padapter, SYS_ISO_CTRL, 0xF9); /* Isolation signals from
370 * CORE, PLL */ 370 * CORE, PLL
371 */
371 r8712_write8(padapter, SYS_ISO_CTRL + 1, 0xe8); /* Enable EFUSE 1.2V */ 372 r8712_write8(padapter, SYS_ISO_CTRL + 1, 0xe8); /* Enable EFUSE 1.2V */
372 r8712_write8(padapter, AFE_PLL_CTRL, 0x00); /* Disable AFE PLL. */ 373 r8712_write8(padapter, AFE_PLL_CTRL, 0x00); /* Disable AFE PLL. */
373 r8712_write8(padapter, LDOA15_CTRL, 0x54); /* Disable A15V */ 374 r8712_write8(padapter, LDOA15_CTRL, 0x54); /* Disable A15V */
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index d13b4d53c256..8918654b44ed 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -13,10 +13,6 @@
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details. 14 * more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * Modifications for inclusion into the Linux staging tree are 16 * Modifications for inclusion into the Linux staging tree are
21 * Copyright(c) 2010 Larry Finger. All rights reserved. 17 * Copyright(c) 2010 Larry Finger. All rights reserved.
22 * 18 *
diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c
index e4e4bdee78be..af7c4a47738a 100644
--- a/drivers/staging/rtl8712/mlme_linux.c
+++ b/drivers/staging/rtl8712/mlme_linux.c
@@ -153,7 +153,7 @@ void r8712_report_sec_ie(struct _adapter *adapter, u8 authmode, u8 *sec_ie)
153 buff = NULL; 153 buff = NULL;
154 if (authmode == _WPA_IE_ID_) { 154 if (authmode == _WPA_IE_ID_) {
155 buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC); 155 buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC);
156 if (buff == NULL) 156 if (!buff)
157 return; 157 return;
158 p = buff; 158 p = buff;
159 p += sprintf(p, "ASSOCINFO(ReqIEs="); 159 p += sprintf(p, "ASSOCINFO(ReqIEs=");
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index ab19112eae13..57211f7e68a5 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -389,7 +389,7 @@ static int netdev_open(struct net_device *pnetdev)
389 padapter->bup = true; 389 padapter->bup = true;
390 if (rtl871x_hal_init(padapter) != _SUCCESS) 390 if (rtl871x_hal_init(padapter) != _SUCCESS)
391 goto netdev_open_error; 391 goto netdev_open_error;
392 if (r8712_initmac == NULL) 392 if (!r8712_initmac)
393 /* Use the mac address stored in the Efuse */ 393 /* Use the mac address stored in the Efuse */
394 memcpy(pnetdev->dev_addr, 394 memcpy(pnetdev->dev_addr,
395 padapter->eeprompriv.mac_addr, ETH_ALEN); 395 padapter->eeprompriv.mac_addr, ETH_ALEN);
@@ -413,7 +413,7 @@ static int netdev_open(struct net_device *pnetdev)
413 } 413 }
414 if (start_drv_threads(padapter) != _SUCCESS) 414 if (start_drv_threads(padapter) != _SUCCESS)
415 goto netdev_open_error; 415 goto netdev_open_error;
416 if (padapter->dvobjpriv.inirp_init == NULL) 416 if (!padapter->dvobjpriv.inirp_init)
417 goto netdev_open_error; 417 goto netdev_open_error;
418 else 418 else
419 padapter->dvobjpriv.inirp_init(padapter); 419 padapter->dvobjpriv.inirp_init(padapter);
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index 076d5083c723..ad041c96fdb8 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -57,9 +57,6 @@ struct __queue {
57 spin_lock_init(&((pqueue)->lock)); \ 57 spin_lock_init(&((pqueue)->lock)); \
58 } while (0) 58 } while (0)
59 59
60#define LIST_CONTAINOR(ptr, type, member) \
61 ((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
62
63static inline u32 _down_sema(struct semaphore *sema) 60static inline u32 _down_sema(struct semaphore *sema)
64{ 61{
65 if (down_interruptible(sema)) 62 if (down_interruptible(sema))
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 50f400234593..13c018340ff2 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -135,7 +135,7 @@ static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
135 135
136 /* invoke cmd->callback function */ 136 /* invoke cmd->callback function */
137 pcmd_callback = cmd_callback[pcmd->cmdcode].callback; 137 pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
138 if (pcmd_callback == NULL) 138 if (!pcmd_callback)
139 r8712_free_cmd_obj(pcmd); 139 r8712_free_cmd_obj(pcmd);
140 else 140 else
141 pcmd_callback(padapter, pcmd); 141 pcmd_callback(padapter, pcmd);
@@ -149,7 +149,7 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
149 149
150 /* invoke cmd->callback function */ 150 /* invoke cmd->callback function */
151 pcmd_callback = cmd_callback[pcmd->cmdcode].callback; 151 pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
152 if (pcmd_callback == NULL) 152 if (!pcmd_callback)
153 r8712_free_cmd_obj(pcmd); 153 r8712_free_cmd_obj(pcmd);
154 else 154 else
155 pcmd_callback(padapter, pcmd); 155 pcmd_callback(padapter, pcmd);
@@ -165,7 +165,7 @@ static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
165 if (pcmd->rsp && pcmd->rspsz > 0) 165 if (pcmd->rsp && pcmd->rspsz > 0)
166 memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz); 166 memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
167 pcmd_callback = cmd_callback[pcmd->cmdcode].callback; 167 pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
168 if (pcmd_callback == NULL) 168 if (!pcmd_callback)
169 r8712_free_cmd_obj(pcmd); 169 r8712_free_cmd_obj(pcmd);
170 else 170 else
171 pcmd_callback(padapter, pcmd); 171 pcmd_callback(padapter, pcmd);
@@ -178,7 +178,7 @@ static u8 write_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
178 struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; 178 struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
179 179
180 pcmd_callback = cmd_callback[pcmd->cmdcode].callback; 180 pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
181 if (pcmd_callback == NULL) 181 if (!pcmd_callback)
182 r8712_free_cmd_obj(pcmd); 182 r8712_free_cmd_obj(pcmd);
183 else 183 else
184 pcmd_callback(padapter, pcmd); 184 pcmd_callback(padapter, pcmd);
@@ -194,7 +194,7 @@ static u8 read_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
194 if (pcmd->rsp && pcmd->rspsz > 0) 194 if (pcmd->rsp && pcmd->rspsz > 0)
195 memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz); 195 memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
196 pcmd_callback = cmd_callback[pcmd->cmdcode].callback; 196 pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
197 if (pcmd_callback == NULL) 197 if (!pcmd_callback)
198 r8712_free_cmd_obj(pcmd); 198 r8712_free_cmd_obj(pcmd);
199 else 199 else
200 pcmd_callback(padapter, pcmd); 200 pcmd_callback(padapter, pcmd);
@@ -207,7 +207,7 @@ static u8 write_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
207 struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; 207 struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
208 208
209 pcmd_callback = cmd_callback[pcmd->cmdcode].callback; 209 pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
210 if (pcmd_callback == NULL) 210 if (!pcmd_callback)
211 r8712_free_cmd_obj(pcmd); 211 r8712_free_cmd_obj(pcmd);
212 else 212 else
213 pcmd_callback(padapter, pcmd); 213 pcmd_callback(padapter, pcmd);
@@ -227,7 +227,7 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
227{ 227{
228 struct cmd_obj *pcmd_r; 228 struct cmd_obj *pcmd_r;
229 229
230 if (pcmd == NULL) 230 if (!pcmd)
231 return pcmd; 231 return pcmd;
232 pcmd_r = NULL; 232 pcmd_r = NULL;
233 233
@@ -416,7 +416,7 @@ _next:
416 /* free all cmd_obj resources */ 416 /* free all cmd_obj resources */
417 do { 417 do {
418 pcmd = r8712_dequeue_cmd(&(pcmdpriv->cmd_queue)); 418 pcmd = r8712_dequeue_cmd(&(pcmdpriv->cmd_queue));
419 if (pcmd == NULL) 419 if (!pcmd)
420 break; 420 break;
421 r8712_free_cmd_obj(pcmd); 421 r8712_free_cmd_obj(pcmd);
422 } while (1); 422 } while (1);
@@ -431,7 +431,7 @@ void r8712_event_handle(struct _adapter *padapter, uint *peventbuf)
431 void (*event_callback)(struct _adapter *dev, u8 *pbuf); 431 void (*event_callback)(struct _adapter *dev, u8 *pbuf);
432 struct evt_priv *pevt_priv = &(padapter->evtpriv); 432 struct evt_priv *pevt_priv = &(padapter->evtpriv);
433 433
434 if (peventbuf == NULL) 434 if (!peventbuf)
435 goto _abort_event_; 435 goto _abort_event_;
436 evt_sz = (u16)(le32_to_cpu(*peventbuf) & 0xffff); 436 evt_sz = (u16)(le32_to_cpu(*peventbuf) & 0xffff);
437 evt_seq = (u8)((le32_to_cpu(*peventbuf) >> 24) & 0x7f); 437 evt_seq = (u8)((le32_to_cpu(*peventbuf) >> 24) & 0x7f);
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index d187508dd1e0..f25b34c7d115 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -204,7 +204,7 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
204 pfree_recv_queue = &adapter->recvpriv.free_recv_queue; 204 pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
205 phead = &defrag_q->queue; 205 phead = &defrag_q->queue;
206 plist = phead->next; 206 plist = phead->next;
207 prframe = LIST_CONTAINOR(plist, union recv_frame, u); 207 prframe = container_of(plist, union recv_frame, u.list);
208 list_del_init(&prframe->u.list); 208 list_del_init(&prframe->u.list);
209 pfhdr = &prframe->u.hdr; 209 pfhdr = &prframe->u.hdr;
210 curfragnum = 0; 210 curfragnum = 0;
@@ -219,7 +219,7 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
219 plist = &defrag_q->queue; 219 plist = &defrag_q->queue;
220 plist = plist->next; 220 plist = plist->next;
221 while (!end_of_queue_search(phead, plist)) { 221 while (!end_of_queue_search(phead, plist)) {
222 pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u); 222 pnextrframe = container_of(plist, union recv_frame, u.list);
223 pnfhdr = &pnextrframe->u.hdr; 223 pnfhdr = &pnextrframe->u.hdr;
224 /*check the fragment sequence (2nd ~n fragment frame) */ 224 /*check the fragment sequence (2nd ~n fragment frame) */
225 if (curfragnum != pnfhdr->attrib.frag_num) { 225 if (curfragnum != pnfhdr->attrib.frag_num) {
@@ -492,7 +492,7 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
492 phead = &ppending_recvframe_queue->queue; 492 phead = &ppending_recvframe_queue->queue;
493 plist = phead->next; 493 plist = phead->next;
494 while (!end_of_queue_search(phead, plist)) { 494 while (!end_of_queue_search(phead, plist)) {
495 pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u); 495 pnextrframe = container_of(plist, union recv_frame, u.list);
496 pnextattrib = &pnextrframe->u.hdr.attrib; 496 pnextattrib = &pnextrframe->u.hdr.attrib;
497 if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num)) 497 if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
498 plist = plist->next; 498 plist = plist->next;
@@ -525,14 +525,14 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
525 if (list_empty(phead)) 525 if (list_empty(phead))
526 return true; 526 return true;
527 527
528 prframe = LIST_CONTAINOR(plist, union recv_frame, u); 528 prframe = container_of(plist, union recv_frame, u.list);
529 pattrib = &prframe->u.hdr.attrib; 529 pattrib = &prframe->u.hdr.attrib;
530 preorder_ctrl->indicate_seq = pattrib->seq_num; 530 preorder_ctrl->indicate_seq = pattrib->seq_num;
531 } 531 }
532 /* Prepare indication list and indication. 532 /* Prepare indication list and indication.
533 * Check if there is any packet need indicate. */ 533 * Check if there is any packet need indicate. */
534 while (!list_empty(phead)) { 534 while (!list_empty(phead)) {
535 prframe = LIST_CONTAINOR(plist, union recv_frame, u); 535 prframe = container_of(plist, union recv_frame, u.list);
536 pattrib = &prframe->u.hdr.attrib; 536 pattrib = &prframe->u.hdr.attrib;
537 if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) { 537 if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
538 plist = plist->next; 538 plist = plist->next;
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index b21a60e9f8a9..7e0b94503dfc 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -169,8 +169,8 @@ static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv,
169 xmitframe_phead = &pframe_queue->queue; 169 xmitframe_phead = &pframe_queue->queue;
170 xmitframe_plist = xmitframe_phead->next; 170 xmitframe_plist = xmitframe_phead->next;
171 if (!end_of_queue_search(xmitframe_phead, xmitframe_plist)) { 171 if (!end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
172 pxmitframe = LIST_CONTAINOR(xmitframe_plist, 172 pxmitframe = container_of(xmitframe_plist,
173 struct xmit_frame, list); 173 struct xmit_frame, list);
174 list_del_init(&pxmitframe->list); 174 list_del_init(&pxmitframe->list);
175 ptxservq->qcnt--; 175 ptxservq->qcnt--;
176 phwxmit->txcmdcnt++; 176 phwxmit->txcmdcnt++;
@@ -209,8 +209,8 @@ static struct xmit_frame *dequeue_xframe_ex(struct xmit_priv *pxmitpriv,
209 sta_phead = &phwxmit->sta_queue->queue; 209 sta_phead = &phwxmit->sta_queue->queue;
210 sta_plist = sta_phead->next; 210 sta_plist = sta_phead->next;
211 while (!end_of_queue_search(sta_phead, sta_plist)) { 211 while (!end_of_queue_search(sta_phead, sta_plist)) {
212 ptxservq = LIST_CONTAINOR(sta_plist, struct tx_servq, 212 ptxservq = container_of(sta_plist, struct tx_servq,
213 tx_pending); 213 tx_pending);
214 pframe_queue = &ptxservq->sta_pending; 214 pframe_queue = &ptxservq->sta_pending;
215 pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, 215 pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit,
216 ptxservq, pframe_queue); 216 ptxservq, pframe_queue);
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 86136cc73672..aed03cfbb1ba 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -225,10 +225,10 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
225 struct mlme_priv *pmlmepriv = &padapter->mlmepriv; 225 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
226 226
227 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 227 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
228 if (ph2c == NULL) 228 if (!ph2c)
229 return _FAIL; 229 return _FAIL;
230 psurveyPara = kmalloc(sizeof(*psurveyPara), GFP_ATOMIC); 230 psurveyPara = kmalloc(sizeof(*psurveyPara), GFP_ATOMIC);
231 if (psurveyPara == NULL) { 231 if (!psurveyPara) {
232 kfree(ph2c); 232 kfree(ph2c);
233 return _FAIL; 233 return _FAIL;
234 } 234 }
@@ -258,10 +258,10 @@ u8 r8712_setdatarate_cmd(struct _adapter *padapter, u8 *rateset)
258 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 258 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
259 259
260 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 260 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
261 if (ph2c == NULL) 261 if (!ph2c)
262 return _FAIL; 262 return _FAIL;
263 pbsetdataratepara = kmalloc(sizeof(*pbsetdataratepara), GFP_ATOMIC); 263 pbsetdataratepara = kmalloc(sizeof(*pbsetdataratepara), GFP_ATOMIC);
264 if (pbsetdataratepara == NULL) { 264 if (!pbsetdataratepara) {
265 kfree(ph2c); 265 kfree(ph2c);
266 return _FAIL; 266 return _FAIL;
267 } 267 }
@@ -280,10 +280,10 @@ u8 r8712_set_chplan_cmd(struct _adapter *padapter, int chplan)
280 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 280 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
281 281
282 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 282 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
283 if (ph2c == NULL) 283 if (!ph2c)
284 return _FAIL; 284 return _FAIL;
285 psetchplanpara = kmalloc(sizeof(*psetchplanpara), GFP_ATOMIC); 285 psetchplanpara = kmalloc(sizeof(*psetchplanpara), GFP_ATOMIC);
286 if (psetchplanpara == NULL) { 286 if (!psetchplanpara) {
287 kfree(ph2c); 287 kfree(ph2c);
288 return _FAIL; 288 return _FAIL;
289 } 289 }
@@ -301,10 +301,10 @@ u8 r8712_setbasicrate_cmd(struct _adapter *padapter, u8 *rateset)
301 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 301 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
302 302
303 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 303 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
304 if (ph2c == NULL) 304 if (!ph2c)
305 return _FAIL; 305 return _FAIL;
306 pssetbasicratepara = kmalloc(sizeof(*pssetbasicratepara), GFP_ATOMIC); 306 pssetbasicratepara = kmalloc(sizeof(*pssetbasicratepara), GFP_ATOMIC);
307 if (pssetbasicratepara == NULL) { 307 if (!pssetbasicratepara) {
308 kfree(ph2c); 308 kfree(ph2c);
309 return _FAIL; 309 return _FAIL;
310 } 310 }
@@ -322,10 +322,10 @@ u8 r8712_setfwdig_cmd(struct _adapter *padapter, u8 type)
322 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 322 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
323 323
324 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 324 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
325 if (ph2c == NULL) 325 if (!ph2c)
326 return _FAIL; 326 return _FAIL;
327 pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC); 327 pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
328 if (pwriteptmparm == NULL) { 328 if (!pwriteptmparm) {
329 kfree(ph2c); 329 kfree(ph2c);
330 return _FAIL; 330 return _FAIL;
331 } 331 }
@@ -342,10 +342,10 @@ u8 r8712_setfwra_cmd(struct _adapter *padapter, u8 type)
342 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 342 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
343 343
344 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 344 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
345 if (ph2c == NULL) 345 if (!ph2c)
346 return _FAIL; 346 return _FAIL;
347 pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC); 347 pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
348 if (pwriteptmparm == NULL) { 348 if (!pwriteptmparm) {
349 kfree(ph2c); 349 kfree(ph2c);
350 return _FAIL; 350 return _FAIL;
351 } 351 }
@@ -362,10 +362,10 @@ u8 r8712_setrfreg_cmd(struct _adapter *padapter, u8 offset, u32 val)
362 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 362 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
363 363
364 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 364 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
365 if (ph2c == NULL) 365 if (!ph2c)
366 return _FAIL; 366 return _FAIL;
367 pwriterfparm = kmalloc(sizeof(*pwriterfparm), GFP_ATOMIC); 367 pwriterfparm = kmalloc(sizeof(*pwriterfparm), GFP_ATOMIC);
368 if (pwriterfparm == NULL) { 368 if (!pwriterfparm) {
369 kfree(ph2c); 369 kfree(ph2c);
370 return _FAIL; 370 return _FAIL;
371 } 371 }
@@ -383,10 +383,10 @@ u8 r8712_getrfreg_cmd(struct _adapter *padapter, u8 offset, u8 *pval)
383 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 383 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
384 384
385 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 385 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
386 if (ph2c == NULL) 386 if (!ph2c)
387 return _FAIL; 387 return _FAIL;
388 prdrfparm = kmalloc(sizeof(*prdrfparm), GFP_ATOMIC); 388 prdrfparm = kmalloc(sizeof(*prdrfparm), GFP_ATOMIC);
389 if (prdrfparm == NULL) { 389 if (!prdrfparm) {
390 kfree(ph2c); 390 kfree(ph2c);
391 return _FAIL; 391 return _FAIL;
392 } 392 }
@@ -427,7 +427,7 @@ u8 r8712_createbss_cmd(struct _adapter *padapter)
427 427
428 padapter->ledpriv.LedControlHandler(padapter, LED_CTL_START_TO_LINK); 428 padapter->ledpriv.LedControlHandler(padapter, LED_CTL_START_TO_LINK);
429 pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC); 429 pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
430 if (pcmd == NULL) 430 if (!pcmd)
431 return _FAIL; 431 return _FAIL;
432 INIT_LIST_HEAD(&pcmd->list); 432 INIT_LIST_HEAD(&pcmd->list);
433 pcmd->cmdcode = _CreateBss_CMD_; 433 pcmd->cmdcode = _CreateBss_CMD_;
@@ -457,7 +457,7 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
457 457
458 padapter->ledpriv.LedControlHandler(padapter, LED_CTL_START_TO_LINK); 458 padapter->ledpriv.LedControlHandler(padapter, LED_CTL_START_TO_LINK);
459 pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC); 459 pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
460 if (pcmd == NULL) 460 if (!pcmd)
461 return _FAIL; 461 return _FAIL;
462 462
463 /* for hidden ap to set fw_state here */ 463 /* for hidden ap to set fw_state here */
@@ -587,10 +587,10 @@ u8 r8712_disassoc_cmd(struct _adapter *padapter) /* for sta_mode */
587 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 587 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
588 588
589 pdisconnect_cmd = kmalloc(sizeof(*pdisconnect_cmd), GFP_ATOMIC); 589 pdisconnect_cmd = kmalloc(sizeof(*pdisconnect_cmd), GFP_ATOMIC);
590 if (pdisconnect_cmd == NULL) 590 if (!pdisconnect_cmd)
591 return _FAIL; 591 return _FAIL;
592 pdisconnect = kmalloc(sizeof(*pdisconnect), GFP_ATOMIC); 592 pdisconnect = kmalloc(sizeof(*pdisconnect), GFP_ATOMIC);
593 if (pdisconnect == NULL) { 593 if (!pdisconnect) {
594 kfree(pdisconnect_cmd); 594 kfree(pdisconnect_cmd);
595 return _FAIL; 595 return _FAIL;
596 } 596 }
@@ -609,10 +609,10 @@ u8 r8712_setopmode_cmd(struct _adapter *padapter,
609 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 609 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
610 610
611 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 611 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
612 if (ph2c == NULL) 612 if (!ph2c)
613 return _FAIL; 613 return _FAIL;
614 psetop = kmalloc(sizeof(*psetop), GFP_ATOMIC); 614 psetop = kmalloc(sizeof(*psetop), GFP_ATOMIC);
615 if (psetop == NULL) { 615 if (!psetop) {
616 kfree(ph2c); 616 kfree(ph2c);
617 return _FAIL; 617 return _FAIL;
618 } 618 }
@@ -633,15 +633,15 @@ u8 r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
633 struct sta_info *sta = (struct sta_info *)psta; 633 struct sta_info *sta = (struct sta_info *)psta;
634 634
635 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 635 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
636 if (ph2c == NULL) 636 if (!ph2c)
637 return _FAIL; 637 return _FAIL;
638 psetstakey_para = kmalloc(sizeof(*psetstakey_para), GFP_ATOMIC); 638 psetstakey_para = kmalloc(sizeof(*psetstakey_para), GFP_ATOMIC);
639 if (psetstakey_para == NULL) { 639 if (!psetstakey_para) {
640 kfree(ph2c); 640 kfree(ph2c);
641 return _FAIL; 641 return _FAIL;
642 } 642 }
643 psetstakey_rsp = kmalloc(sizeof(*psetstakey_rsp), GFP_ATOMIC); 643 psetstakey_rsp = kmalloc(sizeof(*psetstakey_rsp), GFP_ATOMIC);
644 if (psetstakey_rsp == NULL) { 644 if (!psetstakey_rsp) {
645 kfree(ph2c); 645 kfree(ph2c);
646 kfree(psetstakey_para); 646 kfree(psetstakey_para);
647 return _FAIL; 647 return _FAIL;
@@ -673,10 +673,10 @@ u8 r8712_setrfintfs_cmd(struct _adapter *padapter, u8 mode)
673 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 673 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
674 674
675 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 675 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
676 if (ph2c == NULL) 676 if (!ph2c)
677 return _FAIL; 677 return _FAIL;
678 psetrfintfsparm = kmalloc(sizeof(*psetrfintfsparm), GFP_ATOMIC); 678 psetrfintfsparm = kmalloc(sizeof(*psetrfintfsparm), GFP_ATOMIC);
679 if (psetrfintfsparm == NULL) { 679 if (!psetrfintfsparm) {
680 kfree(ph2c); 680 kfree(ph2c);
681 return _FAIL; 681 return _FAIL;
682 } 682 }
@@ -695,10 +695,10 @@ u8 r8712_setrttbl_cmd(struct _adapter *padapter,
695 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 695 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
696 696
697 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 697 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
698 if (ph2c == NULL) 698 if (!ph2c)
699 return _FAIL; 699 return _FAIL;
700 psetrttblparm = kmalloc(sizeof(*psetrttblparm), GFP_ATOMIC); 700 psetrttblparm = kmalloc(sizeof(*psetrttblparm), GFP_ATOMIC);
701 if (psetrttblparm == NULL) { 701 if (!psetrttblparm) {
702 kfree(ph2c); 702 kfree(ph2c);
703 return _FAIL; 703 return _FAIL;
704 } 704 }
@@ -716,10 +716,10 @@ u8 r8712_setMacAddr_cmd(struct _adapter *padapter, u8 *mac_addr)
716 struct SetMacAddr_param *psetMacAddr_para; 716 struct SetMacAddr_param *psetMacAddr_para;
717 717
718 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 718 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
719 if (ph2c == NULL) 719 if (!ph2c)
720 return _FAIL; 720 return _FAIL;
721 psetMacAddr_para = kmalloc(sizeof(*psetMacAddr_para), GFP_ATOMIC); 721 psetMacAddr_para = kmalloc(sizeof(*psetMacAddr_para), GFP_ATOMIC);
722 if (psetMacAddr_para == NULL) { 722 if (!psetMacAddr_para) {
723 kfree(ph2c); 723 kfree(ph2c);
724 return _FAIL; 724 return _FAIL;
725 } 725 }
@@ -738,15 +738,15 @@ u8 r8712_setassocsta_cmd(struct _adapter *padapter, u8 *mac_addr)
738 struct set_assocsta_rsp *psetassocsta_rsp = NULL; 738 struct set_assocsta_rsp *psetassocsta_rsp = NULL;
739 739
740 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 740 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
741 if (ph2c == NULL) 741 if (!ph2c)
742 return _FAIL; 742 return _FAIL;
743 psetassocsta_para = kmalloc(sizeof(*psetassocsta_para), GFP_ATOMIC); 743 psetassocsta_para = kmalloc(sizeof(*psetassocsta_para), GFP_ATOMIC);
744 if (psetassocsta_para == NULL) { 744 if (!psetassocsta_para) {
745 kfree(ph2c); 745 kfree(ph2c);
746 return _FAIL; 746 return _FAIL;
747 } 747 }
748 psetassocsta_rsp = kmalloc(sizeof(*psetassocsta_rsp), GFP_ATOMIC); 748 psetassocsta_rsp = kmalloc(sizeof(*psetassocsta_rsp), GFP_ATOMIC);
749 if (psetassocsta_rsp == NULL) { 749 if (!psetassocsta_rsp) {
750 kfree(ph2c); 750 kfree(ph2c);
751 kfree(psetassocsta_para); 751 kfree(psetassocsta_para);
752 return _FAIL; 752 return _FAIL;
@@ -766,10 +766,10 @@ u8 r8712_addbareq_cmd(struct _adapter *padapter, u8 tid)
766 struct addBaReq_parm *paddbareq_parm; 766 struct addBaReq_parm *paddbareq_parm;
767 767
768 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 768 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
769 if (ph2c == NULL) 769 if (!ph2c)
770 return _FAIL; 770 return _FAIL;
771 paddbareq_parm = kmalloc(sizeof(*paddbareq_parm), GFP_ATOMIC); 771 paddbareq_parm = kmalloc(sizeof(*paddbareq_parm), GFP_ATOMIC);
772 if (paddbareq_parm == NULL) { 772 if (!paddbareq_parm) {
773 kfree(ph2c); 773 kfree(ph2c);
774 return _FAIL; 774 return _FAIL;
775 } 775 }
@@ -787,10 +787,10 @@ u8 r8712_wdg_wk_cmd(struct _adapter *padapter)
787 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 787 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
788 788
789 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 789 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
790 if (ph2c == NULL) 790 if (!ph2c)
791 return _FAIL; 791 return _FAIL;
792 pdrvintcmd_param = kmalloc(sizeof(*pdrvintcmd_param), GFP_ATOMIC); 792 pdrvintcmd_param = kmalloc(sizeof(*pdrvintcmd_param), GFP_ATOMIC);
793 if (pdrvintcmd_param == NULL) { 793 if (!pdrvintcmd_param) {
794 kfree(ph2c); 794 kfree(ph2c);
795 return _FAIL; 795 return _FAIL;
796 } 796 }
@@ -961,10 +961,10 @@ u8 r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
961 struct cmd_priv *pcmdpriv = &adapter->cmdpriv; 961 struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
962 962
963 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); 963 ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
964 if (ph2c == NULL) 964 if (!ph2c)
965 return _FAIL; 965 return _FAIL;
966 param = kzalloc(sizeof(*param), GFP_ATOMIC); 966 param = kzalloc(sizeof(*param), GFP_ATOMIC);
967 if (param == NULL) { 967 if (!param) {
968 kfree(ph2c); 968 kfree(ph2c);
969 return _FAIL; 969 return _FAIL;
970 } 970 }
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 1b9e24900477..e205adf24da2 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -399,7 +399,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
399 if (wep_key_len > 0) { 399 if (wep_key_len > 0) {
400 wep_key_len = wep_key_len <= 5 ? 5 : 13; 400 wep_key_len = wep_key_len <= 5 ? 5 : 13;
401 pwep = kzalloc(sizeof(*pwep), GFP_ATOMIC); 401 pwep = kzalloc(sizeof(*pwep), GFP_ATOMIC);
402 if (pwep == NULL) 402 if (!pwep)
403 return -ENOMEM; 403 return -ENOMEM;
404 pwep->KeyLength = wep_key_len; 404 pwep->KeyLength = wep_key_len;
405 pwep->Length = wep_key_len + 405 pwep->Length = wep_key_len +
@@ -1060,8 +1060,8 @@ static int r8711_wx_set_wap(struct net_device *dev,
1060 while (1) { 1060 while (1) {
1061 if (end_of_queue_search(phead, pmlmepriv->pscanned)) 1061 if (end_of_queue_search(phead, pmlmepriv->pscanned))
1062 break; 1062 break;
1063 pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, 1063 pnetwork = container_of(pmlmepriv->pscanned,
1064 struct wlan_network, list); 1064 struct wlan_network, list);
1065 pmlmepriv->pscanned = pmlmepriv->pscanned->next; 1065 pmlmepriv->pscanned = pmlmepriv->pscanned->next;
1066 dst_bssid = pnetwork->network.MacAddress; 1066 dst_bssid = pnetwork->network.MacAddress;
1067 if (!memcmp(dst_bssid, temp->sa_data, ETH_ALEN)) { 1067 if (!memcmp(dst_bssid, temp->sa_data, ETH_ALEN)) {
@@ -1216,7 +1216,7 @@ static int r8711_wx_get_scan(struct net_device *dev,
1216 ret = -E2BIG; 1216 ret = -E2BIG;
1217 break; 1217 break;
1218 } 1218 }
1219 pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list); 1219 pnetwork = container_of(plist, struct wlan_network, list);
1220 ev = translate_scan(padapter, a, pnetwork, ev, stop); 1220 ev = translate_scan(padapter, a, pnetwork, ev, stop);
1221 plist = plist->next; 1221 plist = plist->next;
1222 } 1222 }
@@ -1271,8 +1271,8 @@ static int r8711_wx_set_essid(struct net_device *dev,
1271 while (1) { 1271 while (1) {
1272 if (end_of_queue_search(phead, pmlmepriv->pscanned)) 1272 if (end_of_queue_search(phead, pmlmepriv->pscanned))
1273 break; 1273 break;
1274 pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, 1274 pnetwork = container_of(pmlmepriv->pscanned,
1275 struct wlan_network, list); 1275 struct wlan_network, list);
1276 pmlmepriv->pscanned = pmlmepriv->pscanned->next; 1276 pmlmepriv->pscanned = pmlmepriv->pscanned->next;
1277 dst_ssid = pnetwork->network.Ssid.Ssid; 1277 dst_ssid = pnetwork->network.Ssid.Ssid;
1278 if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength)) 1278 if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength))
@@ -1793,7 +1793,7 @@ static int r871x_wx_set_enc_ext(struct net_device *dev,
1793 1793
1794 param_len = sizeof(struct ieee_param) + pext->key_len; 1794 param_len = sizeof(struct ieee_param) + pext->key_len;
1795 param = kzalloc(param_len, GFP_ATOMIC); 1795 param = kzalloc(param_len, GFP_ATOMIC);
1796 if (param == NULL) 1796 if (!param)
1797 return -ENOMEM; 1797 return -ENOMEM;
1798 param->cmd = IEEE_CMD_SET_ENCRYPTION; 1798 param->cmd = IEEE_CMD_SET_ENCRYPTION;
1799 eth_broadcast_addr(param->sta_addr); 1799 eth_broadcast_addr(param->sta_addr);
@@ -1986,7 +1986,7 @@ static int r871x_get_ap_info(struct net_device *dev,
1986 while (1) { 1986 while (1) {
1987 if (end_of_queue_search(phead, plist)) 1987 if (end_of_queue_search(phead, plist))
1988 break; 1988 break;
1989 pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list); 1989 pnetwork = container_of(plist, struct wlan_network, list);
1990 if (!mac_pton(data, bssid)) { 1990 if (!mac_pton(data, bssid)) {
1991 netdev_info(dev, "r8712u: Invalid BSSID '%s'.\n", 1991 netdev_info(dev, "r8712u: Invalid BSSID '%s'.\n",
1992 (u8 *)data); 1992 (u8 *)data);
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index f772675ae9cd..56760cda8e89 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -34,12 +34,6 @@
34#include "usb_osintf.h" 34#include "usb_osintf.h"
35#include "usb_ops.h" 35#include "usb_ops.h"
36 36
37#define IS_MAC_ADDRESS_BROADCAST(addr) \
38( \
39 ((addr[0] == 0xff) && (addr[1] == 0xff) && \
40 (addr[2] == 0xff) && (addr[3] == 0xff) && \
41 (addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
42)
43 37
44static u8 validate_ssid(struct ndis_802_11_ssid *ssid) 38static u8 validate_ssid(struct ndis_802_11_ssid *ssid)
45{ 39{
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 62d4ae85af15..772bf9fa9592 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -155,7 +155,7 @@ static struct wlan_network *_r8712_find_network(struct __queue *scanned_queue,
155 phead = &scanned_queue->queue; 155 phead = &scanned_queue->queue;
156 plist = phead->next; 156 plist = phead->next;
157 while (plist != phead) { 157 while (plist != phead) {
158 pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list); 158 pnetwork = container_of(plist, struct wlan_network, list);
159 plist = plist->next; 159 plist = plist->next;
160 if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN)) 160 if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN))
161 break; 161 break;
@@ -176,7 +176,7 @@ static void _free_network_queue(struct _adapter *padapter)
176 phead = &scanned_queue->queue; 176 phead = &scanned_queue->queue;
177 plist = phead->next; 177 plist = phead->next;
178 while (!end_of_queue_search(phead, plist)) { 178 while (!end_of_queue_search(phead, plist)) {
179 pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list); 179 pnetwork = container_of(plist, struct wlan_network, list);
180 plist = plist->next; 180 plist = plist->next;
181 _free_network(pmlmepriv, pnetwork); 181 _free_network(pmlmepriv, pnetwork);
182 } 182 }
@@ -304,7 +304,7 @@ struct wlan_network *r8712_get_oldest_wlan_network(
304 while (1) { 304 while (1) {
305 if (end_of_queue_search(phead, plist) == true) 305 if (end_of_queue_search(phead, plist) == true)
306 break; 306 break;
307 pwlan = LIST_CONTAINOR(plist, struct wlan_network, list); 307 pwlan = container_of(plist, struct wlan_network, list);
308 if (pwlan->fixed != true) { 308 if (pwlan->fixed != true) {
309 if (oldest == NULL || 309 if (oldest == NULL ||
310 time_after((unsigned long)oldest->last_scanned, 310 time_after((unsigned long)oldest->last_scanned,
@@ -390,7 +390,7 @@ static void update_scanned_network(struct _adapter *adapter,
390 if (end_of_queue_search(phead, plist)) 390 if (end_of_queue_search(phead, plist))
391 break; 391 break;
392 392
393 pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list); 393 pnetwork = container_of(plist, struct wlan_network, list);
394 if (is_same_network(&pnetwork->network, target)) 394 if (is_same_network(&pnetwork->network, target))
395 break; 395 break;
396 if ((oldest == ((struct wlan_network *)0)) || 396 if ((oldest == ((struct wlan_network *)0)) ||
@@ -1135,8 +1135,8 @@ int r8712_select_and_join_from_scan(struct mlme_priv *pmlmepriv)
1135 } 1135 }
1136 return _FAIL; 1136 return _FAIL;
1137 } 1137 }
1138 pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, 1138 pnetwork = container_of(pmlmepriv->pscanned,
1139 struct wlan_network, list); 1139 struct wlan_network, list);
1140 if (pnetwork == NULL) 1140 if (pnetwork == NULL)
1141 return _FAIL; 1141 return _FAIL;
1142 pmlmepriv->pscanned = pmlmepriv->pscanned->next; 1142 pmlmepriv->pscanned = pmlmepriv->pscanned->next;
@@ -1205,7 +1205,7 @@ sint r8712_set_auth(struct _adapter *adapter,
1205 return _FAIL; 1205 return _FAIL;
1206 1206
1207 psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_ATOMIC); 1207 psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_ATOMIC);
1208 if (psetauthparm == NULL) { 1208 if (!psetauthparm) {
1209 kfree(pcmd); 1209 kfree(pcmd);
1210 return _FAIL; 1210 return _FAIL;
1211 } 1211 }
@@ -1234,7 +1234,7 @@ sint r8712_set_key(struct _adapter *adapter,
1234 if (!pcmd) 1234 if (!pcmd)
1235 return _FAIL; 1235 return _FAIL;
1236 psetkeyparm = kzalloc(sizeof(*psetkeyparm), GFP_ATOMIC); 1236 psetkeyparm = kzalloc(sizeof(*psetkeyparm), GFP_ATOMIC);
1237 if (psetkeyparm == NULL) { 1237 if (!psetkeyparm) {
1238 ret = _FAIL; 1238 ret = _FAIL;
1239 goto err_free_cmd; 1239 goto err_free_cmd;
1240 } 1240 }
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 616ca3965919..23c143890252 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -142,7 +142,7 @@ void r8712_free_recvframe_queue(struct __queue *pframequeue,
142 phead = &pframequeue->queue; 142 phead = &pframequeue->queue;
143 plist = phead->next; 143 plist = phead->next;
144 while (!end_of_queue_search(phead, plist)) { 144 while (!end_of_queue_search(phead, plist)) {
145 precvframe = LIST_CONTAINOR(plist, union recv_frame, u); 145 precvframe = container_of(plist, union recv_frame, u.list);
146 plist = plist->next; 146 plist = plist->next;
147 r8712_free_recvframe(precvframe, pfree_recv_queue); 147 r8712_free_recvframe(precvframe, pfree_recv_queue);
148 } 148 }
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index e90c00de7499..e11ce2896893 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -216,8 +216,8 @@ void r8712_free_all_stainfo(struct _adapter *padapter)
216 phead = &(pstapriv->sta_hash[index]); 216 phead = &(pstapriv->sta_hash[index]);
217 plist = phead->next; 217 plist = phead->next;
218 while (!end_of_queue_search(phead, plist)) { 218 while (!end_of_queue_search(phead, plist)) {
219 psta = LIST_CONTAINOR(plist, 219 psta = container_of(plist,
220 struct sta_info, hash_list); 220 struct sta_info, hash_list);
221 plist = plist->next; 221 plist = plist->next;
222 if (pbcmc_stainfo != psta) 222 if (pbcmc_stainfo != psta)
223 r8712_free_stainfo(padapter, psta); 223 r8712_free_stainfo(padapter, psta);
@@ -241,7 +241,7 @@ struct sta_info *r8712_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
241 phead = &(pstapriv->sta_hash[index]); 241 phead = &(pstapriv->sta_hash[index]);
242 plist = phead->next; 242 plist = phead->next;
243 while (!end_of_queue_search(phead, plist)) { 243 while (!end_of_queue_search(phead, plist)) {
244 psta = LIST_CONTAINOR(plist, struct sta_info, hash_list); 244 psta = container_of(plist, struct sta_info, hash_list);
245 if ((!memcmp(psta->hwaddr, hwaddr, ETH_ALEN))) { 245 if ((!memcmp(psta->hwaddr, hwaddr, ETH_ALEN))) {
246 /* if found the matched address */ 246 /* if found the matched address */
247 break; 247 break;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index c6d952f5d8f9..99256baafd38 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -848,7 +848,7 @@ void r8712_free_xmitframe_queue(struct xmit_priv *pxmitpriv,
848 phead = &pframequeue->queue; 848 phead = &pframequeue->queue;
849 plist = phead->next; 849 plist = phead->next;
850 while (!end_of_queue_search(phead, plist)) { 850 while (!end_of_queue_search(phead, plist)) {
851 pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list); 851 pxmitframe = container_of(plist, struct xmit_frame, list);
852 plist = plist->next; 852 plist = plist->next;
853 r8712_free_xmitframe(pxmitpriv, pxmitframe); 853 r8712_free_xmitframe(pxmitpriv, pxmitframe);
854 } 854 }
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 454cdf6c7fa1..6f12345709c2 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -504,7 +504,7 @@ int r8712_usbctrl_vendorreq(struct intf_priv *pintfpriv, u8 request, u16 value,
504 u8 *palloc_buf, *pIo_buf; 504 u8 *palloc_buf, *pIo_buf;
505 505
506 palloc_buf = kmalloc((u32)len + 16, GFP_ATOMIC); 506 palloc_buf = kmalloc((u32)len + 16, GFP_ATOMIC);
507 if (palloc_buf == NULL) 507 if (!palloc_buf)
508 return -ENOMEM; 508 return -ENOMEM;
509 pIo_buf = palloc_buf + 16 - ((addr_t)(palloc_buf) & 0x0f); 509 pIo_buf = palloc_buf + 16 - ((addr_t)(palloc_buf) & 0x0f);
510 if (requesttype == 0x01) { 510 if (requesttype == 0x01) {
diff --git a/drivers/staging/rtl8723au/Kconfig b/drivers/staging/rtl8723au/Kconfig
index 435f3594dabe..277c1ab69317 100644
--- a/drivers/staging/rtl8723au/Kconfig
+++ b/drivers/staging/rtl8723au/Kconfig
@@ -1,5 +1,5 @@
1config R8723AU 1config R8723AU
2 tristate "Realtek RTL8723AU Wireless LAN NIC driver" 2 tristate "Realtek RTL8723AU Wireless LAN NIC driver (deprecated)"
3 depends on USB && WLAN && RFKILL 3 depends on USB && WLAN && RFKILL
4 select WIRELESS_EXT 4 select WIRELESS_EXT
5 select WEXT_PRIV 5 select WEXT_PRIV
@@ -7,7 +7,10 @@ config R8723AU
7 default n 7 default n
8 ---help--- 8 ---help---
9 This option adds the Realtek RTL8723AU USB device such as found in 9 This option adds the Realtek RTL8723AU USB device such as found in
10 the Lenovo Yogi 13 tablet. If built as a module, it will be called r8723au. 10 the Lenovo Yoga 13 tablet. If built as a module, it will be called r8723au.
11
12 Note: This driver is deprecated and scheduled to be removed in a
13 future kernel release. Please use rtl8xxxu instead.
11 14
12if R8723AU 15if R8723AU
13 16
diff --git a/drivers/staging/rtl8723au/core/rtw_ap.c b/drivers/staging/rtl8723au/core/rtw_ap.c
index f68e2770255d..aad686da3cf0 100644
--- a/drivers/staging/rtl8723au/core/rtw_ap.c
+++ b/drivers/staging/rtl8723au/core/rtw_ap.c
@@ -1719,7 +1719,8 @@ void stop_ap_mode23a(struct rtw_adapter *padapter)
1719 } 1719 }
1720 spin_unlock_bh(&pacl_node_q->lock); 1720 spin_unlock_bh(&pacl_node_q->lock);
1721 1721
1722 DBG_8723A("%s, free acl_node_queue, num =%d\n", __func__, pacl_list->num); 1722 DBG_8723A("%s, free acl_node_queue, num =%d\n",
1723 __func__, pacl_list->num);
1723 1724
1724 rtw_sta_flush23a(padapter); 1725 rtw_sta_flush23a(padapter);
1725 1726
diff --git a/drivers/staging/rtl8723au/core/rtw_recv.c b/drivers/staging/rtl8723au/core/rtw_recv.c
index 989ed0726817..150dabc2a58d 100644
--- a/drivers/staging/rtl8723au/core/rtw_recv.c
+++ b/drivers/staging/rtl8723au/core/rtw_recv.c
@@ -211,31 +211,6 @@ u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter)
211 return cnt; 211 return cnt;
212} 212}
213 213
214int rtw_enqueue_recvbuf23a_to_head(struct recv_buf *precvbuf, struct rtw_queue *queue)
215{
216 spin_lock_bh(&queue->lock);
217
218 list_del_init(&precvbuf->list);
219 list_add(&precvbuf->list, get_list_head(queue));
220
221 spin_unlock_bh(&queue->lock);
222
223 return _SUCCESS;
224}
225
226int rtw_enqueue_recvbuf23a(struct recv_buf *precvbuf, struct rtw_queue *queue)
227{
228 unsigned long irqL;
229
230 spin_lock_irqsave(&queue->lock, irqL);
231
232 list_del_init(&precvbuf->list);
233
234 list_add_tail(&precvbuf->list, get_list_head(queue));
235 spin_unlock_irqrestore(&queue->lock, irqL);
236 return _SUCCESS;
237}
238
239struct recv_buf *rtw_dequeue_recvbuf23a (struct rtw_queue *queue) 214struct recv_buf *rtw_dequeue_recvbuf23a (struct rtw_queue *queue)
240{ 215{
241 unsigned long irqL; 216 unsigned long irqL;
diff --git a/drivers/staging/rtl8723au/core/rtw_wlan_util.c b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
index cc2b84be9774..694cf17f82cf 100644
--- a/drivers/staging/rtl8723au/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
@@ -304,21 +304,11 @@ inline void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch)
304 adapter_to_dvobj(adapter)->oper_channel = ch; 304 adapter_to_dvobj(adapter)->oper_channel = ch;
305} 305}
306 306
307inline u8 rtw_get_oper_bw23a(struct rtw_adapter *adapter)
308{
309 return adapter_to_dvobj(adapter)->oper_bwmode;
310}
311
312inline void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw) 307inline void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw)
313{ 308{
314 adapter_to_dvobj(adapter)->oper_bwmode = bw; 309 adapter_to_dvobj(adapter)->oper_bwmode = bw;
315} 310}
316 311
317inline u8 rtw_get_oper_ch23aoffset(struct rtw_adapter *adapter)
318{
319 return adapter_to_dvobj(adapter)->oper_ch_offset;
320}
321
322inline void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset) 312inline void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset)
323{ 313{
324 adapter_to_dvobj(adapter)->oper_ch_offset = offset; 314 adapter_to_dvobj(adapter)->oper_ch_offset = offset;
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
index e81301fcb01d..1ea0af499ce9 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
@@ -1175,8 +1175,6 @@ int InitLLTTable23a(struct rtw_adapter *padapter, u32 boundary)
1175 1175
1176 /* Let last entry point to the start entry of ring buffer */ 1176 /* Let last entry point to the start entry of ring buffer */
1177 status = _LLTWrite(padapter, Last_Entry_Of_TxPktBuf, txpktbuf_bndy); 1177 status = _LLTWrite(padapter, Last_Entry_Of_TxPktBuf, txpktbuf_bndy);
1178 if (status != _SUCCESS)
1179 return status;
1180 1178
1181 return status; 1179 return status;
1182} 1180}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
index ce0d8d894787..24c0ff3d82bc 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
@@ -465,7 +465,7 @@ static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
465 break; 465 break;
466 } 466 }
467 467
468 /*----Restore RFENV control type----*/; 468 /*----Restore RFENV control type----*/
469 switch (eRFPath) { 469 switch (eRFPath) {
470 case RF_PATH_A: 470 case RF_PATH_A:
471 PHY_SetBBReg(Adapter, pPhyReg->rfintfs, 471 PHY_SetBBReg(Adapter, pPhyReg->rfintfs,
diff --git a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
index ea2a6c914d38..0e7d3da91471 100644
--- a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
@@ -461,9 +461,7 @@ void Update23aTblForSoftAP(u8 *bssrateset, u32 bssratelen);
461 461
462u8 rtw_get_oper_ch23a(struct rtw_adapter *adapter); 462u8 rtw_get_oper_ch23a(struct rtw_adapter *adapter);
463void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch); 463void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch);
464u8 rtw_get_oper_bw23a(struct rtw_adapter *adapter);
465void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw); 464void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw);
466u8 rtw_get_oper_ch23aoffset(struct rtw_adapter *adapter);
467void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset); 465void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset);
468 466
469void set_channel_bwmode23a(struct rtw_adapter *padapter, unsigned char channel, 467void set_channel_bwmode23a(struct rtw_adapter *padapter, unsigned char channel,
diff --git a/drivers/staging/rtl8723au/include/rtw_recv.h b/drivers/staging/rtl8723au/include/rtw_recv.h
index dc784be3ddd9..85a5edb450e3 100644
--- a/drivers/staging/rtl8723au/include/rtw_recv.h
+++ b/drivers/staging/rtl8723au/include/rtw_recv.h
@@ -279,8 +279,6 @@ int rtw_enqueue_recvframe23a(struct recv_frame *precvframe, struct rtw_queue *qu
279 279
280u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter); 280u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter);
281 281
282int rtw_enqueue_recvbuf23a_to_head(struct recv_buf *precvbuf, struct rtw_queue *queue);
283int rtw_enqueue_recvbuf23a(struct recv_buf *precvbuf, struct rtw_queue *queue);
284struct recv_buf *rtw_dequeue_recvbuf23a(struct rtw_queue *queue); 282struct recv_buf *rtw_dequeue_recvbuf23a(struct rtw_queue *queue);
285 283
286void rtw_reordering_ctrl_timeout_handler23a(unsigned long pcontext); 284void rtw_reordering_ctrl_timeout_handler23a(unsigned long pcontext);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_intf.c b/drivers/staging/rtl8723au/os_dep/usb_intf.c
index 27b3a5b7d8d4..cf83efffbffd 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_intf.c
@@ -532,6 +532,7 @@ static int rtw_drv_init(struct usb_interface *pusb_intf,
532{ 532{
533 struct rtw_adapter *if1 = NULL; 533 struct rtw_adapter *if1 = NULL;
534 struct dvobj_priv *dvobj; 534 struct dvobj_priv *dvobj;
535 struct usb_device *udev;
535 int status = _FAIL; 536 int status = _FAIL;
536 537
537 RT_TRACE(_module_hci_intfs_c_, _drv_err_, "+rtw_drv_init\n"); 538 RT_TRACE(_module_hci_intfs_c_, _drv_err_, "+rtw_drv_init\n");
@@ -544,6 +545,10 @@ static int rtw_drv_init(struct usb_interface *pusb_intf,
544 goto exit; 545 goto exit;
545 } 546 }
546 547
548 udev = dvobj->pusbdev;
549 dev_warn(&udev->dev, "WARNING: The rtl8723au driver is deprecated!");
550 dev_warn(&udev->dev, "Please use the rtl8xxxu driver for this device!");
551
547 if1 = rtw_usb_if1_init(dvobj, pusb_intf, pdid); 552 if1 = rtw_usb_if1_init(dvobj, pusb_intf, pdid);
548 if (!if1) { 553 if (!if1) {
549 DBG_8723A("rtw_init_primary_adapter Failed!\n"); 554 DBG_8723A("rtw_init_primary_adapter Failed!\n");
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index a780185a3754..0f0cd4a03cd4 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -49,7 +49,7 @@ static int ms_parse_err_code(struct rtsx_chip *chip)
49} 49}
50 50
51static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode, 51static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode,
52 u8 tpc, u8 cnt, u8 cfg) 52 u8 tpc, u8 cnt, u8 cfg)
53{ 53{
54 struct ms_info *ms_card = &chip->ms_card; 54 struct ms_info *ms_card = &chip->ms_card;
55 int retval; 55 int retval;
@@ -2691,7 +2691,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
2691 } 2691 }
2692 2692
2693 if ((log_blk < ms_start_idx[seg_no]) || 2693 if ((log_blk < ms_start_idx[seg_no]) ||
2694 (log_blk >= ms_start_idx[seg_no+1])) { 2694 (log_blk >= ms_start_idx[seg_no + 1])) {
2695 if (!(chip->card_wp & MS_CARD)) { 2695 if (!(chip->card_wp & MS_CARD)) {
2696 retval = ms_erase_block(chip, phy_blk); 2696 retval = ms_erase_block(chip, phy_blk);
2697 if (retval != STATUS_SUCCESS) 2697 if (retval != STATUS_SUCCESS)
@@ -3836,7 +3836,7 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
3836 start_page = (u8)(start_sector & ms_card->page_off); 3836 start_page = (u8)(start_sector & ms_card->page_off);
3837 3837
3838 for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1; seg_no++) { 3838 for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1; seg_no++) {
3839 if (log_blk < ms_start_idx[seg_no+1]) 3839 if (log_blk < ms_start_idx[seg_no + 1])
3840 break; 3840 break;
3841 } 3841 }
3842 3842
@@ -4264,7 +4264,7 @@ int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4264 memset(buf1, 0, 32); 4264 memset(buf1, 0, 32);
4265 rtsx_stor_get_xfer_buf(buf2, min_t(int, 12, scsi_bufflen(srb)), srb); 4265 rtsx_stor_get_xfer_buf(buf2, min_t(int, 12, scsi_bufflen(srb)), srb);
4266 for (i = 0; i < 8; i++) 4266 for (i = 0; i < 8; i++)
4267 buf1[8+i] = buf2[4+i]; 4267 buf1[8 + i] = buf2[4 + i];
4268 4268
4269 retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT, 4269 retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
4270 buf1, 32); 4270 buf1, 32);
@@ -4399,10 +4399,10 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4399 rtsx_stor_get_xfer_buf(buf, bufflen, srb); 4399 rtsx_stor_get_xfer_buf(buf, bufflen, srb);
4400 4400
4401 for (i = 0; i < 8; i++) 4401 for (i = 0; i < 8; i++)
4402 buf[i] = buf[4+i]; 4402 buf[i] = buf[4 + i];
4403 4403
4404 for (i = 0; i < 24; i++) 4404 for (i = 0; i < 24; i++)
4405 buf[8+i] = 0; 4405 buf[8 + i] = 0;
4406 4406
4407 retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 4407 retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA,
4408 32, WAIT_INT, buf, 32); 4408 32, WAIT_INT, buf, 32);
@@ -4511,10 +4511,10 @@ int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4511 rtsx_stor_get_xfer_buf(buf, bufflen, srb); 4511 rtsx_stor_get_xfer_buf(buf, bufflen, srb);
4512 4512
4513 for (i = 0; i < 8; i++) 4513 for (i = 0; i < 8; i++)
4514 buf[i] = buf[4+i]; 4514 buf[i] = buf[4 + i];
4515 4515
4516 for (i = 0; i < 24; i++) 4516 for (i = 0; i < 24; i++)
4517 buf[8+i] = 0; 4517 buf[8 + i] = 0;
4518 4518
4519 retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT, 4519 retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
4520 buf, 32); 4520 buf, 32);
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
index 437436f5dbdd..231833a3045e 100644
--- a/drivers/staging/rts5208/rtsx_card.c
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -628,11 +628,6 @@ void rtsx_init_cards(struct rtsx_chip *chip)
628 } 628 }
629} 629}
630 630
631static inline u8 double_depth(u8 depth)
632{
633 return (depth > 1) ? (depth - 1) : depth;
634}
635
636int switch_ssc_clock(struct rtsx_chip *chip, int clk) 631int switch_ssc_clock(struct rtsx_chip *chip, int clk)
637{ 632{
638 int retval; 633 int retval;
@@ -1184,22 +1179,6 @@ int check_card_wp(struct rtsx_chip *chip, unsigned int lun)
1184 return 0; 1179 return 0;
1185} 1180}
1186 1181
1187int check_card_fail(struct rtsx_chip *chip, unsigned int lun)
1188{
1189 if (chip->card_fail & chip->lun2card[lun])
1190 return 1;
1191
1192 return 0;
1193}
1194
1195int check_card_ejected(struct rtsx_chip *chip, unsigned int lun)
1196{
1197 if (chip->card_ejected & chip->lun2card[lun])
1198 return 1;
1199
1200 return 0;
1201}
1202
1203u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun) 1182u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun)
1204{ 1183{
1205 if ((chip->card_ready & chip->lun2card[lun]) == XD_CARD) 1184 if ((chip->card_ready & chip->lun2card[lun]) == XD_CARD)
diff --git a/drivers/staging/rts5208/rtsx_card.h b/drivers/staging/rts5208/rtsx_card.h
index 8f2cf9a4ec69..56df9a431d6d 100644
--- a/drivers/staging/rts5208/rtsx_card.h
+++ b/drivers/staging/rts5208/rtsx_card.h
@@ -1024,8 +1024,6 @@ int detect_card_cd(struct rtsx_chip *chip, int card);
1024int check_card_exist(struct rtsx_chip *chip, unsigned int lun); 1024int check_card_exist(struct rtsx_chip *chip, unsigned int lun);
1025int check_card_ready(struct rtsx_chip *chip, unsigned int lun); 1025int check_card_ready(struct rtsx_chip *chip, unsigned int lun);
1026int check_card_wp(struct rtsx_chip *chip, unsigned int lun); 1026int check_card_wp(struct rtsx_chip *chip, unsigned int lun);
1027int check_card_fail(struct rtsx_chip *chip, unsigned int lun);
1028int check_card_ejected(struct rtsx_chip *chip, unsigned int lun);
1029void eject_card(struct rtsx_chip *chip, unsigned int lun); 1027void eject_card(struct rtsx_chip *chip, unsigned int lun);
1030u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun); 1028u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun);
1031 1029
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index c0ce659a5aa6..bcc4b666d79f 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -43,14 +43,6 @@ static void rtsx_calibration(struct rtsx_chip *chip)
43 rtsx_write_phy_register(chip, 0x00, 0x0288); 43 rtsx_write_phy_register(chip, 0x00, 0x0288);
44} 44}
45 45
46void rtsx_disable_card_int(struct rtsx_chip *chip)
47{
48 u32 reg = rtsx_readl(chip, RTSX_BIER);
49
50 reg &= ~(XD_INT_EN | SD_INT_EN | MS_INT_EN);
51 rtsx_writel(chip, RTSX_BIER, reg);
52}
53
54void rtsx_enable_card_int(struct rtsx_chip *chip) 46void rtsx_enable_card_int(struct rtsx_chip *chip)
55{ 47{
56 u32 reg = rtsx_readl(chip, RTSX_BIER); 48 u32 reg = rtsx_readl(chip, RTSX_BIER);
@@ -1447,12 +1439,6 @@ delink_stage:
1447 rtsx_delink_stage(chip); 1439 rtsx_delink_stage(chip);
1448} 1440}
1449 1441
1450void rtsx_undo_delink(struct rtsx_chip *chip)
1451{
1452 chip->auto_delink_allowed = 0;
1453 rtsx_write_register(chip, CHANGE_LINK_STATE, 0x0A, 0x00);
1454}
1455
1456/** 1442/**
1457 * rtsx_stop_cmd - stop command transfer and DMA transfer 1443 * rtsx_stop_cmd - stop command transfer and DMA transfer
1458 * @chip: Realtek's card reader chip 1444 * @chip: Realtek's card reader chip
@@ -2000,27 +1986,6 @@ int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
2000 return STATUS_SUCCESS; 1986 return STATUS_SUCCESS;
2001} 1987}
2002 1988
2003int rtsx_check_link_ready(struct rtsx_chip *chip)
2004{
2005 int retval;
2006 u8 val;
2007
2008 retval = rtsx_read_register(chip, IRQSTAT0, &val);
2009 if (retval) {
2010 rtsx_trace(chip);
2011 return retval;
2012 }
2013
2014 dev_dbg(rtsx_dev(chip), "IRQSTAT0: 0x%x\n", val);
2015 if (val & LINK_RDY_INT) {
2016 dev_dbg(rtsx_dev(chip), "Delinked!\n");
2017 rtsx_write_register(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
2018 return STATUS_FAIL;
2019 }
2020
2021 return STATUS_SUCCESS;
2022}
2023
2024static void rtsx_handle_pm_dstate(struct rtsx_chip *chip, u8 dstate) 1989static void rtsx_handle_pm_dstate(struct rtsx_chip *chip, u8 dstate)
2025{ 1990{
2026 u32 ultmp; 1991 u32 ultmp;
diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h
index c295b1eedb44..c08164f3247e 100644
--- a/drivers/staging/rts5208/rtsx_chip.h
+++ b/drivers/staging/rts5208/rtsx_chip.h
@@ -950,7 +950,6 @@ do { \
950int rtsx_force_power_on(struct rtsx_chip *chip, u8 ctl); 950int rtsx_force_power_on(struct rtsx_chip *chip, u8 ctl);
951int rtsx_force_power_down(struct rtsx_chip *chip, u8 ctl); 951int rtsx_force_power_down(struct rtsx_chip *chip, u8 ctl);
952 952
953void rtsx_disable_card_int(struct rtsx_chip *chip);
954void rtsx_enable_card_int(struct rtsx_chip *chip); 953void rtsx_enable_card_int(struct rtsx_chip *chip);
955void rtsx_enable_bus_int(struct rtsx_chip *chip); 954void rtsx_enable_bus_int(struct rtsx_chip *chip);
956void rtsx_disable_bus_int(struct rtsx_chip *chip); 955void rtsx_disable_bus_int(struct rtsx_chip *chip);
@@ -958,7 +957,6 @@ int rtsx_reset_chip(struct rtsx_chip *chip);
958int rtsx_init_chip(struct rtsx_chip *chip); 957int rtsx_init_chip(struct rtsx_chip *chip);
959void rtsx_release_chip(struct rtsx_chip *chip); 958void rtsx_release_chip(struct rtsx_chip *chip);
960void rtsx_polling_func(struct rtsx_chip *chip); 959void rtsx_polling_func(struct rtsx_chip *chip);
961void rtsx_undo_delink(struct rtsx_chip *chip);
962void rtsx_stop_cmd(struct rtsx_chip *chip, int card); 960void rtsx_stop_cmd(struct rtsx_chip *chip, int card);
963int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data); 961int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data);
964int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data); 962int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data);
@@ -975,7 +973,6 @@ int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val);
975int rtsx_write_efuse(struct rtsx_chip *chip, u8 addr, u8 val); 973int rtsx_write_efuse(struct rtsx_chip *chip, u8 addr, u8 val);
976int rtsx_clr_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit); 974int rtsx_clr_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit);
977int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit); 975int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit);
978int rtsx_check_link_ready(struct rtsx_chip *chip);
979void rtsx_enter_ss(struct rtsx_chip *chip); 976void rtsx_enter_ss(struct rtsx_chip *chip);
980void rtsx_exit_ss(struct rtsx_chip *chip); 977void rtsx_exit_ss(struct rtsx_chip *chip);
981int rtsx_pre_handle_interrupt(struct rtsx_chip *chip); 978int rtsx_pre_handle_interrupt(struct rtsx_chip *chip);
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index 87d697623cba..6219e047557e 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -1928,9 +1928,9 @@ static int sd_tuning_rx(struct rtsx_chip *chip)
1928 tuning_cmd = sd_sdr_tuning_rx_cmd; 1928 tuning_cmd = sd_sdr_tuning_rx_cmd;
1929 1929
1930 } else { 1930 } else {
1931 if (CHK_MMC_DDR52(sd_card)) 1931 if (CHK_MMC_DDR52(sd_card)) {
1932 tuning_cmd = mmc_ddr_tunning_rx_cmd; 1932 tuning_cmd = mmc_ddr_tunning_rx_cmd;
1933 else { 1933 } else {
1934 rtsx_trace(chip); 1934 rtsx_trace(chip);
1935 return STATUS_FAIL; 1935 return STATUS_FAIL;
1936 } 1936 }
@@ -2054,9 +2054,9 @@ static int sd_tuning_tx(struct rtsx_chip *chip)
2054 tuning_cmd = sd_sdr_tuning_tx_cmd; 2054 tuning_cmd = sd_sdr_tuning_tx_cmd;
2055 2055
2056 } else { 2056 } else {
2057 if (CHK_MMC_DDR52(sd_card)) 2057 if (CHK_MMC_DDR52(sd_card)) {
2058 tuning_cmd = sd_ddr_tuning_tx_cmd; 2058 tuning_cmd = sd_ddr_tuning_tx_cmd;
2059 else { 2059 } else {
2060 rtsx_trace(chip); 2060 rtsx_trace(chip);
2061 return STATUS_FAIL; 2061 return STATUS_FAIL;
2062 } 2062 }
@@ -2678,9 +2678,9 @@ RTY_SD_RST:
2678 } 2678 }
2679 2679
2680 j++; 2680 j++;
2681 if (j < 3) 2681 if (j < 3) {
2682 goto RTY_SD_RST; 2682 goto RTY_SD_RST;
2683 else { 2683 } else {
2684 rtsx_trace(chip); 2684 rtsx_trace(chip);
2685 return STATUS_FAIL; 2685 return STATUS_FAIL;
2686 } 2686 }
@@ -2690,9 +2690,9 @@ RTY_SD_RST:
2690 SD_RSP_TYPE_R3, rsp, 5); 2690 SD_RSP_TYPE_R3, rsp, 5);
2691 if (retval != STATUS_SUCCESS) { 2691 if (retval != STATUS_SUCCESS) {
2692 k++; 2692 k++;
2693 if (k < 3) 2693 if (k < 3) {
2694 goto RTY_SD_RST; 2694 goto RTY_SD_RST;
2695 else { 2695 } else {
2696 rtsx_trace(chip); 2696 rtsx_trace(chip);
2697 return STATUS_FAIL; 2697 return STATUS_FAIL;
2698 } 2698 }
diff --git a/drivers/staging/skein/skein_api.c b/drivers/staging/skein/skein_api.c
index 36f849fbba5e..cab26e736111 100644
--- a/drivers/staging/skein/skein_api.c
+++ b/drivers/staging/skein/skein_api.c
@@ -165,7 +165,6 @@ int skein_update(struct skein_ctx *ctx, const u8 *msg,
165 break; 165 break;
166 } 166 }
167 return ret; 167 return ret;
168
169} 168}
170 169
171int skein_update_bits(struct skein_ctx *ctx, const u8 *msg, 170int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
@@ -210,7 +209,7 @@ int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
210 /* internal sanity check: there IS a partial byte in the buffer! */ 209 /* internal sanity check: there IS a partial byte in the buffer! */
211 skein_assert(length != 0); 210 skein_assert(length != 0);
212 /* partial byte bit mask */ 211 /* partial byte bit mask */
213 mask = (u8) (1u << (7 - (msg_bit_cnt & 7))); 212 mask = (u8)(1u << (7 - (msg_bit_cnt & 7)));
214 /* apply bit padding on final byte (in the buffer) */ 213 /* apply bit padding on final byte (in the buffer) */
215 up[length - 1] = (u8)((up[length - 1] & (0 - mask)) | mask); 214 up[length - 1] = (u8)((up[length - 1] & (0 - mask)) | mask);
216 215
diff --git a/drivers/staging/skein/skein_base.c b/drivers/staging/skein/skein_base.c
index 25a01ca76953..c24a57396483 100644
--- a/drivers/staging/skein/skein_base.c
+++ b/drivers/staging/skein/skein_base.c
@@ -58,7 +58,7 @@ int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len)
58 cfg.w[1] = skein_swap64(hash_bit_len); 58 cfg.w[1] = skein_swap64(hash_bit_len);
59 cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL); 59 cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
60 /* zero pad config block */ 60 /* zero pad config block */
61 memset(&cfg.w[3], 0, sizeof(cfg) - 3*sizeof(cfg.w[0])); 61 memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
62 62
63 /* compute the initial chaining values from config block */ 63 /* compute the initial chaining values from config block */
64 /* zero the chaining variables */ 64 /* zero the chaining variables */
@@ -98,7 +98,7 @@ int skein_256_init_ext(struct skein_256_ctx *ctx, size_t hash_bit_len,
98 skein_assert(sizeof(cfg.b) >= sizeof(ctx->x)); 98 skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
99 /* do a mini-Init right here */ 99 /* do a mini-Init right here */
100 /* set output hash bit count = state size */ 100 /* set output hash bit count = state size */
101 ctx->h.hash_bit_len = 8*sizeof(ctx->x); 101 ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
102 /* set tweaks: T0 = 0; T1 = KEY type */ 102 /* set tweaks: T0 = 0; T1 = KEY type */
103 skein_start_new_type(ctx, KEY); 103 skein_start_new_type(ctx, KEY);
104 /* zero the initial chaining variables */ 104 /* zero the initial chaining variables */
@@ -171,7 +171,7 @@ int skein_256_update(struct skein_256_ctx *ctx, const u8 *msg,
171 */ 171 */
172 if (msg_byte_cnt > SKEIN_256_BLOCK_BYTES) { 172 if (msg_byte_cnt > SKEIN_256_BLOCK_BYTES) {
173 /* number of full blocks to process */ 173 /* number of full blocks to process */
174 n = (msg_byte_cnt-1) / SKEIN_256_BLOCK_BYTES; 174 n = (msg_byte_cnt - 1) / SKEIN_256_BLOCK_BYTES;
175 skein_256_process_block(ctx, msg, n, 175 skein_256_process_block(ctx, msg, n,
176 SKEIN_256_BLOCK_BYTES); 176 SKEIN_256_BLOCK_BYTES);
177 msg_byte_cnt -= n * SKEIN_256_BLOCK_BYTES; 177 msg_byte_cnt -= n * SKEIN_256_BLOCK_BYTES;
@@ -205,7 +205,7 @@ int skein_256_final(struct skein_256_ctx *ctx, u8 *hash_val)
205 /* zero pad b[] if necessary */ 205 /* zero pad b[] if necessary */
206 if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES) 206 if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES)
207 memset(&ctx->b[ctx->h.b_cnt], 0, 207 memset(&ctx->b[ctx->h.b_cnt], 0,
208 SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt); 208 SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
209 209
210 /* process the final block */ 210 /* process the final block */
211 skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt); 211 skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -219,19 +219,19 @@ int skein_256_final(struct skein_256_ctx *ctx, u8 *hash_val)
219 memset(ctx->b, 0, sizeof(ctx->b)); 219 memset(ctx->b, 0, sizeof(ctx->b));
220 /* keep a local copy of counter mode "key" */ 220 /* keep a local copy of counter mode "key" */
221 memcpy(x, ctx->x, sizeof(x)); 221 memcpy(x, ctx->x, sizeof(x));
222 for (i = 0; i*SKEIN_256_BLOCK_BYTES < byte_cnt; i++) { 222 for (i = 0; i * SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
223 /* build the counter block */ 223 /* build the counter block */
224 ((u64 *)ctx->b)[0] = skein_swap64((u64) i); 224 ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
225 skein_start_new_type(ctx, OUT_FINAL); 225 skein_start_new_type(ctx, OUT_FINAL);
226 /* run "counter mode" */ 226 /* run "counter mode" */
227 skein_256_process_block(ctx, ctx->b, 1, sizeof(u64)); 227 skein_256_process_block(ctx, ctx->b, 1, sizeof(u64));
228 /* number of output bytes left to go */ 228 /* number of output bytes left to go */
229 n = byte_cnt - i*SKEIN_256_BLOCK_BYTES; 229 n = byte_cnt - i * SKEIN_256_BLOCK_BYTES;
230 if (n >= SKEIN_256_BLOCK_BYTES) 230 if (n >= SKEIN_256_BLOCK_BYTES)
231 n = SKEIN_256_BLOCK_BYTES; 231 n = SKEIN_256_BLOCK_BYTES;
232 /* "output" the ctr mode bytes */ 232 /* "output" the ctr mode bytes */
233 skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x, 233 skein_put64_lsb_first(hash_val + (i * SKEIN_256_BLOCK_BYTES),
234 n); 234 ctx->x, n);
235 /* restore the counter mode key for next time */ 235 /* restore the counter mode key for next time */
236 memcpy(ctx->x, x, sizeof(x)); 236 memcpy(ctx->x, x, sizeof(x));
237 } 237 }
@@ -282,7 +282,7 @@ int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len)
282 cfg.w[1] = skein_swap64(hash_bit_len); 282 cfg.w[1] = skein_swap64(hash_bit_len);
283 cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL); 283 cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
284 /* zero pad config block */ 284 /* zero pad config block */
285 memset(&cfg.w[3], 0, sizeof(cfg) - 3*sizeof(cfg.w[0])); 285 memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
286 286
287 /* compute the initial chaining values from config block */ 287 /* compute the initial chaining values from config block */
288 /* zero the chaining variables */ 288 /* zero the chaining variables */
@@ -326,7 +326,7 @@ int skein_512_init_ext(struct skein_512_ctx *ctx, size_t hash_bit_len,
326 skein_assert(sizeof(cfg.b) >= sizeof(ctx->x)); 326 skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
327 /* do a mini-Init right here */ 327 /* do a mini-Init right here */
328 /* set output hash bit count = state size */ 328 /* set output hash bit count = state size */
329 ctx->h.hash_bit_len = 8*sizeof(ctx->x); 329 ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
330 /* set tweaks: T0 = 0; T1 = KEY type */ 330 /* set tweaks: T0 = 0; T1 = KEY type */
331 skein_start_new_type(ctx, KEY); 331 skein_start_new_type(ctx, KEY);
332 /* zero the initial chaining variables */ 332 /* zero the initial chaining variables */
@@ -398,7 +398,7 @@ int skein_512_update(struct skein_512_ctx *ctx, const u8 *msg,
398 */ 398 */
399 if (msg_byte_cnt > SKEIN_512_BLOCK_BYTES) { 399 if (msg_byte_cnt > SKEIN_512_BLOCK_BYTES) {
400 /* number of full blocks to process */ 400 /* number of full blocks to process */
401 n = (msg_byte_cnt-1) / SKEIN_512_BLOCK_BYTES; 401 n = (msg_byte_cnt - 1) / SKEIN_512_BLOCK_BYTES;
402 skein_512_process_block(ctx, msg, n, 402 skein_512_process_block(ctx, msg, n,
403 SKEIN_512_BLOCK_BYTES); 403 SKEIN_512_BLOCK_BYTES);
404 msg_byte_cnt -= n * SKEIN_512_BLOCK_BYTES; 404 msg_byte_cnt -= n * SKEIN_512_BLOCK_BYTES;
@@ -432,7 +432,7 @@ int skein_512_final(struct skein_512_ctx *ctx, u8 *hash_val)
432 /* zero pad b[] if necessary */ 432 /* zero pad b[] if necessary */
433 if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES) 433 if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES)
434 memset(&ctx->b[ctx->h.b_cnt], 0, 434 memset(&ctx->b[ctx->h.b_cnt], 0,
435 SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt); 435 SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
436 436
437 /* process the final block */ 437 /* process the final block */
438 skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt); 438 skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -446,19 +446,19 @@ int skein_512_final(struct skein_512_ctx *ctx, u8 *hash_val)
446 memset(ctx->b, 0, sizeof(ctx->b)); 446 memset(ctx->b, 0, sizeof(ctx->b));
447 /* keep a local copy of counter mode "key" */ 447 /* keep a local copy of counter mode "key" */
448 memcpy(x, ctx->x, sizeof(x)); 448 memcpy(x, ctx->x, sizeof(x));
449 for (i = 0; i*SKEIN_512_BLOCK_BYTES < byte_cnt; i++) { 449 for (i = 0; i * SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
450 /* build the counter block */ 450 /* build the counter block */
451 ((u64 *)ctx->b)[0] = skein_swap64((u64) i); 451 ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
452 skein_start_new_type(ctx, OUT_FINAL); 452 skein_start_new_type(ctx, OUT_FINAL);
453 /* run "counter mode" */ 453 /* run "counter mode" */
454 skein_512_process_block(ctx, ctx->b, 1, sizeof(u64)); 454 skein_512_process_block(ctx, ctx->b, 1, sizeof(u64));
455 /* number of output bytes left to go */ 455 /* number of output bytes left to go */
456 n = byte_cnt - i*SKEIN_512_BLOCK_BYTES; 456 n = byte_cnt - i * SKEIN_512_BLOCK_BYTES;
457 if (n >= SKEIN_512_BLOCK_BYTES) 457 if (n >= SKEIN_512_BLOCK_BYTES)
458 n = SKEIN_512_BLOCK_BYTES; 458 n = SKEIN_512_BLOCK_BYTES;
459 /* "output" the ctr mode bytes */ 459 /* "output" the ctr mode bytes */
460 skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x, 460 skein_put64_lsb_first(hash_val + (i * SKEIN_512_BLOCK_BYTES),
461 n); 461 ctx->x, n);
462 /* restore the counter mode key for next time */ 462 /* restore the counter mode key for next time */
463 memcpy(ctx->x, x, sizeof(x)); 463 memcpy(ctx->x, x, sizeof(x));
464 } 464 }
@@ -506,7 +506,7 @@ int skein_1024_init(struct skein_1024_ctx *ctx, size_t hash_bit_len)
506 cfg.w[1] = skein_swap64(hash_bit_len); 506 cfg.w[1] = skein_swap64(hash_bit_len);
507 cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL); 507 cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
508 /* zero pad config block */ 508 /* zero pad config block */
509 memset(&cfg.w[3], 0, sizeof(cfg) - 3*sizeof(cfg.w[0])); 509 memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
510 510
511 /* compute the initial chaining values from config block */ 511 /* compute the initial chaining values from config block */
512 /* zero the chaining variables */ 512 /* zero the chaining variables */
@@ -547,7 +547,7 @@ int skein_1024_init_ext(struct skein_1024_ctx *ctx, size_t hash_bit_len,
547 skein_assert(sizeof(cfg.b) >= sizeof(ctx->x)); 547 skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
548 /* do a mini-Init right here */ 548 /* do a mini-Init right here */
549 /* set output hash bit count = state size */ 549 /* set output hash bit count = state size */
550 ctx->h.hash_bit_len = 8*sizeof(ctx->x); 550 ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
551 /* set tweaks: T0 = 0; T1 = KEY type */ 551 /* set tweaks: T0 = 0; T1 = KEY type */
552 skein_start_new_type(ctx, KEY); 552 skein_start_new_type(ctx, KEY);
553 /* zero the initial chaining variables */ 553 /* zero the initial chaining variables */
@@ -620,7 +620,7 @@ int skein_1024_update(struct skein_1024_ctx *ctx, const u8 *msg,
620 */ 620 */
621 if (msg_byte_cnt > SKEIN_1024_BLOCK_BYTES) { 621 if (msg_byte_cnt > SKEIN_1024_BLOCK_BYTES) {
622 /* number of full blocks to process */ 622 /* number of full blocks to process */
623 n = (msg_byte_cnt-1) / SKEIN_1024_BLOCK_BYTES; 623 n = (msg_byte_cnt - 1) / SKEIN_1024_BLOCK_BYTES;
624 skein_1024_process_block(ctx, msg, n, 624 skein_1024_process_block(ctx, msg, n,
625 SKEIN_1024_BLOCK_BYTES); 625 SKEIN_1024_BLOCK_BYTES);
626 msg_byte_cnt -= n * SKEIN_1024_BLOCK_BYTES; 626 msg_byte_cnt -= n * SKEIN_1024_BLOCK_BYTES;
@@ -654,7 +654,7 @@ int skein_1024_final(struct skein_1024_ctx *ctx, u8 *hash_val)
654 /* zero pad b[] if necessary */ 654 /* zero pad b[] if necessary */
655 if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES) 655 if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES)
656 memset(&ctx->b[ctx->h.b_cnt], 0, 656 memset(&ctx->b[ctx->h.b_cnt], 0,
657 SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt); 657 SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
658 658
659 /* process the final block */ 659 /* process the final block */
660 skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt); 660 skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -668,19 +668,19 @@ int skein_1024_final(struct skein_1024_ctx *ctx, u8 *hash_val)
668 memset(ctx->b, 0, sizeof(ctx->b)); 668 memset(ctx->b, 0, sizeof(ctx->b));
669 /* keep a local copy of counter mode "key" */ 669 /* keep a local copy of counter mode "key" */
670 memcpy(x, ctx->x, sizeof(x)); 670 memcpy(x, ctx->x, sizeof(x));
671 for (i = 0; i*SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) { 671 for (i = 0; i * SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
672 /* build the counter block */ 672 /* build the counter block */
673 ((u64 *)ctx->b)[0] = skein_swap64((u64) i); 673 ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
674 skein_start_new_type(ctx, OUT_FINAL); 674 skein_start_new_type(ctx, OUT_FINAL);
675 /* run "counter mode" */ 675 /* run "counter mode" */
676 skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64)); 676 skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64));
677 /* number of output bytes left to go */ 677 /* number of output bytes left to go */
678 n = byte_cnt - i*SKEIN_1024_BLOCK_BYTES; 678 n = byte_cnt - i * SKEIN_1024_BLOCK_BYTES;
679 if (n >= SKEIN_1024_BLOCK_BYTES) 679 if (n >= SKEIN_1024_BLOCK_BYTES)
680 n = SKEIN_1024_BLOCK_BYTES; 680 n = SKEIN_1024_BLOCK_BYTES;
681 /* "output" the ctr mode bytes */ 681 /* "output" the ctr mode bytes */
682 skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x, 682 skein_put64_lsb_first(hash_val + (i * SKEIN_1024_BLOCK_BYTES),
683 n); 683 ctx->x, n);
684 /* restore the counter mode key for next time */ 684 /* restore the counter mode key for next time */
685 memcpy(ctx->x, x, sizeof(x)); 685 memcpy(ctx->x, x, sizeof(x));
686 } 686 }
@@ -702,7 +702,7 @@ int skein_256_final_pad(struct skein_256_ctx *ctx, u8 *hash_val)
702 /* zero pad b[] if necessary */ 702 /* zero pad b[] if necessary */
703 if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES) 703 if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES)
704 memset(&ctx->b[ctx->h.b_cnt], 0, 704 memset(&ctx->b[ctx->h.b_cnt], 0,
705 SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt); 705 SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
706 /* process the final block */ 706 /* process the final block */
707 skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt); 707 skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
708 708
@@ -724,7 +724,7 @@ int skein_512_final_pad(struct skein_512_ctx *ctx, u8 *hash_val)
724 /* zero pad b[] if necessary */ 724 /* zero pad b[] if necessary */
725 if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES) 725 if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES)
726 memset(&ctx->b[ctx->h.b_cnt], 0, 726 memset(&ctx->b[ctx->h.b_cnt], 0,
727 SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt); 727 SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
728 /* process the final block */ 728 /* process the final block */
729 skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt); 729 skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
730 730
@@ -746,7 +746,7 @@ int skein_1024_final_pad(struct skein_1024_ctx *ctx, u8 *hash_val)
746 /* zero pad b[] if necessary */ 746 /* zero pad b[] if necessary */
747 if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES) 747 if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES)
748 memset(&ctx->b[ctx->h.b_cnt], 0, 748 memset(&ctx->b[ctx->h.b_cnt], 0,
749 SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt); 749 SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
750 /* process the final block */ 750 /* process the final block */
751 skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt); 751 skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
752 752
@@ -775,19 +775,19 @@ int skein_256_output(struct skein_256_ctx *ctx, u8 *hash_val)
775 memset(ctx->b, 0, sizeof(ctx->b)); 775 memset(ctx->b, 0, sizeof(ctx->b));
776 /* keep a local copy of counter mode "key" */ 776 /* keep a local copy of counter mode "key" */
777 memcpy(x, ctx->x, sizeof(x)); 777 memcpy(x, ctx->x, sizeof(x));
778 for (i = 0; i*SKEIN_256_BLOCK_BYTES < byte_cnt; i++) { 778 for (i = 0; i * SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
779 /* build the counter block */ 779 /* build the counter block */
780 ((u64 *)ctx->b)[0] = skein_swap64((u64) i); 780 ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
781 skein_start_new_type(ctx, OUT_FINAL); 781 skein_start_new_type(ctx, OUT_FINAL);
782 /* run "counter mode" */ 782 /* run "counter mode" */
783 skein_256_process_block(ctx, ctx->b, 1, sizeof(u64)); 783 skein_256_process_block(ctx, ctx->b, 1, sizeof(u64));
784 /* number of output bytes left to go */ 784 /* number of output bytes left to go */
785 n = byte_cnt - i*SKEIN_256_BLOCK_BYTES; 785 n = byte_cnt - i * SKEIN_256_BLOCK_BYTES;
786 if (n >= SKEIN_256_BLOCK_BYTES) 786 if (n >= SKEIN_256_BLOCK_BYTES)
787 n = SKEIN_256_BLOCK_BYTES; 787 n = SKEIN_256_BLOCK_BYTES;
788 /* "output" the ctr mode bytes */ 788 /* "output" the ctr mode bytes */
789 skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x, 789 skein_put64_lsb_first(hash_val + (i * SKEIN_256_BLOCK_BYTES),
790 n); 790 ctx->x, n);
791 /* restore the counter mode key for next time */ 791 /* restore the counter mode key for next time */
792 memcpy(ctx->x, x, sizeof(x)); 792 memcpy(ctx->x, x, sizeof(x));
793 } 793 }
@@ -812,19 +812,19 @@ int skein_512_output(struct skein_512_ctx *ctx, u8 *hash_val)
812 memset(ctx->b, 0, sizeof(ctx->b)); 812 memset(ctx->b, 0, sizeof(ctx->b));
813 /* keep a local copy of counter mode "key" */ 813 /* keep a local copy of counter mode "key" */
814 memcpy(x, ctx->x, sizeof(x)); 814 memcpy(x, ctx->x, sizeof(x));
815 for (i = 0; i*SKEIN_512_BLOCK_BYTES < byte_cnt; i++) { 815 for (i = 0; i * SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
816 /* build the counter block */ 816 /* build the counter block */
817 ((u64 *)ctx->b)[0] = skein_swap64((u64) i); 817 ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
818 skein_start_new_type(ctx, OUT_FINAL); 818 skein_start_new_type(ctx, OUT_FINAL);
819 /* run "counter mode" */ 819 /* run "counter mode" */
820 skein_512_process_block(ctx, ctx->b, 1, sizeof(u64)); 820 skein_512_process_block(ctx, ctx->b, 1, sizeof(u64));
821 /* number of output bytes left to go */ 821 /* number of output bytes left to go */
822 n = byte_cnt - i*SKEIN_512_BLOCK_BYTES; 822 n = byte_cnt - i * SKEIN_512_BLOCK_BYTES;
823 if (n >= SKEIN_512_BLOCK_BYTES) 823 if (n >= SKEIN_512_BLOCK_BYTES)
824 n = SKEIN_512_BLOCK_BYTES; 824 n = SKEIN_512_BLOCK_BYTES;
825 /* "output" the ctr mode bytes */ 825 /* "output" the ctr mode bytes */
826 skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x, 826 skein_put64_lsb_first(hash_val + (i * SKEIN_512_BLOCK_BYTES),
827 n); 827 ctx->x, n);
828 /* restore the counter mode key for next time */ 828 /* restore the counter mode key for next time */
829 memcpy(ctx->x, x, sizeof(x)); 829 memcpy(ctx->x, x, sizeof(x));
830 } 830 }
@@ -849,19 +849,19 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val)
849 memset(ctx->b, 0, sizeof(ctx->b)); 849 memset(ctx->b, 0, sizeof(ctx->b));
850 /* keep a local copy of counter mode "key" */ 850 /* keep a local copy of counter mode "key" */
851 memcpy(x, ctx->x, sizeof(x)); 851 memcpy(x, ctx->x, sizeof(x));
852 for (i = 0; i*SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) { 852 for (i = 0; i * SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
853 /* build the counter block */ 853 /* build the counter block */
854 ((u64 *)ctx->b)[0] = skein_swap64((u64) i); 854 ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
855 skein_start_new_type(ctx, OUT_FINAL); 855 skein_start_new_type(ctx, OUT_FINAL);
856 /* run "counter mode" */ 856 /* run "counter mode" */
857 skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64)); 857 skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64));
858 /* number of output bytes left to go */ 858 /* number of output bytes left to go */
859 n = byte_cnt - i*SKEIN_1024_BLOCK_BYTES; 859 n = byte_cnt - i * SKEIN_1024_BLOCK_BYTES;
860 if (n >= SKEIN_1024_BLOCK_BYTES) 860 if (n >= SKEIN_1024_BLOCK_BYTES)
861 n = SKEIN_1024_BLOCK_BYTES; 861 n = SKEIN_1024_BLOCK_BYTES;
862 /* "output" the ctr mode bytes */ 862 /* "output" the ctr mode bytes */
863 skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x, 863 skein_put64_lsb_first(hash_val + (i * SKEIN_1024_BLOCK_BYTES),
864 n); 864 ctx->x, n);
865 /* restore the counter mode key for next time */ 865 /* restore the counter mode key for next time */
866 memcpy(ctx->x, x, sizeof(x)); 866 memcpy(ctx->x, x, sizeof(x));
867 } 867 }
diff --git a/drivers/staging/skein/skein_base.h b/drivers/staging/skein/skein_base.h
index 3c7f8ad3627d..dc464f334a58 100644
--- a/drivers/staging/skein/skein_base.h
+++ b/drivers/staging/skein/skein_base.h
@@ -32,7 +32,7 @@
32/* below two prototype assume we are handed aligned data */ 32/* below two prototype assume we are handed aligned data */
33#define skein_put64_lsb_first(dst08, src64, b_cnt) memcpy(dst08, src64, b_cnt) 33#define skein_put64_lsb_first(dst08, src64, b_cnt) memcpy(dst08, src64, b_cnt)
34#define skein_get64_lsb_first(dst64, src08, w_cnt) \ 34#define skein_get64_lsb_first(dst64, src08, w_cnt) \
35 memcpy(dst64, src08, 8*(w_cnt)) 35 memcpy(dst64, src08, 8 * (w_cnt))
36#define skein_swap64(w64) (w64) 36#define skein_swap64(w64) (w64)
37 37
38enum { 38enum {
@@ -48,17 +48,17 @@ enum {
48#define SKEIN_1024_STATE_WORDS 16 48#define SKEIN_1024_STATE_WORDS 16
49#define SKEIN_MAX_STATE_WORDS 16 49#define SKEIN_MAX_STATE_WORDS 16
50 50
51#define SKEIN_256_STATE_BYTES (8*SKEIN_256_STATE_WORDS) 51#define SKEIN_256_STATE_BYTES (8 * SKEIN_256_STATE_WORDS)
52#define SKEIN_512_STATE_BYTES (8*SKEIN_512_STATE_WORDS) 52#define SKEIN_512_STATE_BYTES (8 * SKEIN_512_STATE_WORDS)
53#define SKEIN_1024_STATE_BYTES (8*SKEIN_1024_STATE_WORDS) 53#define SKEIN_1024_STATE_BYTES (8 * SKEIN_1024_STATE_WORDS)
54 54
55#define SKEIN_256_STATE_BITS (64*SKEIN_256_STATE_WORDS) 55#define SKEIN_256_STATE_BITS (64 * SKEIN_256_STATE_WORDS)
56#define SKEIN_512_STATE_BITS (64*SKEIN_512_STATE_WORDS) 56#define SKEIN_512_STATE_BITS (64 * SKEIN_512_STATE_WORDS)
57#define SKEIN_1024_STATE_BITS (64*SKEIN_1024_STATE_WORDS) 57#define SKEIN_1024_STATE_BITS (64 * SKEIN_1024_STATE_WORDS)
58 58
59#define SKEIN_256_BLOCK_BYTES (8*SKEIN_256_STATE_WORDS) 59#define SKEIN_256_BLOCK_BYTES (8 * SKEIN_256_STATE_WORDS)
60#define SKEIN_512_BLOCK_BYTES (8*SKEIN_512_STATE_WORDS) 60#define SKEIN_512_BLOCK_BYTES (8 * SKEIN_512_STATE_WORDS)
61#define SKEIN_1024_BLOCK_BYTES (8*SKEIN_1024_STATE_WORDS) 61#define SKEIN_1024_BLOCK_BYTES (8 * SKEIN_1024_STATE_WORDS)
62 62
63struct skein_ctx_hdr { 63struct skein_ctx_hdr {
64 size_t hash_bit_len; /* size of hash result, in bits */ 64 size_t hash_bit_len; /* size of hash result, in bits */
@@ -84,11 +84,6 @@ struct skein_1024_ctx { /* 1024-bit Skein hash context structure */
84 u8 b[SKEIN_1024_BLOCK_BYTES]; /* partial block buf (8-byte aligned) */ 84 u8 b[SKEIN_1024_BLOCK_BYTES]; /* partial block buf (8-byte aligned) */
85}; 85};
86 86
87static inline u64 rotl_64(u64 x, u8 N)
88{
89 return (x << N) | (x >> (64 - N));
90}
91
92/* Skein APIs for (incremental) "straight hashing" */ 87/* Skein APIs for (incremental) "straight hashing" */
93int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len); 88int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len);
94int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len); 89int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len);
@@ -162,13 +157,13 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
162#define SKEIN_T1_POS_FINAL SKEIN_T1_BIT(127) /* 127 final blk flag */ 157#define SKEIN_T1_POS_FINAL SKEIN_T1_BIT(127) /* 127 final blk flag */
163 158
164/* tweak word tweak[1]: flag bit definition(s) */ 159/* tweak word tweak[1]: flag bit definition(s) */
165#define SKEIN_T1_FLAG_FIRST (((u64) 1) << SKEIN_T1_POS_FIRST) 160#define SKEIN_T1_FLAG_FIRST (((u64)1) << SKEIN_T1_POS_FIRST)
166#define SKEIN_T1_FLAG_FINAL (((u64) 1) << SKEIN_T1_POS_FINAL) 161#define SKEIN_T1_FLAG_FINAL (((u64)1) << SKEIN_T1_POS_FINAL)
167#define SKEIN_T1_FLAG_BIT_PAD (((u64) 1) << SKEIN_T1_POS_BIT_PAD) 162#define SKEIN_T1_FLAG_BIT_PAD (((u64)1) << SKEIN_T1_POS_BIT_PAD)
168 163
169/* tweak word tweak[1]: tree level bit field mask */ 164/* tweak word tweak[1]: tree level bit field mask */
170#define SKEIN_T1_TREE_LVL_MASK (((u64)0x7F) << SKEIN_T1_POS_TREE_LVL) 165#define SKEIN_T1_TREE_LVL_MASK (((u64)0x7F) << SKEIN_T1_POS_TREE_LVL)
171#define SKEIN_T1_TREE_LEVEL(n) (((u64) (n)) << SKEIN_T1_POS_TREE_LVL) 166#define SKEIN_T1_TREE_LEVEL(n) (((u64)(n)) << SKEIN_T1_POS_TREE_LVL)
172 167
173/* tweak word tweak[1]: block type field */ 168/* tweak word tweak[1]: block type field */
174#define SKEIN_BLK_TYPE_KEY (0) /* key, for MAC and KDF */ 169#define SKEIN_BLK_TYPE_KEY (0) /* key, for MAC and KDF */
@@ -181,7 +176,7 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
181#define SKEIN_BLK_TYPE_OUT (63) /* output stage */ 176#define SKEIN_BLK_TYPE_OUT (63) /* output stage */
182#define SKEIN_BLK_TYPE_MASK (63) /* bit field mask */ 177#define SKEIN_BLK_TYPE_MASK (63) /* bit field mask */
183 178
184#define SKEIN_T1_BLK_TYPE(T) (((u64) (SKEIN_BLK_TYPE_##T)) << \ 179#define SKEIN_T1_BLK_TYPE(T) (((u64)(SKEIN_BLK_TYPE_##T)) << \
185 SKEIN_T1_POS_BLK_TYPE) 180 SKEIN_T1_POS_BLK_TYPE)
186#define SKEIN_T1_BLK_TYPE_KEY SKEIN_T1_BLK_TYPE(KEY) /* for MAC and KDF */ 181#define SKEIN_T1_BLK_TYPE_KEY SKEIN_T1_BLK_TYPE(KEY) /* for MAC and KDF */
187#define SKEIN_T1_BLK_TYPE_CFG SKEIN_T1_BLK_TYPE(CFG) /* config block */ 182#define SKEIN_T1_BLK_TYPE_CFG SKEIN_T1_BLK_TYPE(CFG) /* config block */
@@ -204,11 +199,11 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
204#define SKEIN_ID_STRING_LE (0x33414853) /* "SHA3" (little-endian)*/ 199#define SKEIN_ID_STRING_LE (0x33414853) /* "SHA3" (little-endian)*/
205#endif 200#endif
206 201
207#define SKEIN_MK_64(hi32, lo32) ((lo32) + (((u64) (hi32)) << 32)) 202#define SKEIN_MK_64(hi32, lo32) ((lo32) + (((u64)(hi32)) << 32))
208#define SKEIN_SCHEMA_VER SKEIN_MK_64(SKEIN_VERSION, SKEIN_ID_STRING_LE) 203#define SKEIN_SCHEMA_VER SKEIN_MK_64(SKEIN_VERSION, SKEIN_ID_STRING_LE)
209#define SKEIN_KS_PARITY SKEIN_MK_64(0x1BD11BDA, 0xA9FC1A22) 204#define SKEIN_KS_PARITY SKEIN_MK_64(0x1BD11BDA, 0xA9FC1A22)
210 205
211#define SKEIN_CFG_STR_LEN (4*8) 206#define SKEIN_CFG_STR_LEN (4 * 8)
212 207
213/* bit field definitions in config block tree_info word */ 208/* bit field definitions in config block tree_info word */
214#define SKEIN_CFG_TREE_LEAF_SIZE_POS (0) 209#define SKEIN_CFG_TREE_LEAF_SIZE_POS (0)
@@ -327,9 +322,9 @@ enum {
327#define SKEIN_512_ROUNDS_TOTAL (72) 322#define SKEIN_512_ROUNDS_TOTAL (72)
328#define SKEIN_1024_ROUNDS_TOTAL (80) 323#define SKEIN_1024_ROUNDS_TOTAL (80)
329#else /* allow command-line define in range 8*(5..14) */ 324#else /* allow command-line define in range 8*(5..14) */
330#define SKEIN_256_ROUNDS_TOTAL (8*((((SKEIN_ROUNDS/100) + 5) % 10) + 5)) 325#define SKEIN_256_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS / 100) + 5) % 10) + 5))
331#define SKEIN_512_ROUNDS_TOTAL (8*((((SKEIN_ROUNDS/10) + 5) % 10) + 5)) 326#define SKEIN_512_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS / 10) + 5) % 10) + 5))
332#define SKEIN_1024_ROUNDS_TOTAL (8*((((SKEIN_ROUNDS) + 5) % 10) + 5)) 327#define SKEIN_1024_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS) + 5) % 10) + 5))
333#endif 328#endif
334 329
335#endif /* ifndef _SKEIN_H_ */ 330#endif /* ifndef _SKEIN_H_ */
diff --git a/drivers/staging/skein/skein_block.c b/drivers/staging/skein/skein_block.c
index 45b47327e024..59a0a8a82118 100644
--- a/drivers/staging/skein/skein_block.c
+++ b/drivers/staging/skein/skein_block.c
@@ -15,6 +15,7 @@
15************************************************************************/ 15************************************************************************/
16 16
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/bitops.h>
18#include "skein_base.h" 19#include "skein_base.h"
19#include "skein_block.h" 20#include "skein_block.h"
20 21
@@ -59,10 +60,10 @@
59#define ROUND256(p0, p1, p2, p3, ROT, r_num) \ 60#define ROUND256(p0, p1, p2, p3, ROT, r_num) \
60 do { \ 61 do { \
61 X##p0 += X##p1; \ 62 X##p0 += X##p1; \
62 X##p1 = rotl_64(X##p1, ROT##_0); \ 63 X##p1 = rol64(X##p1, ROT##_0); \
63 X##p1 ^= X##p0; \ 64 X##p1 ^= X##p0; \
64 X##p2 += X##p3; \ 65 X##p2 += X##p3; \
65 X##p3 = rotl_64(X##p3, ROT##_1); \ 66 X##p3 = rol64(X##p3, ROT##_1); \
66 X##p3 ^= X##p2; \ 67 X##p3 ^= X##p2; \
67 } while (0) 68 } while (0)
68 69
@@ -120,10 +121,10 @@
120 121
121#if !(SKEIN_USE_ASM & 512) 122#if !(SKEIN_USE_ASM & 512)
122#undef RCNT 123#undef RCNT
123#define RCNT (SKEIN_512_ROUNDS_TOTAL/8) 124#define RCNT (SKEIN_512_ROUNDS_TOTAL / 8)
124 125
125#ifdef SKEIN_LOOP /* configure how much to unroll the loop */ 126#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
126#define SKEIN_UNROLL_512 (((SKEIN_LOOP)/10)%10) 127#define SKEIN_UNROLL_512 (((SKEIN_LOOP) / 10) % 10)
127#else 128#else
128#define SKEIN_UNROLL_512 (0) 129#define SKEIN_UNROLL_512 (0)
129#endif 130#endif
@@ -136,15 +137,16 @@
136#define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \ 137#define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
137 do { \ 138 do { \
138 X##p0 += X##p1; \ 139 X##p0 += X##p1; \
139 X##p1 = rotl_64(X##p1, ROT##_0); \ 140 X##p1 = rol64(X##p1, ROT##_0); \
140 X##p1 ^= X##p0; \ 141 X##p1 ^= X##p0; \
141 X##p2 += X##p3; \ 142 X##p2 += X##p3; \
142 X##p3 = rotl_64(X##p3, ROT##_1); \ 143 X##p3 = rol64(X##p3, ROT##_1); \
143 X##p3 ^= X##p2; \ 144 X##p3 ^= X##p2; \
144 X##p4 += X##p5; \ 145 X##p4 += X##p5; \
145 X##p5 = rotl_64(X##p5, ROT##_2); \ 146 X##p5 = rol64(X##p5, ROT##_2); \
146 X##p5 ^= X##p4; \ 147 X##p5 ^= X##p4; \
147 X##p6 += X##p7; X##p7 = rotl_64(X##p7, ROT##_3);\ 148 X##p6 += X##p7; \
149 X##p7 = rol64(X##p7, ROT##_3); \
148 X##p7 ^= X##p6; \ 150 X##p7 ^= X##p6; \
149 } while (0) 151 } while (0)
150 152
@@ -200,7 +202,7 @@
200 } while (0) 202 } while (0)
201#define R512_UNROLL_R(NN) \ 203#define R512_UNROLL_R(NN) \
202 ((SKEIN_UNROLL_512 == 0 && \ 204 ((SKEIN_UNROLL_512 == 0 && \
203 SKEIN_512_ROUNDS_TOTAL/8 > (NN)) || \ 205 SKEIN_512_ROUNDS_TOTAL / 8 > (NN)) || \
204 (SKEIN_UNROLL_512 > (NN))) 206 (SKEIN_UNROLL_512 > (NN)))
205 207
206#if (SKEIN_UNROLL_512 > 14) 208#if (SKEIN_UNROLL_512 > 14)
@@ -210,7 +212,7 @@
210 212
211#if !(SKEIN_USE_ASM & 1024) 213#if !(SKEIN_USE_ASM & 1024)
212#undef RCNT 214#undef RCNT
213#define RCNT (SKEIN_1024_ROUNDS_TOTAL/8) 215#define RCNT (SKEIN_1024_ROUNDS_TOTAL / 8)
214#ifdef SKEIN_LOOP /* configure how much to unroll the loop */ 216#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
215#define SKEIN_UNROLL_1024 ((SKEIN_LOOP) % 10) 217#define SKEIN_UNROLL_1024 ((SKEIN_LOOP) % 10)
216#else 218#else
@@ -226,28 +228,28 @@
226 pF, ROT, r_num) \ 228 pF, ROT, r_num) \
227 do { \ 229 do { \
228 X##p0 += X##p1; \ 230 X##p0 += X##p1; \
229 X##p1 = rotl_64(X##p1, ROT##_0); \ 231 X##p1 = rol64(X##p1, ROT##_0); \
230 X##p1 ^= X##p0; \ 232 X##p1 ^= X##p0; \
231 X##p2 += X##p3; \ 233 X##p2 += X##p3; \
232 X##p3 = rotl_64(X##p3, ROT##_1); \ 234 X##p3 = rol64(X##p3, ROT##_1); \
233 X##p3 ^= X##p2; \ 235 X##p3 ^= X##p2; \
234 X##p4 += X##p5; \ 236 X##p4 += X##p5; \
235 X##p5 = rotl_64(X##p5, ROT##_2); \ 237 X##p5 = rol64(X##p5, ROT##_2); \
236 X##p5 ^= X##p4; \ 238 X##p5 ^= X##p4; \
237 X##p6 += X##p7; \ 239 X##p6 += X##p7; \
238 X##p7 = rotl_64(X##p7, ROT##_3); \ 240 X##p7 = rol64(X##p7, ROT##_3); \
239 X##p7 ^= X##p6; \ 241 X##p7 ^= X##p6; \
240 X##p8 += X##p9; \ 242 X##p8 += X##p9; \
241 X##p9 = rotl_64(X##p9, ROT##_4); \ 243 X##p9 = rol64(X##p9, ROT##_4); \
242 X##p9 ^= X##p8; \ 244 X##p9 ^= X##p8; \
243 X##pA += X##pB; \ 245 X##pA += X##pB; \
244 X##pB = rotl_64(X##pB, ROT##_5); \ 246 X##pB = rol64(X##pB, ROT##_5); \
245 X##pB ^= X##pA; \ 247 X##pB ^= X##pA; \
246 X##pC += X##pD; \ 248 X##pC += X##pD; \
247 X##pD = rotl_64(X##pD, ROT##_6); \ 249 X##pD = rol64(X##pD, ROT##_6); \
248 X##pD ^= X##pC; \ 250 X##pD ^= X##pC; \
249 X##pE += X##pF; \ 251 X##pE += X##pF; \
250 X##pF = rotl_64(X##pF, ROT##_7); \ 252 X##pF = rol64(X##pF, ROT##_7); \
251 X##pF ^= X##pE; \ 253 X##pF ^= X##pE; \
252 } while (0) 254 } while (0)
253 255
@@ -311,28 +313,28 @@
311#define R1024_8_ROUNDS(R) \ 313#define R1024_8_ROUNDS(R) \
312 do { \ 314 do { \
313 R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \ 315 R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \
314 13, 14, 15, R1024_0, 8*(R) + 1); \ 316 13, 14, 15, R1024_0, 8 * (R) + 1); \
315 R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \ 317 R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \
316 05, 08, 01, R1024_1, 8*(R) + 2); \ 318 05, 08, 01, R1024_1, 8 * (R) + 2); \
317 R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \ 319 R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \
318 11, 10, 09, R1024_2, 8*(R) + 3); \ 320 11, 10, 09, R1024_2, 8 * (R) + 3); \
319 R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \ 321 R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \
320 03, 12, 07, R1024_3, 8*(R) + 4); \ 322 03, 12, 07, R1024_3, 8 * (R) + 4); \
321 I1024(2*(R)); \ 323 I1024(2 * (R)); \
322 R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \ 324 R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \
323 13, 14, 15, R1024_4, 8*(R) + 5); \ 325 13, 14, 15, R1024_4, 8 * (R) + 5); \
324 R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \ 326 R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \
325 05, 08, 01, R1024_5, 8*(R) + 6); \ 327 05, 08, 01, R1024_5, 8 * (R) + 6); \
326 R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \ 328 R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \
327 11, 10, 09, R1024_6, 8*(R) + 7); \ 329 11, 10, 09, R1024_6, 8 * (R) + 7); \
328 R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \ 330 R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \
329 03, 12, 07, R1024_7, 8*(R) + 8); \ 331 03, 12, 07, R1024_7, 8 * (R) + 8); \
330 I1024(2*(R)+1); \ 332 I1024(2 * (R) + 1); \
331 } while (0) 333 } while (0)
332 334
333#define R1024_UNROLL_R(NN) \ 335#define R1024_UNROLL_R(NN) \
334 ((SKEIN_UNROLL_1024 == 0 && \ 336 ((SKEIN_UNROLL_1024 == 0 && \
335 SKEIN_1024_ROUNDS_TOTAL/8 > (NN)) || \ 337 SKEIN_1024_ROUNDS_TOTAL / 8 > (NN)) || \
336 (SKEIN_UNROLL_1024 > (NN))) 338 (SKEIN_UNROLL_1024 > (NN)))
337 339
338#if (SKEIN_UNROLL_1024 > 14) 340#if (SKEIN_UNROLL_1024 > 14)
@@ -351,10 +353,10 @@ void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
351 size_t r; 353 size_t r;
352#if SKEIN_UNROLL_256 354#if SKEIN_UNROLL_256
353 /* key schedule: chaining vars + tweak + "rot"*/ 355 /* key schedule: chaining vars + tweak + "rot"*/
354 u64 kw[WCNT+4+RCNT*2]; 356 u64 kw[WCNT + 4 + (RCNT * 2)];
355#else 357#else
356 /* key schedule words : chaining vars + tweak */ 358 /* key schedule words : chaining vars + tweak */
357 u64 kw[WCNT+4]; 359 u64 kw[WCNT + 4];
358#endif 360#endif
359 u64 X0, X1, X2, X3; /* local copy of context vars, for speed */ 361 u64 X0, X1, X2, X3; /* local copy of context vars, for speed */
360 u64 w[WCNT]; /* local copy of input block */ 362 u64 w[WCNT]; /* local copy of input block */
@@ -460,9 +462,10 @@ void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
460#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF) 462#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
461size_t skein_256_process_block_code_size(void) 463size_t skein_256_process_block_code_size(void)
462{ 464{
463 return ((u8 *) skein_256_process_block_code_size) - 465 return ((u8 *)skein_256_process_block_code_size) -
464 ((u8 *) skein_256_process_block); 466 ((u8 *)skein_256_process_block);
465} 467}
468
466unsigned int skein_256_unroll_cnt(void) 469unsigned int skein_256_unroll_cnt(void)
467{ 470{
468 return SKEIN_UNROLL_256; 471 return SKEIN_UNROLL_256;
@@ -480,9 +483,11 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
480 }; 483 };
481 size_t r; 484 size_t r;
482#if SKEIN_UNROLL_512 485#if SKEIN_UNROLL_512
483 u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot"*/ 486 /* key sched: chaining vars + tweak + "rot"*/
487 u64 kw[WCNT + 4 + RCNT * 2];
484#else 488#else
485 u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */ 489 /* key schedule words : chaining vars + tweak */
490 u64 kw[WCNT + 4];
486#endif 491#endif
487 u64 X0, X1, X2, X3, X4, X5, X6, X7; /* local copies, for speed */ 492 u64 X0, X1, X2, X3, X4, X5, X6, X7; /* local copies, for speed */
488 u64 w[WCNT]; /* local copy of input block */ 493 u64 w[WCNT]; /* local copy of input block */
@@ -543,7 +548,6 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
543 for (r = 1; 548 for (r = 1;
544 r < (SKEIN_UNROLL_512 ? 2 * RCNT : 2); 549 r < (SKEIN_UNROLL_512 ? 2 * RCNT : 2);
545 r += (SKEIN_UNROLL_512 ? 2 * SKEIN_UNROLL_512 : 1)) { 550 r += (SKEIN_UNROLL_512 ? 2 * SKEIN_UNROLL_512 : 1)) {
546
547 R512_8_ROUNDS(0); 551 R512_8_ROUNDS(0);
548 552
549#if R512_UNROLL_R(1) 553#if R512_UNROLL_R(1)
@@ -609,9 +613,10 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
609#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF) 613#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
610size_t skein_512_process_block_code_size(void) 614size_t skein_512_process_block_code_size(void)
611{ 615{
612 return ((u8 *) skein_512_process_block_code_size) - 616 return ((u8 *)skein_512_process_block_code_size) -
613 ((u8 *) skein_512_process_block); 617 ((u8 *)skein_512_process_block);
614} 618}
619
615unsigned int skein_512_unroll_cnt(void) 620unsigned int skein_512_unroll_cnt(void)
616{ 621{
617 return SKEIN_UNROLL_512; 622 return SKEIN_UNROLL_512;
@@ -629,9 +634,11 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
629 }; 634 };
630 size_t r; 635 size_t r;
631#if (SKEIN_UNROLL_1024 != 0) 636#if (SKEIN_UNROLL_1024 != 0)
632 u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot" */ 637 /* key sched: chaining vars + tweak + "rot" */
638 u64 kw[WCNT + 4 + (RCNT * 2)];
633#else 639#else
634 u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */ 640 /* key schedule words : chaining vars + tweak */
641 u64 kw[WCNT + 4];
635#endif 642#endif
636 643
637 /* local copy of vars, for speed */ 644 /* local copy of vars, for speed */
@@ -771,9 +778,10 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
771#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF) 778#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
772size_t skein_1024_process_block_code_size(void) 779size_t skein_1024_process_block_code_size(void)
773{ 780{
774 return ((u8 *) skein_1024_process_block_code_size) - 781 return ((u8 *)skein_1024_process_block_code_size) -
775 ((u8 *) skein_1024_process_block); 782 ((u8 *)skein_1024_process_block);
776} 783}
784
777unsigned int skein_1024_unroll_cnt(void) 785unsigned int skein_1024_unroll_cnt(void)
778{ 786{
779 return SKEIN_UNROLL_1024; 787 return SKEIN_UNROLL_1024;
diff --git a/drivers/staging/skein/skein_generic.c b/drivers/staging/skein/skein_generic.c
index e29b9abaa4e6..11f5e530a75f 100644
--- a/drivers/staging/skein/skein_generic.c
+++ b/drivers/staging/skein/skein_generic.c
@@ -27,7 +27,7 @@ static int skein256_init(struct shash_desc *desc)
27} 27}
28 28
29static int skein256_update(struct shash_desc *desc, const u8 *data, 29static int skein256_update(struct shash_desc *desc, const u8 *data,
30 unsigned int len) 30 unsigned int len)
31{ 31{
32 return skein_256_update((struct skein_256_ctx *)shash_desc_ctx(desc), 32 return skein_256_update((struct skein_256_ctx *)shash_desc_ctx(desc),
33 data, len); 33 data, len);
@@ -62,7 +62,7 @@ static int skein512_init(struct shash_desc *desc)
62} 62}
63 63
64static int skein512_update(struct shash_desc *desc, const u8 *data, 64static int skein512_update(struct shash_desc *desc, const u8 *data,
65 unsigned int len) 65 unsigned int len)
66{ 66{
67 return skein_512_update((struct skein_512_ctx *)shash_desc_ctx(desc), 67 return skein_512_update((struct skein_512_ctx *)shash_desc_ctx(desc),
68 data, len); 68 data, len);
@@ -97,7 +97,7 @@ static int skein1024_init(struct shash_desc *desc)
97} 97}
98 98
99static int skein1024_update(struct shash_desc *desc, const u8 *data, 99static int skein1024_update(struct shash_desc *desc, const u8 *data,
100 unsigned int len) 100 unsigned int len)
101{ 101{
102 return skein_1024_update((struct skein_1024_ctx *)shash_desc_ctx(desc), 102 return skein_1024_update((struct skein_1024_ctx *)shash_desc_ctx(desc),
103 data, len); 103 data, len);
diff --git a/drivers/staging/skein/threefish_api.h b/drivers/staging/skein/threefish_api.h
index 8e0a0b77ecce..615e467579ee 100644
--- a/drivers/staging/skein/threefish_api.h
+++ b/drivers/staging/skein/threefish_api.h
@@ -52,7 +52,7 @@ enum threefish_size {
52 */ 52 */
53struct threefish_key { 53struct threefish_key {
54 u64 state_size; 54 u64 state_size;
55 u64 key[SKEIN_MAX_STATE_WORDS+1]; /* max number of key words*/ 55 u64 key[SKEIN_MAX_STATE_WORDS + 1]; /* max number of key words*/
56 u64 tweak[3]; 56 u64 tweak[3];
57}; 57};
58 58
diff --git a/drivers/staging/skein/threefish_block.c b/drivers/staging/skein/threefish_block.c
index e19ac4368651..a95563fad071 100644
--- a/drivers/staging/skein/threefish_block.c
+++ b/drivers/staging/skein/threefish_block.c
@@ -512,622 +512,622 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
512 b2 -= k0 + t1; 512 b2 -= k0 + t1;
513 b3 -= k1 + 18; 513 b3 -= k1 + 18;
514 tmp = b3 ^ b0; 514 tmp = b3 ^ b0;
515 b3 = (tmp >> 32) | (tmp << (64 - 32)); 515 b3 = ror64(tmp, 32);
516 b0 -= b3; 516 b0 -= b3;
517 517
518 tmp = b1 ^ b2; 518 tmp = b1 ^ b2;
519 b1 = (tmp >> 32) | (tmp << (64 - 32)); 519 b1 = ror64(tmp, 32);
520 b2 -= b1; 520 b2 -= b1;
521 521
522 tmp = b1 ^ b0; 522 tmp = b1 ^ b0;
523 b1 = (tmp >> 58) | (tmp << (64 - 58)); 523 b1 = ror64(tmp, 58);
524 b0 -= b1; 524 b0 -= b1;
525 525
526 tmp = b3 ^ b2; 526 tmp = b3 ^ b2;
527 b3 = (tmp >> 22) | (tmp << (64 - 22)); 527 b3 = ror64(tmp, 22);
528 b2 -= b3; 528 b2 -= b3;
529 529
530 tmp = b3 ^ b0; 530 tmp = b3 ^ b0;
531 b3 = (tmp >> 46) | (tmp << (64 - 46)); 531 b3 = ror64(tmp, 46);
532 b0 -= b3; 532 b0 -= b3;
533 533
534 tmp = b1 ^ b2; 534 tmp = b1 ^ b2;
535 b1 = (tmp >> 12) | (tmp << (64 - 12)); 535 b1 = ror64(tmp, 12);
536 b2 -= b1; 536 b2 -= b1;
537 537
538 tmp = b1 ^ b0; 538 tmp = b1 ^ b0;
539 b1 = (tmp >> 25) | (tmp << (64 - 25)); 539 b1 = ror64(tmp, 25);
540 b0 -= b1 + k2; 540 b0 -= b1 + k2;
541 b1 -= k3 + t2; 541 b1 -= k3 + t2;
542 542
543 tmp = b3 ^ b2; 543 tmp = b3 ^ b2;
544 b3 = (tmp >> 33) | (tmp << (64 - 33)); 544 b3 = ror64(tmp, 33);
545 b2 -= b3 + k4 + t0; 545 b2 -= b3 + k4 + t0;
546 b3 -= k0 + 17; 546 b3 -= k0 + 17;
547 547
548 tmp = b3 ^ b0; 548 tmp = b3 ^ b0;
549 b3 = (tmp >> 5) | (tmp << (64 - 5)); 549 b3 = ror64(tmp, 5);
550 b0 -= b3; 550 b0 -= b3;
551 551
552 tmp = b1 ^ b2; 552 tmp = b1 ^ b2;
553 b1 = (tmp >> 37) | (tmp << (64 - 37)); 553 b1 = ror64(tmp, 37);
554 b2 -= b1; 554 b2 -= b1;
555 555
556 tmp = b1 ^ b0; 556 tmp = b1 ^ b0;
557 b1 = (tmp >> 23) | (tmp << (64 - 23)); 557 b1 = ror64(tmp, 23);
558 b0 -= b1; 558 b0 -= b1;
559 559
560 tmp = b3 ^ b2; 560 tmp = b3 ^ b2;
561 b3 = (tmp >> 40) | (tmp << (64 - 40)); 561 b3 = ror64(tmp, 40);
562 b2 -= b3; 562 b2 -= b3;
563 563
564 tmp = b3 ^ b0; 564 tmp = b3 ^ b0;
565 b3 = (tmp >> 52) | (tmp << (64 - 52)); 565 b3 = ror64(tmp, 52);
566 b0 -= b3; 566 b0 -= b3;
567 567
568 tmp = b1 ^ b2; 568 tmp = b1 ^ b2;
569 b1 = (tmp >> 57) | (tmp << (64 - 57)); 569 b1 = ror64(tmp, 57);
570 b2 -= b1; 570 b2 -= b1;
571 571
572 tmp = b1 ^ b0; 572 tmp = b1 ^ b0;
573 b1 = (tmp >> 14) | (tmp << (64 - 14)); 573 b1 = ror64(tmp, 14);
574 b0 -= b1 + k1; 574 b0 -= b1 + k1;
575 b1 -= k2 + t1; 575 b1 -= k2 + t1;
576 576
577 tmp = b3 ^ b2; 577 tmp = b3 ^ b2;
578 b3 = (tmp >> 16) | (tmp << (64 - 16)); 578 b3 = ror64(tmp, 16);
579 b2 -= b3 + k3 + t2; 579 b2 -= b3 + k3 + t2;
580 b3 -= k4 + 16; 580 b3 -= k4 + 16;
581 581
582 582
583 tmp = b3 ^ b0; 583 tmp = b3 ^ b0;
584 b3 = (tmp >> 32) | (tmp << (64 - 32)); 584 b3 = ror64(tmp, 32);
585 b0 -= b3; 585 b0 -= b3;
586 586
587 tmp = b1 ^ b2; 587 tmp = b1 ^ b2;
588 b1 = (tmp >> 32) | (tmp << (64 - 32)); 588 b1 = ror64(tmp, 32);
589 b2 -= b1; 589 b2 -= b1;
590 590
591 tmp = b1 ^ b0; 591 tmp = b1 ^ b0;
592 b1 = (tmp >> 58) | (tmp << (64 - 58)); 592 b1 = ror64(tmp, 58);
593 b0 -= b1; 593 b0 -= b1;
594 594
595 tmp = b3 ^ b2; 595 tmp = b3 ^ b2;
596 b3 = (tmp >> 22) | (tmp << (64 - 22)); 596 b3 = ror64(tmp, 22);
597 b2 -= b3; 597 b2 -= b3;
598 598
599 tmp = b3 ^ b0; 599 tmp = b3 ^ b0;
600 b3 = (tmp >> 46) | (tmp << (64 - 46)); 600 b3 = ror64(tmp, 46);
601 b0 -= b3; 601 b0 -= b3;
602 602
603 tmp = b1 ^ b2; 603 tmp = b1 ^ b2;
604 b1 = (tmp >> 12) | (tmp << (64 - 12)); 604 b1 = ror64(tmp, 12);
605 b2 -= b1; 605 b2 -= b1;
606 606
607 tmp = b1 ^ b0; 607 tmp = b1 ^ b0;
608 b1 = (tmp >> 25) | (tmp << (64 - 25)); 608 b1 = ror64(tmp, 25);
609 b0 -= b1 + k0; 609 b0 -= b1 + k0;
610 b1 -= k1 + t0; 610 b1 -= k1 + t0;
611 611
612 tmp = b3 ^ b2; 612 tmp = b3 ^ b2;
613 b3 = (tmp >> 33) | (tmp << (64 - 33)); 613 b3 = ror64(tmp, 33);
614 b2 -= b3 + k2 + t1; 614 b2 -= b3 + k2 + t1;
615 b3 -= k3 + 15; 615 b3 -= k3 + 15;
616 616
617 tmp = b3 ^ b0; 617 tmp = b3 ^ b0;
618 b3 = (tmp >> 5) | (tmp << (64 - 5)); 618 b3 = ror64(tmp, 5);
619 b0 -= b3; 619 b0 -= b3;
620 620
621 tmp = b1 ^ b2; 621 tmp = b1 ^ b2;
622 b1 = (tmp >> 37) | (tmp << (64 - 37)); 622 b1 = ror64(tmp, 37);
623 b2 -= b1; 623 b2 -= b1;
624 624
625 tmp = b1 ^ b0; 625 tmp = b1 ^ b0;
626 b1 = (tmp >> 23) | (tmp << (64 - 23)); 626 b1 = ror64(tmp, 23);
627 b0 -= b1; 627 b0 -= b1;
628 628
629 tmp = b3 ^ b2; 629 tmp = b3 ^ b2;
630 b3 = (tmp >> 40) | (tmp << (64 - 40)); 630 b3 = ror64(tmp, 40);
631 b2 -= b3; 631 b2 -= b3;
632 632
633 tmp = b3 ^ b0; 633 tmp = b3 ^ b0;
634 b3 = (tmp >> 52) | (tmp << (64 - 52)); 634 b3 = ror64(tmp, 52);
635 b0 -= b3; 635 b0 -= b3;
636 636
637 tmp = b1 ^ b2; 637 tmp = b1 ^ b2;
638 b1 = (tmp >> 57) | (tmp << (64 - 57)); 638 b1 = ror64(tmp, 57);
639 b2 -= b1; 639 b2 -= b1;
640 640
641 tmp = b1 ^ b0; 641 tmp = b1 ^ b0;
642 b1 = (tmp >> 14) | (tmp << (64 - 14)); 642 b1 = ror64(tmp, 14);
643 b0 -= b1 + k4; 643 b0 -= b1 + k4;
644 b1 -= k0 + t2; 644 b1 -= k0 + t2;
645 645
646 tmp = b3 ^ b2; 646 tmp = b3 ^ b2;
647 b3 = (tmp >> 16) | (tmp << (64 - 16)); 647 b3 = ror64(tmp, 16);
648 b2 -= b3 + k1 + t0; 648 b2 -= b3 + k1 + t0;
649 b3 -= k2 + 14; 649 b3 -= k2 + 14;
650 650
651 651
652 tmp = b3 ^ b0; 652 tmp = b3 ^ b0;
653 b3 = (tmp >> 32) | (tmp << (64 - 32)); 653 b3 = ror64(tmp, 32);
654 b0 -= b3; 654 b0 -= b3;
655 655
656 tmp = b1 ^ b2; 656 tmp = b1 ^ b2;
657 b1 = (tmp >> 32) | (tmp << (64 - 32)); 657 b1 = ror64(tmp, 32);
658 b2 -= b1; 658 b2 -= b1;
659 659
660 tmp = b1 ^ b0; 660 tmp = b1 ^ b0;
661 b1 = (tmp >> 58) | (tmp << (64 - 58)); 661 b1 = ror64(tmp, 58);
662 b0 -= b1; 662 b0 -= b1;
663 663
664 tmp = b3 ^ b2; 664 tmp = b3 ^ b2;
665 b3 = (tmp >> 22) | (tmp << (64 - 22)); 665 b3 = ror64(tmp, 22);
666 b2 -= b3; 666 b2 -= b3;
667 667
668 tmp = b3 ^ b0; 668 tmp = b3 ^ b0;
669 b3 = (tmp >> 46) | (tmp << (64 - 46)); 669 b3 = ror64(tmp, 46);
670 b0 -= b3; 670 b0 -= b3;
671 671
672 tmp = b1 ^ b2; 672 tmp = b1 ^ b2;
673 b1 = (tmp >> 12) | (tmp << (64 - 12)); 673 b1 = ror64(tmp, 12);
674 b2 -= b1; 674 b2 -= b1;
675 675
676 tmp = b1 ^ b0; 676 tmp = b1 ^ b0;
677 b1 = (tmp >> 25) | (tmp << (64 - 25)); 677 b1 = ror64(tmp, 25);
678 b0 -= b1 + k3; 678 b0 -= b1 + k3;
679 b1 -= k4 + t1; 679 b1 -= k4 + t1;
680 680
681 tmp = b3 ^ b2; 681 tmp = b3 ^ b2;
682 b3 = (tmp >> 33) | (tmp << (64 - 33)); 682 b3 = ror64(tmp, 33);
683 b2 -= b3 + k0 + t2; 683 b2 -= b3 + k0 + t2;
684 b3 -= k1 + 13; 684 b3 -= k1 + 13;
685 685
686 tmp = b3 ^ b0; 686 tmp = b3 ^ b0;
687 b3 = (tmp >> 5) | (tmp << (64 - 5)); 687 b3 = ror64(tmp, 5);
688 b0 -= b3; 688 b0 -= b3;
689 689
690 tmp = b1 ^ b2; 690 tmp = b1 ^ b2;
691 b1 = (tmp >> 37) | (tmp << (64 - 37)); 691 b1 = ror64(tmp, 37);
692 b2 -= b1; 692 b2 -= b1;
693 693
694 tmp = b1 ^ b0; 694 tmp = b1 ^ b0;
695 b1 = (tmp >> 23) | (tmp << (64 - 23)); 695 b1 = ror64(tmp, 23);
696 b0 -= b1; 696 b0 -= b1;
697 697
698 tmp = b3 ^ b2; 698 tmp = b3 ^ b2;
699 b3 = (tmp >> 40) | (tmp << (64 - 40)); 699 b3 = ror64(tmp, 40);
700 b2 -= b3; 700 b2 -= b3;
701 701
702 tmp = b3 ^ b0; 702 tmp = b3 ^ b0;
703 b3 = (tmp >> 52) | (tmp << (64 - 52)); 703 b3 = ror64(tmp, 52);
704 b0 -= b3; 704 b0 -= b3;
705 705
706 tmp = b1 ^ b2; 706 tmp = b1 ^ b2;
707 b1 = (tmp >> 57) | (tmp << (64 - 57)); 707 b1 = ror64(tmp, 57);
708 b2 -= b1; 708 b2 -= b1;
709 709
710 tmp = b1 ^ b0; 710 tmp = b1 ^ b0;
711 b1 = (tmp >> 14) | (tmp << (64 - 14)); 711 b1 = ror64(tmp, 14);
712 b0 -= b1 + k2; 712 b0 -= b1 + k2;
713 b1 -= k3 + t0; 713 b1 -= k3 + t0;
714 714
715 tmp = b3 ^ b2; 715 tmp = b3 ^ b2;
716 b3 = (tmp >> 16) | (tmp << (64 - 16)); 716 b3 = ror64(tmp, 16);
717 b2 -= b3 + k4 + t1; 717 b2 -= b3 + k4 + t1;
718 b3 -= k0 + 12; 718 b3 -= k0 + 12;
719 719
720 720
721 tmp = b3 ^ b0; 721 tmp = b3 ^ b0;
722 b3 = (tmp >> 32) | (tmp << (64 - 32)); 722 b3 = ror64(tmp, 32);
723 b0 -= b3; 723 b0 -= b3;
724 724
725 tmp = b1 ^ b2; 725 tmp = b1 ^ b2;
726 b1 = (tmp >> 32) | (tmp << (64 - 32)); 726 b1 = ror64(tmp, 32);
727 b2 -= b1; 727 b2 -= b1;
728 728
729 tmp = b1 ^ b0; 729 tmp = b1 ^ b0;
730 b1 = (tmp >> 58) | (tmp << (64 - 58)); 730 b1 = ror64(tmp, 58);
731 b0 -= b1; 731 b0 -= b1;
732 732
733 tmp = b3 ^ b2; 733 tmp = b3 ^ b2;
734 b3 = (tmp >> 22) | (tmp << (64 - 22)); 734 b3 = ror64(tmp, 22);
735 b2 -= b3; 735 b2 -= b3;
736 736
737 tmp = b3 ^ b0; 737 tmp = b3 ^ b0;
738 b3 = (tmp >> 46) | (tmp << (64 - 46)); 738 b3 = ror64(tmp, 46);
739 b0 -= b3; 739 b0 -= b3;
740 740
741 tmp = b1 ^ b2; 741 tmp = b1 ^ b2;
742 b1 = (tmp >> 12) | (tmp << (64 - 12)); 742 b1 = ror64(tmp, 12);
743 b2 -= b1; 743 b2 -= b1;
744 744
745 tmp = b1 ^ b0; 745 tmp = b1 ^ b0;
746 b1 = (tmp >> 25) | (tmp << (64 - 25)); 746 b1 = ror64(tmp, 25);
747 b0 -= b1 + k1; 747 b0 -= b1 + k1;
748 b1 -= k2 + t2; 748 b1 -= k2 + t2;
749 749
750 tmp = b3 ^ b2; 750 tmp = b3 ^ b2;
751 b3 = (tmp >> 33) | (tmp << (64 - 33)); 751 b3 = ror64(tmp, 33);
752 b2 -= b3 + k3 + t0; 752 b2 -= b3 + k3 + t0;
753 b3 -= k4 + 11; 753 b3 -= k4 + 11;
754 754
755 tmp = b3 ^ b0; 755 tmp = b3 ^ b0;
756 b3 = (tmp >> 5) | (tmp << (64 - 5)); 756 b3 = ror64(tmp, 5);
757 b0 -= b3; 757 b0 -= b3;
758 758
759 tmp = b1 ^ b2; 759 tmp = b1 ^ b2;
760 b1 = (tmp >> 37) | (tmp << (64 - 37)); 760 b1 = ror64(tmp, 37);
761 b2 -= b1; 761 b2 -= b1;
762 762
763 tmp = b1 ^ b0; 763 tmp = b1 ^ b0;
764 b1 = (tmp >> 23) | (tmp << (64 - 23)); 764 b1 = ror64(tmp, 23);
765 b0 -= b1; 765 b0 -= b1;
766 766
767 tmp = b3 ^ b2; 767 tmp = b3 ^ b2;
768 b3 = (tmp >> 40) | (tmp << (64 - 40)); 768 b3 = ror64(tmp, 40);
769 b2 -= b3; 769 b2 -= b3;
770 770
771 tmp = b3 ^ b0; 771 tmp = b3 ^ b0;
772 b3 = (tmp >> 52) | (tmp << (64 - 52)); 772 b3 = ror64(tmp, 52);
773 b0 -= b3; 773 b0 -= b3;
774 774
775 tmp = b1 ^ b2; 775 tmp = b1 ^ b2;
776 b1 = (tmp >> 57) | (tmp << (64 - 57)); 776 b1 = ror64(tmp, 57);
777 b2 -= b1; 777 b2 -= b1;
778 778
779 tmp = b1 ^ b0; 779 tmp = b1 ^ b0;
780 b1 = (tmp >> 14) | (tmp << (64 - 14)); 780 b1 = ror64(tmp, 14);
781 b0 -= b1 + k0; 781 b0 -= b1 + k0;
782 b1 -= k1 + t1; 782 b1 -= k1 + t1;
783 783
784 tmp = b3 ^ b2; 784 tmp = b3 ^ b2;
785 b3 = (tmp >> 16) | (tmp << (64 - 16)); 785 b3 = ror64(tmp, 16);
786 b2 -= b3 + k2 + t2; 786 b2 -= b3 + k2 + t2;
787 b3 -= k3 + 10; 787 b3 -= k3 + 10;
788 788
789 789
790 tmp = b3 ^ b0; 790 tmp = b3 ^ b0;
791 b3 = (tmp >> 32) | (tmp << (64 - 32)); 791 b3 = ror64(tmp, 32);
792 b0 -= b3; 792 b0 -= b3;
793 793
794 tmp = b1 ^ b2; 794 tmp = b1 ^ b2;
795 b1 = (tmp >> 32) | (tmp << (64 - 32)); 795 b1 = ror64(tmp, 32);
796 b2 -= b1; 796 b2 -= b1;
797 797
798 tmp = b1 ^ b0; 798 tmp = b1 ^ b0;
799 b1 = (tmp >> 58) | (tmp << (64 - 58)); 799 b1 = ror64(tmp, 58);
800 b0 -= b1; 800 b0 -= b1;
801 801
802 tmp = b3 ^ b2; 802 tmp = b3 ^ b2;
803 b3 = (tmp >> 22) | (tmp << (64 - 22)); 803 b3 = ror64(tmp, 22);
804 b2 -= b3; 804 b2 -= b3;
805 805
806 tmp = b3 ^ b0; 806 tmp = b3 ^ b0;
807 b3 = (tmp >> 46) | (tmp << (64 - 46)); 807 b3 = ror64(tmp, 46);
808 b0 -= b3; 808 b0 -= b3;
809 809
810 tmp = b1 ^ b2; 810 tmp = b1 ^ b2;
811 b1 = (tmp >> 12) | (tmp << (64 - 12)); 811 b1 = ror64(tmp, 12);
812 b2 -= b1; 812 b2 -= b1;
813 813
814 tmp = b1 ^ b0; 814 tmp = b1 ^ b0;
815 b1 = (tmp >> 25) | (tmp << (64 - 25)); 815 b1 = ror64(tmp, 25);
816 b0 -= b1 + k4; 816 b0 -= b1 + k4;
817 b1 -= k0 + t0; 817 b1 -= k0 + t0;
818 818
819 tmp = b3 ^ b2; 819 tmp = b3 ^ b2;
820 b3 = (tmp >> 33) | (tmp << (64 - 33)); 820 b3 = ror64(tmp, 33);
821 b2 -= b3 + k1 + t1; 821 b2 -= b3 + k1 + t1;
822 b3 -= k2 + 9; 822 b3 -= k2 + 9;
823 823
824 tmp = b3 ^ b0; 824 tmp = b3 ^ b0;
825 b3 = (tmp >> 5) | (tmp << (64 - 5)); 825 b3 = ror64(tmp, 5);
826 b0 -= b3; 826 b0 -= b3;
827 827
828 tmp = b1 ^ b2; 828 tmp = b1 ^ b2;
829 b1 = (tmp >> 37) | (tmp << (64 - 37)); 829 b1 = ror64(tmp, 37);
830 b2 -= b1; 830 b2 -= b1;
831 831
832 tmp = b1 ^ b0; 832 tmp = b1 ^ b0;
833 b1 = (tmp >> 23) | (tmp << (64 - 23)); 833 b1 = ror64(tmp, 23);
834 b0 -= b1; 834 b0 -= b1;
835 835
836 tmp = b3 ^ b2; 836 tmp = b3 ^ b2;
837 b3 = (tmp >> 40) | (tmp << (64 - 40)); 837 b3 = ror64(tmp, 40);
838 b2 -= b3; 838 b2 -= b3;
839 839
840 tmp = b3 ^ b0; 840 tmp = b3 ^ b0;
841 b3 = (tmp >> 52) | (tmp << (64 - 52)); 841 b3 = ror64(tmp, 52);
842 b0 -= b3; 842 b0 -= b3;
843 843
844 tmp = b1 ^ b2; 844 tmp = b1 ^ b2;
845 b1 = (tmp >> 57) | (tmp << (64 - 57)); 845 b1 = ror64(tmp, 57);
846 b2 -= b1; 846 b2 -= b1;
847 847
848 tmp = b1 ^ b0; 848 tmp = b1 ^ b0;
849 b1 = (tmp >> 14) | (tmp << (64 - 14)); 849 b1 = ror64(tmp, 14);
850 b0 -= b1 + k3; 850 b0 -= b1 + k3;
851 b1 -= k4 + t2; 851 b1 -= k4 + t2;
852 852
853 tmp = b3 ^ b2; 853 tmp = b3 ^ b2;
854 b3 = (tmp >> 16) | (tmp << (64 - 16)); 854 b3 = ror64(tmp, 16);
855 b2 -= b3 + k0 + t0; 855 b2 -= b3 + k0 + t0;
856 b3 -= k1 + 8; 856 b3 -= k1 + 8;
857 857
858 858
859 tmp = b3 ^ b0; 859 tmp = b3 ^ b0;
860 b3 = (tmp >> 32) | (tmp << (64 - 32)); 860 b3 = ror64(tmp, 32);
861 b0 -= b3; 861 b0 -= b3;
862 862
863 tmp = b1 ^ b2; 863 tmp = b1 ^ b2;
864 b1 = (tmp >> 32) | (tmp << (64 - 32)); 864 b1 = ror64(tmp, 32);
865 b2 -= b1; 865 b2 -= b1;
866 866
867 tmp = b1 ^ b0; 867 tmp = b1 ^ b0;
868 b1 = (tmp >> 58) | (tmp << (64 - 58)); 868 b1 = ror64(tmp, 58);
869 b0 -= b1; 869 b0 -= b1;
870 870
871 tmp = b3 ^ b2; 871 tmp = b3 ^ b2;
872 b3 = (tmp >> 22) | (tmp << (64 - 22)); 872 b3 = ror64(tmp, 22);
873 b2 -= b3; 873 b2 -= b3;
874 874
875 tmp = b3 ^ b0; 875 tmp = b3 ^ b0;
876 b3 = (tmp >> 46) | (tmp << (64 - 46)); 876 b3 = ror64(tmp, 46);
877 b0 -= b3; 877 b0 -= b3;
878 878
879 tmp = b1 ^ b2; 879 tmp = b1 ^ b2;
880 b1 = (tmp >> 12) | (tmp << (64 - 12)); 880 b1 = ror64(tmp, 12);
881 b2 -= b1; 881 b2 -= b1;
882 882
883 tmp = b1 ^ b0; 883 tmp = b1 ^ b0;
884 b1 = (tmp >> 25) | (tmp << (64 - 25)); 884 b1 = ror64(tmp, 25);
885 b0 -= b1 + k2; 885 b0 -= b1 + k2;
886 b1 -= k3 + t1; 886 b1 -= k3 + t1;
887 887
888 tmp = b3 ^ b2; 888 tmp = b3 ^ b2;
889 b3 = (tmp >> 33) | (tmp << (64 - 33)); 889 b3 = ror64(tmp, 33);
890 b2 -= b3 + k4 + t2; 890 b2 -= b3 + k4 + t2;
891 b3 -= k0 + 7; 891 b3 -= k0 + 7;
892 892
893 tmp = b3 ^ b0; 893 tmp = b3 ^ b0;
894 b3 = (tmp >> 5) | (tmp << (64 - 5)); 894 b3 = ror64(tmp, 5);
895 b0 -= b3; 895 b0 -= b3;
896 896
897 tmp = b1 ^ b2; 897 tmp = b1 ^ b2;
898 b1 = (tmp >> 37) | (tmp << (64 - 37)); 898 b1 = ror64(tmp, 37);
899 b2 -= b1; 899 b2 -= b1;
900 900
901 tmp = b1 ^ b0; 901 tmp = b1 ^ b0;
902 b1 = (tmp >> 23) | (tmp << (64 - 23)); 902 b1 = ror64(tmp, 23);
903 b0 -= b1; 903 b0 -= b1;
904 904
905 tmp = b3 ^ b2; 905 tmp = b3 ^ b2;
906 b3 = (tmp >> 40) | (tmp << (64 - 40)); 906 b3 = ror64(tmp, 40);
907 b2 -= b3; 907 b2 -= b3;
908 908
909 tmp = b3 ^ b0; 909 tmp = b3 ^ b0;
910 b3 = (tmp >> 52) | (tmp << (64 - 52)); 910 b3 = ror64(tmp, 52);
911 b0 -= b3; 911 b0 -= b3;
912 912
913 tmp = b1 ^ b2; 913 tmp = b1 ^ b2;
914 b1 = (tmp >> 57) | (tmp << (64 - 57)); 914 b1 = ror64(tmp, 57);
915 b2 -= b1; 915 b2 -= b1;
916 916
917 tmp = b1 ^ b0; 917 tmp = b1 ^ b0;
918 b1 = (tmp >> 14) | (tmp << (64 - 14)); 918 b1 = ror64(tmp, 14);
919 b0 -= b1 + k1; 919 b0 -= b1 + k1;
920 b1 -= k2 + t0; 920 b1 -= k2 + t0;
921 921
922 tmp = b3 ^ b2; 922 tmp = b3 ^ b2;
923 b3 = (tmp >> 16) | (tmp << (64 - 16)); 923 b3 = ror64(tmp, 16);
924 b2 -= b3 + k3 + t1; 924 b2 -= b3 + k3 + t1;
925 b3 -= k4 + 6; 925 b3 -= k4 + 6;
926 926
927 927
928 tmp = b3 ^ b0; 928 tmp = b3 ^ b0;
929 b3 = (tmp >> 32) | (tmp << (64 - 32)); 929 b3 = ror64(tmp, 32);
930 b0 -= b3; 930 b0 -= b3;
931 931
932 tmp = b1 ^ b2; 932 tmp = b1 ^ b2;
933 b1 = (tmp >> 32) | (tmp << (64 - 32)); 933 b1 = ror64(tmp, 32);
934 b2 -= b1; 934 b2 -= b1;
935 935
936 tmp = b1 ^ b0; 936 tmp = b1 ^ b0;
937 b1 = (tmp >> 58) | (tmp << (64 - 58)); 937 b1 = ror64(tmp, 58);
938 b0 -= b1; 938 b0 -= b1;
939 939
940 tmp = b3 ^ b2; 940 tmp = b3 ^ b2;
941 b3 = (tmp >> 22) | (tmp << (64 - 22)); 941 b3 = ror64(tmp, 22);
942 b2 -= b3; 942 b2 -= b3;
943 943
944 tmp = b3 ^ b0; 944 tmp = b3 ^ b0;
945 b3 = (tmp >> 46) | (tmp << (64 - 46)); 945 b3 = ror64(tmp, 46);
946 b0 -= b3; 946 b0 -= b3;
947 947
948 tmp = b1 ^ b2; 948 tmp = b1 ^ b2;
949 b1 = (tmp >> 12) | (tmp << (64 - 12)); 949 b1 = ror64(tmp, 12);
950 b2 -= b1; 950 b2 -= b1;
951 951
952 tmp = b1 ^ b0; 952 tmp = b1 ^ b0;
953 b1 = (tmp >> 25) | (tmp << (64 - 25)); 953 b1 = ror64(tmp, 25);
954 b0 -= b1 + k0; 954 b0 -= b1 + k0;
955 b1 -= k1 + t2; 955 b1 -= k1 + t2;
956 956
957 tmp = b3 ^ b2; 957 tmp = b3 ^ b2;
958 b3 = (tmp >> 33) | (tmp << (64 - 33)); 958 b3 = ror64(tmp, 33);
959 b2 -= b3 + k2 + t0; 959 b2 -= b3 + k2 + t0;
960 b3 -= k3 + 5; 960 b3 -= k3 + 5;
961 961
962 tmp = b3 ^ b0; 962 tmp = b3 ^ b0;
963 b3 = (tmp >> 5) | (tmp << (64 - 5)); 963 b3 = ror64(tmp, 5);
964 b0 -= b3; 964 b0 -= b3;
965 965
966 tmp = b1 ^ b2; 966 tmp = b1 ^ b2;
967 b1 = (tmp >> 37) | (tmp << (64 - 37)); 967 b1 = ror64(tmp, 37);
968 b2 -= b1; 968 b2 -= b1;
969 969
970 tmp = b1 ^ b0; 970 tmp = b1 ^ b0;
971 b1 = (tmp >> 23) | (tmp << (64 - 23)); 971 b1 = ror64(tmp, 23);
972 b0 -= b1; 972 b0 -= b1;
973 973
974 tmp = b3 ^ b2; 974 tmp = b3 ^ b2;
975 b3 = (tmp >> 40) | (tmp << (64 - 40)); 975 b3 = ror64(tmp, 40);
976 b2 -= b3; 976 b2 -= b3;
977 977
978 tmp = b3 ^ b0; 978 tmp = b3 ^ b0;
979 b3 = (tmp >> 52) | (tmp << (64 - 52)); 979 b3 = ror64(tmp, 52);
980 b0 -= b3; 980 b0 -= b3;
981 981
982 tmp = b1 ^ b2; 982 tmp = b1 ^ b2;
983 b1 = (tmp >> 57) | (tmp << (64 - 57)); 983 b1 = ror64(tmp, 57);
984 b2 -= b1; 984 b2 -= b1;
985 985
986 tmp = b1 ^ b0; 986 tmp = b1 ^ b0;
987 b1 = (tmp >> 14) | (tmp << (64 - 14)); 987 b1 = ror64(tmp, 14);
988 b0 -= b1 + k4; 988 b0 -= b1 + k4;
989 b1 -= k0 + t1; 989 b1 -= k0 + t1;
990 990
991 tmp = b3 ^ b2; 991 tmp = b3 ^ b2;
992 b3 = (tmp >> 16) | (tmp << (64 - 16)); 992 b3 = ror64(tmp, 16);
993 b2 -= b3 + k1 + t2; 993 b2 -= b3 + k1 + t2;
994 b3 -= k2 + 4; 994 b3 -= k2 + 4;
995 995
996 996
997 tmp = b3 ^ b0; 997 tmp = b3 ^ b0;
998 b3 = (tmp >> 32) | (tmp << (64 - 32)); 998 b3 = ror64(tmp, 32);
999 b0 -= b3; 999 b0 -= b3;
1000 1000
1001 tmp = b1 ^ b2; 1001 tmp = b1 ^ b2;
1002 b1 = (tmp >> 32) | (tmp << (64 - 32)); 1002 b1 = ror64(tmp, 32);
1003 b2 -= b1; 1003 b2 -= b1;
1004 1004
1005 tmp = b1 ^ b0; 1005 tmp = b1 ^ b0;
1006 b1 = (tmp >> 58) | (tmp << (64 - 58)); 1006 b1 = ror64(tmp, 58);
1007 b0 -= b1; 1007 b0 -= b1;
1008 1008
1009 tmp = b3 ^ b2; 1009 tmp = b3 ^ b2;
1010 b3 = (tmp >> 22) | (tmp << (64 - 22)); 1010 b3 = ror64(tmp, 22);
1011 b2 -= b3; 1011 b2 -= b3;
1012 1012
1013 tmp = b3 ^ b0; 1013 tmp = b3 ^ b0;
1014 b3 = (tmp >> 46) | (tmp << (64 - 46)); 1014 b3 = ror64(tmp, 46);
1015 b0 -= b3; 1015 b0 -= b3;
1016 1016
1017 tmp = b1 ^ b2; 1017 tmp = b1 ^ b2;
1018 b1 = (tmp >> 12) | (tmp << (64 - 12)); 1018 b1 = ror64(tmp, 12);
1019 b2 -= b1; 1019 b2 -= b1;
1020 1020
1021 tmp = b1 ^ b0; 1021 tmp = b1 ^ b0;
1022 b1 = (tmp >> 25) | (tmp << (64 - 25)); 1022 b1 = ror64(tmp, 25);
1023 b0 -= b1 + k3; 1023 b0 -= b1 + k3;
1024 b1 -= k4 + t0; 1024 b1 -= k4 + t0;
1025 1025
1026 tmp = b3 ^ b2; 1026 tmp = b3 ^ b2;
1027 b3 = (tmp >> 33) | (tmp << (64 - 33)); 1027 b3 = ror64(tmp, 33);
1028 b2 -= b3 + k0 + t1; 1028 b2 -= b3 + k0 + t1;
1029 b3 -= k1 + 3; 1029 b3 -= k1 + 3;
1030 1030
1031 tmp = b3 ^ b0; 1031 tmp = b3 ^ b0;
1032 b3 = (tmp >> 5) | (tmp << (64 - 5)); 1032 b3 = ror64(tmp, 5);
1033 b0 -= b3; 1033 b0 -= b3;
1034 1034
1035 tmp = b1 ^ b2; 1035 tmp = b1 ^ b2;
1036 b1 = (tmp >> 37) | (tmp << (64 - 37)); 1036 b1 = ror64(tmp, 37);
1037 b2 -= b1; 1037 b2 -= b1;
1038 1038
1039 tmp = b1 ^ b0; 1039 tmp = b1 ^ b0;
1040 b1 = (tmp >> 23) | (tmp << (64 - 23)); 1040 b1 = ror64(tmp, 23);
1041 b0 -= b1; 1041 b0 -= b1;
1042 1042
1043 tmp = b3 ^ b2; 1043 tmp = b3 ^ b2;
1044 b3 = (tmp >> 40) | (tmp << (64 - 40)); 1044 b3 = ror64(tmp, 40);
1045 b2 -= b3; 1045 b2 -= b3;
1046 1046
1047 tmp = b3 ^ b0; 1047 tmp = b3 ^ b0;
1048 b3 = (tmp >> 52) | (tmp << (64 - 52)); 1048 b3 = ror64(tmp, 52);
1049 b0 -= b3; 1049 b0 -= b3;
1050 1050
1051 tmp = b1 ^ b2; 1051 tmp = b1 ^ b2;
1052 b1 = (tmp >> 57) | (tmp << (64 - 57)); 1052 b1 = ror64(tmp, 57);
1053 b2 -= b1; 1053 b2 -= b1;
1054 1054
1055 tmp = b1 ^ b0; 1055 tmp = b1 ^ b0;
1056 b1 = (tmp >> 14) | (tmp << (64 - 14)); 1056 b1 = ror64(tmp, 14);
1057 b0 -= b1 + k2; 1057 b0 -= b1 + k2;
1058 b1 -= k3 + t2; 1058 b1 -= k3 + t2;
1059 1059
1060 tmp = b3 ^ b2; 1060 tmp = b3 ^ b2;
1061 b3 = (tmp >> 16) | (tmp << (64 - 16)); 1061 b3 = ror64(tmp, 16);
1062 b2 -= b3 + k4 + t0; 1062 b2 -= b3 + k4 + t0;
1063 b3 -= k0 + 2; 1063 b3 -= k0 + 2;
1064 1064
1065 1065
1066 tmp = b3 ^ b0; 1066 tmp = b3 ^ b0;
1067 b3 = (tmp >> 32) | (tmp << (64 - 32)); 1067 b3 = ror64(tmp, 32);
1068 b0 -= b3; 1068 b0 -= b3;
1069 1069
1070 tmp = b1 ^ b2; 1070 tmp = b1 ^ b2;
1071 b1 = (tmp >> 32) | (tmp << (64 - 32)); 1071 b1 = ror64(tmp, 32);
1072 b2 -= b1; 1072 b2 -= b1;
1073 1073
1074 tmp = b1 ^ b0; 1074 tmp = b1 ^ b0;
1075 b1 = (tmp >> 58) | (tmp << (64 - 58)); 1075 b1 = ror64(tmp, 58);
1076 b0 -= b1; 1076 b0 -= b1;
1077 1077
1078 tmp = b3 ^ b2; 1078 tmp = b3 ^ b2;
1079 b3 = (tmp >> 22) | (tmp << (64 - 22)); 1079 b3 = ror64(tmp, 22);
1080 b2 -= b3; 1080 b2 -= b3;
1081 1081
1082 tmp = b3 ^ b0; 1082 tmp = b3 ^ b0;
1083 b3 = (tmp >> 46) | (tmp << (64 - 46)); 1083 b3 = ror64(tmp, 46);
1084 b0 -= b3; 1084 b0 -= b3;
1085 1085
1086 tmp = b1 ^ b2; 1086 tmp = b1 ^ b2;
1087 b1 = (tmp >> 12) | (tmp << (64 - 12)); 1087 b1 = ror64(tmp, 12);
1088 b2 -= b1; 1088 b2 -= b1;
1089 1089
1090 tmp = b1 ^ b0; 1090 tmp = b1 ^ b0;
1091 b1 = (tmp >> 25) | (tmp << (64 - 25)); 1091 b1 = ror64(tmp, 25);
1092 b0 -= b1 + k1; 1092 b0 -= b1 + k1;
1093 b1 -= k2 + t1; 1093 b1 -= k2 + t1;
1094 1094
1095 tmp = b3 ^ b2; 1095 tmp = b3 ^ b2;
1096 b3 = (tmp >> 33) | (tmp << (64 - 33)); 1096 b3 = ror64(tmp, 33);
1097 b2 -= b3 + k3 + t2; 1097 b2 -= b3 + k3 + t2;
1098 b3 -= k4 + 1; 1098 b3 -= k4 + 1;
1099 1099
1100 tmp = b3 ^ b0; 1100 tmp = b3 ^ b0;
1101 b3 = (tmp >> 5) | (tmp << (64 - 5)); 1101 b3 = ror64(tmp, 5);
1102 b0 -= b3; 1102 b0 -= b3;
1103 1103
1104 tmp = b1 ^ b2; 1104 tmp = b1 ^ b2;
1105 b1 = (tmp >> 37) | (tmp << (64 - 37)); 1105 b1 = ror64(tmp, 37);
1106 b2 -= b1; 1106 b2 -= b1;
1107 1107
1108 tmp = b1 ^ b0; 1108 tmp = b1 ^ b0;
1109 b1 = (tmp >> 23) | (tmp << (64 - 23)); 1109 b1 = ror64(tmp, 23);
1110 b0 -= b1; 1110 b0 -= b1;
1111 1111
1112 tmp = b3 ^ b2; 1112 tmp = b3 ^ b2;
1113 b3 = (tmp >> 40) | (tmp << (64 - 40)); 1113 b3 = ror64(tmp, 40);
1114 b2 -= b3; 1114 b2 -= b3;
1115 1115
1116 tmp = b3 ^ b0; 1116 tmp = b3 ^ b0;
1117 b3 = (tmp >> 52) | (tmp << (64 - 52)); 1117 b3 = ror64(tmp, 52);
1118 b0 -= b3; 1118 b0 -= b3;
1119 1119
1120 tmp = b1 ^ b2; 1120 tmp = b1 ^ b2;
1121 b1 = (tmp >> 57) | (tmp << (64 - 57)); 1121 b1 = ror64(tmp, 57);
1122 b2 -= b1; 1122 b2 -= b1;
1123 1123
1124 tmp = b1 ^ b0; 1124 tmp = b1 ^ b0;
1125 b1 = (tmp >> 14) | (tmp << (64 - 14)); 1125 b1 = ror64(tmp, 14);
1126 b0 -= b1 + k0; 1126 b0 -= b1 + k0;
1127 b1 -= k1 + t0; 1127 b1 -= k1 + t0;
1128 1128
1129 tmp = b3 ^ b2; 1129 tmp = b3 ^ b2;
1130 b3 = (tmp >> 16) | (tmp << (64 - 16)); 1130 b3 = ror64(tmp, 16);
1131 b2 -= b3 + k2 + t1; 1131 b2 -= b3 + k2 + t1;
1132 b3 -= k3; 1132 b3 -= k3;
1133 1133
@@ -2125,1226 +2125,1226 @@ void threefish_decrypt_512(struct threefish_key *key_ctx, u64 *input,
2125 b7 -= k7 + 18; 2125 b7 -= k7 + 18;
2126 2126
2127 tmp = b3 ^ b4; 2127 tmp = b3 ^ b4;
2128 b3 = (tmp >> 22) | (tmp << (64 - 22)); 2128 b3 = ror64(tmp, 22);
2129 b4 -= b3; 2129 b4 -= b3;
2130 2130
2131 tmp = b5 ^ b2; 2131 tmp = b5 ^ b2;
2132 b5 = (tmp >> 56) | (tmp << (64 - 56)); 2132 b5 = ror64(tmp, 56);
2133 b2 -= b5; 2133 b2 -= b5;
2134 2134
2135 tmp = b7 ^ b0; 2135 tmp = b7 ^ b0;
2136 b7 = (tmp >> 35) | (tmp << (64 - 35)); 2136 b7 = ror64(tmp, 35);
2137 b0 -= b7; 2137 b0 -= b7;
2138 2138
2139 tmp = b1 ^ b6; 2139 tmp = b1 ^ b6;
2140 b1 = (tmp >> 8) | (tmp << (64 - 8)); 2140 b1 = ror64(tmp, 8);
2141 b6 -= b1; 2141 b6 -= b1;
2142 2142
2143 tmp = b7 ^ b2; 2143 tmp = b7 ^ b2;
2144 b7 = (tmp >> 43) | (tmp << (64 - 43)); 2144 b7 = ror64(tmp, 43);
2145 b2 -= b7; 2145 b2 -= b7;
2146 2146
2147 tmp = b5 ^ b0; 2147 tmp = b5 ^ b0;
2148 b5 = (tmp >> 39) | (tmp << (64 - 39)); 2148 b5 = ror64(tmp, 39);
2149 b0 -= b5; 2149 b0 -= b5;
2150 2150
2151 tmp = b3 ^ b6; 2151 tmp = b3 ^ b6;
2152 b3 = (tmp >> 29) | (tmp << (64 - 29)); 2152 b3 = ror64(tmp, 29);
2153 b6 -= b3; 2153 b6 -= b3;
2154 2154
2155 tmp = b1 ^ b4; 2155 tmp = b1 ^ b4;
2156 b1 = (tmp >> 25) | (tmp << (64 - 25)); 2156 b1 = ror64(tmp, 25);
2157 b4 -= b1; 2157 b4 -= b1;
2158 2158
2159 tmp = b3 ^ b0; 2159 tmp = b3 ^ b0;
2160 b3 = (tmp >> 17) | (tmp << (64 - 17)); 2160 b3 = ror64(tmp, 17);
2161 b0 -= b3; 2161 b0 -= b3;
2162 2162
2163 tmp = b5 ^ b6; 2163 tmp = b5 ^ b6;
2164 b5 = (tmp >> 10) | (tmp << (64 - 10)); 2164 b5 = ror64(tmp, 10);
2165 b6 -= b5; 2165 b6 -= b5;
2166 2166
2167 tmp = b7 ^ b4; 2167 tmp = b7 ^ b4;
2168 b7 = (tmp >> 50) | (tmp << (64 - 50)); 2168 b7 = ror64(tmp, 50);
2169 b4 -= b7; 2169 b4 -= b7;
2170 2170
2171 tmp = b1 ^ b2; 2171 tmp = b1 ^ b2;
2172 b1 = (tmp >> 13) | (tmp << (64 - 13)); 2172 b1 = ror64(tmp, 13);
2173 b2 -= b1; 2173 b2 -= b1;
2174 2174
2175 tmp = b7 ^ b6; 2175 tmp = b7 ^ b6;
2176 b7 = (tmp >> 24) | (tmp << (64 - 24)); 2176 b7 = ror64(tmp, 24);
2177 b6 -= b7 + k5 + t0; 2177 b6 -= b7 + k5 + t0;
2178 b7 -= k6 + 17; 2178 b7 -= k6 + 17;
2179 2179
2180 tmp = b5 ^ b4; 2180 tmp = b5 ^ b4;
2181 b5 = (tmp >> 34) | (tmp << (64 - 34)); 2181 b5 = ror64(tmp, 34);
2182 b4 -= b5 + k3; 2182 b4 -= b5 + k3;
2183 b5 -= k4 + t2; 2183 b5 -= k4 + t2;
2184 2184
2185 tmp = b3 ^ b2; 2185 tmp = b3 ^ b2;
2186 b3 = (tmp >> 30) | (tmp << (64 - 30)); 2186 b3 = ror64(tmp, 30);
2187 b2 -= b3 + k1; 2187 b2 -= b3 + k1;
2188 b3 -= k2; 2188 b3 -= k2;
2189 2189
2190 tmp = b1 ^ b0; 2190 tmp = b1 ^ b0;
2191 b1 = (tmp >> 39) | (tmp << (64 - 39)); 2191 b1 = ror64(tmp, 39);
2192 b0 -= b1 + k8; 2192 b0 -= b1 + k8;
2193 b1 -= k0; 2193 b1 -= k0;
2194 2194
2195 tmp = b3 ^ b4; 2195 tmp = b3 ^ b4;
2196 b3 = (tmp >> 56) | (tmp << (64 - 56)); 2196 b3 = ror64(tmp, 56);
2197 b4 -= b3; 2197 b4 -= b3;
2198 2198
2199 tmp = b5 ^ b2; 2199 tmp = b5 ^ b2;
2200 b5 = (tmp >> 54) | (tmp << (64 - 54)); 2200 b5 = ror64(tmp, 54);
2201 b2 -= b5; 2201 b2 -= b5;
2202 2202
2203 tmp = b7 ^ b0; 2203 tmp = b7 ^ b0;
2204 b7 = (tmp >> 9) | (tmp << (64 - 9)); 2204 b7 = ror64(tmp, 9);
2205 b0 -= b7; 2205 b0 -= b7;
2206 2206
2207 tmp = b1 ^ b6; 2207 tmp = b1 ^ b6;
2208 b1 = (tmp >> 44) | (tmp << (64 - 44)); 2208 b1 = ror64(tmp, 44);
2209 b6 -= b1; 2209 b6 -= b1;
2210 2210
2211 tmp = b7 ^ b2; 2211 tmp = b7 ^ b2;
2212 b7 = (tmp >> 39) | (tmp << (64 - 39)); 2212 b7 = ror64(tmp, 39);
2213 b2 -= b7; 2213 b2 -= b7;
2214 2214
2215 tmp = b5 ^ b0; 2215 tmp = b5 ^ b0;
2216 b5 = (tmp >> 36) | (tmp << (64 - 36)); 2216 b5 = ror64(tmp, 36);
2217 b0 -= b5; 2217 b0 -= b5;
2218 2218
2219 tmp = b3 ^ b6; 2219 tmp = b3 ^ b6;
2220 b3 = (tmp >> 49) | (tmp << (64 - 49)); 2220 b3 = ror64(tmp, 49);
2221 b6 -= b3; 2221 b6 -= b3;
2222 2222
2223 tmp = b1 ^ b4; 2223 tmp = b1 ^ b4;
2224 b1 = (tmp >> 17) | (tmp << (64 - 17)); 2224 b1 = ror64(tmp, 17);
2225 b4 -= b1; 2225 b4 -= b1;
2226 2226
2227 tmp = b3 ^ b0; 2227 tmp = b3 ^ b0;
2228 b3 = (tmp >> 42) | (tmp << (64 - 42)); 2228 b3 = ror64(tmp, 42);
2229 b0 -= b3; 2229 b0 -= b3;
2230 2230
2231 tmp = b5 ^ b6; 2231 tmp = b5 ^ b6;
2232 b5 = (tmp >> 14) | (tmp << (64 - 14)); 2232 b5 = ror64(tmp, 14);
2233 b6 -= b5; 2233 b6 -= b5;
2234 2234
2235 tmp = b7 ^ b4; 2235 tmp = b7 ^ b4;
2236 b7 = (tmp >> 27) | (tmp << (64 - 27)); 2236 b7 = ror64(tmp, 27);
2237 b4 -= b7; 2237 b4 -= b7;
2238 2238
2239 tmp = b1 ^ b2; 2239 tmp = b1 ^ b2;
2240 b1 = (tmp >> 33) | (tmp << (64 - 33)); 2240 b1 = ror64(tmp, 33);
2241 b2 -= b1; 2241 b2 -= b1;
2242 2242
2243 tmp = b7 ^ b6; 2243 tmp = b7 ^ b6;
2244 b7 = (tmp >> 37) | (tmp << (64 - 37)); 2244 b7 = ror64(tmp, 37);
2245 b6 -= b7 + k4 + t2; 2245 b6 -= b7 + k4 + t2;
2246 b7 -= k5 + 16; 2246 b7 -= k5 + 16;
2247 2247
2248 tmp = b5 ^ b4; 2248 tmp = b5 ^ b4;
2249 b5 = (tmp >> 19) | (tmp << (64 - 19)); 2249 b5 = ror64(tmp, 19);
2250 b4 -= b5 + k2; 2250 b4 -= b5 + k2;
2251 b5 -= k3 + t1; 2251 b5 -= k3 + t1;
2252 2252
2253 tmp = b3 ^ b2; 2253 tmp = b3 ^ b2;
2254 b3 = (tmp >> 36) | (tmp << (64 - 36)); 2254 b3 = ror64(tmp, 36);
2255 b2 -= b3 + k0; 2255 b2 -= b3 + k0;
2256 b3 -= k1; 2256 b3 -= k1;
2257 2257
2258 tmp = b1 ^ b0; 2258 tmp = b1 ^ b0;
2259 b1 = (tmp >> 46) | (tmp << (64 - 46)); 2259 b1 = ror64(tmp, 46);
2260 b0 -= b1 + k7; 2260 b0 -= b1 + k7;
2261 b1 -= k8; 2261 b1 -= k8;
2262 2262
2263 tmp = b3 ^ b4; 2263 tmp = b3 ^ b4;
2264 b3 = (tmp >> 22) | (tmp << (64 - 22)); 2264 b3 = ror64(tmp, 22);
2265 b4 -= b3; 2265 b4 -= b3;
2266 2266
2267 tmp = b5 ^ b2; 2267 tmp = b5 ^ b2;
2268 b5 = (tmp >> 56) | (tmp << (64 - 56)); 2268 b5 = ror64(tmp, 56);
2269 b2 -= b5; 2269 b2 -= b5;
2270 2270
2271 tmp = b7 ^ b0; 2271 tmp = b7 ^ b0;
2272 b7 = (tmp >> 35) | (tmp << (64 - 35)); 2272 b7 = ror64(tmp, 35);
2273 b0 -= b7; 2273 b0 -= b7;
2274 2274
2275 tmp = b1 ^ b6; 2275 tmp = b1 ^ b6;
2276 b1 = (tmp >> 8) | (tmp << (64 - 8)); 2276 b1 = ror64(tmp, 8);
2277 b6 -= b1; 2277 b6 -= b1;
2278 2278
2279 tmp = b7 ^ b2; 2279 tmp = b7 ^ b2;
2280 b7 = (tmp >> 43) | (tmp << (64 - 43)); 2280 b7 = ror64(tmp, 43);
2281 b2 -= b7; 2281 b2 -= b7;
2282 2282
2283 tmp = b5 ^ b0; 2283 tmp = b5 ^ b0;
2284 b5 = (tmp >> 39) | (tmp << (64 - 39)); 2284 b5 = ror64(tmp, 39);
2285 b0 -= b5; 2285 b0 -= b5;
2286 2286
2287 tmp = b3 ^ b6; 2287 tmp = b3 ^ b6;
2288 b3 = (tmp >> 29) | (tmp << (64 - 29)); 2288 b3 = ror64(tmp, 29);
2289 b6 -= b3; 2289 b6 -= b3;
2290 2290
2291 tmp = b1 ^ b4; 2291 tmp = b1 ^ b4;
2292 b1 = (tmp >> 25) | (tmp << (64 - 25)); 2292 b1 = ror64(tmp, 25);
2293 b4 -= b1; 2293 b4 -= b1;
2294 2294
2295 tmp = b3 ^ b0; 2295 tmp = b3 ^ b0;
2296 b3 = (tmp >> 17) | (tmp << (64 - 17)); 2296 b3 = ror64(tmp, 17);
2297 b0 -= b3; 2297 b0 -= b3;
2298 2298
2299 tmp = b5 ^ b6; 2299 tmp = b5 ^ b6;
2300 b5 = (tmp >> 10) | (tmp << (64 - 10)); 2300 b5 = ror64(tmp, 10);
2301 b6 -= b5; 2301 b6 -= b5;
2302 2302
2303 tmp = b7 ^ b4; 2303 tmp = b7 ^ b4;
2304 b7 = (tmp >> 50) | (tmp << (64 - 50)); 2304 b7 = ror64(tmp, 50);
2305 b4 -= b7; 2305 b4 -= b7;
2306 2306
2307 tmp = b1 ^ b2; 2307 tmp = b1 ^ b2;
2308 b1 = (tmp >> 13) | (tmp << (64 - 13)); 2308 b1 = ror64(tmp, 13);
2309 b2 -= b1; 2309 b2 -= b1;
2310 2310
2311 tmp = b7 ^ b6; 2311 tmp = b7 ^ b6;
2312 b7 = (tmp >> 24) | (tmp << (64 - 24)); 2312 b7 = ror64(tmp, 24);
2313 b6 -= b7 + k3 + t1; 2313 b6 -= b7 + k3 + t1;
2314 b7 -= k4 + 15; 2314 b7 -= k4 + 15;
2315 2315
2316 tmp = b5 ^ b4; 2316 tmp = b5 ^ b4;
2317 b5 = (tmp >> 34) | (tmp << (64 - 34)); 2317 b5 = ror64(tmp, 34);
2318 b4 -= b5 + k1; 2318 b4 -= b5 + k1;
2319 b5 -= k2 + t0; 2319 b5 -= k2 + t0;
2320 2320
2321 tmp = b3 ^ b2; 2321 tmp = b3 ^ b2;
2322 b3 = (tmp >> 30) | (tmp << (64 - 30)); 2322 b3 = ror64(tmp, 30);
2323 b2 -= b3 + k8; 2323 b2 -= b3 + k8;
2324 b3 -= k0; 2324 b3 -= k0;
2325 2325
2326 tmp = b1 ^ b0; 2326 tmp = b1 ^ b0;
2327 b1 = (tmp >> 39) | (tmp << (64 - 39)); 2327 b1 = ror64(tmp, 39);
2328 b0 -= b1 + k6; 2328 b0 -= b1 + k6;
2329 b1 -= k7; 2329 b1 -= k7;
2330 2330
2331 tmp = b3 ^ b4; 2331 tmp = b3 ^ b4;
2332 b3 = (tmp >> 56) | (tmp << (64 - 56)); 2332 b3 = ror64(tmp, 56);
2333 b4 -= b3; 2333 b4 -= b3;
2334 2334
2335 tmp = b5 ^ b2; 2335 tmp = b5 ^ b2;
2336 b5 = (tmp >> 54) | (tmp << (64 - 54)); 2336 b5 = ror64(tmp, 54);
2337 b2 -= b5; 2337 b2 -= b5;
2338 2338
2339 tmp = b7 ^ b0; 2339 tmp = b7 ^ b0;
2340 b7 = (tmp >> 9) | (tmp << (64 - 9)); 2340 b7 = ror64(tmp, 9);
2341 b0 -= b7; 2341 b0 -= b7;
2342 2342
2343 tmp = b1 ^ b6; 2343 tmp = b1 ^ b6;
2344 b1 = (tmp >> 44) | (tmp << (64 - 44)); 2344 b1 = ror64(tmp, 44);
2345 b6 -= b1; 2345 b6 -= b1;
2346 2346
2347 tmp = b7 ^ b2; 2347 tmp = b7 ^ b2;
2348 b7 = (tmp >> 39) | (tmp << (64 - 39)); 2348 b7 = ror64(tmp, 39);
2349 b2 -= b7; 2349 b2 -= b7;
2350 2350
2351 tmp = b5 ^ b0; 2351 tmp = b5 ^ b0;
2352 b5 = (tmp >> 36) | (tmp << (64 - 36)); 2352 b5 = ror64(tmp, 36);
2353 b0 -= b5; 2353 b0 -= b5;
2354 2354
2355 tmp = b3 ^ b6; 2355 tmp = b3 ^ b6;
2356 b3 = (tmp >> 49) | (tmp << (64 - 49)); 2356 b3 = ror64(tmp, 49);
2357 b6 -= b3; 2357 b6 -= b3;
2358 2358
2359 tmp = b1 ^ b4; 2359 tmp = b1 ^ b4;
2360 b1 = (tmp >> 17) | (tmp << (64 - 17)); 2360 b1 = ror64(tmp, 17);
2361 b4 -= b1; 2361 b4 -= b1;
2362 2362
2363 tmp = b3 ^ b0; 2363 tmp = b3 ^ b0;
2364 b3 = (tmp >> 42) | (tmp << (64 - 42)); 2364 b3 = ror64(tmp, 42);
2365 b0 -= b3; 2365 b0 -= b3;
2366 2366
2367 tmp = b5 ^ b6; 2367 tmp = b5 ^ b6;
2368 b5 = (tmp >> 14) | (tmp << (64 - 14)); 2368 b5 = ror64(tmp, 14);
2369 b6 -= b5; 2369 b6 -= b5;
2370 2370
2371 tmp = b7 ^ b4; 2371 tmp = b7 ^ b4;
2372 b7 = (tmp >> 27) | (tmp << (64 - 27)); 2372 b7 = ror64(tmp, 27);
2373 b4 -= b7; 2373 b4 -= b7;
2374 2374
2375 tmp = b1 ^ b2; 2375 tmp = b1 ^ b2;
2376 b1 = (tmp >> 33) | (tmp << (64 - 33)); 2376 b1 = ror64(tmp, 33);
2377 b2 -= b1; 2377 b2 -= b1;
2378 2378
2379 tmp = b7 ^ b6; 2379 tmp = b7 ^ b6;
2380 b7 = (tmp >> 37) | (tmp << (64 - 37)); 2380 b7 = ror64(tmp, 37);
2381 b6 -= b7 + k2 + t0; 2381 b6 -= b7 + k2 + t0;
2382 b7 -= k3 + 14; 2382 b7 -= k3 + 14;
2383 2383
2384 tmp = b5 ^ b4; 2384 tmp = b5 ^ b4;
2385 b5 = (tmp >> 19) | (tmp << (64 - 19)); 2385 b5 = ror64(tmp, 19);
2386 b4 -= b5 + k0; 2386 b4 -= b5 + k0;
2387 b5 -= k1 + t2; 2387 b5 -= k1 + t2;
2388 2388
2389 tmp = b3 ^ b2; 2389 tmp = b3 ^ b2;
2390 b3 = (tmp >> 36) | (tmp << (64 - 36)); 2390 b3 = ror64(tmp, 36);
2391 b2 -= b3 + k7; 2391 b2 -= b3 + k7;
2392 b3 -= k8; 2392 b3 -= k8;
2393 2393
2394 tmp = b1 ^ b0; 2394 tmp = b1 ^ b0;
2395 b1 = (tmp >> 46) | (tmp << (64 - 46)); 2395 b1 = ror64(tmp, 46);
2396 b0 -= b1 + k5; 2396 b0 -= b1 + k5;
2397 b1 -= k6; 2397 b1 -= k6;
2398 2398
2399 tmp = b3 ^ b4; 2399 tmp = b3 ^ b4;
2400 b3 = (tmp >> 22) | (tmp << (64 - 22)); 2400 b3 = ror64(tmp, 22);
2401 b4 -= b3; 2401 b4 -= b3;
2402 2402
2403 tmp = b5 ^ b2; 2403 tmp = b5 ^ b2;
2404 b5 = (tmp >> 56) | (tmp << (64 - 56)); 2404 b5 = ror64(tmp, 56);
2405 b2 -= b5; 2405 b2 -= b5;
2406 2406
2407 tmp = b7 ^ b0; 2407 tmp = b7 ^ b0;
2408 b7 = (tmp >> 35) | (tmp << (64 - 35)); 2408 b7 = ror64(tmp, 35);
2409 b0 -= b7; 2409 b0 -= b7;
2410 2410
2411 tmp = b1 ^ b6; 2411 tmp = b1 ^ b6;
2412 b1 = (tmp >> 8) | (tmp << (64 - 8)); 2412 b1 = ror64(tmp, 8);
2413 b6 -= b1; 2413 b6 -= b1;
2414 2414
2415 tmp = b7 ^ b2; 2415 tmp = b7 ^ b2;
2416 b7 = (tmp >> 43) | (tmp << (64 - 43)); 2416 b7 = ror64(tmp, 43);
2417 b2 -= b7; 2417 b2 -= b7;
2418 2418
2419 tmp = b5 ^ b0; 2419 tmp = b5 ^ b0;
2420 b5 = (tmp >> 39) | (tmp << (64 - 39)); 2420 b5 = ror64(tmp, 39);
2421 b0 -= b5; 2421 b0 -= b5;
2422 2422
2423 tmp = b3 ^ b6; 2423 tmp = b3 ^ b6;
2424 b3 = (tmp >> 29) | (tmp << (64 - 29)); 2424 b3 = ror64(tmp, 29);
2425 b6 -= b3; 2425 b6 -= b3;
2426 2426
2427 tmp = b1 ^ b4; 2427 tmp = b1 ^ b4;
2428 b1 = (tmp >> 25) | (tmp << (64 - 25)); 2428 b1 = ror64(tmp, 25);
2429 b4 -= b1; 2429 b4 -= b1;
2430 2430
2431 tmp = b3 ^ b0; 2431 tmp = b3 ^ b0;
2432 b3 = (tmp >> 17) | (tmp << (64 - 17)); 2432 b3 = ror64(tmp, 17);
2433 b0 -= b3; 2433 b0 -= b3;
2434 2434
2435 tmp = b5 ^ b6; 2435 tmp = b5 ^ b6;
2436 b5 = (tmp >> 10) | (tmp << (64 - 10)); 2436 b5 = ror64(tmp, 10);
2437 b6 -= b5; 2437 b6 -= b5;
2438 2438
2439 tmp = b7 ^ b4; 2439 tmp = b7 ^ b4;
2440 b7 = (tmp >> 50) | (tmp << (64 - 50)); 2440 b7 = ror64(tmp, 50);
2441 b4 -= b7; 2441 b4 -= b7;
2442 2442
2443 tmp = b1 ^ b2; 2443 tmp = b1 ^ b2;
2444 b1 = (tmp >> 13) | (tmp << (64 - 13)); 2444 b1 = ror64(tmp, 13);
2445 b2 -= b1; 2445 b2 -= b1;
2446 2446
2447 tmp = b7 ^ b6; 2447 tmp = b7 ^ b6;
2448 b7 = (tmp >> 24) | (tmp << (64 - 24)); 2448 b7 = ror64(tmp, 24);
2449 b6 -= b7 + k1 + t2; 2449 b6 -= b7 + k1 + t2;
2450 b7 -= k2 + 13; 2450 b7 -= k2 + 13;
2451 2451
2452 tmp = b5 ^ b4; 2452 tmp = b5 ^ b4;
2453 b5 = (tmp >> 34) | (tmp << (64 - 34)); 2453 b5 = ror64(tmp, 34);
2454 b4 -= b5 + k8; 2454 b4 -= b5 + k8;
2455 b5 -= k0 + t1; 2455 b5 -= k0 + t1;
2456 2456
2457 tmp = b3 ^ b2; 2457 tmp = b3 ^ b2;
2458 b3 = (tmp >> 30) | (tmp << (64 - 30)); 2458 b3 = ror64(tmp, 30);
2459 b2 -= b3 + k6; 2459 b2 -= b3 + k6;
2460 b3 -= k7; 2460 b3 -= k7;
2461 2461
2462 tmp = b1 ^ b0; 2462 tmp = b1 ^ b0;
2463 b1 = (tmp >> 39) | (tmp << (64 - 39)); 2463 b1 = ror64(tmp, 39);
2464 b0 -= b1 + k4; 2464 b0 -= b1 + k4;
2465 b1 -= k5; 2465 b1 -= k5;
2466 2466
2467 tmp = b3 ^ b4; 2467 tmp = b3 ^ b4;
2468 b3 = (tmp >> 56) | (tmp << (64 - 56)); 2468 b3 = ror64(tmp, 56);
2469 b4 -= b3; 2469 b4 -= b3;
2470 2470
2471 tmp = b5 ^ b2; 2471 tmp = b5 ^ b2;
2472 b5 = (tmp >> 54) | (tmp << (64 - 54)); 2472 b5 = ror64(tmp, 54);
2473 b2 -= b5; 2473 b2 -= b5;
2474 2474
2475 tmp = b7 ^ b0; 2475 tmp = b7 ^ b0;
2476 b7 = (tmp >> 9) | (tmp << (64 - 9)); 2476 b7 = ror64(tmp, 9);
2477 b0 -= b7; 2477 b0 -= b7;
2478 2478
2479 tmp = b1 ^ b6; 2479 tmp = b1 ^ b6;
2480 b1 = (tmp >> 44) | (tmp << (64 - 44)); 2480 b1 = ror64(tmp, 44);
2481 b6 -= b1; 2481 b6 -= b1;
2482 2482
2483 tmp = b7 ^ b2; 2483 tmp = b7 ^ b2;
2484 b7 = (tmp >> 39) | (tmp << (64 - 39)); 2484 b7 = ror64(tmp, 39);
2485 b2 -= b7; 2485 b2 -= b7;
2486 2486
2487 tmp = b5 ^ b0; 2487 tmp = b5 ^ b0;
2488 b5 = (tmp >> 36) | (tmp << (64 - 36)); 2488 b5 = ror64(tmp, 36);
2489 b0 -= b5; 2489 b0 -= b5;
2490 2490
2491 tmp = b3 ^ b6; 2491 tmp = b3 ^ b6;
2492 b3 = (tmp >> 49) | (tmp << (64 - 49)); 2492 b3 = ror64(tmp, 49);
2493 b6 -= b3; 2493 b6 -= b3;
2494 2494
2495 tmp = b1 ^ b4; 2495 tmp = b1 ^ b4;
2496 b1 = (tmp >> 17) | (tmp << (64 - 17)); 2496 b1 = ror64(tmp, 17);
2497 b4 -= b1; 2497 b4 -= b1;
2498 2498
2499 tmp = b3 ^ b0; 2499 tmp = b3 ^ b0;
2500 b3 = (tmp >> 42) | (tmp << (64 - 42)); 2500 b3 = ror64(tmp, 42);
2501 b0 -= b3; 2501 b0 -= b3;
2502 2502
2503 tmp = b5 ^ b6; 2503 tmp = b5 ^ b6;
2504 b5 = (tmp >> 14) | (tmp << (64 - 14)); 2504 b5 = ror64(tmp, 14);
2505 b6 -= b5; 2505 b6 -= b5;
2506 2506
2507 tmp = b7 ^ b4; 2507 tmp = b7 ^ b4;
2508 b7 = (tmp >> 27) | (tmp << (64 - 27)); 2508 b7 = ror64(tmp, 27);
2509 b4 -= b7; 2509 b4 -= b7;
2510 2510
2511 tmp = b1 ^ b2; 2511 tmp = b1 ^ b2;
2512 b1 = (tmp >> 33) | (tmp << (64 - 33)); 2512 b1 = ror64(tmp, 33);
2513 b2 -= b1; 2513 b2 -= b1;
2514 2514
2515 tmp = b7 ^ b6; 2515 tmp = b7 ^ b6;
2516 b7 = (tmp >> 37) | (tmp << (64 - 37)); 2516 b7 = ror64(tmp, 37);
2517 b6 -= b7 + k0 + t1; 2517 b6 -= b7 + k0 + t1;
2518 b7 -= k1 + 12; 2518 b7 -= k1 + 12;
2519 2519
2520 tmp = b5 ^ b4; 2520 tmp = b5 ^ b4;
2521 b5 = (tmp >> 19) | (tmp << (64 - 19)); 2521 b5 = ror64(tmp, 19);
2522 b4 -= b5 + k7; 2522 b4 -= b5 + k7;
2523 b5 -= k8 + t0; 2523 b5 -= k8 + t0;
2524 2524
2525 tmp = b3 ^ b2; 2525 tmp = b3 ^ b2;
2526 b3 = (tmp >> 36) | (tmp << (64 - 36)); 2526 b3 = ror64(tmp, 36);
2527 b2 -= b3 + k5; 2527 b2 -= b3 + k5;
2528 b3 -= k6; 2528 b3 -= k6;
2529 2529
2530 tmp = b1 ^ b0; 2530 tmp = b1 ^ b0;
2531 b1 = (tmp >> 46) | (tmp << (64 - 46)); 2531 b1 = ror64(tmp, 46);
2532 b0 -= b1 + k3; 2532 b0 -= b1 + k3;
2533 b1 -= k4; 2533 b1 -= k4;
2534 2534
2535 tmp = b3 ^ b4; 2535 tmp = b3 ^ b4;
2536 b3 = (tmp >> 22) | (tmp << (64 - 22)); 2536 b3 = ror64(tmp, 22);
2537 b4 -= b3; 2537 b4 -= b3;
2538 2538
2539 tmp = b5 ^ b2; 2539 tmp = b5 ^ b2;
2540 b5 = (tmp >> 56) | (tmp << (64 - 56)); 2540 b5 = ror64(tmp, 56);
2541 b2 -= b5; 2541 b2 -= b5;
2542 2542
2543 tmp = b7 ^ b0; 2543 tmp = b7 ^ b0;
2544 b7 = (tmp >> 35) | (tmp << (64 - 35)); 2544 b7 = ror64(tmp, 35);
2545 b0 -= b7; 2545 b0 -= b7;
2546 2546
2547 tmp = b1 ^ b6; 2547 tmp = b1 ^ b6;
2548 b1 = (tmp >> 8) | (tmp << (64 - 8)); 2548 b1 = ror64(tmp, 8);
2549 b6 -= b1; 2549 b6 -= b1;
2550 2550
2551 tmp = b7 ^ b2; 2551 tmp = b7 ^ b2;
2552 b7 = (tmp >> 43) | (tmp << (64 - 43)); 2552 b7 = ror64(tmp, 43);
2553 b2 -= b7; 2553 b2 -= b7;
2554 2554
2555 tmp = b5 ^ b0; 2555 tmp = b5 ^ b0;
2556 b5 = (tmp >> 39) | (tmp << (64 - 39)); 2556 b5 = ror64(tmp, 39);
2557 b0 -= b5; 2557 b0 -= b5;
2558 2558
2559 tmp = b3 ^ b6; 2559 tmp = b3 ^ b6;
2560 b3 = (tmp >> 29) | (tmp << (64 - 29)); 2560 b3 = ror64(tmp, 29);
2561 b6 -= b3; 2561 b6 -= b3;
2562 2562
2563 tmp = b1 ^ b4; 2563 tmp = b1 ^ b4;
2564 b1 = (tmp >> 25) | (tmp << (64 - 25)); 2564 b1 = ror64(tmp, 25);
2565 b4 -= b1; 2565 b4 -= b1;
2566 2566
2567 tmp = b3 ^ b0; 2567 tmp = b3 ^ b0;
2568 b3 = (tmp >> 17) | (tmp << (64 - 17)); 2568 b3 = ror64(tmp, 17);
2569 b0 -= b3; 2569 b0 -= b3;
2570 2570
2571 tmp = b5 ^ b6; 2571 tmp = b5 ^ b6;
2572 b5 = (tmp >> 10) | (tmp << (64 - 10)); 2572 b5 = ror64(tmp, 10);
2573 b6 -= b5; 2573 b6 -= b5;
2574 2574
2575 tmp = b7 ^ b4; 2575 tmp = b7 ^ b4;
2576 b7 = (tmp >> 50) | (tmp << (64 - 50)); 2576 b7 = ror64(tmp, 50);
2577 b4 -= b7; 2577 b4 -= b7;
2578 2578
2579 tmp = b1 ^ b2; 2579 tmp = b1 ^ b2;
2580 b1 = (tmp >> 13) | (tmp << (64 - 13)); 2580 b1 = ror64(tmp, 13);
2581 b2 -= b1; 2581 b2 -= b1;
2582 2582
2583 tmp = b7 ^ b6; 2583 tmp = b7 ^ b6;
2584 b7 = (tmp >> 24) | (tmp << (64 - 24)); 2584 b7 = ror64(tmp, 24);
2585 b6 -= b7 + k8 + t0; 2585 b6 -= b7 + k8 + t0;
2586 b7 -= k0 + 11; 2586 b7 -= k0 + 11;
2587 2587
2588 tmp = b5 ^ b4; 2588 tmp = b5 ^ b4;
2589 b5 = (tmp >> 34) | (tmp << (64 - 34)); 2589 b5 = ror64(tmp, 34);
2590 b4 -= b5 + k6; 2590 b4 -= b5 + k6;
2591 b5 -= k7 + t2; 2591 b5 -= k7 + t2;
2592 2592
2593 tmp = b3 ^ b2; 2593 tmp = b3 ^ b2;
2594 b3 = (tmp >> 30) | (tmp << (64 - 30)); 2594 b3 = ror64(tmp, 30);
2595 b2 -= b3 + k4; 2595 b2 -= b3 + k4;
2596 b3 -= k5; 2596 b3 -= k5;
2597 2597
2598 tmp = b1 ^ b0; 2598 tmp = b1 ^ b0;
2599 b1 = (tmp >> 39) | (tmp << (64 - 39)); 2599 b1 = ror64(tmp, 39);
2600 b0 -= b1 + k2; 2600 b0 -= b1 + k2;
2601 b1 -= k3; 2601 b1 -= k3;
2602 2602
2603 tmp = b3 ^ b4; 2603 tmp = b3 ^ b4;
2604 b3 = (tmp >> 56) | (tmp << (64 - 56)); 2604 b3 = ror64(tmp, 56);
2605 b4 -= b3; 2605 b4 -= b3;
2606 2606
2607 tmp = b5 ^ b2; 2607 tmp = b5 ^ b2;
2608 b5 = (tmp >> 54) | (tmp << (64 - 54)); 2608 b5 = ror64(tmp, 54);
2609 b2 -= b5; 2609 b2 -= b5;
2610 2610
2611 tmp = b7 ^ b0; 2611 tmp = b7 ^ b0;
2612 b7 = (tmp >> 9) | (tmp << (64 - 9)); 2612 b7 = ror64(tmp, 9);
2613 b0 -= b7; 2613 b0 -= b7;
2614 2614
2615 tmp = b1 ^ b6; 2615 tmp = b1 ^ b6;
2616 b1 = (tmp >> 44) | (tmp << (64 - 44)); 2616 b1 = ror64(tmp, 44);
2617 b6 -= b1; 2617 b6 -= b1;
2618 2618
2619 tmp = b7 ^ b2; 2619 tmp = b7 ^ b2;
2620 b7 = (tmp >> 39) | (tmp << (64 - 39)); 2620 b7 = ror64(tmp, 39);
2621 b2 -= b7; 2621 b2 -= b7;
2622 2622
2623 tmp = b5 ^ b0; 2623 tmp = b5 ^ b0;
2624 b5 = (tmp >> 36) | (tmp << (64 - 36)); 2624 b5 = ror64(tmp, 36);
2625 b0 -= b5; 2625 b0 -= b5;
2626 2626
2627 tmp = b3 ^ b6; 2627 tmp = b3 ^ b6;
2628 b3 = (tmp >> 49) | (tmp << (64 - 49)); 2628 b3 = ror64(tmp, 49);
2629 b6 -= b3; 2629 b6 -= b3;
2630 2630
2631 tmp = b1 ^ b4; 2631 tmp = b1 ^ b4;
2632 b1 = (tmp >> 17) | (tmp << (64 - 17)); 2632 b1 = ror64(tmp, 17);
2633 b4 -= b1; 2633 b4 -= b1;
2634 2634
2635 tmp = b3 ^ b0; 2635 tmp = b3 ^ b0;
2636 b3 = (tmp >> 42) | (tmp << (64 - 42)); 2636 b3 = ror64(tmp, 42);
2637 b0 -= b3; 2637 b0 -= b3;
2638 2638
2639 tmp = b5 ^ b6; 2639 tmp = b5 ^ b6;
2640 b5 = (tmp >> 14) | (tmp << (64 - 14)); 2640 b5 = ror64(tmp, 14);
2641 b6 -= b5; 2641 b6 -= b5;
2642 2642
2643 tmp = b7 ^ b4; 2643 tmp = b7 ^ b4;
2644 b7 = (tmp >> 27) | (tmp << (64 - 27)); 2644 b7 = ror64(tmp, 27);
2645 b4 -= b7; 2645 b4 -= b7;
2646 2646
2647 tmp = b1 ^ b2; 2647 tmp = b1 ^ b2;
2648 b1 = (tmp >> 33) | (tmp << (64 - 33)); 2648 b1 = ror64(tmp, 33);
2649 b2 -= b1; 2649 b2 -= b1;
2650 2650
2651 tmp = b7 ^ b6; 2651 tmp = b7 ^ b6;
2652 b7 = (tmp >> 37) | (tmp << (64 - 37)); 2652 b7 = ror64(tmp, 37);
2653 b6 -= b7 + k7 + t2; 2653 b6 -= b7 + k7 + t2;
2654 b7 -= k8 + 10; 2654 b7 -= k8 + 10;
2655 2655
2656 tmp = b5 ^ b4; 2656 tmp = b5 ^ b4;
2657 b5 = (tmp >> 19) | (tmp << (64 - 19)); 2657 b5 = ror64(tmp, 19);
2658 b4 -= b5 + k5; 2658 b4 -= b5 + k5;
2659 b5 -= k6 + t1; 2659 b5 -= k6 + t1;
2660 2660
2661 tmp = b3 ^ b2; 2661 tmp = b3 ^ b2;
2662 b3 = (tmp >> 36) | (tmp << (64 - 36)); 2662 b3 = ror64(tmp, 36);
2663 b2 -= b3 + k3; 2663 b2 -= b3 + k3;
2664 b3 -= k4; 2664 b3 -= k4;
2665 2665
2666 tmp = b1 ^ b0; 2666 tmp = b1 ^ b0;
2667 b1 = (tmp >> 46) | (tmp << (64 - 46)); 2667 b1 = ror64(tmp, 46);
2668 b0 -= b1 + k1; 2668 b0 -= b1 + k1;
2669 b1 -= k2; 2669 b1 -= k2;
2670 2670
2671 tmp = b3 ^ b4; 2671 tmp = b3 ^ b4;
2672 b3 = (tmp >> 22) | (tmp << (64 - 22)); 2672 b3 = ror64(tmp, 22);
2673 b4 -= b3; 2673 b4 -= b3;
2674 2674
2675 tmp = b5 ^ b2; 2675 tmp = b5 ^ b2;
2676 b5 = (tmp >> 56) | (tmp << (64 - 56)); 2676 b5 = ror64(tmp, 56);
2677 b2 -= b5; 2677 b2 -= b5;
2678 2678
2679 tmp = b7 ^ b0; 2679 tmp = b7 ^ b0;
2680 b7 = (tmp >> 35) | (tmp << (64 - 35)); 2680 b7 = ror64(tmp, 35);
2681 b0 -= b7; 2681 b0 -= b7;
2682 2682
2683 tmp = b1 ^ b6; 2683 tmp = b1 ^ b6;
2684 b1 = (tmp >> 8) | (tmp << (64 - 8)); 2684 b1 = ror64(tmp, 8);
2685 b6 -= b1; 2685 b6 -= b1;
2686 2686
2687 tmp = b7 ^ b2; 2687 tmp = b7 ^ b2;
2688 b7 = (tmp >> 43) | (tmp << (64 - 43)); 2688 b7 = ror64(tmp, 43);
2689 b2 -= b7; 2689 b2 -= b7;
2690 2690
2691 tmp = b5 ^ b0; 2691 tmp = b5 ^ b0;
2692 b5 = (tmp >> 39) | (tmp << (64 - 39)); 2692 b5 = ror64(tmp, 39);
2693 b0 -= b5; 2693 b0 -= b5;
2694 2694
2695 tmp = b3 ^ b6; 2695 tmp = b3 ^ b6;
2696 b3 = (tmp >> 29) | (tmp << (64 - 29)); 2696 b3 = ror64(tmp, 29);
2697 b6 -= b3; 2697 b6 -= b3;
2698 2698
2699 tmp = b1 ^ b4; 2699 tmp = b1 ^ b4;
2700 b1 = (tmp >> 25) | (tmp << (64 - 25)); 2700 b1 = ror64(tmp, 25);
2701 b4 -= b1; 2701 b4 -= b1;
2702 2702
2703 tmp = b3 ^ b0; 2703 tmp = b3 ^ b0;
2704 b3 = (tmp >> 17) | (tmp << (64 - 17)); 2704 b3 = ror64(tmp, 17);
2705 b0 -= b3; 2705 b0 -= b3;
2706 2706
2707 tmp = b5 ^ b6; 2707 tmp = b5 ^ b6;
2708 b5 = (tmp >> 10) | (tmp << (64 - 10)); 2708 b5 = ror64(tmp, 10);
2709 b6 -= b5; 2709 b6 -= b5;
2710 2710
2711 tmp = b7 ^ b4; 2711 tmp = b7 ^ b4;
2712 b7 = (tmp >> 50) | (tmp << (64 - 50)); 2712 b7 = ror64(tmp, 50);
2713 b4 -= b7; 2713 b4 -= b7;
2714 2714
2715 tmp = b1 ^ b2; 2715 tmp = b1 ^ b2;
2716 b1 = (tmp >> 13) | (tmp << (64 - 13)); 2716 b1 = ror64(tmp, 13);
2717 b2 -= b1; 2717 b2 -= b1;
2718 2718
2719 tmp = b7 ^ b6; 2719 tmp = b7 ^ b6;
2720 b7 = (tmp >> 24) | (tmp << (64 - 24)); 2720 b7 = ror64(tmp, 24);
2721 b6 -= b7 + k6 + t1; 2721 b6 -= b7 + k6 + t1;
2722 b7 -= k7 + 9; 2722 b7 -= k7 + 9;
2723 2723
2724 tmp = b5 ^ b4; 2724 tmp = b5 ^ b4;
2725 b5 = (tmp >> 34) | (tmp << (64 - 34)); 2725 b5 = ror64(tmp, 34);
2726 b4 -= b5 + k4; 2726 b4 -= b5 + k4;
2727 b5 -= k5 + t0; 2727 b5 -= k5 + t0;
2728 2728
2729 tmp = b3 ^ b2; 2729 tmp = b3 ^ b2;
2730 b3 = (tmp >> 30) | (tmp << (64 - 30)); 2730 b3 = ror64(tmp, 30);
2731 b2 -= b3 + k2; 2731 b2 -= b3 + k2;
2732 b3 -= k3; 2732 b3 -= k3;
2733 2733
2734 tmp = b1 ^ b0; 2734 tmp = b1 ^ b0;
2735 b1 = (tmp >> 39) | (tmp << (64 - 39)); 2735 b1 = ror64(tmp, 39);
2736 b0 -= b1 + k0; 2736 b0 -= b1 + k0;
2737 b1 -= k1; 2737 b1 -= k1;
2738 2738
2739 tmp = b3 ^ b4; 2739 tmp = b3 ^ b4;
2740 b3 = (tmp >> 56) | (tmp << (64 - 56)); 2740 b3 = ror64(tmp, 56);
2741 b4 -= b3; 2741 b4 -= b3;
2742 2742
2743 tmp = b5 ^ b2; 2743 tmp = b5 ^ b2;
2744 b5 = (tmp >> 54) | (tmp << (64 - 54)); 2744 b5 = ror64(tmp, 54);
2745 b2 -= b5; 2745 b2 -= b5;
2746 2746
2747 tmp = b7 ^ b0; 2747 tmp = b7 ^ b0;
2748 b7 = (tmp >> 9) | (tmp << (64 - 9)); 2748 b7 = ror64(tmp, 9);
2749 b0 -= b7; 2749 b0 -= b7;
2750 2750
2751 tmp = b1 ^ b6; 2751 tmp = b1 ^ b6;
2752 b1 = (tmp >> 44) | (tmp << (64 - 44)); 2752 b1 = ror64(tmp, 44);
2753 b6 -= b1; 2753 b6 -= b1;
2754 2754
2755 tmp = b7 ^ b2; 2755 tmp = b7 ^ b2;
2756 b7 = (tmp >> 39) | (tmp << (64 - 39)); 2756 b7 = ror64(tmp, 39);
2757 b2 -= b7; 2757 b2 -= b7;
2758 2758
2759 tmp = b5 ^ b0; 2759 tmp = b5 ^ b0;
2760 b5 = (tmp >> 36) | (tmp << (64 - 36)); 2760 b5 = ror64(tmp, 36);
2761 b0 -= b5; 2761 b0 -= b5;
2762 2762
2763 tmp = b3 ^ b6; 2763 tmp = b3 ^ b6;
2764 b3 = (tmp >> 49) | (tmp << (64 - 49)); 2764 b3 = ror64(tmp, 49);
2765 b6 -= b3; 2765 b6 -= b3;
2766 2766
2767 tmp = b1 ^ b4; 2767 tmp = b1 ^ b4;
2768 b1 = (tmp >> 17) | (tmp << (64 - 17)); 2768 b1 = ror64(tmp, 17);
2769 b4 -= b1; 2769 b4 -= b1;
2770 2770
2771 tmp = b3 ^ b0; 2771 tmp = b3 ^ b0;
2772 b3 = (tmp >> 42) | (tmp << (64 - 42)); 2772 b3 = ror64(tmp, 42);
2773 b0 -= b3; 2773 b0 -= b3;
2774 2774
2775 tmp = b5 ^ b6; 2775 tmp = b5 ^ b6;
2776 b5 = (tmp >> 14) | (tmp << (64 - 14)); 2776 b5 = ror64(tmp, 14);
2777 b6 -= b5; 2777 b6 -= b5;
2778 2778
2779 tmp = b7 ^ b4; 2779 tmp = b7 ^ b4;
2780 b7 = (tmp >> 27) | (tmp << (64 - 27)); 2780 b7 = ror64(tmp, 27);
2781 b4 -= b7; 2781 b4 -= b7;
2782 2782
2783 tmp = b1 ^ b2; 2783 tmp = b1 ^ b2;
2784 b1 = (tmp >> 33) | (tmp << (64 - 33)); 2784 b1 = ror64(tmp, 33);
2785 b2 -= b1; 2785 b2 -= b1;
2786 2786
2787 tmp = b7 ^ b6; 2787 tmp = b7 ^ b6;
2788 b7 = (tmp >> 37) | (tmp << (64 - 37)); 2788 b7 = ror64(tmp, 37);
2789 b6 -= b7 + k5 + t0; 2789 b6 -= b7 + k5 + t0;
2790 b7 -= k6 + 8; 2790 b7 -= k6 + 8;
2791 2791
2792 tmp = b5 ^ b4; 2792 tmp = b5 ^ b4;
2793 b5 = (tmp >> 19) | (tmp << (64 - 19)); 2793 b5 = ror64(tmp, 19);
2794 b4 -= b5 + k3; 2794 b4 -= b5 + k3;
2795 b5 -= k4 + t2; 2795 b5 -= k4 + t2;
2796 2796
2797 tmp = b3 ^ b2; 2797 tmp = b3 ^ b2;
2798 b3 = (tmp >> 36) | (tmp << (64 - 36)); 2798 b3 = ror64(tmp, 36);
2799 b2 -= b3 + k1; 2799 b2 -= b3 + k1;
2800 b3 -= k2; 2800 b3 -= k2;
2801 2801
2802 tmp = b1 ^ b0; 2802 tmp = b1 ^ b0;
2803 b1 = (tmp >> 46) | (tmp << (64 - 46)); 2803 b1 = ror64(tmp, 46);
2804 b0 -= b1 + k8; 2804 b0 -= b1 + k8;
2805 b1 -= k0; 2805 b1 -= k0;
2806 2806
2807 tmp = b3 ^ b4; 2807 tmp = b3 ^ b4;
2808 b3 = (tmp >> 22) | (tmp << (64 - 22)); 2808 b3 = ror64(tmp, 22);
2809 b4 -= b3; 2809 b4 -= b3;
2810 2810
2811 tmp = b5 ^ b2; 2811 tmp = b5 ^ b2;
2812 b5 = (tmp >> 56) | (tmp << (64 - 56)); 2812 b5 = ror64(tmp, 56);
2813 b2 -= b5; 2813 b2 -= b5;
2814 2814
2815 tmp = b7 ^ b0; 2815 tmp = b7 ^ b0;
2816 b7 = (tmp >> 35) | (tmp << (64 - 35)); 2816 b7 = ror64(tmp, 35);
2817 b0 -= b7; 2817 b0 -= b7;
2818 2818
2819 tmp = b1 ^ b6; 2819 tmp = b1 ^ b6;
2820 b1 = (tmp >> 8) | (tmp << (64 - 8)); 2820 b1 = ror64(tmp, 8);
2821 b6 -= b1; 2821 b6 -= b1;
2822 2822
2823 tmp = b7 ^ b2; 2823 tmp = b7 ^ b2;
2824 b7 = (tmp >> 43) | (tmp << (64 - 43)); 2824 b7 = ror64(tmp, 43);
2825 b2 -= b7; 2825 b2 -= b7;
2826 2826
2827 tmp = b5 ^ b0; 2827 tmp = b5 ^ b0;
2828 b5 = (tmp >> 39) | (tmp << (64 - 39)); 2828 b5 = ror64(tmp, 39);
2829 b0 -= b5; 2829 b0 -= b5;
2830 2830
2831 tmp = b3 ^ b6; 2831 tmp = b3 ^ b6;
2832 b3 = (tmp >> 29) | (tmp << (64 - 29)); 2832 b3 = ror64(tmp, 29);
2833 b6 -= b3; 2833 b6 -= b3;
2834 2834
2835 tmp = b1 ^ b4; 2835 tmp = b1 ^ b4;
2836 b1 = (tmp >> 25) | (tmp << (64 - 25)); 2836 b1 = ror64(tmp, 25);
2837 b4 -= b1; 2837 b4 -= b1;
2838 2838
2839 tmp = b3 ^ b0; 2839 tmp = b3 ^ b0;
2840 b3 = (tmp >> 17) | (tmp << (64 - 17)); 2840 b3 = ror64(tmp, 17);
2841 b0 -= b3; 2841 b0 -= b3;
2842 2842
2843 tmp = b5 ^ b6; 2843 tmp = b5 ^ b6;
2844 b5 = (tmp >> 10) | (tmp << (64 - 10)); 2844 b5 = ror64(tmp, 10);
2845 b6 -= b5; 2845 b6 -= b5;
2846 2846
2847 tmp = b7 ^ b4; 2847 tmp = b7 ^ b4;
2848 b7 = (tmp >> 50) | (tmp << (64 - 50)); 2848 b7 = ror64(tmp, 50);
2849 b4 -= b7; 2849 b4 -= b7;
2850 2850
2851 tmp = b1 ^ b2; 2851 tmp = b1 ^ b2;
2852 b1 = (tmp >> 13) | (tmp << (64 - 13)); 2852 b1 = ror64(tmp, 13);
2853 b2 -= b1; 2853 b2 -= b1;
2854 2854
2855 tmp = b7 ^ b6; 2855 tmp = b7 ^ b6;
2856 b7 = (tmp >> 24) | (tmp << (64 - 24)); 2856 b7 = ror64(tmp, 24);
2857 b6 -= b7 + k4 + t2; 2857 b6 -= b7 + k4 + t2;
2858 b7 -= k5 + 7; 2858 b7 -= k5 + 7;
2859 2859
2860 tmp = b5 ^ b4; 2860 tmp = b5 ^ b4;
2861 b5 = (tmp >> 34) | (tmp << (64 - 34)); 2861 b5 = ror64(tmp, 34);
2862 b4 -= b5 + k2; 2862 b4 -= b5 + k2;
2863 b5 -= k3 + t1; 2863 b5 -= k3 + t1;
2864 2864
2865 tmp = b3 ^ b2; 2865 tmp = b3 ^ b2;
2866 b3 = (tmp >> 30) | (tmp << (64 - 30)); 2866 b3 = ror64(tmp, 30);
2867 b2 -= b3 + k0; 2867 b2 -= b3 + k0;
2868 b3 -= k1; 2868 b3 -= k1;
2869 2869
2870 tmp = b1 ^ b0; 2870 tmp = b1 ^ b0;
2871 b1 = (tmp >> 39) | (tmp << (64 - 39)); 2871 b1 = ror64(tmp, 39);
2872 b0 -= b1 + k7; 2872 b0 -= b1 + k7;
2873 b1 -= k8; 2873 b1 -= k8;
2874 2874
2875 tmp = b3 ^ b4; 2875 tmp = b3 ^ b4;
2876 b3 = (tmp >> 56) | (tmp << (64 - 56)); 2876 b3 = ror64(tmp, 56);
2877 b4 -= b3; 2877 b4 -= b3;
2878 2878
2879 tmp = b5 ^ b2; 2879 tmp = b5 ^ b2;
2880 b5 = (tmp >> 54) | (tmp << (64 - 54)); 2880 b5 = ror64(tmp, 54);
2881 b2 -= b5; 2881 b2 -= b5;
2882 2882
2883 tmp = b7 ^ b0; 2883 tmp = b7 ^ b0;
2884 b7 = (tmp >> 9) | (tmp << (64 - 9)); 2884 b7 = ror64(tmp, 9);
2885 b0 -= b7; 2885 b0 -= b7;
2886 2886
2887 tmp = b1 ^ b6; 2887 tmp = b1 ^ b6;
2888 b1 = (tmp >> 44) | (tmp << (64 - 44)); 2888 b1 = ror64(tmp, 44);
2889 b6 -= b1; 2889 b6 -= b1;
2890 2890
2891 tmp = b7 ^ b2; 2891 tmp = b7 ^ b2;
2892 b7 = (tmp >> 39) | (tmp << (64 - 39)); 2892 b7 = ror64(tmp, 39);
2893 b2 -= b7; 2893 b2 -= b7;
2894 2894
2895 tmp = b5 ^ b0; 2895 tmp = b5 ^ b0;
2896 b5 = (tmp >> 36) | (tmp << (64 - 36)); 2896 b5 = ror64(tmp, 36);
2897 b0 -= b5; 2897 b0 -= b5;
2898 2898
2899 tmp = b3 ^ b6; 2899 tmp = b3 ^ b6;
2900 b3 = (tmp >> 49) | (tmp << (64 - 49)); 2900 b3 = ror64(tmp, 49);
2901 b6 -= b3; 2901 b6 -= b3;
2902 2902
2903 tmp = b1 ^ b4; 2903 tmp = b1 ^ b4;
2904 b1 = (tmp >> 17) | (tmp << (64 - 17)); 2904 b1 = ror64(tmp, 17);
2905 b4 -= b1; 2905 b4 -= b1;
2906 2906
2907 tmp = b3 ^ b0; 2907 tmp = b3 ^ b0;
2908 b3 = (tmp >> 42) | (tmp << (64 - 42)); 2908 b3 = ror64(tmp, 42);
2909 b0 -= b3; 2909 b0 -= b3;
2910 2910
2911 tmp = b5 ^ b6; 2911 tmp = b5 ^ b6;
2912 b5 = (tmp >> 14) | (tmp << (64 - 14)); 2912 b5 = ror64(tmp, 14);
2913 b6 -= b5; 2913 b6 -= b5;
2914 2914
2915 tmp = b7 ^ b4; 2915 tmp = b7 ^ b4;
2916 b7 = (tmp >> 27) | (tmp << (64 - 27)); 2916 b7 = ror64(tmp, 27);
2917 b4 -= b7; 2917 b4 -= b7;
2918 2918
2919 tmp = b1 ^ b2; 2919 tmp = b1 ^ b2;
2920 b1 = (tmp >> 33) | (tmp << (64 - 33)); 2920 b1 = ror64(tmp, 33);
2921 b2 -= b1; 2921 b2 -= b1;
2922 2922
2923 tmp = b7 ^ b6; 2923 tmp = b7 ^ b6;
2924 b7 = (tmp >> 37) | (tmp << (64 - 37)); 2924 b7 = ror64(tmp, 37);
2925 b6 -= b7 + k3 + t1; 2925 b6 -= b7 + k3 + t1;
2926 b7 -= k4 + 6; 2926 b7 -= k4 + 6;
2927 2927
2928 tmp = b5 ^ b4; 2928 tmp = b5 ^ b4;
2929 b5 = (tmp >> 19) | (tmp << (64 - 19)); 2929 b5 = ror64(tmp, 19);
2930 b4 -= b5 + k1; 2930 b4 -= b5 + k1;
2931 b5 -= k2 + t0; 2931 b5 -= k2 + t0;
2932 2932
2933 tmp = b3 ^ b2; 2933 tmp = b3 ^ b2;
2934 b3 = (tmp >> 36) | (tmp << (64 - 36)); 2934 b3 = ror64(tmp, 36);
2935 b2 -= b3 + k8; 2935 b2 -= b3 + k8;
2936 b3 -= k0; 2936 b3 -= k0;
2937 2937
2938 tmp = b1 ^ b0; 2938 tmp = b1 ^ b0;
2939 b1 = (tmp >> 46) | (tmp << (64 - 46)); 2939 b1 = ror64(tmp, 46);
2940 b0 -= b1 + k6; 2940 b0 -= b1 + k6;
2941 b1 -= k7; 2941 b1 -= k7;
2942 2942
2943 tmp = b3 ^ b4; 2943 tmp = b3 ^ b4;
2944 b3 = (tmp >> 22) | (tmp << (64 - 22)); 2944 b3 = ror64(tmp, 22);
2945 b4 -= b3; 2945 b4 -= b3;
2946 2946
2947 tmp = b5 ^ b2; 2947 tmp = b5 ^ b2;
2948 b5 = (tmp >> 56) | (tmp << (64 - 56)); 2948 b5 = ror64(tmp, 56);
2949 b2 -= b5; 2949 b2 -= b5;
2950 2950
2951 tmp = b7 ^ b0; 2951 tmp = b7 ^ b0;
2952 b7 = (tmp >> 35) | (tmp << (64 - 35)); 2952 b7 = ror64(tmp, 35);
2953 b0 -= b7; 2953 b0 -= b7;
2954 2954
2955 tmp = b1 ^ b6; 2955 tmp = b1 ^ b6;
2956 b1 = (tmp >> 8) | (tmp << (64 - 8)); 2956 b1 = ror64(tmp, 8);
2957 b6 -= b1; 2957 b6 -= b1;
2958 2958
2959 tmp = b7 ^ b2; 2959 tmp = b7 ^ b2;
2960 b7 = (tmp >> 43) | (tmp << (64 - 43)); 2960 b7 = ror64(tmp, 43);
2961 b2 -= b7; 2961 b2 -= b7;
2962 2962
2963 tmp = b5 ^ b0; 2963 tmp = b5 ^ b0;
2964 b5 = (tmp >> 39) | (tmp << (64 - 39)); 2964 b5 = ror64(tmp, 39);
2965 b0 -= b5; 2965 b0 -= b5;
2966 2966
2967 tmp = b3 ^ b6; 2967 tmp = b3 ^ b6;
2968 b3 = (tmp >> 29) | (tmp << (64 - 29)); 2968 b3 = ror64(tmp, 29);
2969 b6 -= b3; 2969 b6 -= b3;
2970 2970
2971 tmp = b1 ^ b4; 2971 tmp = b1 ^ b4;
2972 b1 = (tmp >> 25) | (tmp << (64 - 25)); 2972 b1 = ror64(tmp, 25);
2973 b4 -= b1; 2973 b4 -= b1;
2974 2974
2975 tmp = b3 ^ b0; 2975 tmp = b3 ^ b0;
2976 b3 = (tmp >> 17) | (tmp << (64 - 17)); 2976 b3 = ror64(tmp, 17);
2977 b0 -= b3; 2977 b0 -= b3;
2978 2978
2979 tmp = b5 ^ b6; 2979 tmp = b5 ^ b6;
2980 b5 = (tmp >> 10) | (tmp << (64 - 10)); 2980 b5 = ror64(tmp, 10);
2981 b6 -= b5; 2981 b6 -= b5;
2982 2982
2983 tmp = b7 ^ b4; 2983 tmp = b7 ^ b4;
2984 b7 = (tmp >> 50) | (tmp << (64 - 50)); 2984 b7 = ror64(tmp, 50);
2985 b4 -= b7; 2985 b4 -= b7;
2986 2986
2987 tmp = b1 ^ b2; 2987 tmp = b1 ^ b2;
2988 b1 = (tmp >> 13) | (tmp << (64 - 13)); 2988 b1 = ror64(tmp, 13);
2989 b2 -= b1; 2989 b2 -= b1;
2990 2990
2991 tmp = b7 ^ b6; 2991 tmp = b7 ^ b6;
2992 b7 = (tmp >> 24) | (tmp << (64 - 24)); 2992 b7 = ror64(tmp, 24);
2993 b6 -= b7 + k2 + t0; 2993 b6 -= b7 + k2 + t0;
2994 b7 -= k3 + 5; 2994 b7 -= k3 + 5;
2995 2995
2996 tmp = b5 ^ b4; 2996 tmp = b5 ^ b4;
2997 b5 = (tmp >> 34) | (tmp << (64 - 34)); 2997 b5 = ror64(tmp, 34);
2998 b4 -= b5 + k0; 2998 b4 -= b5 + k0;
2999 b5 -= k1 + t2; 2999 b5 -= k1 + t2;
3000 3000
3001 tmp = b3 ^ b2; 3001 tmp = b3 ^ b2;
3002 b3 = (tmp >> 30) | (tmp << (64 - 30)); 3002 b3 = ror64(tmp, 30);
3003 b2 -= b3 + k7; 3003 b2 -= b3 + k7;
3004 b3 -= k8; 3004 b3 -= k8;
3005 3005
3006 tmp = b1 ^ b0; 3006 tmp = b1 ^ b0;
3007 b1 = (tmp >> 39) | (tmp << (64 - 39)); 3007 b1 = ror64(tmp, 39);
3008 b0 -= b1 + k5; 3008 b0 -= b1 + k5;
3009 b1 -= k6; 3009 b1 -= k6;
3010 3010
3011 tmp = b3 ^ b4; 3011 tmp = b3 ^ b4;
3012 b3 = (tmp >> 56) | (tmp << (64 - 56)); 3012 b3 = ror64(tmp, 56);
3013 b4 -= b3; 3013 b4 -= b3;
3014 3014
3015 tmp = b5 ^ b2; 3015 tmp = b5 ^ b2;
3016 b5 = (tmp >> 54) | (tmp << (64 - 54)); 3016 b5 = ror64(tmp, 54);
3017 b2 -= b5; 3017 b2 -= b5;
3018 3018
3019 tmp = b7 ^ b0; 3019 tmp = b7 ^ b0;
3020 b7 = (tmp >> 9) | (tmp << (64 - 9)); 3020 b7 = ror64(tmp, 9);
3021 b0 -= b7; 3021 b0 -= b7;
3022 3022
3023 tmp = b1 ^ b6; 3023 tmp = b1 ^ b6;
3024 b1 = (tmp >> 44) | (tmp << (64 - 44)); 3024 b1 = ror64(tmp, 44);
3025 b6 -= b1; 3025 b6 -= b1;
3026 3026
3027 tmp = b7 ^ b2; 3027 tmp = b7 ^ b2;
3028 b7 = (tmp >> 39) | (tmp << (64 - 39)); 3028 b7 = ror64(tmp, 39);
3029 b2 -= b7; 3029 b2 -= b7;
3030 3030
3031 tmp = b5 ^ b0; 3031 tmp = b5 ^ b0;
3032 b5 = (tmp >> 36) | (tmp << (64 - 36)); 3032 b5 = ror64(tmp, 36);
3033 b0 -= b5; 3033 b0 -= b5;
3034 3034
3035 tmp = b3 ^ b6; 3035 tmp = b3 ^ b6;
3036 b3 = (tmp >> 49) | (tmp << (64 - 49)); 3036 b3 = ror64(tmp, 49);
3037 b6 -= b3; 3037 b6 -= b3;
3038 3038
3039 tmp = b1 ^ b4; 3039 tmp = b1 ^ b4;
3040 b1 = (tmp >> 17) | (tmp << (64 - 17)); 3040 b1 = ror64(tmp, 17);
3041 b4 -= b1; 3041 b4 -= b1;
3042 3042
3043 tmp = b3 ^ b0; 3043 tmp = b3 ^ b0;
3044 b3 = (tmp >> 42) | (tmp << (64 - 42)); 3044 b3 = ror64(tmp, 42);
3045 b0 -= b3; 3045 b0 -= b3;
3046 3046
3047 tmp = b5 ^ b6; 3047 tmp = b5 ^ b6;
3048 b5 = (tmp >> 14) | (tmp << (64 - 14)); 3048 b5 = ror64(tmp, 14);
3049 b6 -= b5; 3049 b6 -= b5;
3050 3050
3051 tmp = b7 ^ b4; 3051 tmp = b7 ^ b4;
3052 b7 = (tmp >> 27) | (tmp << (64 - 27)); 3052 b7 = ror64(tmp, 27);
3053 b4 -= b7; 3053 b4 -= b7;
3054 3054
3055 tmp = b1 ^ b2; 3055 tmp = b1 ^ b2;
3056 b1 = (tmp >> 33) | (tmp << (64 - 33)); 3056 b1 = ror64(tmp, 33);
3057 b2 -= b1; 3057 b2 -= b1;
3058 3058
3059 tmp = b7 ^ b6; 3059 tmp = b7 ^ b6;
3060 b7 = (tmp >> 37) | (tmp << (64 - 37)); 3060 b7 = ror64(tmp, 37);
3061 b6 -= b7 + k1 + t2; 3061 b6 -= b7 + k1 + t2;
3062 b7 -= k2 + 4; 3062 b7 -= k2 + 4;
3063 3063
3064 tmp = b5 ^ b4; 3064 tmp = b5 ^ b4;
3065 b5 = (tmp >> 19) | (tmp << (64 - 19)); 3065 b5 = ror64(tmp, 19);
3066 b4 -= b5 + k8; 3066 b4 -= b5 + k8;
3067 b5 -= k0 + t1; 3067 b5 -= k0 + t1;
3068 3068
3069 tmp = b3 ^ b2; 3069 tmp = b3 ^ b2;
3070 b3 = (tmp >> 36) | (tmp << (64 - 36)); 3070 b3 = ror64(tmp, 36);
3071 b2 -= b3 + k6; 3071 b2 -= b3 + k6;
3072 b3 -= k7; 3072 b3 -= k7;
3073 3073
3074 tmp = b1 ^ b0; 3074 tmp = b1 ^ b0;
3075 b1 = (tmp >> 46) | (tmp << (64 - 46)); 3075 b1 = ror64(tmp, 46);
3076 b0 -= b1 + k4; 3076 b0 -= b1 + k4;
3077 b1 -= k5; 3077 b1 -= k5;
3078 3078
3079 tmp = b3 ^ b4; 3079 tmp = b3 ^ b4;
3080 b3 = (tmp >> 22) | (tmp << (64 - 22)); 3080 b3 = ror64(tmp, 22);
3081 b4 -= b3; 3081 b4 -= b3;
3082 3082
3083 tmp = b5 ^ b2; 3083 tmp = b5 ^ b2;
3084 b5 = (tmp >> 56) | (tmp << (64 - 56)); 3084 b5 = ror64(tmp, 56);
3085 b2 -= b5; 3085 b2 -= b5;
3086 3086
3087 tmp = b7 ^ b0; 3087 tmp = b7 ^ b0;
3088 b7 = (tmp >> 35) | (tmp << (64 - 35)); 3088 b7 = ror64(tmp, 35);
3089 b0 -= b7; 3089 b0 -= b7;
3090 3090
3091 tmp = b1 ^ b6; 3091 tmp = b1 ^ b6;
3092 b1 = (tmp >> 8) | (tmp << (64 - 8)); 3092 b1 = ror64(tmp, 8);
3093 b6 -= b1; 3093 b6 -= b1;
3094 3094
3095 tmp = b7 ^ b2; 3095 tmp = b7 ^ b2;
3096 b7 = (tmp >> 43) | (tmp << (64 - 43)); 3096 b7 = ror64(tmp, 43);
3097 b2 -= b7; 3097 b2 -= b7;
3098 3098
3099 tmp = b5 ^ b0; 3099 tmp = b5 ^ b0;
3100 b5 = (tmp >> 39) | (tmp << (64 - 39)); 3100 b5 = ror64(tmp, 39);
3101 b0 -= b5; 3101 b0 -= b5;
3102 3102
3103 tmp = b3 ^ b6; 3103 tmp = b3 ^ b6;
3104 b3 = (tmp >> 29) | (tmp << (64 - 29)); 3104 b3 = ror64(tmp, 29);
3105 b6 -= b3; 3105 b6 -= b3;
3106 3106
3107 tmp = b1 ^ b4; 3107 tmp = b1 ^ b4;
3108 b1 = (tmp >> 25) | (tmp << (64 - 25)); 3108 b1 = ror64(tmp, 25);
3109 b4 -= b1; 3109 b4 -= b1;
3110 3110
3111 tmp = b3 ^ b0; 3111 tmp = b3 ^ b0;
3112 b3 = (tmp >> 17) | (tmp << (64 - 17)); 3112 b3 = ror64(tmp, 17);
3113 b0 -= b3; 3113 b0 -= b3;
3114 3114
3115 tmp = b5 ^ b6; 3115 tmp = b5 ^ b6;
3116 b5 = (tmp >> 10) | (tmp << (64 - 10)); 3116 b5 = ror64(tmp, 10);
3117 b6 -= b5; 3117 b6 -= b5;
3118 3118
3119 tmp = b7 ^ b4; 3119 tmp = b7 ^ b4;
3120 b7 = (tmp >> 50) | (tmp << (64 - 50)); 3120 b7 = ror64(tmp, 50);
3121 b4 -= b7; 3121 b4 -= b7;
3122 3122
3123 tmp = b1 ^ b2; 3123 tmp = b1 ^ b2;
3124 b1 = (tmp >> 13) | (tmp << (64 - 13)); 3124 b1 = ror64(tmp, 13);
3125 b2 -= b1; 3125 b2 -= b1;
3126 3126
3127 tmp = b7 ^ b6; 3127 tmp = b7 ^ b6;
3128 b7 = (tmp >> 24) | (tmp << (64 - 24)); 3128 b7 = ror64(tmp, 24);
3129 b6 -= b7 + k0 + t1; 3129 b6 -= b7 + k0 + t1;
3130 b7 -= k1 + 3; 3130 b7 -= k1 + 3;
3131 3131
3132 tmp = b5 ^ b4; 3132 tmp = b5 ^ b4;
3133 b5 = (tmp >> 34) | (tmp << (64 - 34)); 3133 b5 = ror64(tmp, 34);
3134 b4 -= b5 + k7; 3134 b4 -= b5 + k7;
3135 b5 -= k8 + t0; 3135 b5 -= k8 + t0;
3136 3136
3137 tmp = b3 ^ b2; 3137 tmp = b3 ^ b2;
3138 b3 = (tmp >> 30) | (tmp << (64 - 30)); 3138 b3 = ror64(tmp, 30);
3139 b2 -= b3 + k5; 3139 b2 -= b3 + k5;
3140 b3 -= k6; 3140 b3 -= k6;
3141 3141
3142 tmp = b1 ^ b0; 3142 tmp = b1 ^ b0;
3143 b1 = (tmp >> 39) | (tmp << (64 - 39)); 3143 b1 = ror64(tmp, 39);
3144 b0 -= b1 + k3; 3144 b0 -= b1 + k3;
3145 b1 -= k4; 3145 b1 -= k4;
3146 3146
3147 tmp = b3 ^ b4; 3147 tmp = b3 ^ b4;
3148 b3 = (tmp >> 56) | (tmp << (64 - 56)); 3148 b3 = ror64(tmp, 56);
3149 b4 -= b3; 3149 b4 -= b3;
3150 3150
3151 tmp = b5 ^ b2; 3151 tmp = b5 ^ b2;
3152 b5 = (tmp >> 54) | (tmp << (64 - 54)); 3152 b5 = ror64(tmp, 54);
3153 b2 -= b5; 3153 b2 -= b5;
3154 3154
3155 tmp = b7 ^ b0; 3155 tmp = b7 ^ b0;
3156 b7 = (tmp >> 9) | (tmp << (64 - 9)); 3156 b7 = ror64(tmp, 9);
3157 b0 -= b7; 3157 b0 -= b7;
3158 3158
3159 tmp = b1 ^ b6; 3159 tmp = b1 ^ b6;
3160 b1 = (tmp >> 44) | (tmp << (64 - 44)); 3160 b1 = ror64(tmp, 44);
3161 b6 -= b1; 3161 b6 -= b1;
3162 3162
3163 tmp = b7 ^ b2; 3163 tmp = b7 ^ b2;
3164 b7 = (tmp >> 39) | (tmp << (64 - 39)); 3164 b7 = ror64(tmp, 39);
3165 b2 -= b7; 3165 b2 -= b7;
3166 3166
3167 tmp = b5 ^ b0; 3167 tmp = b5 ^ b0;
3168 b5 = (tmp >> 36) | (tmp << (64 - 36)); 3168 b5 = ror64(tmp, 36);
3169 b0 -= b5; 3169 b0 -= b5;
3170 3170
3171 tmp = b3 ^ b6; 3171 tmp = b3 ^ b6;
3172 b3 = (tmp >> 49) | (tmp << (64 - 49)); 3172 b3 = ror64(tmp, 49);
3173 b6 -= b3; 3173 b6 -= b3;
3174 3174
3175 tmp = b1 ^ b4; 3175 tmp = b1 ^ b4;
3176 b1 = (tmp >> 17) | (tmp << (64 - 17)); 3176 b1 = ror64(tmp, 17);
3177 b4 -= b1; 3177 b4 -= b1;
3178 3178
3179 tmp = b3 ^ b0; 3179 tmp = b3 ^ b0;
3180 b3 = (tmp >> 42) | (tmp << (64 - 42)); 3180 b3 = ror64(tmp, 42);
3181 b0 -= b3; 3181 b0 -= b3;
3182 3182
3183 tmp = b5 ^ b6; 3183 tmp = b5 ^ b6;
3184 b5 = (tmp >> 14) | (tmp << (64 - 14)); 3184 b5 = ror64(tmp, 14);
3185 b6 -= b5; 3185 b6 -= b5;
3186 3186
3187 tmp = b7 ^ b4; 3187 tmp = b7 ^ b4;
3188 b7 = (tmp >> 27) | (tmp << (64 - 27)); 3188 b7 = ror64(tmp, 27);
3189 b4 -= b7; 3189 b4 -= b7;
3190 3190
3191 tmp = b1 ^ b2; 3191 tmp = b1 ^ b2;
3192 b1 = (tmp >> 33) | (tmp << (64 - 33)); 3192 b1 = ror64(tmp, 33);
3193 b2 -= b1; 3193 b2 -= b1;
3194 3194
3195 tmp = b7 ^ b6; 3195 tmp = b7 ^ b6;
3196 b7 = (tmp >> 37) | (tmp << (64 - 37)); 3196 b7 = ror64(tmp, 37);
3197 b6 -= b7 + k8 + t0; 3197 b6 -= b7 + k8 + t0;
3198 b7 -= k0 + 2; 3198 b7 -= k0 + 2;
3199 3199
3200 tmp = b5 ^ b4; 3200 tmp = b5 ^ b4;
3201 b5 = (tmp >> 19) | (tmp << (64 - 19)); 3201 b5 = ror64(tmp, 19);
3202 b4 -= b5 + k6; 3202 b4 -= b5 + k6;
3203 b5 -= k7 + t2; 3203 b5 -= k7 + t2;
3204 3204
3205 tmp = b3 ^ b2; 3205 tmp = b3 ^ b2;
3206 b3 = (tmp >> 36) | (tmp << (64 - 36)); 3206 b3 = ror64(tmp, 36);
3207 b2 -= b3 + k4; 3207 b2 -= b3 + k4;
3208 b3 -= k5; 3208 b3 -= k5;
3209 3209
3210 tmp = b1 ^ b0; 3210 tmp = b1 ^ b0;
3211 b1 = (tmp >> 46) | (tmp << (64 - 46)); 3211 b1 = ror64(tmp, 46);
3212 b0 -= b1 + k2; 3212 b0 -= b1 + k2;
3213 b1 -= k3; 3213 b1 -= k3;
3214 3214
3215 tmp = b3 ^ b4; 3215 tmp = b3 ^ b4;
3216 b3 = (tmp >> 22) | (tmp << (64 - 22)); 3216 b3 = ror64(tmp, 22);
3217 b4 -= b3; 3217 b4 -= b3;
3218 3218
3219 tmp = b5 ^ b2; 3219 tmp = b5 ^ b2;
3220 b5 = (tmp >> 56) | (tmp << (64 - 56)); 3220 b5 = ror64(tmp, 56);
3221 b2 -= b5; 3221 b2 -= b5;
3222 3222
3223 tmp = b7 ^ b0; 3223 tmp = b7 ^ b0;
3224 b7 = (tmp >> 35) | (tmp << (64 - 35)); 3224 b7 = ror64(tmp, 35);
3225 b0 -= b7; 3225 b0 -= b7;
3226 3226
3227 tmp = b1 ^ b6; 3227 tmp = b1 ^ b6;
3228 b1 = (tmp >> 8) | (tmp << (64 - 8)); 3228 b1 = ror64(tmp, 8);
3229 b6 -= b1; 3229 b6 -= b1;
3230 3230
3231 tmp = b7 ^ b2; 3231 tmp = b7 ^ b2;
3232 b7 = (tmp >> 43) | (tmp << (64 - 43)); 3232 b7 = ror64(tmp, 43);
3233 b2 -= b7; 3233 b2 -= b7;
3234 3234
3235 tmp = b5 ^ b0; 3235 tmp = b5 ^ b0;
3236 b5 = (tmp >> 39) | (tmp << (64 - 39)); 3236 b5 = ror64(tmp, 39);
3237 b0 -= b5; 3237 b0 -= b5;
3238 3238
3239 tmp = b3 ^ b6; 3239 tmp = b3 ^ b6;
3240 b3 = (tmp >> 29) | (tmp << (64 - 29)); 3240 b3 = ror64(tmp, 29);
3241 b6 -= b3; 3241 b6 -= b3;
3242 3242
3243 tmp = b1 ^ b4; 3243 tmp = b1 ^ b4;
3244 b1 = (tmp >> 25) | (tmp << (64 - 25)); 3244 b1 = ror64(tmp, 25);
3245 b4 -= b1; 3245 b4 -= b1;
3246 3246
3247 tmp = b3 ^ b0; 3247 tmp = b3 ^ b0;
3248 b3 = (tmp >> 17) | (tmp << (64 - 17)); 3248 b3 = ror64(tmp, 17);
3249 b0 -= b3; 3249 b0 -= b3;
3250 3250
3251 tmp = b5 ^ b6; 3251 tmp = b5 ^ b6;
3252 b5 = (tmp >> 10) | (tmp << (64 - 10)); 3252 b5 = ror64(tmp, 10);
3253 b6 -= b5; 3253 b6 -= b5;
3254 3254
3255 tmp = b7 ^ b4; 3255 tmp = b7 ^ b4;
3256 b7 = (tmp >> 50) | (tmp << (64 - 50)); 3256 b7 = ror64(tmp, 50);
3257 b4 -= b7; 3257 b4 -= b7;
3258 3258
3259 tmp = b1 ^ b2; 3259 tmp = b1 ^ b2;
3260 b1 = (tmp >> 13) | (tmp << (64 - 13)); 3260 b1 = ror64(tmp, 13);
3261 b2 -= b1; 3261 b2 -= b1;
3262 3262
3263 tmp = b7 ^ b6; 3263 tmp = b7 ^ b6;
3264 b7 = (tmp >> 24) | (tmp << (64 - 24)); 3264 b7 = ror64(tmp, 24);
3265 b6 -= b7 + k7 + t2; 3265 b6 -= b7 + k7 + t2;
3266 b7 -= k8 + 1; 3266 b7 -= k8 + 1;
3267 3267
3268 tmp = b5 ^ b4; 3268 tmp = b5 ^ b4;
3269 b5 = (tmp >> 34) | (tmp << (64 - 34)); 3269 b5 = ror64(tmp, 34);
3270 b4 -= b5 + k5; 3270 b4 -= b5 + k5;
3271 b5 -= k6 + t1; 3271 b5 -= k6 + t1;
3272 3272
3273 tmp = b3 ^ b2; 3273 tmp = b3 ^ b2;
3274 b3 = (tmp >> 30) | (tmp << (64 - 30)); 3274 b3 = ror64(tmp, 30);
3275 b2 -= b3 + k3; 3275 b2 -= b3 + k3;
3276 b3 -= k4; 3276 b3 -= k4;
3277 3277
3278 tmp = b1 ^ b0; 3278 tmp = b1 ^ b0;
3279 b1 = (tmp >> 39) | (tmp << (64 - 39)); 3279 b1 = ror64(tmp, 39);
3280 b0 -= b1 + k1; 3280 b0 -= b1 + k1;
3281 b1 -= k2; 3281 b1 -= k2;
3282 3282
3283 tmp = b3 ^ b4; 3283 tmp = b3 ^ b4;
3284 b3 = (tmp >> 56) | (tmp << (64 - 56)); 3284 b3 = ror64(tmp, 56);
3285 b4 -= b3; 3285 b4 -= b3;
3286 3286
3287 tmp = b5 ^ b2; 3287 tmp = b5 ^ b2;
3288 b5 = (tmp >> 54) | (tmp << (64 - 54)); 3288 b5 = ror64(tmp, 54);
3289 b2 -= b5; 3289 b2 -= b5;
3290 3290
3291 tmp = b7 ^ b0; 3291 tmp = b7 ^ b0;
3292 b7 = (tmp >> 9) | (tmp << (64 - 9)); 3292 b7 = ror64(tmp, 9);
3293 b0 -= b7; 3293 b0 -= b7;
3294 3294
3295 tmp = b1 ^ b6; 3295 tmp = b1 ^ b6;
3296 b1 = (tmp >> 44) | (tmp << (64 - 44)); 3296 b1 = ror64(tmp, 44);
3297 b6 -= b1; 3297 b6 -= b1;
3298 3298
3299 tmp = b7 ^ b2; 3299 tmp = b7 ^ b2;
3300 b7 = (tmp >> 39) | (tmp << (64 - 39)); 3300 b7 = ror64(tmp, 39);
3301 b2 -= b7; 3301 b2 -= b7;
3302 3302
3303 tmp = b5 ^ b0; 3303 tmp = b5 ^ b0;
3304 b5 = (tmp >> 36) | (tmp << (64 - 36)); 3304 b5 = ror64(tmp, 36);
3305 b0 -= b5; 3305 b0 -= b5;
3306 3306
3307 tmp = b3 ^ b6; 3307 tmp = b3 ^ b6;
3308 b3 = (tmp >> 49) | (tmp << (64 - 49)); 3308 b3 = ror64(tmp, 49);
3309 b6 -= b3; 3309 b6 -= b3;
3310 3310
3311 tmp = b1 ^ b4; 3311 tmp = b1 ^ b4;
3312 b1 = (tmp >> 17) | (tmp << (64 - 17)); 3312 b1 = ror64(tmp, 17);
3313 b4 -= b1; 3313 b4 -= b1;
3314 3314
3315 tmp = b3 ^ b0; 3315 tmp = b3 ^ b0;
3316 b3 = (tmp >> 42) | (tmp << (64 - 42)); 3316 b3 = ror64(tmp, 42);
3317 b0 -= b3; 3317 b0 -= b3;
3318 3318
3319 tmp = b5 ^ b6; 3319 tmp = b5 ^ b6;
3320 b5 = (tmp >> 14) | (tmp << (64 - 14)); 3320 b5 = ror64(tmp, 14);
3321 b6 -= b5; 3321 b6 -= b5;
3322 3322
3323 tmp = b7 ^ b4; 3323 tmp = b7 ^ b4;
3324 b7 = (tmp >> 27) | (tmp << (64 - 27)); 3324 b7 = ror64(tmp, 27);
3325 b4 -= b7; 3325 b4 -= b7;
3326 3326
3327 tmp = b1 ^ b2; 3327 tmp = b1 ^ b2;
3328 b1 = (tmp >> 33) | (tmp << (64 - 33)); 3328 b1 = ror64(tmp, 33);
3329 b2 -= b1; 3329 b2 -= b1;
3330 3330
3331 tmp = b7 ^ b6; 3331 tmp = b7 ^ b6;
3332 b7 = (tmp >> 37) | (tmp << (64 - 37)); 3332 b7 = ror64(tmp, 37);
3333 b6 -= b7 + k6 + t1; 3333 b6 -= b7 + k6 + t1;
3334 b7 -= k7; 3334 b7 -= k7;
3335 3335
3336 tmp = b5 ^ b4; 3336 tmp = b5 ^ b4;
3337 b5 = (tmp >> 19) | (tmp << (64 - 19)); 3337 b5 = ror64(tmp, 19);
3338 b4 -= b5 + k4; 3338 b4 -= b5 + k4;
3339 b5 -= k5 + t0; 3339 b5 -= k5 + t0;
3340 3340
3341 tmp = b3 ^ b2; 3341 tmp = b3 ^ b2;
3342 b3 = (tmp >> 36) | (tmp << (64 - 36)); 3342 b3 = ror64(tmp, 36);
3343 b2 -= b3 + k2; 3343 b2 -= b3 + k2;
3344 b3 -= k3; 3344 b3 -= k3;
3345 3345
3346 tmp = b1 ^ b0; 3346 tmp = b1 ^ b0;
3347 b1 = (tmp >> 46) | (tmp << (64 - 46)); 3347 b1 = ror64(tmp, 46);
3348 b0 -= b1 + k0; 3348 b0 -= b1 + k0;
3349 b1 -= k1; 3349 b1 -= k1;
3350 3350
@@ -5521,2722 +5521,2722 @@ void threefish_decrypt_1024(struct threefish_key *key_ctx, u64 *input,
5521 b14 -= k0 + t0; 5521 b14 -= k0 + t0;
5522 b15 -= k1 + 20; 5522 b15 -= k1 + 20;
5523 tmp = b7 ^ b12; 5523 tmp = b7 ^ b12;
5524 b7 = (tmp >> 20) | (tmp << (64 - 20)); 5524 b7 = ror64(tmp, 20);
5525 b12 -= b7; 5525 b12 -= b7;
5526 5526
5527 tmp = b3 ^ b10; 5527 tmp = b3 ^ b10;
5528 b3 = (tmp >> 37) | (tmp << (64 - 37)); 5528 b3 = ror64(tmp, 37);
5529 b10 -= b3; 5529 b10 -= b3;
5530 5530
5531 tmp = b5 ^ b8; 5531 tmp = b5 ^ b8;
5532 b5 = (tmp >> 31) | (tmp << (64 - 31)); 5532 b5 = ror64(tmp, 31);
5533 b8 -= b5; 5533 b8 -= b5;
5534 5534
5535 tmp = b1 ^ b14; 5535 tmp = b1 ^ b14;
5536 b1 = (tmp >> 23) | (tmp << (64 - 23)); 5536 b1 = ror64(tmp, 23);
5537 b14 -= b1; 5537 b14 -= b1;
5538 5538
5539 tmp = b9 ^ b4; 5539 tmp = b9 ^ b4;
5540 b9 = (tmp >> 52) | (tmp << (64 - 52)); 5540 b9 = ror64(tmp, 52);
5541 b4 -= b9; 5541 b4 -= b9;
5542 5542
5543 tmp = b13 ^ b6; 5543 tmp = b13 ^ b6;
5544 b13 = (tmp >> 35) | (tmp << (64 - 35)); 5544 b13 = ror64(tmp, 35);
5545 b6 -= b13; 5545 b6 -= b13;
5546 5546
5547 tmp = b11 ^ b2; 5547 tmp = b11 ^ b2;
5548 b11 = (tmp >> 48) | (tmp << (64 - 48)); 5548 b11 = ror64(tmp, 48);
5549 b2 -= b11; 5549 b2 -= b11;
5550 5550
5551 tmp = b15 ^ b0; 5551 tmp = b15 ^ b0;
5552 b15 = (tmp >> 9) | (tmp << (64 - 9)); 5552 b15 = ror64(tmp, 9);
5553 b0 -= b15; 5553 b0 -= b15;
5554 5554
5555 tmp = b9 ^ b10; 5555 tmp = b9 ^ b10;
5556 b9 = (tmp >> 25) | (tmp << (64 - 25)); 5556 b9 = ror64(tmp, 25);
5557 b10 -= b9; 5557 b10 -= b9;
5558 5558
5559 tmp = b11 ^ b8; 5559 tmp = b11 ^ b8;
5560 b11 = (tmp >> 44) | (tmp << (64 - 44)); 5560 b11 = ror64(tmp, 44);
5561 b8 -= b11; 5561 b8 -= b11;
5562 5562
5563 tmp = b13 ^ b14; 5563 tmp = b13 ^ b14;
5564 b13 = (tmp >> 42) | (tmp << (64 - 42)); 5564 b13 = ror64(tmp, 42);
5565 b14 -= b13; 5565 b14 -= b13;
5566 5566
5567 tmp = b15 ^ b12; 5567 tmp = b15 ^ b12;
5568 b15 = (tmp >> 19) | (tmp << (64 - 19)); 5568 b15 = ror64(tmp, 19);
5569 b12 -= b15; 5569 b12 -= b15;
5570 5570
5571 tmp = b1 ^ b6; 5571 tmp = b1 ^ b6;
5572 b1 = (tmp >> 46) | (tmp << (64 - 46)); 5572 b1 = ror64(tmp, 46);
5573 b6 -= b1; 5573 b6 -= b1;
5574 5574
5575 tmp = b3 ^ b4; 5575 tmp = b3 ^ b4;
5576 b3 = (tmp >> 47) | (tmp << (64 - 47)); 5576 b3 = ror64(tmp, 47);
5577 b4 -= b3; 5577 b4 -= b3;
5578 5578
5579 tmp = b5 ^ b2; 5579 tmp = b5 ^ b2;
5580 b5 = (tmp >> 44) | (tmp << (64 - 44)); 5580 b5 = ror64(tmp, 44);
5581 b2 -= b5; 5581 b2 -= b5;
5582 5582
5583 tmp = b7 ^ b0; 5583 tmp = b7 ^ b0;
5584 b7 = (tmp >> 31) | (tmp << (64 - 31)); 5584 b7 = ror64(tmp, 31);
5585 b0 -= b7; 5585 b0 -= b7;
5586 5586
5587 tmp = b1 ^ b8; 5587 tmp = b1 ^ b8;
5588 b1 = (tmp >> 41) | (tmp << (64 - 41)); 5588 b1 = ror64(tmp, 41);
5589 b8 -= b1; 5589 b8 -= b1;
5590 5590
5591 tmp = b5 ^ b14; 5591 tmp = b5 ^ b14;
5592 b5 = (tmp >> 42) | (tmp << (64 - 42)); 5592 b5 = ror64(tmp, 42);
5593 b14 -= b5; 5593 b14 -= b5;
5594 5594
5595 tmp = b3 ^ b12; 5595 tmp = b3 ^ b12;
5596 b3 = (tmp >> 53) | (tmp << (64 - 53)); 5596 b3 = ror64(tmp, 53);
5597 b12 -= b3; 5597 b12 -= b3;
5598 5598
5599 tmp = b7 ^ b10; 5599 tmp = b7 ^ b10;
5600 b7 = (tmp >> 4) | (tmp << (64 - 4)); 5600 b7 = ror64(tmp, 4);
5601 b10 -= b7; 5601 b10 -= b7;
5602 5602
5603 tmp = b15 ^ b4; 5603 tmp = b15 ^ b4;
5604 b15 = (tmp >> 51) | (tmp << (64 - 51)); 5604 b15 = ror64(tmp, 51);
5605 b4 -= b15; 5605 b4 -= b15;
5606 5606
5607 tmp = b11 ^ b6; 5607 tmp = b11 ^ b6;
5608 b11 = (tmp >> 56) | (tmp << (64 - 56)); 5608 b11 = ror64(tmp, 56);
5609 b6 -= b11; 5609 b6 -= b11;
5610 5610
5611 tmp = b13 ^ b2; 5611 tmp = b13 ^ b2;
5612 b13 = (tmp >> 34) | (tmp << (64 - 34)); 5612 b13 = ror64(tmp, 34);
5613 b2 -= b13; 5613 b2 -= b13;
5614 5614
5615 tmp = b9 ^ b0; 5615 tmp = b9 ^ b0;
5616 b9 = (tmp >> 16) | (tmp << (64 - 16)); 5616 b9 = ror64(tmp, 16);
5617 b0 -= b9; 5617 b0 -= b9;
5618 5618
5619 tmp = b15 ^ b14; 5619 tmp = b15 ^ b14;
5620 b15 = (tmp >> 30) | (tmp << (64 - 30)); 5620 b15 = ror64(tmp, 30);
5621 b14 -= b15 + k16 + t2; 5621 b14 -= b15 + k16 + t2;
5622 b15 -= k0 + 19; 5622 b15 -= k0 + 19;
5623 5623
5624 tmp = b13 ^ b12; 5624 tmp = b13 ^ b12;
5625 b13 = (tmp >> 44) | (tmp << (64 - 44)); 5625 b13 = ror64(tmp, 44);
5626 b12 -= b13 + k14; 5626 b12 -= b13 + k14;
5627 b13 -= k15 + t1; 5627 b13 -= k15 + t1;
5628 5628
5629 tmp = b11 ^ b10; 5629 tmp = b11 ^ b10;
5630 b11 = (tmp >> 47) | (tmp << (64 - 47)); 5630 b11 = ror64(tmp, 47);
5631 b10 -= b11 + k12; 5631 b10 -= b11 + k12;
5632 b11 -= k13; 5632 b11 -= k13;
5633 5633
5634 tmp = b9 ^ b8; 5634 tmp = b9 ^ b8;
5635 b9 = (tmp >> 12) | (tmp << (64 - 12)); 5635 b9 = ror64(tmp, 12);
5636 b8 -= b9 + k10; 5636 b8 -= b9 + k10;
5637 b9 -= k11; 5637 b9 -= k11;
5638 5638
5639 tmp = b7 ^ b6; 5639 tmp = b7 ^ b6;
5640 b7 = (tmp >> 31) | (tmp << (64 - 31)); 5640 b7 = ror64(tmp, 31);
5641 b6 -= b7 + k8; 5641 b6 -= b7 + k8;
5642 b7 -= k9; 5642 b7 -= k9;
5643 5643
5644 tmp = b5 ^ b4; 5644 tmp = b5 ^ b4;
5645 b5 = (tmp >> 37) | (tmp << (64 - 37)); 5645 b5 = ror64(tmp, 37);
5646 b4 -= b5 + k6; 5646 b4 -= b5 + k6;
5647 b5 -= k7; 5647 b5 -= k7;
5648 5648
5649 tmp = b3 ^ b2; 5649 tmp = b3 ^ b2;
5650 b3 = (tmp >> 9) | (tmp << (64 - 9)); 5650 b3 = ror64(tmp, 9);
5651 b2 -= b3 + k4; 5651 b2 -= b3 + k4;
5652 b3 -= k5; 5652 b3 -= k5;
5653 5653
5654 tmp = b1 ^ b0; 5654 tmp = b1 ^ b0;
5655 b1 = (tmp >> 41) | (tmp << (64 - 41)); 5655 b1 = ror64(tmp, 41);
5656 b0 -= b1 + k2; 5656 b0 -= b1 + k2;
5657 b1 -= k3; 5657 b1 -= k3;
5658 5658
5659 tmp = b7 ^ b12; 5659 tmp = b7 ^ b12;
5660 b7 = (tmp >> 25) | (tmp << (64 - 25)); 5660 b7 = ror64(tmp, 25);
5661 b12 -= b7; 5661 b12 -= b7;
5662 5662
5663 tmp = b3 ^ b10; 5663 tmp = b3 ^ b10;
5664 b3 = (tmp >> 16) | (tmp << (64 - 16)); 5664 b3 = ror64(tmp, 16);
5665 b10 -= b3; 5665 b10 -= b3;
5666 5666
5667 tmp = b5 ^ b8; 5667 tmp = b5 ^ b8;
5668 b5 = (tmp >> 28) | (tmp << (64 - 28)); 5668 b5 = ror64(tmp, 28);
5669 b8 -= b5; 5669 b8 -= b5;
5670 5670
5671 tmp = b1 ^ b14; 5671 tmp = b1 ^ b14;
5672 b1 = (tmp >> 47) | (tmp << (64 - 47)); 5672 b1 = ror64(tmp, 47);
5673 b14 -= b1; 5673 b14 -= b1;
5674 5674
5675 tmp = b9 ^ b4; 5675 tmp = b9 ^ b4;
5676 b9 = (tmp >> 41) | (tmp << (64 - 41)); 5676 b9 = ror64(tmp, 41);
5677 b4 -= b9; 5677 b4 -= b9;
5678 5678
5679 tmp = b13 ^ b6; 5679 tmp = b13 ^ b6;
5680 b13 = (tmp >> 48) | (tmp << (64 - 48)); 5680 b13 = ror64(tmp, 48);
5681 b6 -= b13; 5681 b6 -= b13;
5682 5682
5683 tmp = b11 ^ b2; 5683 tmp = b11 ^ b2;
5684 b11 = (tmp >> 20) | (tmp << (64 - 20)); 5684 b11 = ror64(tmp, 20);
5685 b2 -= b11; 5685 b2 -= b11;
5686 5686
5687 tmp = b15 ^ b0; 5687 tmp = b15 ^ b0;
5688 b15 = (tmp >> 5) | (tmp << (64 - 5)); 5688 b15 = ror64(tmp, 5);
5689 b0 -= b15; 5689 b0 -= b15;
5690 5690
5691 tmp = b9 ^ b10; 5691 tmp = b9 ^ b10;
5692 b9 = (tmp >> 17) | (tmp << (64 - 17)); 5692 b9 = ror64(tmp, 17);
5693 b10 -= b9; 5693 b10 -= b9;
5694 5694
5695 tmp = b11 ^ b8; 5695 tmp = b11 ^ b8;
5696 b11 = (tmp >> 59) | (tmp << (64 - 59)); 5696 b11 = ror64(tmp, 59);
5697 b8 -= b11; 5697 b8 -= b11;
5698 5698
5699 tmp = b13 ^ b14; 5699 tmp = b13 ^ b14;
5700 b13 = (tmp >> 41) | (tmp << (64 - 41)); 5700 b13 = ror64(tmp, 41);
5701 b14 -= b13; 5701 b14 -= b13;
5702 5702
5703 tmp = b15 ^ b12; 5703 tmp = b15 ^ b12;
5704 b15 = (tmp >> 34) | (tmp << (64 - 34)); 5704 b15 = ror64(tmp, 34);
5705 b12 -= b15; 5705 b12 -= b15;
5706 5706
5707 tmp = b1 ^ b6; 5707 tmp = b1 ^ b6;
5708 b1 = (tmp >> 13) | (tmp << (64 - 13)); 5708 b1 = ror64(tmp, 13);
5709 b6 -= b1; 5709 b6 -= b1;
5710 5710
5711 tmp = b3 ^ b4; 5711 tmp = b3 ^ b4;
5712 b3 = (tmp >> 51) | (tmp << (64 - 51)); 5712 b3 = ror64(tmp, 51);
5713 b4 -= b3; 5713 b4 -= b3;
5714 5714
5715 tmp = b5 ^ b2; 5715 tmp = b5 ^ b2;
5716 b5 = (tmp >> 4) | (tmp << (64 - 4)); 5716 b5 = ror64(tmp, 4);
5717 b2 -= b5; 5717 b2 -= b5;
5718 5718
5719 tmp = b7 ^ b0; 5719 tmp = b7 ^ b0;
5720 b7 = (tmp >> 33) | (tmp << (64 - 33)); 5720 b7 = ror64(tmp, 33);
5721 b0 -= b7; 5721 b0 -= b7;
5722 5722
5723 tmp = b1 ^ b8; 5723 tmp = b1 ^ b8;
5724 b1 = (tmp >> 52) | (tmp << (64 - 52)); 5724 b1 = ror64(tmp, 52);
5725 b8 -= b1; 5725 b8 -= b1;
5726 5726
5727 tmp = b5 ^ b14; 5727 tmp = b5 ^ b14;
5728 b5 = (tmp >> 23) | (tmp << (64 - 23)); 5728 b5 = ror64(tmp, 23);
5729 b14 -= b5; 5729 b14 -= b5;
5730 5730
5731 tmp = b3 ^ b12; 5731 tmp = b3 ^ b12;
5732 b3 = (tmp >> 18) | (tmp << (64 - 18)); 5732 b3 = ror64(tmp, 18);
5733 b12 -= b3; 5733 b12 -= b3;
5734 5734
5735 tmp = b7 ^ b10; 5735 tmp = b7 ^ b10;
5736 b7 = (tmp >> 49) | (tmp << (64 - 49)); 5736 b7 = ror64(tmp, 49);
5737 b10 -= b7; 5737 b10 -= b7;
5738 5738
5739 tmp = b15 ^ b4; 5739 tmp = b15 ^ b4;
5740 b15 = (tmp >> 55) | (tmp << (64 - 55)); 5740 b15 = ror64(tmp, 55);
5741 b4 -= b15; 5741 b4 -= b15;
5742 5742
5743 tmp = b11 ^ b6; 5743 tmp = b11 ^ b6;
5744 b11 = (tmp >> 10) | (tmp << (64 - 10)); 5744 b11 = ror64(tmp, 10);
5745 b6 -= b11; 5745 b6 -= b11;
5746 5746
5747 tmp = b13 ^ b2; 5747 tmp = b13 ^ b2;
5748 b13 = (tmp >> 19) | (tmp << (64 - 19)); 5748 b13 = ror64(tmp, 19);
5749 b2 -= b13; 5749 b2 -= b13;
5750 5750
5751 tmp = b9 ^ b0; 5751 tmp = b9 ^ b0;
5752 b9 = (tmp >> 38) | (tmp << (64 - 38)); 5752 b9 = ror64(tmp, 38);
5753 b0 -= b9; 5753 b0 -= b9;
5754 5754
5755 tmp = b15 ^ b14; 5755 tmp = b15 ^ b14;
5756 b15 = (tmp >> 37) | (tmp << (64 - 37)); 5756 b15 = ror64(tmp, 37);
5757 b14 -= b15 + k15 + t1; 5757 b14 -= b15 + k15 + t1;
5758 b15 -= k16 + 18; 5758 b15 -= k16 + 18;
5759 5759
5760 tmp = b13 ^ b12; 5760 tmp = b13 ^ b12;
5761 b13 = (tmp >> 22) | (tmp << (64 - 22)); 5761 b13 = ror64(tmp, 22);
5762 b12 -= b13 + k13; 5762 b12 -= b13 + k13;
5763 b13 -= k14 + t0; 5763 b13 -= k14 + t0;
5764 5764
5765 tmp = b11 ^ b10; 5765 tmp = b11 ^ b10;
5766 b11 = (tmp >> 17) | (tmp << (64 - 17)); 5766 b11 = ror64(tmp, 17);
5767 b10 -= b11 + k11; 5767 b10 -= b11 + k11;
5768 b11 -= k12; 5768 b11 -= k12;
5769 5769
5770 tmp = b9 ^ b8; 5770 tmp = b9 ^ b8;
5771 b9 = (tmp >> 8) | (tmp << (64 - 8)); 5771 b9 = ror64(tmp, 8);
5772 b8 -= b9 + k9; 5772 b8 -= b9 + k9;
5773 b9 -= k10; 5773 b9 -= k10;
5774 5774
5775 tmp = b7 ^ b6; 5775 tmp = b7 ^ b6;
5776 b7 = (tmp >> 47) | (tmp << (64 - 47)); 5776 b7 = ror64(tmp, 47);
5777 b6 -= b7 + k7; 5777 b6 -= b7 + k7;
5778 b7 -= k8; 5778 b7 -= k8;
5779 5779
5780 tmp = b5 ^ b4; 5780 tmp = b5 ^ b4;
5781 b5 = (tmp >> 8) | (tmp << (64 - 8)); 5781 b5 = ror64(tmp, 8);
5782 b4 -= b5 + k5; 5782 b4 -= b5 + k5;
5783 b5 -= k6; 5783 b5 -= k6;
5784 5784
5785 tmp = b3 ^ b2; 5785 tmp = b3 ^ b2;
5786 b3 = (tmp >> 13) | (tmp << (64 - 13)); 5786 b3 = ror64(tmp, 13);
5787 b2 -= b3 + k3; 5787 b2 -= b3 + k3;
5788 b3 -= k4; 5788 b3 -= k4;
5789 5789
5790 tmp = b1 ^ b0; 5790 tmp = b1 ^ b0;
5791 b1 = (tmp >> 24) | (tmp << (64 - 24)); 5791 b1 = ror64(tmp, 24);
5792 b0 -= b1 + k1; 5792 b0 -= b1 + k1;
5793 b1 -= k2; 5793 b1 -= k2;
5794 5794
5795 tmp = b7 ^ b12; 5795 tmp = b7 ^ b12;
5796 b7 = (tmp >> 20) | (tmp << (64 - 20)); 5796 b7 = ror64(tmp, 20);
5797 b12 -= b7; 5797 b12 -= b7;
5798 5798
5799 tmp = b3 ^ b10; 5799 tmp = b3 ^ b10;
5800 b3 = (tmp >> 37) | (tmp << (64 - 37)); 5800 b3 = ror64(tmp, 37);
5801 b10 -= b3; 5801 b10 -= b3;
5802 5802
5803 tmp = b5 ^ b8; 5803 tmp = b5 ^ b8;
5804 b5 = (tmp >> 31) | (tmp << (64 - 31)); 5804 b5 = ror64(tmp, 31);
5805 b8 -= b5; 5805 b8 -= b5;
5806 5806
5807 tmp = b1 ^ b14; 5807 tmp = b1 ^ b14;
5808 b1 = (tmp >> 23) | (tmp << (64 - 23)); 5808 b1 = ror64(tmp, 23);
5809 b14 -= b1; 5809 b14 -= b1;
5810 5810
5811 tmp = b9 ^ b4; 5811 tmp = b9 ^ b4;
5812 b9 = (tmp >> 52) | (tmp << (64 - 52)); 5812 b9 = ror64(tmp, 52);
5813 b4 -= b9; 5813 b4 -= b9;
5814 5814
5815 tmp = b13 ^ b6; 5815 tmp = b13 ^ b6;
5816 b13 = (tmp >> 35) | (tmp << (64 - 35)); 5816 b13 = ror64(tmp, 35);
5817 b6 -= b13; 5817 b6 -= b13;
5818 5818
5819 tmp = b11 ^ b2; 5819 tmp = b11 ^ b2;
5820 b11 = (tmp >> 48) | (tmp << (64 - 48)); 5820 b11 = ror64(tmp, 48);
5821 b2 -= b11; 5821 b2 -= b11;
5822 5822
5823 tmp = b15 ^ b0; 5823 tmp = b15 ^ b0;
5824 b15 = (tmp >> 9) | (tmp << (64 - 9)); 5824 b15 = ror64(tmp, 9);
5825 b0 -= b15; 5825 b0 -= b15;
5826 5826
5827 tmp = b9 ^ b10; 5827 tmp = b9 ^ b10;
5828 b9 = (tmp >> 25) | (tmp << (64 - 25)); 5828 b9 = ror64(tmp, 25);
5829 b10 -= b9; 5829 b10 -= b9;
5830 5830
5831 tmp = b11 ^ b8; 5831 tmp = b11 ^ b8;
5832 b11 = (tmp >> 44) | (tmp << (64 - 44)); 5832 b11 = ror64(tmp, 44);
5833 b8 -= b11; 5833 b8 -= b11;
5834 5834
5835 tmp = b13 ^ b14; 5835 tmp = b13 ^ b14;
5836 b13 = (tmp >> 42) | (tmp << (64 - 42)); 5836 b13 = ror64(tmp, 42);
5837 b14 -= b13; 5837 b14 -= b13;
5838 5838
5839 tmp = b15 ^ b12; 5839 tmp = b15 ^ b12;
5840 b15 = (tmp >> 19) | (tmp << (64 - 19)); 5840 b15 = ror64(tmp, 19);
5841 b12 -= b15; 5841 b12 -= b15;
5842 5842
5843 tmp = b1 ^ b6; 5843 tmp = b1 ^ b6;
5844 b1 = (tmp >> 46) | (tmp << (64 - 46)); 5844 b1 = ror64(tmp, 46);
5845 b6 -= b1; 5845 b6 -= b1;
5846 5846
5847 tmp = b3 ^ b4; 5847 tmp = b3 ^ b4;
5848 b3 = (tmp >> 47) | (tmp << (64 - 47)); 5848 b3 = ror64(tmp, 47);
5849 b4 -= b3; 5849 b4 -= b3;
5850 5850
5851 tmp = b5 ^ b2; 5851 tmp = b5 ^ b2;
5852 b5 = (tmp >> 44) | (tmp << (64 - 44)); 5852 b5 = ror64(tmp, 44);
5853 b2 -= b5; 5853 b2 -= b5;
5854 5854
5855 tmp = b7 ^ b0; 5855 tmp = b7 ^ b0;
5856 b7 = (tmp >> 31) | (tmp << (64 - 31)); 5856 b7 = ror64(tmp, 31);
5857 b0 -= b7; 5857 b0 -= b7;
5858 5858
5859 tmp = b1 ^ b8; 5859 tmp = b1 ^ b8;
5860 b1 = (tmp >> 41) | (tmp << (64 - 41)); 5860 b1 = ror64(tmp, 41);
5861 b8 -= b1; 5861 b8 -= b1;
5862 5862
5863 tmp = b5 ^ b14; 5863 tmp = b5 ^ b14;
5864 b5 = (tmp >> 42) | (tmp << (64 - 42)); 5864 b5 = ror64(tmp, 42);
5865 b14 -= b5; 5865 b14 -= b5;
5866 5866
5867 tmp = b3 ^ b12; 5867 tmp = b3 ^ b12;
5868 b3 = (tmp >> 53) | (tmp << (64 - 53)); 5868 b3 = ror64(tmp, 53);
5869 b12 -= b3; 5869 b12 -= b3;
5870 5870
5871 tmp = b7 ^ b10; 5871 tmp = b7 ^ b10;
5872 b7 = (tmp >> 4) | (tmp << (64 - 4)); 5872 b7 = ror64(tmp, 4);
5873 b10 -= b7; 5873 b10 -= b7;
5874 5874
5875 tmp = b15 ^ b4; 5875 tmp = b15 ^ b4;
5876 b15 = (tmp >> 51) | (tmp << (64 - 51)); 5876 b15 = ror64(tmp, 51);
5877 b4 -= b15; 5877 b4 -= b15;
5878 5878
5879 tmp = b11 ^ b6; 5879 tmp = b11 ^ b6;
5880 b11 = (tmp >> 56) | (tmp << (64 - 56)); 5880 b11 = ror64(tmp, 56);
5881 b6 -= b11; 5881 b6 -= b11;
5882 5882
5883 tmp = b13 ^ b2; 5883 tmp = b13 ^ b2;
5884 b13 = (tmp >> 34) | (tmp << (64 - 34)); 5884 b13 = ror64(tmp, 34);
5885 b2 -= b13; 5885 b2 -= b13;
5886 5886
5887 tmp = b9 ^ b0; 5887 tmp = b9 ^ b0;
5888 b9 = (tmp >> 16) | (tmp << (64 - 16)); 5888 b9 = ror64(tmp, 16);
5889 b0 -= b9; 5889 b0 -= b9;
5890 5890
5891 tmp = b15 ^ b14; 5891 tmp = b15 ^ b14;
5892 b15 = (tmp >> 30) | (tmp << (64 - 30)); 5892 b15 = ror64(tmp, 30);
5893 b14 -= b15 + k14 + t0; 5893 b14 -= b15 + k14 + t0;
5894 b15 -= k15 + 17; 5894 b15 -= k15 + 17;
5895 5895
5896 tmp = b13 ^ b12; 5896 tmp = b13 ^ b12;
5897 b13 = (tmp >> 44) | (tmp << (64 - 44)); 5897 b13 = ror64(tmp, 44);
5898 b12 -= b13 + k12; 5898 b12 -= b13 + k12;
5899 b13 -= k13 + t2; 5899 b13 -= k13 + t2;
5900 5900
5901 tmp = b11 ^ b10; 5901 tmp = b11 ^ b10;
5902 b11 = (tmp >> 47) | (tmp << (64 - 47)); 5902 b11 = ror64(tmp, 47);
5903 b10 -= b11 + k10; 5903 b10 -= b11 + k10;
5904 b11 -= k11; 5904 b11 -= k11;
5905 5905
5906 tmp = b9 ^ b8; 5906 tmp = b9 ^ b8;
5907 b9 = (tmp >> 12) | (tmp << (64 - 12)); 5907 b9 = ror64(tmp, 12);
5908 b8 -= b9 + k8; 5908 b8 -= b9 + k8;
5909 b9 -= k9; 5909 b9 -= k9;
5910 5910
5911 tmp = b7 ^ b6; 5911 tmp = b7 ^ b6;
5912 b7 = (tmp >> 31) | (tmp << (64 - 31)); 5912 b7 = ror64(tmp, 31);
5913 b6 -= b7 + k6; 5913 b6 -= b7 + k6;
5914 b7 -= k7; 5914 b7 -= k7;
5915 5915
5916 tmp = b5 ^ b4; 5916 tmp = b5 ^ b4;
5917 b5 = (tmp >> 37) | (tmp << (64 - 37)); 5917 b5 = ror64(tmp, 37);
5918 b4 -= b5 + k4; 5918 b4 -= b5 + k4;
5919 b5 -= k5; 5919 b5 -= k5;
5920 5920
5921 tmp = b3 ^ b2; 5921 tmp = b3 ^ b2;
5922 b3 = (tmp >> 9) | (tmp << (64 - 9)); 5922 b3 = ror64(tmp, 9);
5923 b2 -= b3 + k2; 5923 b2 -= b3 + k2;
5924 b3 -= k3; 5924 b3 -= k3;
5925 5925
5926 tmp = b1 ^ b0; 5926 tmp = b1 ^ b0;
5927 b1 = (tmp >> 41) | (tmp << (64 - 41)); 5927 b1 = ror64(tmp, 41);
5928 b0 -= b1 + k0; 5928 b0 -= b1 + k0;
5929 b1 -= k1; 5929 b1 -= k1;
5930 5930
5931 tmp = b7 ^ b12; 5931 tmp = b7 ^ b12;
5932 b7 = (tmp >> 25) | (tmp << (64 - 25)); 5932 b7 = ror64(tmp, 25);
5933 b12 -= b7; 5933 b12 -= b7;
5934 5934
5935 tmp = b3 ^ b10; 5935 tmp = b3 ^ b10;
5936 b3 = (tmp >> 16) | (tmp << (64 - 16)); 5936 b3 = ror64(tmp, 16);
5937 b10 -= b3; 5937 b10 -= b3;
5938 5938
5939 tmp = b5 ^ b8; 5939 tmp = b5 ^ b8;
5940 b5 = (tmp >> 28) | (tmp << (64 - 28)); 5940 b5 = ror64(tmp, 28);
5941 b8 -= b5; 5941 b8 -= b5;
5942 5942
5943 tmp = b1 ^ b14; 5943 tmp = b1 ^ b14;
5944 b1 = (tmp >> 47) | (tmp << (64 - 47)); 5944 b1 = ror64(tmp, 47);
5945 b14 -= b1; 5945 b14 -= b1;
5946 5946
5947 tmp = b9 ^ b4; 5947 tmp = b9 ^ b4;
5948 b9 = (tmp >> 41) | (tmp << (64 - 41)); 5948 b9 = ror64(tmp, 41);
5949 b4 -= b9; 5949 b4 -= b9;
5950 5950
5951 tmp = b13 ^ b6; 5951 tmp = b13 ^ b6;
5952 b13 = (tmp >> 48) | (tmp << (64 - 48)); 5952 b13 = ror64(tmp, 48);
5953 b6 -= b13; 5953 b6 -= b13;
5954 5954
5955 tmp = b11 ^ b2; 5955 tmp = b11 ^ b2;
5956 b11 = (tmp >> 20) | (tmp << (64 - 20)); 5956 b11 = ror64(tmp, 20);
5957 b2 -= b11; 5957 b2 -= b11;
5958 5958
5959 tmp = b15 ^ b0; 5959 tmp = b15 ^ b0;
5960 b15 = (tmp >> 5) | (tmp << (64 - 5)); 5960 b15 = ror64(tmp, 5);
5961 b0 -= b15; 5961 b0 -= b15;
5962 5962
5963 tmp = b9 ^ b10; 5963 tmp = b9 ^ b10;
5964 b9 = (tmp >> 17) | (tmp << (64 - 17)); 5964 b9 = ror64(tmp, 17);
5965 b10 -= b9; 5965 b10 -= b9;
5966 5966
5967 tmp = b11 ^ b8; 5967 tmp = b11 ^ b8;
5968 b11 = (tmp >> 59) | (tmp << (64 - 59)); 5968 b11 = ror64(tmp, 59);
5969 b8 -= b11; 5969 b8 -= b11;
5970 5970
5971 tmp = b13 ^ b14; 5971 tmp = b13 ^ b14;
5972 b13 = (tmp >> 41) | (tmp << (64 - 41)); 5972 b13 = ror64(tmp, 41);
5973 b14 -= b13; 5973 b14 -= b13;
5974 5974
5975 tmp = b15 ^ b12; 5975 tmp = b15 ^ b12;
5976 b15 = (tmp >> 34) | (tmp << (64 - 34)); 5976 b15 = ror64(tmp, 34);
5977 b12 -= b15; 5977 b12 -= b15;
5978 5978
5979 tmp = b1 ^ b6; 5979 tmp = b1 ^ b6;
5980 b1 = (tmp >> 13) | (tmp << (64 - 13)); 5980 b1 = ror64(tmp, 13);
5981 b6 -= b1; 5981 b6 -= b1;
5982 5982
5983 tmp = b3 ^ b4; 5983 tmp = b3 ^ b4;
5984 b3 = (tmp >> 51) | (tmp << (64 - 51)); 5984 b3 = ror64(tmp, 51);
5985 b4 -= b3; 5985 b4 -= b3;
5986 5986
5987 tmp = b5 ^ b2; 5987 tmp = b5 ^ b2;
5988 b5 = (tmp >> 4) | (tmp << (64 - 4)); 5988 b5 = ror64(tmp, 4);
5989 b2 -= b5; 5989 b2 -= b5;
5990 5990
5991 tmp = b7 ^ b0; 5991 tmp = b7 ^ b0;
5992 b7 = (tmp >> 33) | (tmp << (64 - 33)); 5992 b7 = ror64(tmp, 33);
5993 b0 -= b7; 5993 b0 -= b7;
5994 5994
5995 tmp = b1 ^ b8; 5995 tmp = b1 ^ b8;
5996 b1 = (tmp >> 52) | (tmp << (64 - 52)); 5996 b1 = ror64(tmp, 52);
5997 b8 -= b1; 5997 b8 -= b1;
5998 5998
5999 tmp = b5 ^ b14; 5999 tmp = b5 ^ b14;
6000 b5 = (tmp >> 23) | (tmp << (64 - 23)); 6000 b5 = ror64(tmp, 23);
6001 b14 -= b5; 6001 b14 -= b5;
6002 6002
6003 tmp = b3 ^ b12; 6003 tmp = b3 ^ b12;
6004 b3 = (tmp >> 18) | (tmp << (64 - 18)); 6004 b3 = ror64(tmp, 18);
6005 b12 -= b3; 6005 b12 -= b3;
6006 6006
6007 tmp = b7 ^ b10; 6007 tmp = b7 ^ b10;
6008 b7 = (tmp >> 49) | (tmp << (64 - 49)); 6008 b7 = ror64(tmp, 49);
6009 b10 -= b7; 6009 b10 -= b7;
6010 6010
6011 tmp = b15 ^ b4; 6011 tmp = b15 ^ b4;
6012 b15 = (tmp >> 55) | (tmp << (64 - 55)); 6012 b15 = ror64(tmp, 55);
6013 b4 -= b15; 6013 b4 -= b15;
6014 6014
6015 tmp = b11 ^ b6; 6015 tmp = b11 ^ b6;
6016 b11 = (tmp >> 10) | (tmp << (64 - 10)); 6016 b11 = ror64(tmp, 10);
6017 b6 -= b11; 6017 b6 -= b11;
6018 6018
6019 tmp = b13 ^ b2; 6019 tmp = b13 ^ b2;
6020 b13 = (tmp >> 19) | (tmp << (64 - 19)); 6020 b13 = ror64(tmp, 19);
6021 b2 -= b13; 6021 b2 -= b13;
6022 6022
6023 tmp = b9 ^ b0; 6023 tmp = b9 ^ b0;
6024 b9 = (tmp >> 38) | (tmp << (64 - 38)); 6024 b9 = ror64(tmp, 38);
6025 b0 -= b9; 6025 b0 -= b9;
6026 6026
6027 tmp = b15 ^ b14; 6027 tmp = b15 ^ b14;
6028 b15 = (tmp >> 37) | (tmp << (64 - 37)); 6028 b15 = ror64(tmp, 37);
6029 b14 -= b15 + k13 + t2; 6029 b14 -= b15 + k13 + t2;
6030 b15 -= k14 + 16; 6030 b15 -= k14 + 16;
6031 6031
6032 tmp = b13 ^ b12; 6032 tmp = b13 ^ b12;
6033 b13 = (tmp >> 22) | (tmp << (64 - 22)); 6033 b13 = ror64(tmp, 22);
6034 b12 -= b13 + k11; 6034 b12 -= b13 + k11;
6035 b13 -= k12 + t1; 6035 b13 -= k12 + t1;
6036 6036
6037 tmp = b11 ^ b10; 6037 tmp = b11 ^ b10;
6038 b11 = (tmp >> 17) | (tmp << (64 - 17)); 6038 b11 = ror64(tmp, 17);
6039 b10 -= b11 + k9; 6039 b10 -= b11 + k9;
6040 b11 -= k10; 6040 b11 -= k10;
6041 6041
6042 tmp = b9 ^ b8; 6042 tmp = b9 ^ b8;
6043 b9 = (tmp >> 8) | (tmp << (64 - 8)); 6043 b9 = ror64(tmp, 8);
6044 b8 -= b9 + k7; 6044 b8 -= b9 + k7;
6045 b9 -= k8; 6045 b9 -= k8;
6046 6046
6047 tmp = b7 ^ b6; 6047 tmp = b7 ^ b6;
6048 b7 = (tmp >> 47) | (tmp << (64 - 47)); 6048 b7 = ror64(tmp, 47);
6049 b6 -= b7 + k5; 6049 b6 -= b7 + k5;
6050 b7 -= k6; 6050 b7 -= k6;
6051 6051
6052 tmp = b5 ^ b4; 6052 tmp = b5 ^ b4;
6053 b5 = (tmp >> 8) | (tmp << (64 - 8)); 6053 b5 = ror64(tmp, 8);
6054 b4 -= b5 + k3; 6054 b4 -= b5 + k3;
6055 b5 -= k4; 6055 b5 -= k4;
6056 6056
6057 tmp = b3 ^ b2; 6057 tmp = b3 ^ b2;
6058 b3 = (tmp >> 13) | (tmp << (64 - 13)); 6058 b3 = ror64(tmp, 13);
6059 b2 -= b3 + k1; 6059 b2 -= b3 + k1;
6060 b3 -= k2; 6060 b3 -= k2;
6061 6061
6062 tmp = b1 ^ b0; 6062 tmp = b1 ^ b0;
6063 b1 = (tmp >> 24) | (tmp << (64 - 24)); 6063 b1 = ror64(tmp, 24);
6064 b0 -= b1 + k16; 6064 b0 -= b1 + k16;
6065 b1 -= k0; 6065 b1 -= k0;
6066 6066
6067 tmp = b7 ^ b12; 6067 tmp = b7 ^ b12;
6068 b7 = (tmp >> 20) | (tmp << (64 - 20)); 6068 b7 = ror64(tmp, 20);
6069 b12 -= b7; 6069 b12 -= b7;
6070 6070
6071 tmp = b3 ^ b10; 6071 tmp = b3 ^ b10;
6072 b3 = (tmp >> 37) | (tmp << (64 - 37)); 6072 b3 = ror64(tmp, 37);
6073 b10 -= b3; 6073 b10 -= b3;
6074 6074
6075 tmp = b5 ^ b8; 6075 tmp = b5 ^ b8;
6076 b5 = (tmp >> 31) | (tmp << (64 - 31)); 6076 b5 = ror64(tmp, 31);
6077 b8 -= b5; 6077 b8 -= b5;
6078 6078
6079 tmp = b1 ^ b14; 6079 tmp = b1 ^ b14;
6080 b1 = (tmp >> 23) | (tmp << (64 - 23)); 6080 b1 = ror64(tmp, 23);
6081 b14 -= b1; 6081 b14 -= b1;
6082 6082
6083 tmp = b9 ^ b4; 6083 tmp = b9 ^ b4;
6084 b9 = (tmp >> 52) | (tmp << (64 - 52)); 6084 b9 = ror64(tmp, 52);
6085 b4 -= b9; 6085 b4 -= b9;
6086 6086
6087 tmp = b13 ^ b6; 6087 tmp = b13 ^ b6;
6088 b13 = (tmp >> 35) | (tmp << (64 - 35)); 6088 b13 = ror64(tmp, 35);
6089 b6 -= b13; 6089 b6 -= b13;
6090 6090
6091 tmp = b11 ^ b2; 6091 tmp = b11 ^ b2;
6092 b11 = (tmp >> 48) | (tmp << (64 - 48)); 6092 b11 = ror64(tmp, 48);
6093 b2 -= b11; 6093 b2 -= b11;
6094 6094
6095 tmp = b15 ^ b0; 6095 tmp = b15 ^ b0;
6096 b15 = (tmp >> 9) | (tmp << (64 - 9)); 6096 b15 = ror64(tmp, 9);
6097 b0 -= b15; 6097 b0 -= b15;
6098 6098
6099 tmp = b9 ^ b10; 6099 tmp = b9 ^ b10;
6100 b9 = (tmp >> 25) | (tmp << (64 - 25)); 6100 b9 = ror64(tmp, 25);
6101 b10 -= b9; 6101 b10 -= b9;
6102 6102
6103 tmp = b11 ^ b8; 6103 tmp = b11 ^ b8;
6104 b11 = (tmp >> 44) | (tmp << (64 - 44)); 6104 b11 = ror64(tmp, 44);
6105 b8 -= b11; 6105 b8 -= b11;
6106 6106
6107 tmp = b13 ^ b14; 6107 tmp = b13 ^ b14;
6108 b13 = (tmp >> 42) | (tmp << (64 - 42)); 6108 b13 = ror64(tmp, 42);
6109 b14 -= b13; 6109 b14 -= b13;
6110 6110
6111 tmp = b15 ^ b12; 6111 tmp = b15 ^ b12;
6112 b15 = (tmp >> 19) | (tmp << (64 - 19)); 6112 b15 = ror64(tmp, 19);
6113 b12 -= b15; 6113 b12 -= b15;
6114 6114
6115 tmp = b1 ^ b6; 6115 tmp = b1 ^ b6;
6116 b1 = (tmp >> 46) | (tmp << (64 - 46)); 6116 b1 = ror64(tmp, 46);
6117 b6 -= b1; 6117 b6 -= b1;
6118 6118
6119 tmp = b3 ^ b4; 6119 tmp = b3 ^ b4;
6120 b3 = (tmp >> 47) | (tmp << (64 - 47)); 6120 b3 = ror64(tmp, 47);
6121 b4 -= b3; 6121 b4 -= b3;
6122 6122
6123 tmp = b5 ^ b2; 6123 tmp = b5 ^ b2;
6124 b5 = (tmp >> 44) | (tmp << (64 - 44)); 6124 b5 = ror64(tmp, 44);
6125 b2 -= b5; 6125 b2 -= b5;
6126 6126
6127 tmp = b7 ^ b0; 6127 tmp = b7 ^ b0;
6128 b7 = (tmp >> 31) | (tmp << (64 - 31)); 6128 b7 = ror64(tmp, 31);
6129 b0 -= b7; 6129 b0 -= b7;
6130 6130
6131 tmp = b1 ^ b8; 6131 tmp = b1 ^ b8;
6132 b1 = (tmp >> 41) | (tmp << (64 - 41)); 6132 b1 = ror64(tmp, 41);
6133 b8 -= b1; 6133 b8 -= b1;
6134 6134
6135 tmp = b5 ^ b14; 6135 tmp = b5 ^ b14;
6136 b5 = (tmp >> 42) | (tmp << (64 - 42)); 6136 b5 = ror64(tmp, 42);
6137 b14 -= b5; 6137 b14 -= b5;
6138 6138
6139 tmp = b3 ^ b12; 6139 tmp = b3 ^ b12;
6140 b3 = (tmp >> 53) | (tmp << (64 - 53)); 6140 b3 = ror64(tmp, 53);
6141 b12 -= b3; 6141 b12 -= b3;
6142 6142
6143 tmp = b7 ^ b10; 6143 tmp = b7 ^ b10;
6144 b7 = (tmp >> 4) | (tmp << (64 - 4)); 6144 b7 = ror64(tmp, 4);
6145 b10 -= b7; 6145 b10 -= b7;
6146 6146
6147 tmp = b15 ^ b4; 6147 tmp = b15 ^ b4;
6148 b15 = (tmp >> 51) | (tmp << (64 - 51)); 6148 b15 = ror64(tmp, 51);
6149 b4 -= b15; 6149 b4 -= b15;
6150 6150
6151 tmp = b11 ^ b6; 6151 tmp = b11 ^ b6;
6152 b11 = (tmp >> 56) | (tmp << (64 - 56)); 6152 b11 = ror64(tmp, 56);
6153 b6 -= b11; 6153 b6 -= b11;
6154 6154
6155 tmp = b13 ^ b2; 6155 tmp = b13 ^ b2;
6156 b13 = (tmp >> 34) | (tmp << (64 - 34)); 6156 b13 = ror64(tmp, 34);
6157 b2 -= b13; 6157 b2 -= b13;
6158 6158
6159 tmp = b9 ^ b0; 6159 tmp = b9 ^ b0;
6160 b9 = (tmp >> 16) | (tmp << (64 - 16)); 6160 b9 = ror64(tmp, 16);
6161 b0 -= b9; 6161 b0 -= b9;
6162 6162
6163 tmp = b15 ^ b14; 6163 tmp = b15 ^ b14;
6164 b15 = (tmp >> 30) | (tmp << (64 - 30)); 6164 b15 = ror64(tmp, 30);
6165 b14 -= b15 + k12 + t1; 6165 b14 -= b15 + k12 + t1;
6166 b15 -= k13 + 15; 6166 b15 -= k13 + 15;
6167 6167
6168 tmp = b13 ^ b12; 6168 tmp = b13 ^ b12;
6169 b13 = (tmp >> 44) | (tmp << (64 - 44)); 6169 b13 = ror64(tmp, 44);
6170 b12 -= b13 + k10; 6170 b12 -= b13 + k10;
6171 b13 -= k11 + t0; 6171 b13 -= k11 + t0;
6172 6172
6173 tmp = b11 ^ b10; 6173 tmp = b11 ^ b10;
6174 b11 = (tmp >> 47) | (tmp << (64 - 47)); 6174 b11 = ror64(tmp, 47);
6175 b10 -= b11 + k8; 6175 b10 -= b11 + k8;
6176 b11 -= k9; 6176 b11 -= k9;
6177 6177
6178 tmp = b9 ^ b8; 6178 tmp = b9 ^ b8;
6179 b9 = (tmp >> 12) | (tmp << (64 - 12)); 6179 b9 = ror64(tmp, 12);
6180 b8 -= b9 + k6; 6180 b8 -= b9 + k6;
6181 b9 -= k7; 6181 b9 -= k7;
6182 6182
6183 tmp = b7 ^ b6; 6183 tmp = b7 ^ b6;
6184 b7 = (tmp >> 31) | (tmp << (64 - 31)); 6184 b7 = ror64(tmp, 31);
6185 b6 -= b7 + k4; 6185 b6 -= b7 + k4;
6186 b7 -= k5; 6186 b7 -= k5;
6187 6187
6188 tmp = b5 ^ b4; 6188 tmp = b5 ^ b4;
6189 b5 = (tmp >> 37) | (tmp << (64 - 37)); 6189 b5 = ror64(tmp, 37);
6190 b4 -= b5 + k2; 6190 b4 -= b5 + k2;
6191 b5 -= k3; 6191 b5 -= k3;
6192 6192
6193 tmp = b3 ^ b2; 6193 tmp = b3 ^ b2;
6194 b3 = (tmp >> 9) | (tmp << (64 - 9)); 6194 b3 = ror64(tmp, 9);
6195 b2 -= b3 + k0; 6195 b2 -= b3 + k0;
6196 b3 -= k1; 6196 b3 -= k1;
6197 6197
6198 tmp = b1 ^ b0; 6198 tmp = b1 ^ b0;
6199 b1 = (tmp >> 41) | (tmp << (64 - 41)); 6199 b1 = ror64(tmp, 41);
6200 b0 -= b1 + k15; 6200 b0 -= b1 + k15;
6201 b1 -= k16; 6201 b1 -= k16;
6202 6202
6203 tmp = b7 ^ b12; 6203 tmp = b7 ^ b12;
6204 b7 = (tmp >> 25) | (tmp << (64 - 25)); 6204 b7 = ror64(tmp, 25);
6205 b12 -= b7; 6205 b12 -= b7;
6206 6206
6207 tmp = b3 ^ b10; 6207 tmp = b3 ^ b10;
6208 b3 = (tmp >> 16) | (tmp << (64 - 16)); 6208 b3 = ror64(tmp, 16);
6209 b10 -= b3; 6209 b10 -= b3;
6210 6210
6211 tmp = b5 ^ b8; 6211 tmp = b5 ^ b8;
6212 b5 = (tmp >> 28) | (tmp << (64 - 28)); 6212 b5 = ror64(tmp, 28);
6213 b8 -= b5; 6213 b8 -= b5;
6214 6214
6215 tmp = b1 ^ b14; 6215 tmp = b1 ^ b14;
6216 b1 = (tmp >> 47) | (tmp << (64 - 47)); 6216 b1 = ror64(tmp, 47);
6217 b14 -= b1; 6217 b14 -= b1;
6218 6218
6219 tmp = b9 ^ b4; 6219 tmp = b9 ^ b4;
6220 b9 = (tmp >> 41) | (tmp << (64 - 41)); 6220 b9 = ror64(tmp, 41);
6221 b4 -= b9; 6221 b4 -= b9;
6222 6222
6223 tmp = b13 ^ b6; 6223 tmp = b13 ^ b6;
6224 b13 = (tmp >> 48) | (tmp << (64 - 48)); 6224 b13 = ror64(tmp, 48);
6225 b6 -= b13; 6225 b6 -= b13;
6226 6226
6227 tmp = b11 ^ b2; 6227 tmp = b11 ^ b2;
6228 b11 = (tmp >> 20) | (tmp << (64 - 20)); 6228 b11 = ror64(tmp, 20);
6229 b2 -= b11; 6229 b2 -= b11;
6230 6230
6231 tmp = b15 ^ b0; 6231 tmp = b15 ^ b0;
6232 b15 = (tmp >> 5) | (tmp << (64 - 5)); 6232 b15 = ror64(tmp, 5);
6233 b0 -= b15; 6233 b0 -= b15;
6234 6234
6235 tmp = b9 ^ b10; 6235 tmp = b9 ^ b10;
6236 b9 = (tmp >> 17) | (tmp << (64 - 17)); 6236 b9 = ror64(tmp, 17);
6237 b10 -= b9; 6237 b10 -= b9;
6238 6238
6239 tmp = b11 ^ b8; 6239 tmp = b11 ^ b8;
6240 b11 = (tmp >> 59) | (tmp << (64 - 59)); 6240 b11 = ror64(tmp, 59);
6241 b8 -= b11; 6241 b8 -= b11;
6242 6242
6243 tmp = b13 ^ b14; 6243 tmp = b13 ^ b14;
6244 b13 = (tmp >> 41) | (tmp << (64 - 41)); 6244 b13 = ror64(tmp, 41);
6245 b14 -= b13; 6245 b14 -= b13;
6246 6246
6247 tmp = b15 ^ b12; 6247 tmp = b15 ^ b12;
6248 b15 = (tmp >> 34) | (tmp << (64 - 34)); 6248 b15 = ror64(tmp, 34);
6249 b12 -= b15; 6249 b12 -= b15;
6250 6250
6251 tmp = b1 ^ b6; 6251 tmp = b1 ^ b6;
6252 b1 = (tmp >> 13) | (tmp << (64 - 13)); 6252 b1 = ror64(tmp, 13);
6253 b6 -= b1; 6253 b6 -= b1;
6254 6254
6255 tmp = b3 ^ b4; 6255 tmp = b3 ^ b4;
6256 b3 = (tmp >> 51) | (tmp << (64 - 51)); 6256 b3 = ror64(tmp, 51);
6257 b4 -= b3; 6257 b4 -= b3;
6258 6258
6259 tmp = b5 ^ b2; 6259 tmp = b5 ^ b2;
6260 b5 = (tmp >> 4) | (tmp << (64 - 4)); 6260 b5 = ror64(tmp, 4);
6261 b2 -= b5; 6261 b2 -= b5;
6262 6262
6263 tmp = b7 ^ b0; 6263 tmp = b7 ^ b0;
6264 b7 = (tmp >> 33) | (tmp << (64 - 33)); 6264 b7 = ror64(tmp, 33);
6265 b0 -= b7; 6265 b0 -= b7;
6266 6266
6267 tmp = b1 ^ b8; 6267 tmp = b1 ^ b8;
6268 b1 = (tmp >> 52) | (tmp << (64 - 52)); 6268 b1 = ror64(tmp, 52);
6269 b8 -= b1; 6269 b8 -= b1;
6270 6270
6271 tmp = b5 ^ b14; 6271 tmp = b5 ^ b14;
6272 b5 = (tmp >> 23) | (tmp << (64 - 23)); 6272 b5 = ror64(tmp, 23);
6273 b14 -= b5; 6273 b14 -= b5;
6274 6274
6275 tmp = b3 ^ b12; 6275 tmp = b3 ^ b12;
6276 b3 = (tmp >> 18) | (tmp << (64 - 18)); 6276 b3 = ror64(tmp, 18);
6277 b12 -= b3; 6277 b12 -= b3;
6278 6278
6279 tmp = b7 ^ b10; 6279 tmp = b7 ^ b10;
6280 b7 = (tmp >> 49) | (tmp << (64 - 49)); 6280 b7 = ror64(tmp, 49);
6281 b10 -= b7; 6281 b10 -= b7;
6282 6282
6283 tmp = b15 ^ b4; 6283 tmp = b15 ^ b4;
6284 b15 = (tmp >> 55) | (tmp << (64 - 55)); 6284 b15 = ror64(tmp, 55);
6285 b4 -= b15; 6285 b4 -= b15;
6286 6286
6287 tmp = b11 ^ b6; 6287 tmp = b11 ^ b6;
6288 b11 = (tmp >> 10) | (tmp << (64 - 10)); 6288 b11 = ror64(tmp, 10);
6289 b6 -= b11; 6289 b6 -= b11;
6290 6290
6291 tmp = b13 ^ b2; 6291 tmp = b13 ^ b2;
6292 b13 = (tmp >> 19) | (tmp << (64 - 19)); 6292 b13 = ror64(tmp, 19);
6293 b2 -= b13; 6293 b2 -= b13;
6294 6294
6295 tmp = b9 ^ b0; 6295 tmp = b9 ^ b0;
6296 b9 = (tmp >> 38) | (tmp << (64 - 38)); 6296 b9 = ror64(tmp, 38);
6297 b0 -= b9; 6297 b0 -= b9;
6298 6298
6299 tmp = b15 ^ b14; 6299 tmp = b15 ^ b14;
6300 b15 = (tmp >> 37) | (tmp << (64 - 37)); 6300 b15 = ror64(tmp, 37);
6301 b14 -= b15 + k11 + t0; 6301 b14 -= b15 + k11 + t0;
6302 b15 -= k12 + 14; 6302 b15 -= k12 + 14;
6303 6303
6304 tmp = b13 ^ b12; 6304 tmp = b13 ^ b12;
6305 b13 = (tmp >> 22) | (tmp << (64 - 22)); 6305 b13 = ror64(tmp, 22);
6306 b12 -= b13 + k9; 6306 b12 -= b13 + k9;
6307 b13 -= k10 + t2; 6307 b13 -= k10 + t2;
6308 6308
6309 tmp = b11 ^ b10; 6309 tmp = b11 ^ b10;
6310 b11 = (tmp >> 17) | (tmp << (64 - 17)); 6310 b11 = ror64(tmp, 17);
6311 b10 -= b11 + k7; 6311 b10 -= b11 + k7;
6312 b11 -= k8; 6312 b11 -= k8;
6313 6313
6314 tmp = b9 ^ b8; 6314 tmp = b9 ^ b8;
6315 b9 = (tmp >> 8) | (tmp << (64 - 8)); 6315 b9 = ror64(tmp, 8);
6316 b8 -= b9 + k5; 6316 b8 -= b9 + k5;
6317 b9 -= k6; 6317 b9 -= k6;
6318 6318
6319 tmp = b7 ^ b6; 6319 tmp = b7 ^ b6;
6320 b7 = (tmp >> 47) | (tmp << (64 - 47)); 6320 b7 = ror64(tmp, 47);
6321 b6 -= b7 + k3; 6321 b6 -= b7 + k3;
6322 b7 -= k4; 6322 b7 -= k4;
6323 6323
6324 tmp = b5 ^ b4; 6324 tmp = b5 ^ b4;
6325 b5 = (tmp >> 8) | (tmp << (64 - 8)); 6325 b5 = ror64(tmp, 8);
6326 b4 -= b5 + k1; 6326 b4 -= b5 + k1;
6327 b5 -= k2; 6327 b5 -= k2;
6328 6328
6329 tmp = b3 ^ b2; 6329 tmp = b3 ^ b2;
6330 b3 = (tmp >> 13) | (tmp << (64 - 13)); 6330 b3 = ror64(tmp, 13);
6331 b2 -= b3 + k16; 6331 b2 -= b3 + k16;
6332 b3 -= k0; 6332 b3 -= k0;
6333 6333
6334 tmp = b1 ^ b0; 6334 tmp = b1 ^ b0;
6335 b1 = (tmp >> 24) | (tmp << (64 - 24)); 6335 b1 = ror64(tmp, 24);
6336 b0 -= b1 + k14; 6336 b0 -= b1 + k14;
6337 b1 -= k15; 6337 b1 -= k15;
6338 6338
6339 tmp = b7 ^ b12; 6339 tmp = b7 ^ b12;
6340 b7 = (tmp >> 20) | (tmp << (64 - 20)); 6340 b7 = ror64(tmp, 20);
6341 b12 -= b7; 6341 b12 -= b7;
6342 6342
6343 tmp = b3 ^ b10; 6343 tmp = b3 ^ b10;
6344 b3 = (tmp >> 37) | (tmp << (64 - 37)); 6344 b3 = ror64(tmp, 37);
6345 b10 -= b3; 6345 b10 -= b3;
6346 6346
6347 tmp = b5 ^ b8; 6347 tmp = b5 ^ b8;
6348 b5 = (tmp >> 31) | (tmp << (64 - 31)); 6348 b5 = ror64(tmp, 31);
6349 b8 -= b5; 6349 b8 -= b5;
6350 6350
6351 tmp = b1 ^ b14; 6351 tmp = b1 ^ b14;
6352 b1 = (tmp >> 23) | (tmp << (64 - 23)); 6352 b1 = ror64(tmp, 23);
6353 b14 -= b1; 6353 b14 -= b1;
6354 6354
6355 tmp = b9 ^ b4; 6355 tmp = b9 ^ b4;
6356 b9 = (tmp >> 52) | (tmp << (64 - 52)); 6356 b9 = ror64(tmp, 52);
6357 b4 -= b9; 6357 b4 -= b9;
6358 6358
6359 tmp = b13 ^ b6; 6359 tmp = b13 ^ b6;
6360 b13 = (tmp >> 35) | (tmp << (64 - 35)); 6360 b13 = ror64(tmp, 35);
6361 b6 -= b13; 6361 b6 -= b13;
6362 6362
6363 tmp = b11 ^ b2; 6363 tmp = b11 ^ b2;
6364 b11 = (tmp >> 48) | (tmp << (64 - 48)); 6364 b11 = ror64(tmp, 48);
6365 b2 -= b11; 6365 b2 -= b11;
6366 6366
6367 tmp = b15 ^ b0; 6367 tmp = b15 ^ b0;
6368 b15 = (tmp >> 9) | (tmp << (64 - 9)); 6368 b15 = ror64(tmp, 9);
6369 b0 -= b15; 6369 b0 -= b15;
6370 6370
6371 tmp = b9 ^ b10; 6371 tmp = b9 ^ b10;
6372 b9 = (tmp >> 25) | (tmp << (64 - 25)); 6372 b9 = ror64(tmp, 25);
6373 b10 -= b9; 6373 b10 -= b9;
6374 6374
6375 tmp = b11 ^ b8; 6375 tmp = b11 ^ b8;
6376 b11 = (tmp >> 44) | (tmp << (64 - 44)); 6376 b11 = ror64(tmp, 44);
6377 b8 -= b11; 6377 b8 -= b11;
6378 6378
6379 tmp = b13 ^ b14; 6379 tmp = b13 ^ b14;
6380 b13 = (tmp >> 42) | (tmp << (64 - 42)); 6380 b13 = ror64(tmp, 42);
6381 b14 -= b13; 6381 b14 -= b13;
6382 6382
6383 tmp = b15 ^ b12; 6383 tmp = b15 ^ b12;
6384 b15 = (tmp >> 19) | (tmp << (64 - 19)); 6384 b15 = ror64(tmp, 19);
6385 b12 -= b15; 6385 b12 -= b15;
6386 6386
6387 tmp = b1 ^ b6; 6387 tmp = b1 ^ b6;
6388 b1 = (tmp >> 46) | (tmp << (64 - 46)); 6388 b1 = ror64(tmp, 46);
6389 b6 -= b1; 6389 b6 -= b1;
6390 6390
6391 tmp = b3 ^ b4; 6391 tmp = b3 ^ b4;
6392 b3 = (tmp >> 47) | (tmp << (64 - 47)); 6392 b3 = ror64(tmp, 47);
6393 b4 -= b3; 6393 b4 -= b3;
6394 6394
6395 tmp = b5 ^ b2; 6395 tmp = b5 ^ b2;
6396 b5 = (tmp >> 44) | (tmp << (64 - 44)); 6396 b5 = ror64(tmp, 44);
6397 b2 -= b5; 6397 b2 -= b5;
6398 6398
6399 tmp = b7 ^ b0; 6399 tmp = b7 ^ b0;
6400 b7 = (tmp >> 31) | (tmp << (64 - 31)); 6400 b7 = ror64(tmp, 31);
6401 b0 -= b7; 6401 b0 -= b7;
6402 6402
6403 tmp = b1 ^ b8; 6403 tmp = b1 ^ b8;
6404 b1 = (tmp >> 41) | (tmp << (64 - 41)); 6404 b1 = ror64(tmp, 41);
6405 b8 -= b1; 6405 b8 -= b1;
6406 6406
6407 tmp = b5 ^ b14; 6407 tmp = b5 ^ b14;
6408 b5 = (tmp >> 42) | (tmp << (64 - 42)); 6408 b5 = ror64(tmp, 42);
6409 b14 -= b5; 6409 b14 -= b5;
6410 6410
6411 tmp = b3 ^ b12; 6411 tmp = b3 ^ b12;
6412 b3 = (tmp >> 53) | (tmp << (64 - 53)); 6412 b3 = ror64(tmp, 53);
6413 b12 -= b3; 6413 b12 -= b3;
6414 6414
6415 tmp = b7 ^ b10; 6415 tmp = b7 ^ b10;
6416 b7 = (tmp >> 4) | (tmp << (64 - 4)); 6416 b7 = ror64(tmp, 4);
6417 b10 -= b7; 6417 b10 -= b7;
6418 6418
6419 tmp = b15 ^ b4; 6419 tmp = b15 ^ b4;
6420 b15 = (tmp >> 51) | (tmp << (64 - 51)); 6420 b15 = ror64(tmp, 51);
6421 b4 -= b15; 6421 b4 -= b15;
6422 6422
6423 tmp = b11 ^ b6; 6423 tmp = b11 ^ b6;
6424 b11 = (tmp >> 56) | (tmp << (64 - 56)); 6424 b11 = ror64(tmp, 56);
6425 b6 -= b11; 6425 b6 -= b11;
6426 6426
6427 tmp = b13 ^ b2; 6427 tmp = b13 ^ b2;
6428 b13 = (tmp >> 34) | (tmp << (64 - 34)); 6428 b13 = ror64(tmp, 34);
6429 b2 -= b13; 6429 b2 -= b13;
6430 6430
6431 tmp = b9 ^ b0; 6431 tmp = b9 ^ b0;
6432 b9 = (tmp >> 16) | (tmp << (64 - 16)); 6432 b9 = ror64(tmp, 16);
6433 b0 -= b9; 6433 b0 -= b9;
6434 6434
6435 tmp = b15 ^ b14; 6435 tmp = b15 ^ b14;
6436 b15 = (tmp >> 30) | (tmp << (64 - 30)); 6436 b15 = ror64(tmp, 30);
6437 b14 -= b15 + k10 + t2; 6437 b14 -= b15 + k10 + t2;
6438 b15 -= k11 + 13; 6438 b15 -= k11 + 13;
6439 6439
6440 tmp = b13 ^ b12; 6440 tmp = b13 ^ b12;
6441 b13 = (tmp >> 44) | (tmp << (64 - 44)); 6441 b13 = ror64(tmp, 44);
6442 b12 -= b13 + k8; 6442 b12 -= b13 + k8;
6443 b13 -= k9 + t1; 6443 b13 -= k9 + t1;
6444 6444
6445 tmp = b11 ^ b10; 6445 tmp = b11 ^ b10;
6446 b11 = (tmp >> 47) | (tmp << (64 - 47)); 6446 b11 = ror64(tmp, 47);
6447 b10 -= b11 + k6; 6447 b10 -= b11 + k6;
6448 b11 -= k7; 6448 b11 -= k7;
6449 6449
6450 tmp = b9 ^ b8; 6450 tmp = b9 ^ b8;
6451 b9 = (tmp >> 12) | (tmp << (64 - 12)); 6451 b9 = ror64(tmp, 12);
6452 b8 -= b9 + k4; 6452 b8 -= b9 + k4;
6453 b9 -= k5; 6453 b9 -= k5;
6454 6454
6455 tmp = b7 ^ b6; 6455 tmp = b7 ^ b6;
6456 b7 = (tmp >> 31) | (tmp << (64 - 31)); 6456 b7 = ror64(tmp, 31);
6457 b6 -= b7 + k2; 6457 b6 -= b7 + k2;
6458 b7 -= k3; 6458 b7 -= k3;
6459 6459
6460 tmp = b5 ^ b4; 6460 tmp = b5 ^ b4;
6461 b5 = (tmp >> 37) | (tmp << (64 - 37)); 6461 b5 = ror64(tmp, 37);
6462 b4 -= b5 + k0; 6462 b4 -= b5 + k0;
6463 b5 -= k1; 6463 b5 -= k1;
6464 6464
6465 tmp = b3 ^ b2; 6465 tmp = b3 ^ b2;
6466 b3 = (tmp >> 9) | (tmp << (64 - 9)); 6466 b3 = ror64(tmp, 9);
6467 b2 -= b3 + k15; 6467 b2 -= b3 + k15;
6468 b3 -= k16; 6468 b3 -= k16;
6469 6469
6470 tmp = b1 ^ b0; 6470 tmp = b1 ^ b0;
6471 b1 = (tmp >> 41) | (tmp << (64 - 41)); 6471 b1 = ror64(tmp, 41);
6472 b0 -= b1 + k13; 6472 b0 -= b1 + k13;
6473 b1 -= k14; 6473 b1 -= k14;
6474 6474
6475 tmp = b7 ^ b12; 6475 tmp = b7 ^ b12;
6476 b7 = (tmp >> 25) | (tmp << (64 - 25)); 6476 b7 = ror64(tmp, 25);
6477 b12 -= b7; 6477 b12 -= b7;
6478 6478
6479 tmp = b3 ^ b10; 6479 tmp = b3 ^ b10;
6480 b3 = (tmp >> 16) | (tmp << (64 - 16)); 6480 b3 = ror64(tmp, 16);
6481 b10 -= b3; 6481 b10 -= b3;
6482 6482
6483 tmp = b5 ^ b8; 6483 tmp = b5 ^ b8;
6484 b5 = (tmp >> 28) | (tmp << (64 - 28)); 6484 b5 = ror64(tmp, 28);
6485 b8 -= b5; 6485 b8 -= b5;
6486 6486
6487 tmp = b1 ^ b14; 6487 tmp = b1 ^ b14;
6488 b1 = (tmp >> 47) | (tmp << (64 - 47)); 6488 b1 = ror64(tmp, 47);
6489 b14 -= b1; 6489 b14 -= b1;
6490 6490
6491 tmp = b9 ^ b4; 6491 tmp = b9 ^ b4;
6492 b9 = (tmp >> 41) | (tmp << (64 - 41)); 6492 b9 = ror64(tmp, 41);
6493 b4 -= b9; 6493 b4 -= b9;
6494 6494
6495 tmp = b13 ^ b6; 6495 tmp = b13 ^ b6;
6496 b13 = (tmp >> 48) | (tmp << (64 - 48)); 6496 b13 = ror64(tmp, 48);
6497 b6 -= b13; 6497 b6 -= b13;
6498 6498
6499 tmp = b11 ^ b2; 6499 tmp = b11 ^ b2;
6500 b11 = (tmp >> 20) | (tmp << (64 - 20)); 6500 b11 = ror64(tmp, 20);
6501 b2 -= b11; 6501 b2 -= b11;
6502 6502
6503 tmp = b15 ^ b0; 6503 tmp = b15 ^ b0;
6504 b15 = (tmp >> 5) | (tmp << (64 - 5)); 6504 b15 = ror64(tmp, 5);
6505 b0 -= b15; 6505 b0 -= b15;
6506 6506
6507 tmp = b9 ^ b10; 6507 tmp = b9 ^ b10;
6508 b9 = (tmp >> 17) | (tmp << (64 - 17)); 6508 b9 = ror64(tmp, 17);
6509 b10 -= b9; 6509 b10 -= b9;
6510 6510
6511 tmp = b11 ^ b8; 6511 tmp = b11 ^ b8;
6512 b11 = (tmp >> 59) | (tmp << (64 - 59)); 6512 b11 = ror64(tmp, 59);
6513 b8 -= b11; 6513 b8 -= b11;
6514 6514
6515 tmp = b13 ^ b14; 6515 tmp = b13 ^ b14;
6516 b13 = (tmp >> 41) | (tmp << (64 - 41)); 6516 b13 = ror64(tmp, 41);
6517 b14 -= b13; 6517 b14 -= b13;
6518 6518
6519 tmp = b15 ^ b12; 6519 tmp = b15 ^ b12;
6520 b15 = (tmp >> 34) | (tmp << (64 - 34)); 6520 b15 = ror64(tmp, 34);
6521 b12 -= b15; 6521 b12 -= b15;
6522 6522
6523 tmp = b1 ^ b6; 6523 tmp = b1 ^ b6;
6524 b1 = (tmp >> 13) | (tmp << (64 - 13)); 6524 b1 = ror64(tmp, 13);
6525 b6 -= b1; 6525 b6 -= b1;
6526 6526
6527 tmp = b3 ^ b4; 6527 tmp = b3 ^ b4;
6528 b3 = (tmp >> 51) | (tmp << (64 - 51)); 6528 b3 = ror64(tmp, 51);
6529 b4 -= b3; 6529 b4 -= b3;
6530 6530
6531 tmp = b5 ^ b2; 6531 tmp = b5 ^ b2;
6532 b5 = (tmp >> 4) | (tmp << (64 - 4)); 6532 b5 = ror64(tmp, 4);
6533 b2 -= b5; 6533 b2 -= b5;
6534 6534
6535 tmp = b7 ^ b0; 6535 tmp = b7 ^ b0;
6536 b7 = (tmp >> 33) | (tmp << (64 - 33)); 6536 b7 = ror64(tmp, 33);
6537 b0 -= b7; 6537 b0 -= b7;
6538 6538
6539 tmp = b1 ^ b8; 6539 tmp = b1 ^ b8;
6540 b1 = (tmp >> 52) | (tmp << (64 - 52)); 6540 b1 = ror64(tmp, 52);
6541 b8 -= b1; 6541 b8 -= b1;
6542 6542
6543 tmp = b5 ^ b14; 6543 tmp = b5 ^ b14;
6544 b5 = (tmp >> 23) | (tmp << (64 - 23)); 6544 b5 = ror64(tmp, 23);
6545 b14 -= b5; 6545 b14 -= b5;
6546 6546
6547 tmp = b3 ^ b12; 6547 tmp = b3 ^ b12;
6548 b3 = (tmp >> 18) | (tmp << (64 - 18)); 6548 b3 = ror64(tmp, 18);
6549 b12 -= b3; 6549 b12 -= b3;
6550 6550
6551 tmp = b7 ^ b10; 6551 tmp = b7 ^ b10;
6552 b7 = (tmp >> 49) | (tmp << (64 - 49)); 6552 b7 = ror64(tmp, 49);
6553 b10 -= b7; 6553 b10 -= b7;
6554 6554
6555 tmp = b15 ^ b4; 6555 tmp = b15 ^ b4;
6556 b15 = (tmp >> 55) | (tmp << (64 - 55)); 6556 b15 = ror64(tmp, 55);
6557 b4 -= b15; 6557 b4 -= b15;
6558 6558
6559 tmp = b11 ^ b6; 6559 tmp = b11 ^ b6;
6560 b11 = (tmp >> 10) | (tmp << (64 - 10)); 6560 b11 = ror64(tmp, 10);
6561 b6 -= b11; 6561 b6 -= b11;
6562 6562
6563 tmp = b13 ^ b2; 6563 tmp = b13 ^ b2;
6564 b13 = (tmp >> 19) | (tmp << (64 - 19)); 6564 b13 = ror64(tmp, 19);
6565 b2 -= b13; 6565 b2 -= b13;
6566 6566
6567 tmp = b9 ^ b0; 6567 tmp = b9 ^ b0;
6568 b9 = (tmp >> 38) | (tmp << (64 - 38)); 6568 b9 = ror64(tmp, 38);
6569 b0 -= b9; 6569 b0 -= b9;
6570 6570
6571 tmp = b15 ^ b14; 6571 tmp = b15 ^ b14;
6572 b15 = (tmp >> 37) | (tmp << (64 - 37)); 6572 b15 = ror64(tmp, 37);
6573 b14 -= b15 + k9 + t1; 6573 b14 -= b15 + k9 + t1;
6574 b15 -= k10 + 12; 6574 b15 -= k10 + 12;
6575 6575
6576 tmp = b13 ^ b12; 6576 tmp = b13 ^ b12;
6577 b13 = (tmp >> 22) | (tmp << (64 - 22)); 6577 b13 = ror64(tmp, 22);
6578 b12 -= b13 + k7; 6578 b12 -= b13 + k7;
6579 b13 -= k8 + t0; 6579 b13 -= k8 + t0;
6580 6580
6581 tmp = b11 ^ b10; 6581 tmp = b11 ^ b10;
6582 b11 = (tmp >> 17) | (tmp << (64 - 17)); 6582 b11 = ror64(tmp, 17);
6583 b10 -= b11 + k5; 6583 b10 -= b11 + k5;
6584 b11 -= k6; 6584 b11 -= k6;
6585 6585
6586 tmp = b9 ^ b8; 6586 tmp = b9 ^ b8;
6587 b9 = (tmp >> 8) | (tmp << (64 - 8)); 6587 b9 = ror64(tmp, 8);
6588 b8 -= b9 + k3; 6588 b8 -= b9 + k3;
6589 b9 -= k4; 6589 b9 -= k4;
6590 6590
6591 tmp = b7 ^ b6; 6591 tmp = b7 ^ b6;
6592 b7 = (tmp >> 47) | (tmp << (64 - 47)); 6592 b7 = ror64(tmp, 47);
6593 b6 -= b7 + k1; 6593 b6 -= b7 + k1;
6594 b7 -= k2; 6594 b7 -= k2;
6595 6595
6596 tmp = b5 ^ b4; 6596 tmp = b5 ^ b4;
6597 b5 = (tmp >> 8) | (tmp << (64 - 8)); 6597 b5 = ror64(tmp, 8);
6598 b4 -= b5 + k16; 6598 b4 -= b5 + k16;
6599 b5 -= k0; 6599 b5 -= k0;
6600 6600
6601 tmp = b3 ^ b2; 6601 tmp = b3 ^ b2;
6602 b3 = (tmp >> 13) | (tmp << (64 - 13)); 6602 b3 = ror64(tmp, 13);
6603 b2 -= b3 + k14; 6603 b2 -= b3 + k14;
6604 b3 -= k15; 6604 b3 -= k15;
6605 6605
6606 tmp = b1 ^ b0; 6606 tmp = b1 ^ b0;
6607 b1 = (tmp >> 24) | (tmp << (64 - 24)); 6607 b1 = ror64(tmp, 24);
6608 b0 -= b1 + k12; 6608 b0 -= b1 + k12;
6609 b1 -= k13; 6609 b1 -= k13;
6610 6610
6611 tmp = b7 ^ b12; 6611 tmp = b7 ^ b12;
6612 b7 = (tmp >> 20) | (tmp << (64 - 20)); 6612 b7 = ror64(tmp, 20);
6613 b12 -= b7; 6613 b12 -= b7;
6614 6614
6615 tmp = b3 ^ b10; 6615 tmp = b3 ^ b10;
6616 b3 = (tmp >> 37) | (tmp << (64 - 37)); 6616 b3 = ror64(tmp, 37);
6617 b10 -= b3; 6617 b10 -= b3;
6618 6618
6619 tmp = b5 ^ b8; 6619 tmp = b5 ^ b8;
6620 b5 = (tmp >> 31) | (tmp << (64 - 31)); 6620 b5 = ror64(tmp, 31);
6621 b8 -= b5; 6621 b8 -= b5;
6622 6622
6623 tmp = b1 ^ b14; 6623 tmp = b1 ^ b14;
6624 b1 = (tmp >> 23) | (tmp << (64 - 23)); 6624 b1 = ror64(tmp, 23);
6625 b14 -= b1; 6625 b14 -= b1;
6626 6626
6627 tmp = b9 ^ b4; 6627 tmp = b9 ^ b4;
6628 b9 = (tmp >> 52) | (tmp << (64 - 52)); 6628 b9 = ror64(tmp, 52);
6629 b4 -= b9; 6629 b4 -= b9;
6630 6630
6631 tmp = b13 ^ b6; 6631 tmp = b13 ^ b6;
6632 b13 = (tmp >> 35) | (tmp << (64 - 35)); 6632 b13 = ror64(tmp, 35);
6633 b6 -= b13; 6633 b6 -= b13;
6634 6634
6635 tmp = b11 ^ b2; 6635 tmp = b11 ^ b2;
6636 b11 = (tmp >> 48) | (tmp << (64 - 48)); 6636 b11 = ror64(tmp, 48);
6637 b2 -= b11; 6637 b2 -= b11;
6638 6638
6639 tmp = b15 ^ b0; 6639 tmp = b15 ^ b0;
6640 b15 = (tmp >> 9) | (tmp << (64 - 9)); 6640 b15 = ror64(tmp, 9);
6641 b0 -= b15; 6641 b0 -= b15;
6642 6642
6643 tmp = b9 ^ b10; 6643 tmp = b9 ^ b10;
6644 b9 = (tmp >> 25) | (tmp << (64 - 25)); 6644 b9 = ror64(tmp, 25);
6645 b10 -= b9; 6645 b10 -= b9;
6646 6646
6647 tmp = b11 ^ b8; 6647 tmp = b11 ^ b8;
6648 b11 = (tmp >> 44) | (tmp << (64 - 44)); 6648 b11 = ror64(tmp, 44);
6649 b8 -= b11; 6649 b8 -= b11;
6650 6650
6651 tmp = b13 ^ b14; 6651 tmp = b13 ^ b14;
6652 b13 = (tmp >> 42) | (tmp << (64 - 42)); 6652 b13 = ror64(tmp, 42);
6653 b14 -= b13; 6653 b14 -= b13;
6654 6654
6655 tmp = b15 ^ b12; 6655 tmp = b15 ^ b12;
6656 b15 = (tmp >> 19) | (tmp << (64 - 19)); 6656 b15 = ror64(tmp, 19);
6657 b12 -= b15; 6657 b12 -= b15;
6658 6658
6659 tmp = b1 ^ b6; 6659 tmp = b1 ^ b6;
6660 b1 = (tmp >> 46) | (tmp << (64 - 46)); 6660 b1 = ror64(tmp, 46);
6661 b6 -= b1; 6661 b6 -= b1;
6662 6662
6663 tmp = b3 ^ b4; 6663 tmp = b3 ^ b4;
6664 b3 = (tmp >> 47) | (tmp << (64 - 47)); 6664 b3 = ror64(tmp, 47);
6665 b4 -= b3; 6665 b4 -= b3;
6666 6666
6667 tmp = b5 ^ b2; 6667 tmp = b5 ^ b2;
6668 b5 = (tmp >> 44) | (tmp << (64 - 44)); 6668 b5 = ror64(tmp, 44);
6669 b2 -= b5; 6669 b2 -= b5;
6670 6670
6671 tmp = b7 ^ b0; 6671 tmp = b7 ^ b0;
6672 b7 = (tmp >> 31) | (tmp << (64 - 31)); 6672 b7 = ror64(tmp, 31);
6673 b0 -= b7; 6673 b0 -= b7;
6674 6674
6675 tmp = b1 ^ b8; 6675 tmp = b1 ^ b8;
6676 b1 = (tmp >> 41) | (tmp << (64 - 41)); 6676 b1 = ror64(tmp, 41);
6677 b8 -= b1; 6677 b8 -= b1;
6678 6678
6679 tmp = b5 ^ b14; 6679 tmp = b5 ^ b14;
6680 b5 = (tmp >> 42) | (tmp << (64 - 42)); 6680 b5 = ror64(tmp, 42);
6681 b14 -= b5; 6681 b14 -= b5;
6682 6682
6683 tmp = b3 ^ b12; 6683 tmp = b3 ^ b12;
6684 b3 = (tmp >> 53) | (tmp << (64 - 53)); 6684 b3 = ror64(tmp, 53);
6685 b12 -= b3; 6685 b12 -= b3;
6686 6686
6687 tmp = b7 ^ b10; 6687 tmp = b7 ^ b10;
6688 b7 = (tmp >> 4) | (tmp << (64 - 4)); 6688 b7 = ror64(tmp, 4);
6689 b10 -= b7; 6689 b10 -= b7;
6690 6690
6691 tmp = b15 ^ b4; 6691 tmp = b15 ^ b4;
6692 b15 = (tmp >> 51) | (tmp << (64 - 51)); 6692 b15 = ror64(tmp, 51);
6693 b4 -= b15; 6693 b4 -= b15;
6694 6694
6695 tmp = b11 ^ b6; 6695 tmp = b11 ^ b6;
6696 b11 = (tmp >> 56) | (tmp << (64 - 56)); 6696 b11 = ror64(tmp, 56);
6697 b6 -= b11; 6697 b6 -= b11;
6698 6698
6699 tmp = b13 ^ b2; 6699 tmp = b13 ^ b2;
6700 b13 = (tmp >> 34) | (tmp << (64 - 34)); 6700 b13 = ror64(tmp, 34);
6701 b2 -= b13; 6701 b2 -= b13;
6702 6702
6703 tmp = b9 ^ b0; 6703 tmp = b9 ^ b0;
6704 b9 = (tmp >> 16) | (tmp << (64 - 16)); 6704 b9 = ror64(tmp, 16);
6705 b0 -= b9; 6705 b0 -= b9;
6706 6706
6707 tmp = b15 ^ b14; 6707 tmp = b15 ^ b14;
6708 b15 = (tmp >> 30) | (tmp << (64 - 30)); 6708 b15 = ror64(tmp, 30);
6709 b14 -= b15 + k8 + t0; 6709 b14 -= b15 + k8 + t0;
6710 b15 -= k9 + 11; 6710 b15 -= k9 + 11;
6711 6711
6712 tmp = b13 ^ b12; 6712 tmp = b13 ^ b12;
6713 b13 = (tmp >> 44) | (tmp << (64 - 44)); 6713 b13 = ror64(tmp, 44);
6714 b12 -= b13 + k6; 6714 b12 -= b13 + k6;
6715 b13 -= k7 + t2; 6715 b13 -= k7 + t2;
6716 6716
6717 tmp = b11 ^ b10; 6717 tmp = b11 ^ b10;
6718 b11 = (tmp >> 47) | (tmp << (64 - 47)); 6718 b11 = ror64(tmp, 47);
6719 b10 -= b11 + k4; 6719 b10 -= b11 + k4;
6720 b11 -= k5; 6720 b11 -= k5;
6721 6721
6722 tmp = b9 ^ b8; 6722 tmp = b9 ^ b8;
6723 b9 = (tmp >> 12) | (tmp << (64 - 12)); 6723 b9 = ror64(tmp, 12);
6724 b8 -= b9 + k2; 6724 b8 -= b9 + k2;
6725 b9 -= k3; 6725 b9 -= k3;
6726 6726
6727 tmp = b7 ^ b6; 6727 tmp = b7 ^ b6;
6728 b7 = (tmp >> 31) | (tmp << (64 - 31)); 6728 b7 = ror64(tmp, 31);
6729 b6 -= b7 + k0; 6729 b6 -= b7 + k0;
6730 b7 -= k1; 6730 b7 -= k1;
6731 6731
6732 tmp = b5 ^ b4; 6732 tmp = b5 ^ b4;
6733 b5 = (tmp >> 37) | (tmp << (64 - 37)); 6733 b5 = ror64(tmp, 37);
6734 b4 -= b5 + k15; 6734 b4 -= b5 + k15;
6735 b5 -= k16; 6735 b5 -= k16;
6736 6736
6737 tmp = b3 ^ b2; 6737 tmp = b3 ^ b2;
6738 b3 = (tmp >> 9) | (tmp << (64 - 9)); 6738 b3 = ror64(tmp, 9);
6739 b2 -= b3 + k13; 6739 b2 -= b3 + k13;
6740 b3 -= k14; 6740 b3 -= k14;
6741 6741
6742 tmp = b1 ^ b0; 6742 tmp = b1 ^ b0;
6743 b1 = (tmp >> 41) | (tmp << (64 - 41)); 6743 b1 = ror64(tmp, 41);
6744 b0 -= b1 + k11; 6744 b0 -= b1 + k11;
6745 b1 -= k12; 6745 b1 -= k12;
6746 6746
6747 tmp = b7 ^ b12; 6747 tmp = b7 ^ b12;
6748 b7 = (tmp >> 25) | (tmp << (64 - 25)); 6748 b7 = ror64(tmp, 25);
6749 b12 -= b7; 6749 b12 -= b7;
6750 6750
6751 tmp = b3 ^ b10; 6751 tmp = b3 ^ b10;
6752 b3 = (tmp >> 16) | (tmp << (64 - 16)); 6752 b3 = ror64(tmp, 16);
6753 b10 -= b3; 6753 b10 -= b3;
6754 6754
6755 tmp = b5 ^ b8; 6755 tmp = b5 ^ b8;
6756 b5 = (tmp >> 28) | (tmp << (64 - 28)); 6756 b5 = ror64(tmp, 28);
6757 b8 -= b5; 6757 b8 -= b5;
6758 6758
6759 tmp = b1 ^ b14; 6759 tmp = b1 ^ b14;
6760 b1 = (tmp >> 47) | (tmp << (64 - 47)); 6760 b1 = ror64(tmp, 47);
6761 b14 -= b1; 6761 b14 -= b1;
6762 6762
6763 tmp = b9 ^ b4; 6763 tmp = b9 ^ b4;
6764 b9 = (tmp >> 41) | (tmp << (64 - 41)); 6764 b9 = ror64(tmp, 41);
6765 b4 -= b9; 6765 b4 -= b9;
6766 6766
6767 tmp = b13 ^ b6; 6767 tmp = b13 ^ b6;
6768 b13 = (tmp >> 48) | (tmp << (64 - 48)); 6768 b13 = ror64(tmp, 48);
6769 b6 -= b13; 6769 b6 -= b13;
6770 6770
6771 tmp = b11 ^ b2; 6771 tmp = b11 ^ b2;
6772 b11 = (tmp >> 20) | (tmp << (64 - 20)); 6772 b11 = ror64(tmp, 20);
6773 b2 -= b11; 6773 b2 -= b11;
6774 6774
6775 tmp = b15 ^ b0; 6775 tmp = b15 ^ b0;
6776 b15 = (tmp >> 5) | (tmp << (64 - 5)); 6776 b15 = ror64(tmp, 5);
6777 b0 -= b15; 6777 b0 -= b15;
6778 6778
6779 tmp = b9 ^ b10; 6779 tmp = b9 ^ b10;
6780 b9 = (tmp >> 17) | (tmp << (64 - 17)); 6780 b9 = ror64(tmp, 17);
6781 b10 -= b9; 6781 b10 -= b9;
6782 6782
6783 tmp = b11 ^ b8; 6783 tmp = b11 ^ b8;
6784 b11 = (tmp >> 59) | (tmp << (64 - 59)); 6784 b11 = ror64(tmp, 59);
6785 b8 -= b11; 6785 b8 -= b11;
6786 6786
6787 tmp = b13 ^ b14; 6787 tmp = b13 ^ b14;
6788 b13 = (tmp >> 41) | (tmp << (64 - 41)); 6788 b13 = ror64(tmp, 41);
6789 b14 -= b13; 6789 b14 -= b13;
6790 6790
6791 tmp = b15 ^ b12; 6791 tmp = b15 ^ b12;
6792 b15 = (tmp >> 34) | (tmp << (64 - 34)); 6792 b15 = ror64(tmp, 34);
6793 b12 -= b15; 6793 b12 -= b15;
6794 6794
6795 tmp = b1 ^ b6; 6795 tmp = b1 ^ b6;
6796 b1 = (tmp >> 13) | (tmp << (64 - 13)); 6796 b1 = ror64(tmp, 13);
6797 b6 -= b1; 6797 b6 -= b1;
6798 6798
6799 tmp = b3 ^ b4; 6799 tmp = b3 ^ b4;
6800 b3 = (tmp >> 51) | (tmp << (64 - 51)); 6800 b3 = ror64(tmp, 51);
6801 b4 -= b3; 6801 b4 -= b3;
6802 6802
6803 tmp = b5 ^ b2; 6803 tmp = b5 ^ b2;
6804 b5 = (tmp >> 4) | (tmp << (64 - 4)); 6804 b5 = ror64(tmp, 4);
6805 b2 -= b5; 6805 b2 -= b5;
6806 6806
6807 tmp = b7 ^ b0; 6807 tmp = b7 ^ b0;
6808 b7 = (tmp >> 33) | (tmp << (64 - 33)); 6808 b7 = ror64(tmp, 33);
6809 b0 -= b7; 6809 b0 -= b7;
6810 6810
6811 tmp = b1 ^ b8; 6811 tmp = b1 ^ b8;
6812 b1 = (tmp >> 52) | (tmp << (64 - 52)); 6812 b1 = ror64(tmp, 52);
6813 b8 -= b1; 6813 b8 -= b1;
6814 6814
6815 tmp = b5 ^ b14; 6815 tmp = b5 ^ b14;
6816 b5 = (tmp >> 23) | (tmp << (64 - 23)); 6816 b5 = ror64(tmp, 23);
6817 b14 -= b5; 6817 b14 -= b5;
6818 6818
6819 tmp = b3 ^ b12; 6819 tmp = b3 ^ b12;
6820 b3 = (tmp >> 18) | (tmp << (64 - 18)); 6820 b3 = ror64(tmp, 18);
6821 b12 -= b3; 6821 b12 -= b3;
6822 6822
6823 tmp = b7 ^ b10; 6823 tmp = b7 ^ b10;
6824 b7 = (tmp >> 49) | (tmp << (64 - 49)); 6824 b7 = ror64(tmp, 49);
6825 b10 -= b7; 6825 b10 -= b7;
6826 6826
6827 tmp = b15 ^ b4; 6827 tmp = b15 ^ b4;
6828 b15 = (tmp >> 55) | (tmp << (64 - 55)); 6828 b15 = ror64(tmp, 55);
6829 b4 -= b15; 6829 b4 -= b15;
6830 6830
6831 tmp = b11 ^ b6; 6831 tmp = b11 ^ b6;
6832 b11 = (tmp >> 10) | (tmp << (64 - 10)); 6832 b11 = ror64(tmp, 10);
6833 b6 -= b11; 6833 b6 -= b11;
6834 6834
6835 tmp = b13 ^ b2; 6835 tmp = b13 ^ b2;
6836 b13 = (tmp >> 19) | (tmp << (64 - 19)); 6836 b13 = ror64(tmp, 19);
6837 b2 -= b13; 6837 b2 -= b13;
6838 6838
6839 tmp = b9 ^ b0; 6839 tmp = b9 ^ b0;
6840 b9 = (tmp >> 38) | (tmp << (64 - 38)); 6840 b9 = ror64(tmp, 38);
6841 b0 -= b9; 6841 b0 -= b9;
6842 6842
6843 tmp = b15 ^ b14; 6843 tmp = b15 ^ b14;
6844 b15 = (tmp >> 37) | (tmp << (64 - 37)); 6844 b15 = ror64(tmp, 37);
6845 b14 -= b15 + k7 + t2; 6845 b14 -= b15 + k7 + t2;
6846 b15 -= k8 + 10; 6846 b15 -= k8 + 10;
6847 6847
6848 tmp = b13 ^ b12; 6848 tmp = b13 ^ b12;
6849 b13 = (tmp >> 22) | (tmp << (64 - 22)); 6849 b13 = ror64(tmp, 22);
6850 b12 -= b13 + k5; 6850 b12 -= b13 + k5;
6851 b13 -= k6 + t1; 6851 b13 -= k6 + t1;
6852 6852
6853 tmp = b11 ^ b10; 6853 tmp = b11 ^ b10;
6854 b11 = (tmp >> 17) | (tmp << (64 - 17)); 6854 b11 = ror64(tmp, 17);
6855 b10 -= b11 + k3; 6855 b10 -= b11 + k3;
6856 b11 -= k4; 6856 b11 -= k4;
6857 6857
6858 tmp = b9 ^ b8; 6858 tmp = b9 ^ b8;
6859 b9 = (tmp >> 8) | (tmp << (64 - 8)); 6859 b9 = ror64(tmp, 8);
6860 b8 -= b9 + k1; 6860 b8 -= b9 + k1;
6861 b9 -= k2; 6861 b9 -= k2;
6862 6862
6863 tmp = b7 ^ b6; 6863 tmp = b7 ^ b6;
6864 b7 = (tmp >> 47) | (tmp << (64 - 47)); 6864 b7 = ror64(tmp, 47);
6865 b6 -= b7 + k16; 6865 b6 -= b7 + k16;
6866 b7 -= k0; 6866 b7 -= k0;
6867 6867
6868 tmp = b5 ^ b4; 6868 tmp = b5 ^ b4;
6869 b5 = (tmp >> 8) | (tmp << (64 - 8)); 6869 b5 = ror64(tmp, 8);
6870 b4 -= b5 + k14; 6870 b4 -= b5 + k14;
6871 b5 -= k15; 6871 b5 -= k15;
6872 6872
6873 tmp = b3 ^ b2; 6873 tmp = b3 ^ b2;
6874 b3 = (tmp >> 13) | (tmp << (64 - 13)); 6874 b3 = ror64(tmp, 13);
6875 b2 -= b3 + k12; 6875 b2 -= b3 + k12;
6876 b3 -= k13; 6876 b3 -= k13;
6877 6877
6878 tmp = b1 ^ b0; 6878 tmp = b1 ^ b0;
6879 b1 = (tmp >> 24) | (tmp << (64 - 24)); 6879 b1 = ror64(tmp, 24);
6880 b0 -= b1 + k10; 6880 b0 -= b1 + k10;
6881 b1 -= k11; 6881 b1 -= k11;
6882 6882
6883 tmp = b7 ^ b12; 6883 tmp = b7 ^ b12;
6884 b7 = (tmp >> 20) | (tmp << (64 - 20)); 6884 b7 = ror64(tmp, 20);
6885 b12 -= b7; 6885 b12 -= b7;
6886 6886
6887 tmp = b3 ^ b10; 6887 tmp = b3 ^ b10;
6888 b3 = (tmp >> 37) | (tmp << (64 - 37)); 6888 b3 = ror64(tmp, 37);
6889 b10 -= b3; 6889 b10 -= b3;
6890 6890
6891 tmp = b5 ^ b8; 6891 tmp = b5 ^ b8;
6892 b5 = (tmp >> 31) | (tmp << (64 - 31)); 6892 b5 = ror64(tmp, 31);
6893 b8 -= b5; 6893 b8 -= b5;
6894 6894
6895 tmp = b1 ^ b14; 6895 tmp = b1 ^ b14;
6896 b1 = (tmp >> 23) | (tmp << (64 - 23)); 6896 b1 = ror64(tmp, 23);
6897 b14 -= b1; 6897 b14 -= b1;
6898 6898
6899 tmp = b9 ^ b4; 6899 tmp = b9 ^ b4;
6900 b9 = (tmp >> 52) | (tmp << (64 - 52)); 6900 b9 = ror64(tmp, 52);
6901 b4 -= b9; 6901 b4 -= b9;
6902 6902
6903 tmp = b13 ^ b6; 6903 tmp = b13 ^ b6;
6904 b13 = (tmp >> 35) | (tmp << (64 - 35)); 6904 b13 = ror64(tmp, 35);
6905 b6 -= b13; 6905 b6 -= b13;
6906 6906
6907 tmp = b11 ^ b2; 6907 tmp = b11 ^ b2;
6908 b11 = (tmp >> 48) | (tmp << (64 - 48)); 6908 b11 = ror64(tmp, 48);
6909 b2 -= b11; 6909 b2 -= b11;
6910 6910
6911 tmp = b15 ^ b0; 6911 tmp = b15 ^ b0;
6912 b15 = (tmp >> 9) | (tmp << (64 - 9)); 6912 b15 = ror64(tmp, 9);
6913 b0 -= b15; 6913 b0 -= b15;
6914 6914
6915 tmp = b9 ^ b10; 6915 tmp = b9 ^ b10;
6916 b9 = (tmp >> 25) | (tmp << (64 - 25)); 6916 b9 = ror64(tmp, 25);
6917 b10 -= b9; 6917 b10 -= b9;
6918 6918
6919 tmp = b11 ^ b8; 6919 tmp = b11 ^ b8;
6920 b11 = (tmp >> 44) | (tmp << (64 - 44)); 6920 b11 = ror64(tmp, 44);
6921 b8 -= b11; 6921 b8 -= b11;
6922 6922
6923 tmp = b13 ^ b14; 6923 tmp = b13 ^ b14;
6924 b13 = (tmp >> 42) | (tmp << (64 - 42)); 6924 b13 = ror64(tmp, 42);
6925 b14 -= b13; 6925 b14 -= b13;
6926 6926
6927 tmp = b15 ^ b12; 6927 tmp = b15 ^ b12;
6928 b15 = (tmp >> 19) | (tmp << (64 - 19)); 6928 b15 = ror64(tmp, 19);
6929 b12 -= b15; 6929 b12 -= b15;
6930 6930
6931 tmp = b1 ^ b6; 6931 tmp = b1 ^ b6;
6932 b1 = (tmp >> 46) | (tmp << (64 - 46)); 6932 b1 = ror64(tmp, 46);
6933 b6 -= b1; 6933 b6 -= b1;
6934 6934
6935 tmp = b3 ^ b4; 6935 tmp = b3 ^ b4;
6936 b3 = (tmp >> 47) | (tmp << (64 - 47)); 6936 b3 = ror64(tmp, 47);
6937 b4 -= b3; 6937 b4 -= b3;
6938 6938
6939 tmp = b5 ^ b2; 6939 tmp = b5 ^ b2;
6940 b5 = (tmp >> 44) | (tmp << (64 - 44)); 6940 b5 = ror64(tmp, 44);
6941 b2 -= b5; 6941 b2 -= b5;
6942 6942
6943 tmp = b7 ^ b0; 6943 tmp = b7 ^ b0;
6944 b7 = (tmp >> 31) | (tmp << (64 - 31)); 6944 b7 = ror64(tmp, 31);
6945 b0 -= b7; 6945 b0 -= b7;
6946 6946
6947 tmp = b1 ^ b8; 6947 tmp = b1 ^ b8;
6948 b1 = (tmp >> 41) | (tmp << (64 - 41)); 6948 b1 = ror64(tmp, 41);
6949 b8 -= b1; 6949 b8 -= b1;
6950 6950
6951 tmp = b5 ^ b14; 6951 tmp = b5 ^ b14;
6952 b5 = (tmp >> 42) | (tmp << (64 - 42)); 6952 b5 = ror64(tmp, 42);
6953 b14 -= b5; 6953 b14 -= b5;
6954 6954
6955 tmp = b3 ^ b12; 6955 tmp = b3 ^ b12;
6956 b3 = (tmp >> 53) | (tmp << (64 - 53)); 6956 b3 = ror64(tmp, 53);
6957 b12 -= b3; 6957 b12 -= b3;
6958 6958
6959 tmp = b7 ^ b10; 6959 tmp = b7 ^ b10;
6960 b7 = (tmp >> 4) | (tmp << (64 - 4)); 6960 b7 = ror64(tmp, 4);
6961 b10 -= b7; 6961 b10 -= b7;
6962 6962
6963 tmp = b15 ^ b4; 6963 tmp = b15 ^ b4;
6964 b15 = (tmp >> 51) | (tmp << (64 - 51)); 6964 b15 = ror64(tmp, 51);
6965 b4 -= b15; 6965 b4 -= b15;
6966 6966
6967 tmp = b11 ^ b6; 6967 tmp = b11 ^ b6;
6968 b11 = (tmp >> 56) | (tmp << (64 - 56)); 6968 b11 = ror64(tmp, 56);
6969 b6 -= b11; 6969 b6 -= b11;
6970 6970
6971 tmp = b13 ^ b2; 6971 tmp = b13 ^ b2;
6972 b13 = (tmp >> 34) | (tmp << (64 - 34)); 6972 b13 = ror64(tmp, 34);
6973 b2 -= b13; 6973 b2 -= b13;
6974 6974
6975 tmp = b9 ^ b0; 6975 tmp = b9 ^ b0;
6976 b9 = (tmp >> 16) | (tmp << (64 - 16)); 6976 b9 = ror64(tmp, 16);
6977 b0 -= b9; 6977 b0 -= b9;
6978 6978
6979 tmp = b15 ^ b14; 6979 tmp = b15 ^ b14;
6980 b15 = (tmp >> 30) | (tmp << (64 - 30)); 6980 b15 = ror64(tmp, 30);
6981 b14 -= b15 + k6 + t1; 6981 b14 -= b15 + k6 + t1;
6982 b15 -= k7 + 9; 6982 b15 -= k7 + 9;
6983 6983
6984 tmp = b13 ^ b12; 6984 tmp = b13 ^ b12;
6985 b13 = (tmp >> 44) | (tmp << (64 - 44)); 6985 b13 = ror64(tmp, 44);
6986 b12 -= b13 + k4; 6986 b12 -= b13 + k4;
6987 b13 -= k5 + t0; 6987 b13 -= k5 + t0;
6988 6988
6989 tmp = b11 ^ b10; 6989 tmp = b11 ^ b10;
6990 b11 = (tmp >> 47) | (tmp << (64 - 47)); 6990 b11 = ror64(tmp, 47);
6991 b10 -= b11 + k2; 6991 b10 -= b11 + k2;
6992 b11 -= k3; 6992 b11 -= k3;
6993 6993
6994 tmp = b9 ^ b8; 6994 tmp = b9 ^ b8;
6995 b9 = (tmp >> 12) | (tmp << (64 - 12)); 6995 b9 = ror64(tmp, 12);
6996 b8 -= b9 + k0; 6996 b8 -= b9 + k0;
6997 b9 -= k1; 6997 b9 -= k1;
6998 6998
6999 tmp = b7 ^ b6; 6999 tmp = b7 ^ b6;
7000 b7 = (tmp >> 31) | (tmp << (64 - 31)); 7000 b7 = ror64(tmp, 31);
7001 b6 -= b7 + k15; 7001 b6 -= b7 + k15;
7002 b7 -= k16; 7002 b7 -= k16;
7003 7003
7004 tmp = b5 ^ b4; 7004 tmp = b5 ^ b4;
7005 b5 = (tmp >> 37) | (tmp << (64 - 37)); 7005 b5 = ror64(tmp, 37);
7006 b4 -= b5 + k13; 7006 b4 -= b5 + k13;
7007 b5 -= k14; 7007 b5 -= k14;
7008 7008
7009 tmp = b3 ^ b2; 7009 tmp = b3 ^ b2;
7010 b3 = (tmp >> 9) | (tmp << (64 - 9)); 7010 b3 = ror64(tmp, 9);
7011 b2 -= b3 + k11; 7011 b2 -= b3 + k11;
7012 b3 -= k12; 7012 b3 -= k12;
7013 7013
7014 tmp = b1 ^ b0; 7014 tmp = b1 ^ b0;
7015 b1 = (tmp >> 41) | (tmp << (64 - 41)); 7015 b1 = ror64(tmp, 41);
7016 b0 -= b1 + k9; 7016 b0 -= b1 + k9;
7017 b1 -= k10; 7017 b1 -= k10;
7018 7018
7019 tmp = b7 ^ b12; 7019 tmp = b7 ^ b12;
7020 b7 = (tmp >> 25) | (tmp << (64 - 25)); 7020 b7 = ror64(tmp, 25);
7021 b12 -= b7; 7021 b12 -= b7;
7022 7022
7023 tmp = b3 ^ b10; 7023 tmp = b3 ^ b10;
7024 b3 = (tmp >> 16) | (tmp << (64 - 16)); 7024 b3 = ror64(tmp, 16);
7025 b10 -= b3; 7025 b10 -= b3;
7026 7026
7027 tmp = b5 ^ b8; 7027 tmp = b5 ^ b8;
7028 b5 = (tmp >> 28) | (tmp << (64 - 28)); 7028 b5 = ror64(tmp, 28);
7029 b8 -= b5; 7029 b8 -= b5;
7030 7030
7031 tmp = b1 ^ b14; 7031 tmp = b1 ^ b14;
7032 b1 = (tmp >> 47) | (tmp << (64 - 47)); 7032 b1 = ror64(tmp, 47);
7033 b14 -= b1; 7033 b14 -= b1;
7034 7034
7035 tmp = b9 ^ b4; 7035 tmp = b9 ^ b4;
7036 b9 = (tmp >> 41) | (tmp << (64 - 41)); 7036 b9 = ror64(tmp, 41);
7037 b4 -= b9; 7037 b4 -= b9;
7038 7038
7039 tmp = b13 ^ b6; 7039 tmp = b13 ^ b6;
7040 b13 = (tmp >> 48) | (tmp << (64 - 48)); 7040 b13 = ror64(tmp, 48);
7041 b6 -= b13; 7041 b6 -= b13;
7042 7042
7043 tmp = b11 ^ b2; 7043 tmp = b11 ^ b2;
7044 b11 = (tmp >> 20) | (tmp << (64 - 20)); 7044 b11 = ror64(tmp, 20);
7045 b2 -= b11; 7045 b2 -= b11;
7046 7046
7047 tmp = b15 ^ b0; 7047 tmp = b15 ^ b0;
7048 b15 = (tmp >> 5) | (tmp << (64 - 5)); 7048 b15 = ror64(tmp, 5);
7049 b0 -= b15; 7049 b0 -= b15;
7050 7050
7051 tmp = b9 ^ b10; 7051 tmp = b9 ^ b10;
7052 b9 = (tmp >> 17) | (tmp << (64 - 17)); 7052 b9 = ror64(tmp, 17);
7053 b10 -= b9; 7053 b10 -= b9;
7054 7054
7055 tmp = b11 ^ b8; 7055 tmp = b11 ^ b8;
7056 b11 = (tmp >> 59) | (tmp << (64 - 59)); 7056 b11 = ror64(tmp, 59);
7057 b8 -= b11; 7057 b8 -= b11;
7058 7058
7059 tmp = b13 ^ b14; 7059 tmp = b13 ^ b14;
7060 b13 = (tmp >> 41) | (tmp << (64 - 41)); 7060 b13 = ror64(tmp, 41);
7061 b14 -= b13; 7061 b14 -= b13;
7062 7062
7063 tmp = b15 ^ b12; 7063 tmp = b15 ^ b12;
7064 b15 = (tmp >> 34) | (tmp << (64 - 34)); 7064 b15 = ror64(tmp, 34);
7065 b12 -= b15; 7065 b12 -= b15;
7066 7066
7067 tmp = b1 ^ b6; 7067 tmp = b1 ^ b6;
7068 b1 = (tmp >> 13) | (tmp << (64 - 13)); 7068 b1 = ror64(tmp, 13);
7069 b6 -= b1; 7069 b6 -= b1;
7070 7070
7071 tmp = b3 ^ b4; 7071 tmp = b3 ^ b4;
7072 b3 = (tmp >> 51) | (tmp << (64 - 51)); 7072 b3 = ror64(tmp, 51);
7073 b4 -= b3; 7073 b4 -= b3;
7074 7074
7075 tmp = b5 ^ b2; 7075 tmp = b5 ^ b2;
7076 b5 = (tmp >> 4) | (tmp << (64 - 4)); 7076 b5 = ror64(tmp, 4);
7077 b2 -= b5; 7077 b2 -= b5;
7078 7078
7079 tmp = b7 ^ b0; 7079 tmp = b7 ^ b0;
7080 b7 = (tmp >> 33) | (tmp << (64 - 33)); 7080 b7 = ror64(tmp, 33);
7081 b0 -= b7; 7081 b0 -= b7;
7082 7082
7083 tmp = b1 ^ b8; 7083 tmp = b1 ^ b8;
7084 b1 = (tmp >> 52) | (tmp << (64 - 52)); 7084 b1 = ror64(tmp, 52);
7085 b8 -= b1; 7085 b8 -= b1;
7086 7086
7087 tmp = b5 ^ b14; 7087 tmp = b5 ^ b14;
7088 b5 = (tmp >> 23) | (tmp << (64 - 23)); 7088 b5 = ror64(tmp, 23);
7089 b14 -= b5; 7089 b14 -= b5;
7090 7090
7091 tmp = b3 ^ b12; 7091 tmp = b3 ^ b12;
7092 b3 = (tmp >> 18) | (tmp << (64 - 18)); 7092 b3 = ror64(tmp, 18);
7093 b12 -= b3; 7093 b12 -= b3;
7094 7094
7095 tmp = b7 ^ b10; 7095 tmp = b7 ^ b10;
7096 b7 = (tmp >> 49) | (tmp << (64 - 49)); 7096 b7 = ror64(tmp, 49);
7097 b10 -= b7; 7097 b10 -= b7;
7098 7098
7099 tmp = b15 ^ b4; 7099 tmp = b15 ^ b4;
7100 b15 = (tmp >> 55) | (tmp << (64 - 55)); 7100 b15 = ror64(tmp, 55);
7101 b4 -= b15; 7101 b4 -= b15;
7102 7102
7103 tmp = b11 ^ b6; 7103 tmp = b11 ^ b6;
7104 b11 = (tmp >> 10) | (tmp << (64 - 10)); 7104 b11 = ror64(tmp, 10);
7105 b6 -= b11; 7105 b6 -= b11;
7106 7106
7107 tmp = b13 ^ b2; 7107 tmp = b13 ^ b2;
7108 b13 = (tmp >> 19) | (tmp << (64 - 19)); 7108 b13 = ror64(tmp, 19);
7109 b2 -= b13; 7109 b2 -= b13;
7110 7110
7111 tmp = b9 ^ b0; 7111 tmp = b9 ^ b0;
7112 b9 = (tmp >> 38) | (tmp << (64 - 38)); 7112 b9 = ror64(tmp, 38);
7113 b0 -= b9; 7113 b0 -= b9;
7114 7114
7115 tmp = b15 ^ b14; 7115 tmp = b15 ^ b14;
7116 b15 = (tmp >> 37) | (tmp << (64 - 37)); 7116 b15 = ror64(tmp, 37);
7117 b14 -= b15 + k5 + t0; 7117 b14 -= b15 + k5 + t0;
7118 b15 -= k6 + 8; 7118 b15 -= k6 + 8;
7119 7119
7120 tmp = b13 ^ b12; 7120 tmp = b13 ^ b12;
7121 b13 = (tmp >> 22) | (tmp << (64 - 22)); 7121 b13 = ror64(tmp, 22);
7122 b12 -= b13 + k3; 7122 b12 -= b13 + k3;
7123 b13 -= k4 + t2; 7123 b13 -= k4 + t2;
7124 7124
7125 tmp = b11 ^ b10; 7125 tmp = b11 ^ b10;
7126 b11 = (tmp >> 17) | (tmp << (64 - 17)); 7126 b11 = ror64(tmp, 17);
7127 b10 -= b11 + k1; 7127 b10 -= b11 + k1;
7128 b11 -= k2; 7128 b11 -= k2;
7129 7129
7130 tmp = b9 ^ b8; 7130 tmp = b9 ^ b8;
7131 b9 = (tmp >> 8) | (tmp << (64 - 8)); 7131 b9 = ror64(tmp, 8);
7132 b8 -= b9 + k16; 7132 b8 -= b9 + k16;
7133 b9 -= k0; 7133 b9 -= k0;
7134 7134
7135 tmp = b7 ^ b6; 7135 tmp = b7 ^ b6;
7136 b7 = (tmp >> 47) | (tmp << (64 - 47)); 7136 b7 = ror64(tmp, 47);
7137 b6 -= b7 + k14; 7137 b6 -= b7 + k14;
7138 b7 -= k15; 7138 b7 -= k15;
7139 7139
7140 tmp = b5 ^ b4; 7140 tmp = b5 ^ b4;
7141 b5 = (tmp >> 8) | (tmp << (64 - 8)); 7141 b5 = ror64(tmp, 8);
7142 b4 -= b5 + k12; 7142 b4 -= b5 + k12;
7143 b5 -= k13; 7143 b5 -= k13;
7144 7144
7145 tmp = b3 ^ b2; 7145 tmp = b3 ^ b2;
7146 b3 = (tmp >> 13) | (tmp << (64 - 13)); 7146 b3 = ror64(tmp, 13);
7147 b2 -= b3 + k10; 7147 b2 -= b3 + k10;
7148 b3 -= k11; 7148 b3 -= k11;
7149 7149
7150 tmp = b1 ^ b0; 7150 tmp = b1 ^ b0;
7151 b1 = (tmp >> 24) | (tmp << (64 - 24)); 7151 b1 = ror64(tmp, 24);
7152 b0 -= b1 + k8; 7152 b0 -= b1 + k8;
7153 b1 -= k9; 7153 b1 -= k9;
7154 7154
7155 tmp = b7 ^ b12; 7155 tmp = b7 ^ b12;
7156 b7 = (tmp >> 20) | (tmp << (64 - 20)); 7156 b7 = ror64(tmp, 20);
7157 b12 -= b7; 7157 b12 -= b7;
7158 7158
7159 tmp = b3 ^ b10; 7159 tmp = b3 ^ b10;
7160 b3 = (tmp >> 37) | (tmp << (64 - 37)); 7160 b3 = ror64(tmp, 37);
7161 b10 -= b3; 7161 b10 -= b3;
7162 7162
7163 tmp = b5 ^ b8; 7163 tmp = b5 ^ b8;
7164 b5 = (tmp >> 31) | (tmp << (64 - 31)); 7164 b5 = ror64(tmp, 31);
7165 b8 -= b5; 7165 b8 -= b5;
7166 7166
7167 tmp = b1 ^ b14; 7167 tmp = b1 ^ b14;
7168 b1 = (tmp >> 23) | (tmp << (64 - 23)); 7168 b1 = ror64(tmp, 23);
7169 b14 -= b1; 7169 b14 -= b1;
7170 7170
7171 tmp = b9 ^ b4; 7171 tmp = b9 ^ b4;
7172 b9 = (tmp >> 52) | (tmp << (64 - 52)); 7172 b9 = ror64(tmp, 52);
7173 b4 -= b9; 7173 b4 -= b9;
7174 7174
7175 tmp = b13 ^ b6; 7175 tmp = b13 ^ b6;
7176 b13 = (tmp >> 35) | (tmp << (64 - 35)); 7176 b13 = ror64(tmp, 35);
7177 b6 -= b13; 7177 b6 -= b13;
7178 7178
7179 tmp = b11 ^ b2; 7179 tmp = b11 ^ b2;
7180 b11 = (tmp >> 48) | (tmp << (64 - 48)); 7180 b11 = ror64(tmp, 48);
7181 b2 -= b11; 7181 b2 -= b11;
7182 7182
7183 tmp = b15 ^ b0; 7183 tmp = b15 ^ b0;
7184 b15 = (tmp >> 9) | (tmp << (64 - 9)); 7184 b15 = ror64(tmp, 9);
7185 b0 -= b15; 7185 b0 -= b15;
7186 7186
7187 tmp = b9 ^ b10; 7187 tmp = b9 ^ b10;
7188 b9 = (tmp >> 25) | (tmp << (64 - 25)); 7188 b9 = ror64(tmp, 25);
7189 b10 -= b9; 7189 b10 -= b9;
7190 7190
7191 tmp = b11 ^ b8; 7191 tmp = b11 ^ b8;
7192 b11 = (tmp >> 44) | (tmp << (64 - 44)); 7192 b11 = ror64(tmp, 44);
7193 b8 -= b11; 7193 b8 -= b11;
7194 7194
7195 tmp = b13 ^ b14; 7195 tmp = b13 ^ b14;
7196 b13 = (tmp >> 42) | (tmp << (64 - 42)); 7196 b13 = ror64(tmp, 42);
7197 b14 -= b13; 7197 b14 -= b13;
7198 7198
7199 tmp = b15 ^ b12; 7199 tmp = b15 ^ b12;
7200 b15 = (tmp >> 19) | (tmp << (64 - 19)); 7200 b15 = ror64(tmp, 19);
7201 b12 -= b15; 7201 b12 -= b15;
7202 7202
7203 tmp = b1 ^ b6; 7203 tmp = b1 ^ b6;
7204 b1 = (tmp >> 46) | (tmp << (64 - 46)); 7204 b1 = ror64(tmp, 46);
7205 b6 -= b1; 7205 b6 -= b1;
7206 7206
7207 tmp = b3 ^ b4; 7207 tmp = b3 ^ b4;
7208 b3 = (tmp >> 47) | (tmp << (64 - 47)); 7208 b3 = ror64(tmp, 47);
7209 b4 -= b3; 7209 b4 -= b3;
7210 7210
7211 tmp = b5 ^ b2; 7211 tmp = b5 ^ b2;
7212 b5 = (tmp >> 44) | (tmp << (64 - 44)); 7212 b5 = ror64(tmp, 44);
7213 b2 -= b5; 7213 b2 -= b5;
7214 7214
7215 tmp = b7 ^ b0; 7215 tmp = b7 ^ b0;
7216 b7 = (tmp >> 31) | (tmp << (64 - 31)); 7216 b7 = ror64(tmp, 31);
7217 b0 -= b7; 7217 b0 -= b7;
7218 7218
7219 tmp = b1 ^ b8; 7219 tmp = b1 ^ b8;
7220 b1 = (tmp >> 41) | (tmp << (64 - 41)); 7220 b1 = ror64(tmp, 41);
7221 b8 -= b1; 7221 b8 -= b1;
7222 7222
7223 tmp = b5 ^ b14; 7223 tmp = b5 ^ b14;
7224 b5 = (tmp >> 42) | (tmp << (64 - 42)); 7224 b5 = ror64(tmp, 42);
7225 b14 -= b5; 7225 b14 -= b5;
7226 7226
7227 tmp = b3 ^ b12; 7227 tmp = b3 ^ b12;
7228 b3 = (tmp >> 53) | (tmp << (64 - 53)); 7228 b3 = ror64(tmp, 53);
7229 b12 -= b3; 7229 b12 -= b3;
7230 7230
7231 tmp = b7 ^ b10; 7231 tmp = b7 ^ b10;
7232 b7 = (tmp >> 4) | (tmp << (64 - 4)); 7232 b7 = ror64(tmp, 4);
7233 b10 -= b7; 7233 b10 -= b7;
7234 7234
7235 tmp = b15 ^ b4; 7235 tmp = b15 ^ b4;
7236 b15 = (tmp >> 51) | (tmp << (64 - 51)); 7236 b15 = ror64(tmp, 51);
7237 b4 -= b15; 7237 b4 -= b15;
7238 7238
7239 tmp = b11 ^ b6; 7239 tmp = b11 ^ b6;
7240 b11 = (tmp >> 56) | (tmp << (64 - 56)); 7240 b11 = ror64(tmp, 56);
7241 b6 -= b11; 7241 b6 -= b11;
7242 7242
7243 tmp = b13 ^ b2; 7243 tmp = b13 ^ b2;
7244 b13 = (tmp >> 34) | (tmp << (64 - 34)); 7244 b13 = ror64(tmp, 34);
7245 b2 -= b13; 7245 b2 -= b13;
7246 7246
7247 tmp = b9 ^ b0; 7247 tmp = b9 ^ b0;
7248 b9 = (tmp >> 16) | (tmp << (64 - 16)); 7248 b9 = ror64(tmp, 16);
7249 b0 -= b9; 7249 b0 -= b9;
7250 7250
7251 tmp = b15 ^ b14; 7251 tmp = b15 ^ b14;
7252 b15 = (tmp >> 30) | (tmp << (64 - 30)); 7252 b15 = ror64(tmp, 30);
7253 b14 -= b15 + k4 + t2; 7253 b14 -= b15 + k4 + t2;
7254 b15 -= k5 + 7; 7254 b15 -= k5 + 7;
7255 7255
7256 tmp = b13 ^ b12; 7256 tmp = b13 ^ b12;
7257 b13 = (tmp >> 44) | (tmp << (64 - 44)); 7257 b13 = ror64(tmp, 44);
7258 b12 -= b13 + k2; 7258 b12 -= b13 + k2;
7259 b13 -= k3 + t1; 7259 b13 -= k3 + t1;
7260 7260
7261 tmp = b11 ^ b10; 7261 tmp = b11 ^ b10;
7262 b11 = (tmp >> 47) | (tmp << (64 - 47)); 7262 b11 = ror64(tmp, 47);
7263 b10 -= b11 + k0; 7263 b10 -= b11 + k0;
7264 b11 -= k1; 7264 b11 -= k1;
7265 7265
7266 tmp = b9 ^ b8; 7266 tmp = b9 ^ b8;
7267 b9 = (tmp >> 12) | (tmp << (64 - 12)); 7267 b9 = ror64(tmp, 12);
7268 b8 -= b9 + k15; 7268 b8 -= b9 + k15;
7269 b9 -= k16; 7269 b9 -= k16;
7270 7270
7271 tmp = b7 ^ b6; 7271 tmp = b7 ^ b6;
7272 b7 = (tmp >> 31) | (tmp << (64 - 31)); 7272 b7 = ror64(tmp, 31);
7273 b6 -= b7 + k13; 7273 b6 -= b7 + k13;
7274 b7 -= k14; 7274 b7 -= k14;
7275 7275
7276 tmp = b5 ^ b4; 7276 tmp = b5 ^ b4;
7277 b5 = (tmp >> 37) | (tmp << (64 - 37)); 7277 b5 = ror64(tmp, 37);
7278 b4 -= b5 + k11; 7278 b4 -= b5 + k11;
7279 b5 -= k12; 7279 b5 -= k12;
7280 7280
7281 tmp = b3 ^ b2; 7281 tmp = b3 ^ b2;
7282 b3 = (tmp >> 9) | (tmp << (64 - 9)); 7282 b3 = ror64(tmp, 9);
7283 b2 -= b3 + k9; 7283 b2 -= b3 + k9;
7284 b3 -= k10; 7284 b3 -= k10;
7285 7285
7286 tmp = b1 ^ b0; 7286 tmp = b1 ^ b0;
7287 b1 = (tmp >> 41) | (tmp << (64 - 41)); 7287 b1 = ror64(tmp, 41);
7288 b0 -= b1 + k7; 7288 b0 -= b1 + k7;
7289 b1 -= k8; 7289 b1 -= k8;
7290 7290
7291 tmp = b7 ^ b12; 7291 tmp = b7 ^ b12;
7292 b7 = (tmp >> 25) | (tmp << (64 - 25)); 7292 b7 = ror64(tmp, 25);
7293 b12 -= b7; 7293 b12 -= b7;
7294 7294
7295 tmp = b3 ^ b10; 7295 tmp = b3 ^ b10;
7296 b3 = (tmp >> 16) | (tmp << (64 - 16)); 7296 b3 = ror64(tmp, 16);
7297 b10 -= b3; 7297 b10 -= b3;
7298 7298
7299 tmp = b5 ^ b8; 7299 tmp = b5 ^ b8;
7300 b5 = (tmp >> 28) | (tmp << (64 - 28)); 7300 b5 = ror64(tmp, 28);
7301 b8 -= b5; 7301 b8 -= b5;
7302 7302
7303 tmp = b1 ^ b14; 7303 tmp = b1 ^ b14;
7304 b1 = (tmp >> 47) | (tmp << (64 - 47)); 7304 b1 = ror64(tmp, 47);
7305 b14 -= b1; 7305 b14 -= b1;
7306 7306
7307 tmp = b9 ^ b4; 7307 tmp = b9 ^ b4;
7308 b9 = (tmp >> 41) | (tmp << (64 - 41)); 7308 b9 = ror64(tmp, 41);
7309 b4 -= b9; 7309 b4 -= b9;
7310 7310
7311 tmp = b13 ^ b6; 7311 tmp = b13 ^ b6;
7312 b13 = (tmp >> 48) | (tmp << (64 - 48)); 7312 b13 = ror64(tmp, 48);
7313 b6 -= b13; 7313 b6 -= b13;
7314 7314
7315 tmp = b11 ^ b2; 7315 tmp = b11 ^ b2;
7316 b11 = (tmp >> 20) | (tmp << (64 - 20)); 7316 b11 = ror64(tmp, 20);
7317 b2 -= b11; 7317 b2 -= b11;
7318 7318
7319 tmp = b15 ^ b0; 7319 tmp = b15 ^ b0;
7320 b15 = (tmp >> 5) | (tmp << (64 - 5)); 7320 b15 = ror64(tmp, 5);
7321 b0 -= b15; 7321 b0 -= b15;
7322 7322
7323 tmp = b9 ^ b10; 7323 tmp = b9 ^ b10;
7324 b9 = (tmp >> 17) | (tmp << (64 - 17)); 7324 b9 = ror64(tmp, 17);
7325 b10 -= b9; 7325 b10 -= b9;
7326 7326
7327 tmp = b11 ^ b8; 7327 tmp = b11 ^ b8;
7328 b11 = (tmp >> 59) | (tmp << (64 - 59)); 7328 b11 = ror64(tmp, 59);
7329 b8 -= b11; 7329 b8 -= b11;
7330 7330
7331 tmp = b13 ^ b14; 7331 tmp = b13 ^ b14;
7332 b13 = (tmp >> 41) | (tmp << (64 - 41)); 7332 b13 = ror64(tmp, 41);
7333 b14 -= b13; 7333 b14 -= b13;
7334 7334
7335 tmp = b15 ^ b12; 7335 tmp = b15 ^ b12;
7336 b15 = (tmp >> 34) | (tmp << (64 - 34)); 7336 b15 = ror64(tmp, 34);
7337 b12 -= b15; 7337 b12 -= b15;
7338 7338
7339 tmp = b1 ^ b6; 7339 tmp = b1 ^ b6;
7340 b1 = (tmp >> 13) | (tmp << (64 - 13)); 7340 b1 = ror64(tmp, 13);
7341 b6 -= b1; 7341 b6 -= b1;
7342 7342
7343 tmp = b3 ^ b4; 7343 tmp = b3 ^ b4;
7344 b3 = (tmp >> 51) | (tmp << (64 - 51)); 7344 b3 = ror64(tmp, 51);
7345 b4 -= b3; 7345 b4 -= b3;
7346 7346
7347 tmp = b5 ^ b2; 7347 tmp = b5 ^ b2;
7348 b5 = (tmp >> 4) | (tmp << (64 - 4)); 7348 b5 = ror64(tmp, 4);
7349 b2 -= b5; 7349 b2 -= b5;
7350 7350
7351 tmp = b7 ^ b0; 7351 tmp = b7 ^ b0;
7352 b7 = (tmp >> 33) | (tmp << (64 - 33)); 7352 b7 = ror64(tmp, 33);
7353 b0 -= b7; 7353 b0 -= b7;
7354 7354
7355 tmp = b1 ^ b8; 7355 tmp = b1 ^ b8;
7356 b1 = (tmp >> 52) | (tmp << (64 - 52)); 7356 b1 = ror64(tmp, 52);
7357 b8 -= b1; 7357 b8 -= b1;
7358 7358
7359 tmp = b5 ^ b14; 7359 tmp = b5 ^ b14;
7360 b5 = (tmp >> 23) | (tmp << (64 - 23)); 7360 b5 = ror64(tmp, 23);
7361 b14 -= b5; 7361 b14 -= b5;
7362 7362
7363 tmp = b3 ^ b12; 7363 tmp = b3 ^ b12;
7364 b3 = (tmp >> 18) | (tmp << (64 - 18)); 7364 b3 = ror64(tmp, 18);
7365 b12 -= b3; 7365 b12 -= b3;
7366 7366
7367 tmp = b7 ^ b10; 7367 tmp = b7 ^ b10;
7368 b7 = (tmp >> 49) | (tmp << (64 - 49)); 7368 b7 = ror64(tmp, 49);
7369 b10 -= b7; 7369 b10 -= b7;
7370 7370
7371 tmp = b15 ^ b4; 7371 tmp = b15 ^ b4;
7372 b15 = (tmp >> 55) | (tmp << (64 - 55)); 7372 b15 = ror64(tmp, 55);
7373 b4 -= b15; 7373 b4 -= b15;
7374 7374
7375 tmp = b11 ^ b6; 7375 tmp = b11 ^ b6;
7376 b11 = (tmp >> 10) | (tmp << (64 - 10)); 7376 b11 = ror64(tmp, 10);
7377 b6 -= b11; 7377 b6 -= b11;
7378 7378
7379 tmp = b13 ^ b2; 7379 tmp = b13 ^ b2;
7380 b13 = (tmp >> 19) | (tmp << (64 - 19)); 7380 b13 = ror64(tmp, 19);
7381 b2 -= b13; 7381 b2 -= b13;
7382 7382
7383 tmp = b9 ^ b0; 7383 tmp = b9 ^ b0;
7384 b9 = (tmp >> 38) | (tmp << (64 - 38)); 7384 b9 = ror64(tmp, 38);
7385 b0 -= b9; 7385 b0 -= b9;
7386 7386
7387 tmp = b15 ^ b14; 7387 tmp = b15 ^ b14;
7388 b15 = (tmp >> 37) | (tmp << (64 - 37)); 7388 b15 = ror64(tmp, 37);
7389 b14 -= b15 + k3 + t1; 7389 b14 -= b15 + k3 + t1;
7390 b15 -= k4 + 6; 7390 b15 -= k4 + 6;
7391 7391
7392 tmp = b13 ^ b12; 7392 tmp = b13 ^ b12;
7393 b13 = (tmp >> 22) | (tmp << (64 - 22)); 7393 b13 = ror64(tmp, 22);
7394 b12 -= b13 + k1; 7394 b12 -= b13 + k1;
7395 b13 -= k2 + t0; 7395 b13 -= k2 + t0;
7396 7396
7397 tmp = b11 ^ b10; 7397 tmp = b11 ^ b10;
7398 b11 = (tmp >> 17) | (tmp << (64 - 17)); 7398 b11 = ror64(tmp, 17);
7399 b10 -= b11 + k16; 7399 b10 -= b11 + k16;
7400 b11 -= k0; 7400 b11 -= k0;
7401 7401
7402 tmp = b9 ^ b8; 7402 tmp = b9 ^ b8;
7403 b9 = (tmp >> 8) | (tmp << (64 - 8)); 7403 b9 = ror64(tmp, 8);
7404 b8 -= b9 + k14; 7404 b8 -= b9 + k14;
7405 b9 -= k15; 7405 b9 -= k15;
7406 7406
7407 tmp = b7 ^ b6; 7407 tmp = b7 ^ b6;
7408 b7 = (tmp >> 47) | (tmp << (64 - 47)); 7408 b7 = ror64(tmp, 47);
7409 b6 -= b7 + k12; 7409 b6 -= b7 + k12;
7410 b7 -= k13; 7410 b7 -= k13;
7411 7411
7412 tmp = b5 ^ b4; 7412 tmp = b5 ^ b4;
7413 b5 = (tmp >> 8) | (tmp << (64 - 8)); 7413 b5 = ror64(tmp, 8);
7414 b4 -= b5 + k10; 7414 b4 -= b5 + k10;
7415 b5 -= k11; 7415 b5 -= k11;
7416 7416
7417 tmp = b3 ^ b2; 7417 tmp = b3 ^ b2;
7418 b3 = (tmp >> 13) | (tmp << (64 - 13)); 7418 b3 = ror64(tmp, 13);
7419 b2 -= b3 + k8; 7419 b2 -= b3 + k8;
7420 b3 -= k9; 7420 b3 -= k9;
7421 7421
7422 tmp = b1 ^ b0; 7422 tmp = b1 ^ b0;
7423 b1 = (tmp >> 24) | (tmp << (64 - 24)); 7423 b1 = ror64(tmp, 24);
7424 b0 -= b1 + k6; 7424 b0 -= b1 + k6;
7425 b1 -= k7; 7425 b1 -= k7;
7426 7426
7427 tmp = b7 ^ b12; 7427 tmp = b7 ^ b12;
7428 b7 = (tmp >> 20) | (tmp << (64 - 20)); 7428 b7 = ror64(tmp, 20);
7429 b12 -= b7; 7429 b12 -= b7;
7430 7430
7431 tmp = b3 ^ b10; 7431 tmp = b3 ^ b10;
7432 b3 = (tmp >> 37) | (tmp << (64 - 37)); 7432 b3 = ror64(tmp, 37);
7433 b10 -= b3; 7433 b10 -= b3;
7434 7434
7435 tmp = b5 ^ b8; 7435 tmp = b5 ^ b8;
7436 b5 = (tmp >> 31) | (tmp << (64 - 31)); 7436 b5 = ror64(tmp, 31);
7437 b8 -= b5; 7437 b8 -= b5;
7438 7438
7439 tmp = b1 ^ b14; 7439 tmp = b1 ^ b14;
7440 b1 = (tmp >> 23) | (tmp << (64 - 23)); 7440 b1 = ror64(tmp, 23);
7441 b14 -= b1; 7441 b14 -= b1;
7442 7442
7443 tmp = b9 ^ b4; 7443 tmp = b9 ^ b4;
7444 b9 = (tmp >> 52) | (tmp << (64 - 52)); 7444 b9 = ror64(tmp, 52);
7445 b4 -= b9; 7445 b4 -= b9;
7446 7446
7447 tmp = b13 ^ b6; 7447 tmp = b13 ^ b6;
7448 b13 = (tmp >> 35) | (tmp << (64 - 35)); 7448 b13 = ror64(tmp, 35);
7449 b6 -= b13; 7449 b6 -= b13;
7450 7450
7451 tmp = b11 ^ b2; 7451 tmp = b11 ^ b2;
7452 b11 = (tmp >> 48) | (tmp << (64 - 48)); 7452 b11 = ror64(tmp, 48);
7453 b2 -= b11; 7453 b2 -= b11;
7454 7454
7455 tmp = b15 ^ b0; 7455 tmp = b15 ^ b0;
7456 b15 = (tmp >> 9) | (tmp << (64 - 9)); 7456 b15 = ror64(tmp, 9);
7457 b0 -= b15; 7457 b0 -= b15;
7458 7458
7459 tmp = b9 ^ b10; 7459 tmp = b9 ^ b10;
7460 b9 = (tmp >> 25) | (tmp << (64 - 25)); 7460 b9 = ror64(tmp, 25);
7461 b10 -= b9; 7461 b10 -= b9;
7462 7462
7463 tmp = b11 ^ b8; 7463 tmp = b11 ^ b8;
7464 b11 = (tmp >> 44) | (tmp << (64 - 44)); 7464 b11 = ror64(tmp, 44);
7465 b8 -= b11; 7465 b8 -= b11;
7466 7466
7467 tmp = b13 ^ b14; 7467 tmp = b13 ^ b14;
7468 b13 = (tmp >> 42) | (tmp << (64 - 42)); 7468 b13 = ror64(tmp, 42);
7469 b14 -= b13; 7469 b14 -= b13;
7470 7470
7471 tmp = b15 ^ b12; 7471 tmp = b15 ^ b12;
7472 b15 = (tmp >> 19) | (tmp << (64 - 19)); 7472 b15 = ror64(tmp, 19);
7473 b12 -= b15; 7473 b12 -= b15;
7474 7474
7475 tmp = b1 ^ b6; 7475 tmp = b1 ^ b6;
7476 b1 = (tmp >> 46) | (tmp << (64 - 46)); 7476 b1 = ror64(tmp, 46);
7477 b6 -= b1; 7477 b6 -= b1;
7478 7478
7479 tmp = b3 ^ b4; 7479 tmp = b3 ^ b4;
7480 b3 = (tmp >> 47) | (tmp << (64 - 47)); 7480 b3 = ror64(tmp, 47);
7481 b4 -= b3; 7481 b4 -= b3;
7482 7482
7483 tmp = b5 ^ b2; 7483 tmp = b5 ^ b2;
7484 b5 = (tmp >> 44) | (tmp << (64 - 44)); 7484 b5 = ror64(tmp, 44);
7485 b2 -= b5; 7485 b2 -= b5;
7486 7486
7487 tmp = b7 ^ b0; 7487 tmp = b7 ^ b0;
7488 b7 = (tmp >> 31) | (tmp << (64 - 31)); 7488 b7 = ror64(tmp, 31);
7489 b0 -= b7; 7489 b0 -= b7;
7490 7490
7491 tmp = b1 ^ b8; 7491 tmp = b1 ^ b8;
7492 b1 = (tmp >> 41) | (tmp << (64 - 41)); 7492 b1 = ror64(tmp, 41);
7493 b8 -= b1; 7493 b8 -= b1;
7494 7494
7495 tmp = b5 ^ b14; 7495 tmp = b5 ^ b14;
7496 b5 = (tmp >> 42) | (tmp << (64 - 42)); 7496 b5 = ror64(tmp, 42);
7497 b14 -= b5; 7497 b14 -= b5;
7498 7498
7499 tmp = b3 ^ b12; 7499 tmp = b3 ^ b12;
7500 b3 = (tmp >> 53) | (tmp << (64 - 53)); 7500 b3 = ror64(tmp, 53);
7501 b12 -= b3; 7501 b12 -= b3;
7502 7502
7503 tmp = b7 ^ b10; 7503 tmp = b7 ^ b10;
7504 b7 = (tmp >> 4) | (tmp << (64 - 4)); 7504 b7 = ror64(tmp, 4);
7505 b10 -= b7; 7505 b10 -= b7;
7506 7506
7507 tmp = b15 ^ b4; 7507 tmp = b15 ^ b4;
7508 b15 = (tmp >> 51) | (tmp << (64 - 51)); 7508 b15 = ror64(tmp, 51);
7509 b4 -= b15; 7509 b4 -= b15;
7510 7510
7511 tmp = b11 ^ b6; 7511 tmp = b11 ^ b6;
7512 b11 = (tmp >> 56) | (tmp << (64 - 56)); 7512 b11 = ror64(tmp, 56);
7513 b6 -= b11; 7513 b6 -= b11;
7514 7514
7515 tmp = b13 ^ b2; 7515 tmp = b13 ^ b2;
7516 b13 = (tmp >> 34) | (tmp << (64 - 34)); 7516 b13 = ror64(tmp, 34);
7517 b2 -= b13; 7517 b2 -= b13;
7518 7518
7519 tmp = b9 ^ b0; 7519 tmp = b9 ^ b0;
7520 b9 = (tmp >> 16) | (tmp << (64 - 16)); 7520 b9 = ror64(tmp, 16);
7521 b0 -= b9; 7521 b0 -= b9;
7522 7522
7523 tmp = b15 ^ b14; 7523 tmp = b15 ^ b14;
7524 b15 = (tmp >> 30) | (tmp << (64 - 30)); 7524 b15 = ror64(tmp, 30);
7525 b14 -= b15 + k2 + t0; 7525 b14 -= b15 + k2 + t0;
7526 b15 -= k3 + 5; 7526 b15 -= k3 + 5;
7527 7527
7528 tmp = b13 ^ b12; 7528 tmp = b13 ^ b12;
7529 b13 = (tmp >> 44) | (tmp << (64 - 44)); 7529 b13 = ror64(tmp, 44);
7530 b12 -= b13 + k0; 7530 b12 -= b13 + k0;
7531 b13 -= k1 + t2; 7531 b13 -= k1 + t2;
7532 7532
7533 tmp = b11 ^ b10; 7533 tmp = b11 ^ b10;
7534 b11 = (tmp >> 47) | (tmp << (64 - 47)); 7534 b11 = ror64(tmp, 47);
7535 b10 -= b11 + k15; 7535 b10 -= b11 + k15;
7536 b11 -= k16; 7536 b11 -= k16;
7537 7537
7538 tmp = b9 ^ b8; 7538 tmp = b9 ^ b8;
7539 b9 = (tmp >> 12) | (tmp << (64 - 12)); 7539 b9 = ror64(tmp, 12);
7540 b8 -= b9 + k13; 7540 b8 -= b9 + k13;
7541 b9 -= k14; 7541 b9 -= k14;
7542 7542
7543 tmp = b7 ^ b6; 7543 tmp = b7 ^ b6;
7544 b7 = (tmp >> 31) | (tmp << (64 - 31)); 7544 b7 = ror64(tmp, 31);
7545 b6 -= b7 + k11; 7545 b6 -= b7 + k11;
7546 b7 -= k12; 7546 b7 -= k12;
7547 7547
7548 tmp = b5 ^ b4; 7548 tmp = b5 ^ b4;
7549 b5 = (tmp >> 37) | (tmp << (64 - 37)); 7549 b5 = ror64(tmp, 37);
7550 b4 -= b5 + k9; 7550 b4 -= b5 + k9;
7551 b5 -= k10; 7551 b5 -= k10;
7552 7552
7553 tmp = b3 ^ b2; 7553 tmp = b3 ^ b2;
7554 b3 = (tmp >> 9) | (tmp << (64 - 9)); 7554 b3 = ror64(tmp, 9);
7555 b2 -= b3 + k7; 7555 b2 -= b3 + k7;
7556 b3 -= k8; 7556 b3 -= k8;
7557 7557
7558 tmp = b1 ^ b0; 7558 tmp = b1 ^ b0;
7559 b1 = (tmp >> 41) | (tmp << (64 - 41)); 7559 b1 = ror64(tmp, 41);
7560 b0 -= b1 + k5; 7560 b0 -= b1 + k5;
7561 b1 -= k6; 7561 b1 -= k6;
7562 7562
7563 tmp = b7 ^ b12; 7563 tmp = b7 ^ b12;
7564 b7 = (tmp >> 25) | (tmp << (64 - 25)); 7564 b7 = ror64(tmp, 25);
7565 b12 -= b7; 7565 b12 -= b7;
7566 7566
7567 tmp = b3 ^ b10; 7567 tmp = b3 ^ b10;
7568 b3 = (tmp >> 16) | (tmp << (64 - 16)); 7568 b3 = ror64(tmp, 16);
7569 b10 -= b3; 7569 b10 -= b3;
7570 7570
7571 tmp = b5 ^ b8; 7571 tmp = b5 ^ b8;
7572 b5 = (tmp >> 28) | (tmp << (64 - 28)); 7572 b5 = ror64(tmp, 28);
7573 b8 -= b5; 7573 b8 -= b5;
7574 7574
7575 tmp = b1 ^ b14; 7575 tmp = b1 ^ b14;
7576 b1 = (tmp >> 47) | (tmp << (64 - 47)); 7576 b1 = ror64(tmp, 47);
7577 b14 -= b1; 7577 b14 -= b1;
7578 7578
7579 tmp = b9 ^ b4; 7579 tmp = b9 ^ b4;
7580 b9 = (tmp >> 41) | (tmp << (64 - 41)); 7580 b9 = ror64(tmp, 41);
7581 b4 -= b9; 7581 b4 -= b9;
7582 7582
7583 tmp = b13 ^ b6; 7583 tmp = b13 ^ b6;
7584 b13 = (tmp >> 48) | (tmp << (64 - 48)); 7584 b13 = ror64(tmp, 48);
7585 b6 -= b13; 7585 b6 -= b13;
7586 7586
7587 tmp = b11 ^ b2; 7587 tmp = b11 ^ b2;
7588 b11 = (tmp >> 20) | (tmp << (64 - 20)); 7588 b11 = ror64(tmp, 20);
7589 b2 -= b11; 7589 b2 -= b11;
7590 7590
7591 tmp = b15 ^ b0; 7591 tmp = b15 ^ b0;
7592 b15 = (tmp >> 5) | (tmp << (64 - 5)); 7592 b15 = ror64(tmp, 5);
7593 b0 -= b15; 7593 b0 -= b15;
7594 7594
7595 tmp = b9 ^ b10; 7595 tmp = b9 ^ b10;
7596 b9 = (tmp >> 17) | (tmp << (64 - 17)); 7596 b9 = ror64(tmp, 17);
7597 b10 -= b9; 7597 b10 -= b9;
7598 7598
7599 tmp = b11 ^ b8; 7599 tmp = b11 ^ b8;
7600 b11 = (tmp >> 59) | (tmp << (64 - 59)); 7600 b11 = ror64(tmp, 59);
7601 b8 -= b11; 7601 b8 -= b11;
7602 7602
7603 tmp = b13 ^ b14; 7603 tmp = b13 ^ b14;
7604 b13 = (tmp >> 41) | (tmp << (64 - 41)); 7604 b13 = ror64(tmp, 41);
7605 b14 -= b13; 7605 b14 -= b13;
7606 7606
7607 tmp = b15 ^ b12; 7607 tmp = b15 ^ b12;
7608 b15 = (tmp >> 34) | (tmp << (64 - 34)); 7608 b15 = ror64(tmp, 34);
7609 b12 -= b15; 7609 b12 -= b15;
7610 7610
7611 tmp = b1 ^ b6; 7611 tmp = b1 ^ b6;
7612 b1 = (tmp >> 13) | (tmp << (64 - 13)); 7612 b1 = ror64(tmp, 13);
7613 b6 -= b1; 7613 b6 -= b1;
7614 7614
7615 tmp = b3 ^ b4; 7615 tmp = b3 ^ b4;
7616 b3 = (tmp >> 51) | (tmp << (64 - 51)); 7616 b3 = ror64(tmp, 51);
7617 b4 -= b3; 7617 b4 -= b3;
7618 7618
7619 tmp = b5 ^ b2; 7619 tmp = b5 ^ b2;
7620 b5 = (tmp >> 4) | (tmp << (64 - 4)); 7620 b5 = ror64(tmp, 4);
7621 b2 -= b5; 7621 b2 -= b5;
7622 7622
7623 tmp = b7 ^ b0; 7623 tmp = b7 ^ b0;
7624 b7 = (tmp >> 33) | (tmp << (64 - 33)); 7624 b7 = ror64(tmp, 33);
7625 b0 -= b7; 7625 b0 -= b7;
7626 7626
7627 tmp = b1 ^ b8; 7627 tmp = b1 ^ b8;
7628 b1 = (tmp >> 52) | (tmp << (64 - 52)); 7628 b1 = ror64(tmp, 52);
7629 b8 -= b1; 7629 b8 -= b1;
7630 7630
7631 tmp = b5 ^ b14; 7631 tmp = b5 ^ b14;
7632 b5 = (tmp >> 23) | (tmp << (64 - 23)); 7632 b5 = ror64(tmp, 23);
7633 b14 -= b5; 7633 b14 -= b5;
7634 7634
7635 tmp = b3 ^ b12; 7635 tmp = b3 ^ b12;
7636 b3 = (tmp >> 18) | (tmp << (64 - 18)); 7636 b3 = ror64(tmp, 18);
7637 b12 -= b3; 7637 b12 -= b3;
7638 7638
7639 tmp = b7 ^ b10; 7639 tmp = b7 ^ b10;
7640 b7 = (tmp >> 49) | (tmp << (64 - 49)); 7640 b7 = ror64(tmp, 49);
7641 b10 -= b7; 7641 b10 -= b7;
7642 7642
7643 tmp = b15 ^ b4; 7643 tmp = b15 ^ b4;
7644 b15 = (tmp >> 55) | (tmp << (64 - 55)); 7644 b15 = ror64(tmp, 55);
7645 b4 -= b15; 7645 b4 -= b15;
7646 7646
7647 tmp = b11 ^ b6; 7647 tmp = b11 ^ b6;
7648 b11 = (tmp >> 10) | (tmp << (64 - 10)); 7648 b11 = ror64(tmp, 10);
7649 b6 -= b11; 7649 b6 -= b11;
7650 7650
7651 tmp = b13 ^ b2; 7651 tmp = b13 ^ b2;
7652 b13 = (tmp >> 19) | (tmp << (64 - 19)); 7652 b13 = ror64(tmp, 19);
7653 b2 -= b13; 7653 b2 -= b13;
7654 7654
7655 tmp = b9 ^ b0; 7655 tmp = b9 ^ b0;
7656 b9 = (tmp >> 38) | (tmp << (64 - 38)); 7656 b9 = ror64(tmp, 38);
7657 b0 -= b9; 7657 b0 -= b9;
7658 7658
7659 tmp = b15 ^ b14; 7659 tmp = b15 ^ b14;
7660 b15 = (tmp >> 37) | (tmp << (64 - 37)); 7660 b15 = ror64(tmp, 37);
7661 b14 -= b15 + k1 + t2; 7661 b14 -= b15 + k1 + t2;
7662 b15 -= k2 + 4; 7662 b15 -= k2 + 4;
7663 7663
7664 tmp = b13 ^ b12; 7664 tmp = b13 ^ b12;
7665 b13 = (tmp >> 22) | (tmp << (64 - 22)); 7665 b13 = ror64(tmp, 22);
7666 b12 -= b13 + k16; 7666 b12 -= b13 + k16;
7667 b13 -= k0 + t1; 7667 b13 -= k0 + t1;
7668 7668
7669 tmp = b11 ^ b10; 7669 tmp = b11 ^ b10;
7670 b11 = (tmp >> 17) | (tmp << (64 - 17)); 7670 b11 = ror64(tmp, 17);
7671 b10 -= b11 + k14; 7671 b10 -= b11 + k14;
7672 b11 -= k15; 7672 b11 -= k15;
7673 7673
7674 tmp = b9 ^ b8; 7674 tmp = b9 ^ b8;
7675 b9 = (tmp >> 8) | (tmp << (64 - 8)); 7675 b9 = ror64(tmp, 8);
7676 b8 -= b9 + k12; 7676 b8 -= b9 + k12;
7677 b9 -= k13; 7677 b9 -= k13;
7678 7678
7679 tmp = b7 ^ b6; 7679 tmp = b7 ^ b6;
7680 b7 = (tmp >> 47) | (tmp << (64 - 47)); 7680 b7 = ror64(tmp, 47);
7681 b6 -= b7 + k10; 7681 b6 -= b7 + k10;
7682 b7 -= k11; 7682 b7 -= k11;
7683 7683
7684 tmp = b5 ^ b4; 7684 tmp = b5 ^ b4;
7685 b5 = (tmp >> 8) | (tmp << (64 - 8)); 7685 b5 = ror64(tmp, 8);
7686 b4 -= b5 + k8; 7686 b4 -= b5 + k8;
7687 b5 -= k9; 7687 b5 -= k9;
7688 7688
7689 tmp = b3 ^ b2; 7689 tmp = b3 ^ b2;
7690 b3 = (tmp >> 13) | (tmp << (64 - 13)); 7690 b3 = ror64(tmp, 13);
7691 b2 -= b3 + k6; 7691 b2 -= b3 + k6;
7692 b3 -= k7; 7692 b3 -= k7;
7693 7693
7694 tmp = b1 ^ b0; 7694 tmp = b1 ^ b0;
7695 b1 = (tmp >> 24) | (tmp << (64 - 24)); 7695 b1 = ror64(tmp, 24);
7696 b0 -= b1 + k4; 7696 b0 -= b1 + k4;
7697 b1 -= k5; 7697 b1 -= k5;
7698 7698
7699 tmp = b7 ^ b12; 7699 tmp = b7 ^ b12;
7700 b7 = (tmp >> 20) | (tmp << (64 - 20)); 7700 b7 = ror64(tmp, 20);
7701 b12 -= b7; 7701 b12 -= b7;
7702 7702
7703 tmp = b3 ^ b10; 7703 tmp = b3 ^ b10;
7704 b3 = (tmp >> 37) | (tmp << (64 - 37)); 7704 b3 = ror64(tmp, 37);
7705 b10 -= b3; 7705 b10 -= b3;
7706 7706
7707 tmp = b5 ^ b8; 7707 tmp = b5 ^ b8;
7708 b5 = (tmp >> 31) | (tmp << (64 - 31)); 7708 b5 = ror64(tmp, 31);
7709 b8 -= b5; 7709 b8 -= b5;
7710 7710
7711 tmp = b1 ^ b14; 7711 tmp = b1 ^ b14;
7712 b1 = (tmp >> 23) | (tmp << (64 - 23)); 7712 b1 = ror64(tmp, 23);
7713 b14 -= b1; 7713 b14 -= b1;
7714 7714
7715 tmp = b9 ^ b4; 7715 tmp = b9 ^ b4;
7716 b9 = (tmp >> 52) | (tmp << (64 - 52)); 7716 b9 = ror64(tmp, 52);
7717 b4 -= b9; 7717 b4 -= b9;
7718 7718
7719 tmp = b13 ^ b6; 7719 tmp = b13 ^ b6;
7720 b13 = (tmp >> 35) | (tmp << (64 - 35)); 7720 b13 = ror64(tmp, 35);
7721 b6 -= b13; 7721 b6 -= b13;
7722 7722
7723 tmp = b11 ^ b2; 7723 tmp = b11 ^ b2;
7724 b11 = (tmp >> 48) | (tmp << (64 - 48)); 7724 b11 = ror64(tmp, 48);
7725 b2 -= b11; 7725 b2 -= b11;
7726 7726
7727 tmp = b15 ^ b0; 7727 tmp = b15 ^ b0;
7728 b15 = (tmp >> 9) | (tmp << (64 - 9)); 7728 b15 = ror64(tmp, 9);
7729 b0 -= b15; 7729 b0 -= b15;
7730 7730
7731 tmp = b9 ^ b10; 7731 tmp = b9 ^ b10;
7732 b9 = (tmp >> 25) | (tmp << (64 - 25)); 7732 b9 = ror64(tmp, 25);
7733 b10 -= b9; 7733 b10 -= b9;
7734 7734
7735 tmp = b11 ^ b8; 7735 tmp = b11 ^ b8;
7736 b11 = (tmp >> 44) | (tmp << (64 - 44)); 7736 b11 = ror64(tmp, 44);
7737 b8 -= b11; 7737 b8 -= b11;
7738 7738
7739 tmp = b13 ^ b14; 7739 tmp = b13 ^ b14;
7740 b13 = (tmp >> 42) | (tmp << (64 - 42)); 7740 b13 = ror64(tmp, 42);
7741 b14 -= b13; 7741 b14 -= b13;
7742 7742
7743 tmp = b15 ^ b12; 7743 tmp = b15 ^ b12;
7744 b15 = (tmp >> 19) | (tmp << (64 - 19)); 7744 b15 = ror64(tmp, 19);
7745 b12 -= b15; 7745 b12 -= b15;
7746 7746
7747 tmp = b1 ^ b6; 7747 tmp = b1 ^ b6;
7748 b1 = (tmp >> 46) | (tmp << (64 - 46)); 7748 b1 = ror64(tmp, 46);
7749 b6 -= b1; 7749 b6 -= b1;
7750 7750
7751 tmp = b3 ^ b4; 7751 tmp = b3 ^ b4;
7752 b3 = (tmp >> 47) | (tmp << (64 - 47)); 7752 b3 = ror64(tmp, 47);
7753 b4 -= b3; 7753 b4 -= b3;
7754 7754
7755 tmp = b5 ^ b2; 7755 tmp = b5 ^ b2;
7756 b5 = (tmp >> 44) | (tmp << (64 - 44)); 7756 b5 = ror64(tmp, 44);
7757 b2 -= b5; 7757 b2 -= b5;
7758 7758
7759 tmp = b7 ^ b0; 7759 tmp = b7 ^ b0;
7760 b7 = (tmp >> 31) | (tmp << (64 - 31)); 7760 b7 = ror64(tmp, 31);
7761 b0 -= b7; 7761 b0 -= b7;
7762 7762
7763 tmp = b1 ^ b8; 7763 tmp = b1 ^ b8;
7764 b1 = (tmp >> 41) | (tmp << (64 - 41)); 7764 b1 = ror64(tmp, 41);
7765 b8 -= b1; 7765 b8 -= b1;
7766 7766
7767 tmp = b5 ^ b14; 7767 tmp = b5 ^ b14;
7768 b5 = (tmp >> 42) | (tmp << (64 - 42)); 7768 b5 = ror64(tmp, 42);
7769 b14 -= b5; 7769 b14 -= b5;
7770 7770
7771 tmp = b3 ^ b12; 7771 tmp = b3 ^ b12;
7772 b3 = (tmp >> 53) | (tmp << (64 - 53)); 7772 b3 = ror64(tmp, 53);
7773 b12 -= b3; 7773 b12 -= b3;
7774 7774
7775 tmp = b7 ^ b10; 7775 tmp = b7 ^ b10;
7776 b7 = (tmp >> 4) | (tmp << (64 - 4)); 7776 b7 = ror64(tmp, 4);
7777 b10 -= b7; 7777 b10 -= b7;
7778 7778
7779 tmp = b15 ^ b4; 7779 tmp = b15 ^ b4;
7780 b15 = (tmp >> 51) | (tmp << (64 - 51)); 7780 b15 = ror64(tmp, 51);
7781 b4 -= b15; 7781 b4 -= b15;
7782 7782
7783 tmp = b11 ^ b6; 7783 tmp = b11 ^ b6;
7784 b11 = (tmp >> 56) | (tmp << (64 - 56)); 7784 b11 = ror64(tmp, 56);
7785 b6 -= b11; 7785 b6 -= b11;
7786 7786
7787 tmp = b13 ^ b2; 7787 tmp = b13 ^ b2;
7788 b13 = (tmp >> 34) | (tmp << (64 - 34)); 7788 b13 = ror64(tmp, 34);
7789 b2 -= b13; 7789 b2 -= b13;
7790 7790
7791 tmp = b9 ^ b0; 7791 tmp = b9 ^ b0;
7792 b9 = (tmp >> 16) | (tmp << (64 - 16)); 7792 b9 = ror64(tmp, 16);
7793 b0 -= b9; 7793 b0 -= b9;
7794 7794
7795 tmp = b15 ^ b14; 7795 tmp = b15 ^ b14;
7796 b15 = (tmp >> 30) | (tmp << (64 - 30)); 7796 b15 = ror64(tmp, 30);
7797 b14 -= b15 + k0 + t1; 7797 b14 -= b15 + k0 + t1;
7798 b15 -= k1 + 3; 7798 b15 -= k1 + 3;
7799 7799
7800 tmp = b13 ^ b12; 7800 tmp = b13 ^ b12;
7801 b13 = (tmp >> 44) | (tmp << (64 - 44)); 7801 b13 = ror64(tmp, 44);
7802 b12 -= b13 + k15; 7802 b12 -= b13 + k15;
7803 b13 -= k16 + t0; 7803 b13 -= k16 + t0;
7804 7804
7805 tmp = b11 ^ b10; 7805 tmp = b11 ^ b10;
7806 b11 = (tmp >> 47) | (tmp << (64 - 47)); 7806 b11 = ror64(tmp, 47);
7807 b10 -= b11 + k13; 7807 b10 -= b11 + k13;
7808 b11 -= k14; 7808 b11 -= k14;
7809 7809
7810 tmp = b9 ^ b8; 7810 tmp = b9 ^ b8;
7811 b9 = (tmp >> 12) | (tmp << (64 - 12)); 7811 b9 = ror64(tmp, 12);
7812 b8 -= b9 + k11; 7812 b8 -= b9 + k11;
7813 b9 -= k12; 7813 b9 -= k12;
7814 7814
7815 tmp = b7 ^ b6; 7815 tmp = b7 ^ b6;
7816 b7 = (tmp >> 31) | (tmp << (64 - 31)); 7816 b7 = ror64(tmp, 31);
7817 b6 -= b7 + k9; 7817 b6 -= b7 + k9;
7818 b7 -= k10; 7818 b7 -= k10;
7819 7819
7820 tmp = b5 ^ b4; 7820 tmp = b5 ^ b4;
7821 b5 = (tmp >> 37) | (tmp << (64 - 37)); 7821 b5 = ror64(tmp, 37);
7822 b4 -= b5 + k7; 7822 b4 -= b5 + k7;
7823 b5 -= k8; 7823 b5 -= k8;
7824 7824
7825 tmp = b3 ^ b2; 7825 tmp = b3 ^ b2;
7826 b3 = (tmp >> 9) | (tmp << (64 - 9)); 7826 b3 = ror64(tmp, 9);
7827 b2 -= b3 + k5; 7827 b2 -= b3 + k5;
7828 b3 -= k6; 7828 b3 -= k6;
7829 7829
7830 tmp = b1 ^ b0; 7830 tmp = b1 ^ b0;
7831 b1 = (tmp >> 41) | (tmp << (64 - 41)); 7831 b1 = ror64(tmp, 41);
7832 b0 -= b1 + k3; 7832 b0 -= b1 + k3;
7833 b1 -= k4; 7833 b1 -= k4;
7834 7834
7835 tmp = b7 ^ b12; 7835 tmp = b7 ^ b12;
7836 b7 = (tmp >> 25) | (tmp << (64 - 25)); 7836 b7 = ror64(tmp, 25);
7837 b12 -= b7; 7837 b12 -= b7;
7838 7838
7839 tmp = b3 ^ b10; 7839 tmp = b3 ^ b10;
7840 b3 = (tmp >> 16) | (tmp << (64 - 16)); 7840 b3 = ror64(tmp, 16);
7841 b10 -= b3; 7841 b10 -= b3;
7842 7842
7843 tmp = b5 ^ b8; 7843 tmp = b5 ^ b8;
7844 b5 = (tmp >> 28) | (tmp << (64 - 28)); 7844 b5 = ror64(tmp, 28);
7845 b8 -= b5; 7845 b8 -= b5;
7846 7846
7847 tmp = b1 ^ b14; 7847 tmp = b1 ^ b14;
7848 b1 = (tmp >> 47) | (tmp << (64 - 47)); 7848 b1 = ror64(tmp, 47);
7849 b14 -= b1; 7849 b14 -= b1;
7850 7850
7851 tmp = b9 ^ b4; 7851 tmp = b9 ^ b4;
7852 b9 = (tmp >> 41) | (tmp << (64 - 41)); 7852 b9 = ror64(tmp, 41);
7853 b4 -= b9; 7853 b4 -= b9;
7854 7854
7855 tmp = b13 ^ b6; 7855 tmp = b13 ^ b6;
7856 b13 = (tmp >> 48) | (tmp << (64 - 48)); 7856 b13 = ror64(tmp, 48);
7857 b6 -= b13; 7857 b6 -= b13;
7858 7858
7859 tmp = b11 ^ b2; 7859 tmp = b11 ^ b2;
7860 b11 = (tmp >> 20) | (tmp << (64 - 20)); 7860 b11 = ror64(tmp, 20);
7861 b2 -= b11; 7861 b2 -= b11;
7862 7862
7863 tmp = b15 ^ b0; 7863 tmp = b15 ^ b0;
7864 b15 = (tmp >> 5) | (tmp << (64 - 5)); 7864 b15 = ror64(tmp, 5);
7865 b0 -= b15; 7865 b0 -= b15;
7866 7866
7867 tmp = b9 ^ b10; 7867 tmp = b9 ^ b10;
7868 b9 = (tmp >> 17) | (tmp << (64 - 17)); 7868 b9 = ror64(tmp, 17);
7869 b10 -= b9; 7869 b10 -= b9;
7870 7870
7871 tmp = b11 ^ b8; 7871 tmp = b11 ^ b8;
7872 b11 = (tmp >> 59) | (tmp << (64 - 59)); 7872 b11 = ror64(tmp, 59);
7873 b8 -= b11; 7873 b8 -= b11;
7874 7874
7875 tmp = b13 ^ b14; 7875 tmp = b13 ^ b14;
7876 b13 = (tmp >> 41) | (tmp << (64 - 41)); 7876 b13 = ror64(tmp, 41);
7877 b14 -= b13; 7877 b14 -= b13;
7878 7878
7879 tmp = b15 ^ b12; 7879 tmp = b15 ^ b12;
7880 b15 = (tmp >> 34) | (tmp << (64 - 34)); 7880 b15 = ror64(tmp, 34);
7881 b12 -= b15; 7881 b12 -= b15;
7882 7882
7883 tmp = b1 ^ b6; 7883 tmp = b1 ^ b6;
7884 b1 = (tmp >> 13) | (tmp << (64 - 13)); 7884 b1 = ror64(tmp, 13);
7885 b6 -= b1; 7885 b6 -= b1;
7886 7886
7887 tmp = b3 ^ b4; 7887 tmp = b3 ^ b4;
7888 b3 = (tmp >> 51) | (tmp << (64 - 51)); 7888 b3 = ror64(tmp, 51);
7889 b4 -= b3; 7889 b4 -= b3;
7890 7890
7891 tmp = b5 ^ b2; 7891 tmp = b5 ^ b2;
7892 b5 = (tmp >> 4) | (tmp << (64 - 4)); 7892 b5 = ror64(tmp, 4);
7893 b2 -= b5; 7893 b2 -= b5;
7894 7894
7895 tmp = b7 ^ b0; 7895 tmp = b7 ^ b0;
7896 b7 = (tmp >> 33) | (tmp << (64 - 33)); 7896 b7 = ror64(tmp, 33);
7897 b0 -= b7; 7897 b0 -= b7;
7898 7898
7899 tmp = b1 ^ b8; 7899 tmp = b1 ^ b8;
7900 b1 = (tmp >> 52) | (tmp << (64 - 52)); 7900 b1 = ror64(tmp, 52);
7901 b8 -= b1; 7901 b8 -= b1;
7902 7902
7903 tmp = b5 ^ b14; 7903 tmp = b5 ^ b14;
7904 b5 = (tmp >> 23) | (tmp << (64 - 23)); 7904 b5 = ror64(tmp, 23);
7905 b14 -= b5; 7905 b14 -= b5;
7906 7906
7907 tmp = b3 ^ b12; 7907 tmp = b3 ^ b12;
7908 b3 = (tmp >> 18) | (tmp << (64 - 18)); 7908 b3 = ror64(tmp, 18);
7909 b12 -= b3; 7909 b12 -= b3;
7910 7910
7911 tmp = b7 ^ b10; 7911 tmp = b7 ^ b10;
7912 b7 = (tmp >> 49) | (tmp << (64 - 49)); 7912 b7 = ror64(tmp, 49);
7913 b10 -= b7; 7913 b10 -= b7;
7914 7914
7915 tmp = b15 ^ b4; 7915 tmp = b15 ^ b4;
7916 b15 = (tmp >> 55) | (tmp << (64 - 55)); 7916 b15 = ror64(tmp, 55);
7917 b4 -= b15; 7917 b4 -= b15;
7918 7918
7919 tmp = b11 ^ b6; 7919 tmp = b11 ^ b6;
7920 b11 = (tmp >> 10) | (tmp << (64 - 10)); 7920 b11 = ror64(tmp, 10);
7921 b6 -= b11; 7921 b6 -= b11;
7922 7922
7923 tmp = b13 ^ b2; 7923 tmp = b13 ^ b2;
7924 b13 = (tmp >> 19) | (tmp << (64 - 19)); 7924 b13 = ror64(tmp, 19);
7925 b2 -= b13; 7925 b2 -= b13;
7926 7926
7927 tmp = b9 ^ b0; 7927 tmp = b9 ^ b0;
7928 b9 = (tmp >> 38) | (tmp << (64 - 38)); 7928 b9 = ror64(tmp, 38);
7929 b0 -= b9; 7929 b0 -= b9;
7930 7930
7931 tmp = b15 ^ b14; 7931 tmp = b15 ^ b14;
7932 b15 = (tmp >> 37) | (tmp << (64 - 37)); 7932 b15 = ror64(tmp, 37);
7933 b14 -= b15 + k16 + t0; 7933 b14 -= b15 + k16 + t0;
7934 b15 -= k0 + 2; 7934 b15 -= k0 + 2;
7935 7935
7936 tmp = b13 ^ b12; 7936 tmp = b13 ^ b12;
7937 b13 = (tmp >> 22) | (tmp << (64 - 22)); 7937 b13 = ror64(tmp, 22);
7938 b12 -= b13 + k14; 7938 b12 -= b13 + k14;
7939 b13 -= k15 + t2; 7939 b13 -= k15 + t2;
7940 7940
7941 tmp = b11 ^ b10; 7941 tmp = b11 ^ b10;
7942 b11 = (tmp >> 17) | (tmp << (64 - 17)); 7942 b11 = ror64(tmp, 17);
7943 b10 -= b11 + k12; 7943 b10 -= b11 + k12;
7944 b11 -= k13; 7944 b11 -= k13;
7945 7945
7946 tmp = b9 ^ b8; 7946 tmp = b9 ^ b8;
7947 b9 = (tmp >> 8) | (tmp << (64 - 8)); 7947 b9 = ror64(tmp, 8);
7948 b8 -= b9 + k10; 7948 b8 -= b9 + k10;
7949 b9 -= k11; 7949 b9 -= k11;
7950 7950
7951 tmp = b7 ^ b6; 7951 tmp = b7 ^ b6;
7952 b7 = (tmp >> 47) | (tmp << (64 - 47)); 7952 b7 = ror64(tmp, 47);
7953 b6 -= b7 + k8; 7953 b6 -= b7 + k8;
7954 b7 -= k9; 7954 b7 -= k9;
7955 7955
7956 tmp = b5 ^ b4; 7956 tmp = b5 ^ b4;
7957 b5 = (tmp >> 8) | (tmp << (64 - 8)); 7957 b5 = ror64(tmp, 8);
7958 b4 -= b5 + k6; 7958 b4 -= b5 + k6;
7959 b5 -= k7; 7959 b5 -= k7;
7960 7960
7961 tmp = b3 ^ b2; 7961 tmp = b3 ^ b2;
7962 b3 = (tmp >> 13) | (tmp << (64 - 13)); 7962 b3 = ror64(tmp, 13);
7963 b2 -= b3 + k4; 7963 b2 -= b3 + k4;
7964 b3 -= k5; 7964 b3 -= k5;
7965 7965
7966 tmp = b1 ^ b0; 7966 tmp = b1 ^ b0;
7967 b1 = (tmp >> 24) | (tmp << (64 - 24)); 7967 b1 = ror64(tmp, 24);
7968 b0 -= b1 + k2; 7968 b0 -= b1 + k2;
7969 b1 -= k3; 7969 b1 -= k3;
7970 7970
7971 tmp = b7 ^ b12; 7971 tmp = b7 ^ b12;
7972 b7 = (tmp >> 20) | (tmp << (64 - 20)); 7972 b7 = ror64(tmp, 20);
7973 b12 -= b7; 7973 b12 -= b7;
7974 7974
7975 tmp = b3 ^ b10; 7975 tmp = b3 ^ b10;
7976 b3 = (tmp >> 37) | (tmp << (64 - 37)); 7976 b3 = ror64(tmp, 37);
7977 b10 -= b3; 7977 b10 -= b3;
7978 7978
7979 tmp = b5 ^ b8; 7979 tmp = b5 ^ b8;
7980 b5 = (tmp >> 31) | (tmp << (64 - 31)); 7980 b5 = ror64(tmp, 31);
7981 b8 -= b5; 7981 b8 -= b5;
7982 7982
7983 tmp = b1 ^ b14; 7983 tmp = b1 ^ b14;
7984 b1 = (tmp >> 23) | (tmp << (64 - 23)); 7984 b1 = ror64(tmp, 23);
7985 b14 -= b1; 7985 b14 -= b1;
7986 7986
7987 tmp = b9 ^ b4; 7987 tmp = b9 ^ b4;
7988 b9 = (tmp >> 52) | (tmp << (64 - 52)); 7988 b9 = ror64(tmp, 52);
7989 b4 -= b9; 7989 b4 -= b9;
7990 7990
7991 tmp = b13 ^ b6; 7991 tmp = b13 ^ b6;
7992 b13 = (tmp >> 35) | (tmp << (64 - 35)); 7992 b13 = ror64(tmp, 35);
7993 b6 -= b13; 7993 b6 -= b13;
7994 7994
7995 tmp = b11 ^ b2; 7995 tmp = b11 ^ b2;
7996 b11 = (tmp >> 48) | (tmp << (64 - 48)); 7996 b11 = ror64(tmp, 48);
7997 b2 -= b11; 7997 b2 -= b11;
7998 7998
7999 tmp = b15 ^ b0; 7999 tmp = b15 ^ b0;
8000 b15 = (tmp >> 9) | (tmp << (64 - 9)); 8000 b15 = ror64(tmp, 9);
8001 b0 -= b15; 8001 b0 -= b15;
8002 8002
8003 tmp = b9 ^ b10; 8003 tmp = b9 ^ b10;
8004 b9 = (tmp >> 25) | (tmp << (64 - 25)); 8004 b9 = ror64(tmp, 25);
8005 b10 -= b9; 8005 b10 -= b9;
8006 8006
8007 tmp = b11 ^ b8; 8007 tmp = b11 ^ b8;
8008 b11 = (tmp >> 44) | (tmp << (64 - 44)); 8008 b11 = ror64(tmp, 44);
8009 b8 -= b11; 8009 b8 -= b11;
8010 8010
8011 tmp = b13 ^ b14; 8011 tmp = b13 ^ b14;
8012 b13 = (tmp >> 42) | (tmp << (64 - 42)); 8012 b13 = ror64(tmp, 42);
8013 b14 -= b13; 8013 b14 -= b13;
8014 8014
8015 tmp = b15 ^ b12; 8015 tmp = b15 ^ b12;
8016 b15 = (tmp >> 19) | (tmp << (64 - 19)); 8016 b15 = ror64(tmp, 19);
8017 b12 -= b15; 8017 b12 -= b15;
8018 8018
8019 tmp = b1 ^ b6; 8019 tmp = b1 ^ b6;
8020 b1 = (tmp >> 46) | (tmp << (64 - 46)); 8020 b1 = ror64(tmp, 46);
8021 b6 -= b1; 8021 b6 -= b1;
8022 8022
8023 tmp = b3 ^ b4; 8023 tmp = b3 ^ b4;
8024 b3 = (tmp >> 47) | (tmp << (64 - 47)); 8024 b3 = ror64(tmp, 47);
8025 b4 -= b3; 8025 b4 -= b3;
8026 8026
8027 tmp = b5 ^ b2; 8027 tmp = b5 ^ b2;
8028 b5 = (tmp >> 44) | (tmp << (64 - 44)); 8028 b5 = ror64(tmp, 44);
8029 b2 -= b5; 8029 b2 -= b5;
8030 8030
8031 tmp = b7 ^ b0; 8031 tmp = b7 ^ b0;
8032 b7 = (tmp >> 31) | (tmp << (64 - 31)); 8032 b7 = ror64(tmp, 31);
8033 b0 -= b7; 8033 b0 -= b7;
8034 8034
8035 tmp = b1 ^ b8; 8035 tmp = b1 ^ b8;
8036 b1 = (tmp >> 41) | (tmp << (64 - 41)); 8036 b1 = ror64(tmp, 41);
8037 b8 -= b1; 8037 b8 -= b1;
8038 8038
8039 tmp = b5 ^ b14; 8039 tmp = b5 ^ b14;
8040 b5 = (tmp >> 42) | (tmp << (64 - 42)); 8040 b5 = ror64(tmp, 42);
8041 b14 -= b5; 8041 b14 -= b5;
8042 8042
8043 tmp = b3 ^ b12; 8043 tmp = b3 ^ b12;
8044 b3 = (tmp >> 53) | (tmp << (64 - 53)); 8044 b3 = ror64(tmp, 53);
8045 b12 -= b3; 8045 b12 -= b3;
8046 8046
8047 tmp = b7 ^ b10; 8047 tmp = b7 ^ b10;
8048 b7 = (tmp >> 4) | (tmp << (64 - 4)); 8048 b7 = ror64(tmp, 4);
8049 b10 -= b7; 8049 b10 -= b7;
8050 8050
8051 tmp = b15 ^ b4; 8051 tmp = b15 ^ b4;
8052 b15 = (tmp >> 51) | (tmp << (64 - 51)); 8052 b15 = ror64(tmp, 51);
8053 b4 -= b15; 8053 b4 -= b15;
8054 8054
8055 tmp = b11 ^ b6; 8055 tmp = b11 ^ b6;
8056 b11 = (tmp >> 56) | (tmp << (64 - 56)); 8056 b11 = ror64(tmp, 56);
8057 b6 -= b11; 8057 b6 -= b11;
8058 8058
8059 tmp = b13 ^ b2; 8059 tmp = b13 ^ b2;
8060 b13 = (tmp >> 34) | (tmp << (64 - 34)); 8060 b13 = ror64(tmp, 34);
8061 b2 -= b13; 8061 b2 -= b13;
8062 8062
8063 tmp = b9 ^ b0; 8063 tmp = b9 ^ b0;
8064 b9 = (tmp >> 16) | (tmp << (64 - 16)); 8064 b9 = ror64(tmp, 16);
8065 b0 -= b9; 8065 b0 -= b9;
8066 8066
8067 tmp = b15 ^ b14; 8067 tmp = b15 ^ b14;
8068 b15 = (tmp >> 30) | (tmp << (64 - 30)); 8068 b15 = ror64(tmp, 30);
8069 b14 -= b15 + k15 + t2; 8069 b14 -= b15 + k15 + t2;
8070 b15 -= k16 + 1; 8070 b15 -= k16 + 1;
8071 8071
8072 tmp = b13 ^ b12; 8072 tmp = b13 ^ b12;
8073 b13 = (tmp >> 44) | (tmp << (64 - 44)); 8073 b13 = ror64(tmp, 44);
8074 b12 -= b13 + k13; 8074 b12 -= b13 + k13;
8075 b13 -= k14 + t1; 8075 b13 -= k14 + t1;
8076 8076
8077 tmp = b11 ^ b10; 8077 tmp = b11 ^ b10;
8078 b11 = (tmp >> 47) | (tmp << (64 - 47)); 8078 b11 = ror64(tmp, 47);
8079 b10 -= b11 + k11; 8079 b10 -= b11 + k11;
8080 b11 -= k12; 8080 b11 -= k12;
8081 8081
8082 tmp = b9 ^ b8; 8082 tmp = b9 ^ b8;
8083 b9 = (tmp >> 12) | (tmp << (64 - 12)); 8083 b9 = ror64(tmp, 12);
8084 b8 -= b9 + k9; 8084 b8 -= b9 + k9;
8085 b9 -= k10; 8085 b9 -= k10;
8086 8086
8087 tmp = b7 ^ b6; 8087 tmp = b7 ^ b6;
8088 b7 = (tmp >> 31) | (tmp << (64 - 31)); 8088 b7 = ror64(tmp, 31);
8089 b6 -= b7 + k7; 8089 b6 -= b7 + k7;
8090 b7 -= k8; 8090 b7 -= k8;
8091 8091
8092 tmp = b5 ^ b4; 8092 tmp = b5 ^ b4;
8093 b5 = (tmp >> 37) | (tmp << (64 - 37)); 8093 b5 = ror64(tmp, 37);
8094 b4 -= b5 + k5; 8094 b4 -= b5 + k5;
8095 b5 -= k6; 8095 b5 -= k6;
8096 8096
8097 tmp = b3 ^ b2; 8097 tmp = b3 ^ b2;
8098 b3 = (tmp >> 9) | (tmp << (64 - 9)); 8098 b3 = ror64(tmp, 9);
8099 b2 -= b3 + k3; 8099 b2 -= b3 + k3;
8100 b3 -= k4; 8100 b3 -= k4;
8101 8101
8102 tmp = b1 ^ b0; 8102 tmp = b1 ^ b0;
8103 b1 = (tmp >> 41) | (tmp << (64 - 41)); 8103 b1 = ror64(tmp, 41);
8104 b0 -= b1 + k1; 8104 b0 -= b1 + k1;
8105 b1 -= k2; 8105 b1 -= k2;
8106 8106
8107 tmp = b7 ^ b12; 8107 tmp = b7 ^ b12;
8108 b7 = (tmp >> 25) | (tmp << (64 - 25)); 8108 b7 = ror64(tmp, 25);
8109 b12 -= b7; 8109 b12 -= b7;
8110 8110
8111 tmp = b3 ^ b10; 8111 tmp = b3 ^ b10;
8112 b3 = (tmp >> 16) | (tmp << (64 - 16)); 8112 b3 = ror64(tmp, 16);
8113 b10 -= b3; 8113 b10 -= b3;
8114 8114
8115 tmp = b5 ^ b8; 8115 tmp = b5 ^ b8;
8116 b5 = (tmp >> 28) | (tmp << (64 - 28)); 8116 b5 = ror64(tmp, 28);
8117 b8 -= b5; 8117 b8 -= b5;
8118 8118
8119 tmp = b1 ^ b14; 8119 tmp = b1 ^ b14;
8120 b1 = (tmp >> 47) | (tmp << (64 - 47)); 8120 b1 = ror64(tmp, 47);
8121 b14 -= b1; 8121 b14 -= b1;
8122 8122
8123 tmp = b9 ^ b4; 8123 tmp = b9 ^ b4;
8124 b9 = (tmp >> 41) | (tmp << (64 - 41)); 8124 b9 = ror64(tmp, 41);
8125 b4 -= b9; 8125 b4 -= b9;
8126 8126
8127 tmp = b13 ^ b6; 8127 tmp = b13 ^ b6;
8128 b13 = (tmp >> 48) | (tmp << (64 - 48)); 8128 b13 = ror64(tmp, 48);
8129 b6 -= b13; 8129 b6 -= b13;
8130 8130
8131 tmp = b11 ^ b2; 8131 tmp = b11 ^ b2;
8132 b11 = (tmp >> 20) | (tmp << (64 - 20)); 8132 b11 = ror64(tmp, 20);
8133 b2 -= b11; 8133 b2 -= b11;
8134 8134
8135 tmp = b15 ^ b0; 8135 tmp = b15 ^ b0;
8136 b15 = (tmp >> 5) | (tmp << (64 - 5)); 8136 b15 = ror64(tmp, 5);
8137 b0 -= b15; 8137 b0 -= b15;
8138 8138
8139 tmp = b9 ^ b10; 8139 tmp = b9 ^ b10;
8140 b9 = (tmp >> 17) | (tmp << (64 - 17)); 8140 b9 = ror64(tmp, 17);
8141 b10 -= b9; 8141 b10 -= b9;
8142 8142
8143 tmp = b11 ^ b8; 8143 tmp = b11 ^ b8;
8144 b11 = (tmp >> 59) | (tmp << (64 - 59)); 8144 b11 = ror64(tmp, 59);
8145 b8 -= b11; 8145 b8 -= b11;
8146 8146
8147 tmp = b13 ^ b14; 8147 tmp = b13 ^ b14;
8148 b13 = (tmp >> 41) | (tmp << (64 - 41)); 8148 b13 = ror64(tmp, 41);
8149 b14 -= b13; 8149 b14 -= b13;
8150 8150
8151 tmp = b15 ^ b12; 8151 tmp = b15 ^ b12;
8152 b15 = (tmp >> 34) | (tmp << (64 - 34)); 8152 b15 = ror64(tmp, 34);
8153 b12 -= b15; 8153 b12 -= b15;
8154 8154
8155 tmp = b1 ^ b6; 8155 tmp = b1 ^ b6;
8156 b1 = (tmp >> 13) | (tmp << (64 - 13)); 8156 b1 = ror64(tmp, 13);
8157 b6 -= b1; 8157 b6 -= b1;
8158 8158
8159 tmp = b3 ^ b4; 8159 tmp = b3 ^ b4;
8160 b3 = (tmp >> 51) | (tmp << (64 - 51)); 8160 b3 = ror64(tmp, 51);
8161 b4 -= b3; 8161 b4 -= b3;
8162 8162
8163 tmp = b5 ^ b2; 8163 tmp = b5 ^ b2;
8164 b5 = (tmp >> 4) | (tmp << (64 - 4)); 8164 b5 = ror64(tmp, 4);
8165 b2 -= b5; 8165 b2 -= b5;
8166 8166
8167 tmp = b7 ^ b0; 8167 tmp = b7 ^ b0;
8168 b7 = (tmp >> 33) | (tmp << (64 - 33)); 8168 b7 = ror64(tmp, 33);
8169 b0 -= b7; 8169 b0 -= b7;
8170 8170
8171 tmp = b1 ^ b8; 8171 tmp = b1 ^ b8;
8172 b1 = (tmp >> 52) | (tmp << (64 - 52)); 8172 b1 = ror64(tmp, 52);
8173 b8 -= b1; 8173 b8 -= b1;
8174 8174
8175 tmp = b5 ^ b14; 8175 tmp = b5 ^ b14;
8176 b5 = (tmp >> 23) | (tmp << (64 - 23)); 8176 b5 = ror64(tmp, 23);
8177 b14 -= b5; 8177 b14 -= b5;
8178 8178
8179 tmp = b3 ^ b12; 8179 tmp = b3 ^ b12;
8180 b3 = (tmp >> 18) | (tmp << (64 - 18)); 8180 b3 = ror64(tmp, 18);
8181 b12 -= b3; 8181 b12 -= b3;
8182 8182
8183 tmp = b7 ^ b10; 8183 tmp = b7 ^ b10;
8184 b7 = (tmp >> 49) | (tmp << (64 - 49)); 8184 b7 = ror64(tmp, 49);
8185 b10 -= b7; 8185 b10 -= b7;
8186 8186
8187 tmp = b15 ^ b4; 8187 tmp = b15 ^ b4;
8188 b15 = (tmp >> 55) | (tmp << (64 - 55)); 8188 b15 = ror64(tmp, 55);
8189 b4 -= b15; 8189 b4 -= b15;
8190 8190
8191 tmp = b11 ^ b6; 8191 tmp = b11 ^ b6;
8192 b11 = (tmp >> 10) | (tmp << (64 - 10)); 8192 b11 = ror64(tmp, 10);
8193 b6 -= b11; 8193 b6 -= b11;
8194 8194
8195 tmp = b13 ^ b2; 8195 tmp = b13 ^ b2;
8196 b13 = (tmp >> 19) | (tmp << (64 - 19)); 8196 b13 = ror64(tmp, 19);
8197 b2 -= b13; 8197 b2 -= b13;
8198 8198
8199 tmp = b9 ^ b0; 8199 tmp = b9 ^ b0;
8200 b9 = (tmp >> 38) | (tmp << (64 - 38)); 8200 b9 = ror64(tmp, 38);
8201 b0 -= b9; 8201 b0 -= b9;
8202 8202
8203 tmp = b15 ^ b14; 8203 tmp = b15 ^ b14;
8204 b15 = (tmp >> 37) | (tmp << (64 - 37)); 8204 b15 = ror64(tmp, 37);
8205 b14 -= b15 + k14 + t1; 8205 b14 -= b15 + k14 + t1;
8206 b15 -= k15; 8206 b15 -= k15;
8207 8207
8208 tmp = b13 ^ b12; 8208 tmp = b13 ^ b12;
8209 b13 = (tmp >> 22) | (tmp << (64 - 22)); 8209 b13 = ror64(tmp, 22);
8210 b12 -= b13 + k12; 8210 b12 -= b13 + k12;
8211 b13 -= k13 + t0; 8211 b13 -= k13 + t0;
8212 8212
8213 tmp = b11 ^ b10; 8213 tmp = b11 ^ b10;
8214 b11 = (tmp >> 17) | (tmp << (64 - 17)); 8214 b11 = ror64(tmp, 17);
8215 b10 -= b11 + k10; 8215 b10 -= b11 + k10;
8216 b11 -= k11; 8216 b11 -= k11;
8217 8217
8218 tmp = b9 ^ b8; 8218 tmp = b9 ^ b8;
8219 b9 = (tmp >> 8) | (tmp << (64 - 8)); 8219 b9 = ror64(tmp, 8);
8220 b8 -= b9 + k8; 8220 b8 -= b9 + k8;
8221 b9 -= k9; 8221 b9 -= k9;
8222 8222
8223 tmp = b7 ^ b6; 8223 tmp = b7 ^ b6;
8224 b7 = (tmp >> 47) | (tmp << (64 - 47)); 8224 b7 = ror64(tmp, 47);
8225 b6 -= b7 + k6; 8225 b6 -= b7 + k6;
8226 b7 -= k7; 8226 b7 -= k7;
8227 8227
8228 tmp = b5 ^ b4; 8228 tmp = b5 ^ b4;
8229 b5 = (tmp >> 8) | (tmp << (64 - 8)); 8229 b5 = ror64(tmp, 8);
8230 b4 -= b5 + k4; 8230 b4 -= b5 + k4;
8231 b5 -= k5; 8231 b5 -= k5;
8232 8232
8233 tmp = b3 ^ b2; 8233 tmp = b3 ^ b2;
8234 b3 = (tmp >> 13) | (tmp << (64 - 13)); 8234 b3 = ror64(tmp, 13);
8235 b2 -= b3 + k2; 8235 b2 -= b3 + k2;
8236 b3 -= k3; 8236 b3 -= k3;
8237 8237
8238 tmp = b1 ^ b0; 8238 tmp = b1 ^ b0;
8239 b1 = (tmp >> 24) | (tmp << (64 - 24)); 8239 b1 = ror64(tmp, 24);
8240 b0 -= b1 + k0; 8240 b0 -= b1 + k0;
8241 b1 -= k1; 8241 b1 -= k1;
8242 8242
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 6d50fc4fd02e..ac126d4f3117 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -898,6 +898,7 @@ static void slic_upr_start(struct adapter *adapter)
898{ 898{
899 struct slic_upr *upr; 899 struct slic_upr *upr;
900 __iomem struct slic_regs *slic_regs = adapter->slic_regs; 900 __iomem struct slic_regs *slic_regs = adapter->slic_regs;
901
901 upr = adapter->upr_list; 902 upr = adapter->upr_list;
902 if (!upr) 903 if (!upr)
903 return; 904 return;
@@ -1144,7 +1145,7 @@ static int slic_config_get(struct adapter *adapter, u32 config, u32 config_h)
1144/* 1145/*
1145 * Compute a checksum of the EEPROM according to RFC 1071. 1146 * Compute a checksum of the EEPROM according to RFC 1071.
1146 */ 1147 */
1147static u16 slic_eeprom_cksum(void *eeprom, unsigned len) 1148static u16 slic_eeprom_cksum(void *eeprom, unsigned int len)
1148{ 1149{
1149 u16 *wp = eeprom; 1150 u16 *wp = eeprom;
1150 u32 checksum = 0; 1151 u32 checksum = 0;
@@ -1855,6 +1856,11 @@ static void slic_xmit_build_request(struct adapter *adapter,
1855 ihcmd->u.slic_buffers.totlen = skb->len; 1856 ihcmd->u.slic_buffers.totlen = skb->len;
1856 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len, 1857 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
1857 PCI_DMA_TODEVICE); 1858 PCI_DMA_TODEVICE);
1859 if (pci_dma_mapping_error(adapter->pcidev, phys_addr)) {
1860 kfree_skb(skb);
1861 dev_err(&adapter->pcidev->dev, "DMA mapping error\n");
1862 return;
1863 }
1858 ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr); 1864 ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr);
1859 ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr); 1865 ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr);
1860 ihcmd->u.slic_buffers.bufs[0].length = skb->len; 1866 ihcmd->u.slic_buffers.bufs[0].length = skb->len;
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 95f7cae3cc23..f80ee776677f 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -306,7 +306,7 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
306 unsigned int input, request; 306 unsigned int input, request;
307 unsigned int tmpClock, ret; 307 unsigned int tmpClock, ret;
308 const int max_OD = 3; 308 const int max_OD = 3;
309 int max_d; 309 int max_d = 6;
310 310
311 if (getChipType() == SM750LE) { 311 if (getChipType() == SM750LE) {
312 /* SM750LE don't have prgrammable PLL and M/N values to work on. 312 /* SM750LE don't have prgrammable PLL and M/N values to work on.
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index a22fb07512a1..97ca4ecca8a9 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -263,7 +263,7 @@ static struct notifier_block vt_notifier_block = {
263static unsigned char get_attributes(struct vc_data *vc, u16 *pos) 263static unsigned char get_attributes(struct vc_data *vc, u16 *pos)
264{ 264{
265 pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1); 265 pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
266 return (u_char) (scr_readw(pos) >> 8); 266 return (scr_readw(pos) & ~vc->vc_hi_font_mask) >> 8;
267} 267}
268 268
269static void speakup_date(struct vc_data *vc) 269static void speakup_date(struct vc_data *vc)
@@ -473,8 +473,10 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
473 w = scr_readw(pos); 473 w = scr_readw(pos);
474 c = w & 0xff; 474 c = w & 0xff;
475 475
476 if (w & vc->vc_hi_font_mask) 476 if (w & vc->vc_hi_font_mask) {
477 w &= ~vc->vc_hi_font_mask;
477 c |= 0x100; 478 c |= 0x100;
479 }
478 480
479 ch = inverse_translate(vc, c, 0); 481 ch = inverse_translate(vc, c, 0);
480 *attribs = (w & 0xff00) >> 8; 482 *attribs = (w & 0xff00) >> 8;
diff --git a/drivers/staging/speakup/serialio.h b/drivers/staging/speakup/serialio.h
index 1b399214ecf7..3ad7ff0bc3c3 100644
--- a/drivers/staging/speakup/serialio.h
+++ b/drivers/staging/speakup/serialio.h
@@ -6,6 +6,7 @@
6#ifndef __sparc__ 6#ifndef __sparc__
7#include <linux/serial.h> 7#include <linux/serial.h>
8#endif 8#endif
9#include <linux/serial_core.h>
9 10
10/* 11/*
11 * this is cut&paste from 8250.h. Get rid of the structure, the definitions 12 * this is cut&paste from 8250.h. Get rid of the structure, the definitions
@@ -16,7 +17,7 @@ struct old_serial_port {
16 unsigned int baud_base; 17 unsigned int baud_base;
17 unsigned int port; 18 unsigned int port;
18 unsigned int irq; 19 unsigned int irq;
19 unsigned int flags; /* unused */ 20 upf_t flags; /* unused */
20}; 21};
21 22
22/* countdown values for serial timeouts in us */ 23/* countdown values for serial timeouts in us */
diff --git a/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset b/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
index b0498ff32405..c2359de17eaf 100644
--- a/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
+++ b/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
@@ -50,20 +50,6 @@ Description: This field is used to tell s-Par which type of recovery tool
50 commission the guest. 50 commission the guest.
51Users: sparmaintainer@unisys.com 51Users: sparmaintainer@unisys.com
52 52
53What: guest/chipsetready
54Date: 7/18/2014
55KernelVersion: TBD
56Contact: sparmaintainer@unisys.com
57Description: This entry is used by Unisys application software on the guest
58 to acknowledge completion of specific events for integration
59 purposes, but these acknowledgements are not required for the
60 guest to operate correctly. The interface accepts one of two
61 strings: MODULES_LOADED to indicate that the s-Par driver
62 modules have been loaded successfully, or CALLHOMEDISK_MOUNTED,
63 which indicates that the disk used to support call home services
64 has been successfully mounted.
65Users: sparmaintainer@unisys.com
66
67What: parahotplug/deviceenabled 53What: parahotplug/deviceenabled
68Date: 7/18/2014 54Date: 7/18/2014
69KernelVersion: TBD 55KernelVersion: TBD
diff --git a/drivers/staging/unisys/Documentation/overview.txt b/drivers/staging/unisys/Documentation/overview.txt
index c2d8dd4a2e41..1146c1cf5c2a 100644
--- a/drivers/staging/unisys/Documentation/overview.txt
+++ b/drivers/staging/unisys/Documentation/overview.txt
@@ -137,12 +137,6 @@ called automatically by the visorbus driver at appropriate times:
137 137
138* The resume() function is the "book-end" to pause(), and is described above. 138* The resume() function is the "book-end" to pause(), and is described above.
139 139
140If/when a function driver creates a Linux device (that needs to be accessed
141from usermode), it calls visorbus_registerdevnode(), passing the major and
142minor number of the device. (Of course not all function drivers will need
143to do this.) This simply creates the appropriate "devmajorminor" sysfs entry
144described below, so that a hotplug script can use it to create a device node.
145
1462.1.3. sysfs Advertised Information 1402.1.3. sysfs Advertised Information
147----------------------------------- 141-----------------------------------
148 142
@@ -197,19 +191,6 @@ The following files exist under /sys/devices/visorbus<x>/vbus<x>:dev<y>:
197 if the appropriate function driver has not 191 if the appropriate function driver has not
198 been loaded yet. 192 been loaded yet.
199 193
200 devmajorminor
201
202 <devname> if applicable, each file here identifies (via
203 ... its file contents) the
204 "<major>:<minor>" needed for a device node to
205 enable access from usermode. There is exactly
206 one file here for each different device node
207 that can be accessed (from usermode). Note
208 that this info is provided by a particular
209 function driver, so these will not exist
210 until AFTER the appropriate function driver
211 controlling this device class is loaded.
212
213 channel properties of the device channel (all in 194 channel properties of the device channel (all in
214 ascii text format) 195 ascii text format)
215 196
diff --git a/drivers/staging/unisys/Documentation/proc-entries.txt b/drivers/staging/unisys/Documentation/proc-entries.txt
deleted file mode 100644
index 426f92b1c577..000000000000
--- a/drivers/staging/unisys/Documentation/proc-entries.txt
+++ /dev/null
@@ -1,93 +0,0 @@
1 s-Par Proc Entries
2This document describes the proc entries created by the Unisys s-Par modules.
3
4Support Module Entries
5These entries are provided primarily for debugging.
6
7/proc/uislib/info: This entry contains debugging information for the
8uislib module, including bus information and memory usage.
9
10/proc/visorchipset/controlvm: This directory contains debugging
11entries for the controlvm channel used by visorchipset.
12
13/proc/uislib/platform: This entry is used to display the platform
14number this node is in the system. For some guests, this may be
15invalid.
16
17/proc/visorchipset/chipsetready: This entry is written to by scripts
18to signify that any user level activity has been completed before the
19guest can be considered running and is shown as running in the s-Par
20UI.
21
22Device Entries
23These entries provide status of the devices shared by a service partition.
24
25/proc/uislib/vbus: this is a directory containing entries for each
26virtual bus. Each numbered sub-directory contains an info entry, which
27describes the devices that appear on that bus.
28
29/proc/uislib/cycles_before_wait: This entry is used to tune
30performance, by setting the number of cycles we wait before going idle
31when in polling mode. A longer time will reduce message latency but
32spend more processing time polling.
33
34/proc/uislib/smart_wakeup: This entry is used to tune performance, by
35enabling or disabling smart wakeup.
36
37/proc/virthba/info: This entry contains debugging information for the
38virthba module, including interrupt information and memory usage.
39
40/proc/virthba/enable_ints: This entry controls interrupt use by the
41virthba module. Writing a 0 to this entry will disable interrupts.
42
43/proc/virtnic/info: This entry contains debugging information for the
44virtnic module, including interrupt information, send and receive
45counts, and other device information.
46
47/proc/virtnic/ethX: This is a directory containing entries for each
48virtual NIC. Each named subdirectory contains two entries,
49clientstring and zone.
50
51/proc/virtpci/info: This entry contains debugging information for the
52virtpci module, including virtual PCI bus information and device
53locations.
54
55/proc/virtnic/enable_ints: This entry controls interrupt use by the
56virtnic module. Writing a 0 to this entry will disable interrupts.
57
58Visorconinclient, visordiag, visornoop, visorserialclient, and
59visorvideoclient Entries
60
61The entries in proc for these modules all follow the same
62pattern. Each module has its own proc directory with the same name,
63e.g. visordiag presents a /proc/visordiag directory. Inside of the
64module's directory are a device directory, which contains one numbered
65directory for each device provided by that module. Each device has a
66diag entry that presents the device number and visorbus name for that
67device. The module directory also has a driver/diag entry, which
68reports the corresponding s-Par version number of the driver.
69
70Automated Installation Entries
71
72These entries are used to pass information between the s-Par platform
73and the Linux-based installation and recovery tool. These values are
74read/write, however, the guest can only reset them to 0, or report an
75error status through the installer entry. The values are only set via
76s-Par's firmware interface, to help prevent accidentally booting into
77the tool.
78
79/proc/visorchipset/boottotool: This entry instructs s-Par that the
80next reboot will launch the installation and recovery tool. If set to
810, the next boot will happen according to the UEFI boot manager
82settings.
83
84/proc/visorchipset/toolaction: This entry indicates the installation
85and recovery tool mode requested for the next boot.
86
87/proc/visorchipset/installer: this entry is used by the installation
88and recovery tool to pass status and result information back to the
89s-Par firmware.
90
91/proc/visorchipset/partition: This directory contains the guest
92partition configuration data for each virtual bus, for use during
93installation and at runtime for s-Par service partitions.
diff --git a/drivers/staging/unisys/MAINTAINERS b/drivers/staging/unisys/MAINTAINERS
index cc46e37e64c1..1f0425bf3583 100644
--- a/drivers/staging/unisys/MAINTAINERS
+++ b/drivers/staging/unisys/MAINTAINERS
@@ -2,5 +2,4 @@ Unisys s-Par drivers
2M: David Kershner <sparmaintainer@unisys.com> 2M: David Kershner <sparmaintainer@unisys.com>
3S: Maintained 3S: Maintained
4F: Documentation/s-Par/overview.txt 4F: Documentation/s-Par/overview.txt
5F: Documentation/s-Par/proc-entries.txt
6F: drivers/staging/unisys/ 5F: drivers/staging/unisys/
diff --git a/drivers/staging/unisys/include/channel.h b/drivers/staging/unisys/include/channel.h
index 5af59a5fce61..db4e6b28755b 100644
--- a/drivers/staging/unisys/include/channel.h
+++ b/drivers/staging/unisys/include/channel.h
@@ -76,9 +76,9 @@ enum channel_clientstate {
76}; 76};
77 77
78static inline const u8 * 78static inline const u8 *
79ULTRA_CHANNELCLI_STRING(u32 v) 79ULTRA_CHANNELCLI_STRING(u32 state)
80{ 80{
81 switch (v) { 81 switch (state) {
82 case CHANNELCLI_DETACHED: 82 case CHANNELCLI_DETACHED:
83 return (const u8 *)("DETACHED"); 83 return (const u8 *)("DETACHED");
84 case CHANNELCLI_DISABLED: 84 case CHANNELCLI_DISABLED:
@@ -411,7 +411,7 @@ spar_channel_client_acquire_os(void __iomem *ch, u8 *id)
411 mb(); /* required for channel synch */ 411 mb(); /* required for channel synch */
412 } 412 }
413 if (readl(&hdr->cli_state_os) == CHANNELCLI_OWNED) { 413 if (readl(&hdr->cli_state_os) == CHANNELCLI_OWNED) {
414 if (readb(&hdr->cli_error_os) != 0) { 414 if (readb(&hdr->cli_error_os)) {
415 /* we are in an error msg throttling state; 415 /* we are in an error msg throttling state;
416 * come out of it 416 * come out of it
417 */ 417 */
@@ -459,7 +459,7 @@ spar_channel_client_acquire_os(void __iomem *ch, u8 *id)
459 mb(); /* required for channel synch */ 459 mb(); /* required for channel synch */
460 return 0; 460 return 0;
461 } 461 }
462 if (readb(&hdr->cli_error_os) != 0) { 462 if (readb(&hdr->cli_error_os)) {
463 /* we are in an error msg throttling state; come out of it */ 463 /* we are in an error msg throttling state; come out of it */
464 pr_info("%s Channel OS client acquire now successful\n", id); 464 pr_info("%s Channel OS client acquire now successful\n", id);
465 writeb(0, &hdr->cli_error_os); 465 writeb(0, &hdr->cli_error_os);
@@ -472,7 +472,7 @@ spar_channel_client_release_os(void __iomem *ch, u8 *id)
472{ 472{
473 struct channel_header __iomem *hdr = ch; 473 struct channel_header __iomem *hdr = ch;
474 474
475 if (readb(&hdr->cli_error_os) != 0) { 475 if (readb(&hdr->cli_error_os)) {
476 /* we are in an error msg throttling state; come out of it */ 476 /* we are in an error msg throttling state; come out of it */
477 pr_info("%s Channel OS client error state cleared\n", id); 477 pr_info("%s Channel OS client error state cleared\n", id);
478 writeb(0, &hdr->cli_error_os); 478 writeb(0, &hdr->cli_error_os);
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 880d9f04cbcf..5ccf81485d72 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -253,48 +253,6 @@ struct uiscmdrsp_scsi {
253/* SCSI device version for no disk inquiry result */ 253/* SCSI device version for no disk inquiry result */
254#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */ 254#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */
255 255
256/* Windows and Linux want different things for a non-existent lun. So, we'll let
257 * caller pass in the peripheral qualifier and type.
258 * NOTE:[4] SCSI returns (n-4); so we return length-1-4 or length-5.
259 */
260
261#define SET_NO_DISK_INQUIRY_RESULT(buf, len, lun, lun0notpresent, notpresent) \
262 do { \
263 memset(buf, 0, \
264 MINNUM(len, \
265 (unsigned int)NO_DISK_INQUIRY_RESULT_LEN)); \
266 buf[2] = (u8)SCSI_SPC2_VER; \
267 if (lun == 0) { \
268 buf[0] = (u8)lun0notpresent; \
269 buf[3] = (u8)DEV_HISUPPORT; \
270 } else \
271 buf[0] = (u8)notpresent; \
272 buf[4] = (u8)( \
273 MINNUM(len, \
274 (unsigned int)NO_DISK_INQUIRY_RESULT_LEN) - 5);\
275 if (len >= NO_DISK_INQUIRY_RESULT_LEN) { \
276 buf[8] = 'D'; \
277 buf[9] = 'E'; \
278 buf[10] = 'L'; \
279 buf[11] = 'L'; \
280 buf[16] = 'P'; \
281 buf[17] = 'S'; \
282 buf[18] = 'E'; \
283 buf[19] = 'U'; \
284 buf[20] = 'D'; \
285 buf[21] = 'O'; \
286 buf[22] = ' '; \
287 buf[23] = 'D'; \
288 buf[24] = 'E'; \
289 buf[25] = 'V'; \
290 buf[26] = 'I'; \
291 buf[27] = 'C'; \
292 buf[28] = 'E'; \
293 buf[30] = ' '; \
294 buf[31] = '.'; \
295 } \
296 } while (0)
297
298/* Struct & Defines to support sense information. */ 256/* Struct & Defines to support sense information. */
299 257
300/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is 258/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index 2a64a9ce0208..9baf1ec70d01 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -61,54 +61,55 @@ struct visor_channeltype_descriptor {
61 const char *name; 61 const char *name;
62}; 62};
63 63
64/** Information provided by each visor driver when it registers with the 64/**
65 * visorbus driver. 65 * struct visor_driver - Information provided by each visor driver when it
66 * registers with the visorbus driver.
67 * @name: Name of the visor driver.
68 * @version: The numbered version of the driver (x.x.xxx).
69 * @vertag: A human readable version string.
70 * @owner: The module owner.
71 * @channel_types: Types of channels handled by this driver, ending with
72 * a zero GUID. Our specialized BUS.match() method knows
73 * about this list, and uses it to determine whether this
74 * driver will in fact handle a new device that it has
75 * detected.
76 * @probe: Called when a new device comes online, by our probe()
77 * function specified by driver.probe() (triggered
78 * ultimately by some call to driver_register(),
79 * bus_add_driver(), or driver_attach()).
80 * @remove: Called when a new device is removed, by our remove()
81 * function specified by driver.remove() (triggered
82 * ultimately by some call to device_release_driver()).
 83 * @channel_interrupt: Called periodically, whenever there is a possibility
84 * that "something interesting" may have happened to the
85 * channel.
86 * @pause: Called to initiate a change of the device's state. If
87 * the return valu`e is < 0, there was an error and the
 88 * the return value is < 0, there was an error and the state
89 * is >= 0, then the state transition was INITIATED
90 * successfully, and complete_func() will be called (or
91 * was just called) with the final status when either the
92 * state transition fails or completes successfully.
93 * @resume: Behaves similar to pause.
94 * @driver: Private reference to the device driver. For use by bus
95 * driver only.
96 * @version_attr: Private version field. For use by bus driver only.
66 */ 97 */
67struct visor_driver { 98struct visor_driver {
68 const char *name; 99 const char *name;
69 const char *version; 100 const char *version;
70 const char *vertag; 101 const char *vertag;
71 const char *build_date;
72 const char *build_time;
73 struct module *owner; 102 struct module *owner;
74
75 /** Types of channels handled by this driver, ending with 0 GUID.
76 * Our specialized BUS.match() method knows about this list, and
77 * uses it to determine whether this driver will in fact handle a
78 * new device that it has detected.
79 */
80 struct visor_channeltype_descriptor *channel_types; 103 struct visor_channeltype_descriptor *channel_types;
81
82 /** Called when a new device comes online, by our probe() function
83 * specified by driver.probe() (triggered ultimately by some call
84 * to driver_register() / bus_add_driver() / driver_attach()).
85 */
86 int (*probe)(struct visor_device *dev); 104 int (*probe)(struct visor_device *dev);
87
88 /** Called when a new device is removed, by our remove() function
89 * specified by driver.remove() (triggered ultimately by some call
90 * to device_release_driver()).
91 */
92 void (*remove)(struct visor_device *dev); 105 void (*remove)(struct visor_device *dev);
93
94 /** Called periodically, whenever there is a possibility that
95 * "something interesting" may have happened to the channel state.
96 */
97 void (*channel_interrupt)(struct visor_device *dev); 106 void (*channel_interrupt)(struct visor_device *dev);
98
99 /** Called to initiate a change of the device's state. If the return
100 * valu`e is < 0, there was an error and the state transition will NOT
101 * occur. If the return value is >= 0, then the state transition was
102 * INITIATED successfully, and complete_func() will be called (or was
103 * just called) with the final status when either the state transition
104 * fails or completes successfully.
105 */
106 int (*pause)(struct visor_device *dev, 107 int (*pause)(struct visor_device *dev,
107 visorbus_state_complete_func complete_func); 108 visorbus_state_complete_func complete_func);
108 int (*resume)(struct visor_device *dev, 109 int (*resume)(struct visor_device *dev,
109 visorbus_state_complete_func complete_func); 110 visorbus_state_complete_func complete_func);
110 111
111 /** These fields are for private use by the bus driver only. */ 112 /* These fields are for private use by the bus driver only. */
112 struct device_driver driver; 113 struct device_driver driver;
113 struct driver_attribute version_attr; 114 struct driver_attribute version_attr;
114}; 115};
@@ -116,48 +117,58 @@ struct visor_driver {
116#define to_visor_driver(x) ((x) ? \ 117#define to_visor_driver(x) ((x) ? \
117 (container_of(x, struct visor_driver, driver)) : (NULL)) 118 (container_of(x, struct visor_driver, driver)) : (NULL))
118 119
119/** A device type for things "plugged" into the visorbus bus */ 120/**
121 * struct visor_device - A device type for things "plugged" into the visorbus
122 * bus
123 * visorchannel: Points to the channel that the device is
124 * associated with.
125 * channel_type_guid: Identifies the channel type to the bus driver.
126 * device: Device struct meant for use by the bus driver
127 * only.
128 * list_all: Used by the bus driver to enumerate devices.
129 * periodic_work: Device work queue. Private use by bus driver
130 * only.
131 * being_removed: Indicates that the device is being removed from
132 * the bus. Private bus driver use only.
133 * visordriver_callback_lock: Used by the bus driver to lock when handling
134 * channel events.
 135 * pausing:                   Indicates that a change towards a paused state
 136 *                            is in progress. Only modified by the bus driver.
137 * resuming: Indicates that a change towards a running state
138 * is in progress. Only modified by the bus driver.
139 * chipset_bus_no: Private field used by the bus driver.
 140 * chipset_dev_no:            Private field used by the bus driver.
141 * state: Used to indicate the current state of the
142 * device.
143 * inst: Unique GUID for this instance of the device.
144 * name: Name of the device.
145 * pending_msg_hdr: For private use by bus driver to respond to
146 * hypervisor requests.
147 * vbus_hdr_info: A pointer to header info. Private use by bus
148 * driver.
 149 * partition_uuid:            Indicates client partition id. This should be the
150 * same across all visor_devices in the current
151 * guest. Private use by bus driver only.
152 */
120 153
121struct visor_device { 154struct visor_device {
122 /** visor driver can use the visorchannel member with the functions
123 * defined in visorchannel.h to access the channel
124 */
125 struct visorchannel *visorchannel; 155 struct visorchannel *visorchannel;
126 uuid_le channel_type_guid; 156 uuid_le channel_type_guid;
127 u64 channel_bytes; 157 /* These fields are for private use by the bus driver only. */
128
129 /** These fields are for private use by the bus driver only.
130 * A notable exception is that the visor driver can use
131 * visor_get_drvdata() and visor_set_drvdata() to retrieve or stash
132 * private visor driver specific data within the device member.
133 */
134 struct device device; 158 struct device device;
135 struct list_head list_all; 159 struct list_head list_all;
136 struct periodic_work *periodic_work; 160 struct periodic_work *periodic_work;
137 bool being_removed; 161 bool being_removed;
138 bool responded_to_device_create;
139 struct kobject kobjdevmajorminor; /* visorbus<x>/dev<y>/devmajorminor/*/
140 struct {
141 int major, minor;
142 void *attr; /* private use by devmajorminor_attr.c you can
143 * change this constant to whatever you want
144 */
145 } devnodes[5];
146 /* the code will detect and behave appropriately) */
147 struct semaphore visordriver_callback_lock; 162 struct semaphore visordriver_callback_lock;
148 bool pausing; 163 bool pausing;
149 bool resuming; 164 bool resuming;
150 u32 chipset_bus_no; 165 u32 chipset_bus_no;
151 u32 chipset_dev_no; 166 u32 chipset_dev_no;
152 struct visorchipset_state state; 167 struct visorchipset_state state;
153 uuid_le type;
154 uuid_le inst; 168 uuid_le inst;
155 u8 *name; 169 u8 *name;
156 u8 *description;
157 struct controlvm_message_header *pending_msg_hdr; 170 struct controlvm_message_header *pending_msg_hdr;
158 void *vbus_hdr_info; 171 void *vbus_hdr_info;
159 u32 switch_no;
160 u32 internal_port_no;
161 uuid_le partition_uuid; 172 uuid_le partition_uuid;
162}; 173};
163 174
@@ -174,8 +185,6 @@ int visorbus_write_channel(struct visor_device *dev,
174 unsigned long nbytes); 185 unsigned long nbytes);
175int visorbus_clear_channel(struct visor_device *dev, 186int visorbus_clear_channel(struct visor_device *dev,
176 unsigned long offset, u8 ch, unsigned long nbytes); 187 unsigned long offset, u8 ch, unsigned long nbytes);
177int visorbus_registerdevnode(struct visor_device *dev,
178 const char *name, int major, int minor);
179void visorbus_enable_channel_interrupts(struct visor_device *dev); 188void visorbus_enable_channel_interrupts(struct visor_device *dev);
180void visorbus_disable_channel_interrupts(struct visor_device *dev); 189void visorbus_disable_channel_interrupts(struct visor_device *dev);
181#endif 190#endif
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 533bb5b3d284..3a147dbbd7b5 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -33,6 +33,9 @@ static int visorbus_forcenomatch;
33static int visorbus_debugref; 33static int visorbus_debugref;
34#define SERIALLOOPBACKCHANADDR (100 * 1024 * 1024) 34#define SERIALLOOPBACKCHANADDR (100 * 1024 * 1024)
35 35
36/* Display string that is guaranteed to be no longer than 99 characters */
37#define LINESIZE 99
38
36#define CURRENT_FILE_PC VISOR_BUS_PC_visorbus_main_c 39#define CURRENT_FILE_PC VISOR_BUS_PC_visorbus_main_c
37#define POLLJIFFIES_TESTWORK 100 40#define POLLJIFFIES_TESTWORK 100
38#define POLLJIFFIES_NORMALCHANNEL 10 41#define POLLJIFFIES_NORMALCHANNEL 10
@@ -182,7 +185,6 @@ static int
182visorbus_match(struct device *xdev, struct device_driver *xdrv) 185visorbus_match(struct device *xdev, struct device_driver *xdrv)
183{ 186{
184 uuid_le channel_type; 187 uuid_le channel_type;
185 int rc = 0;
186 int i; 188 int i;
187 struct visor_device *dev; 189 struct visor_device *dev;
188 struct visor_driver *drv; 190 struct visor_driver *drv;
@@ -190,26 +192,23 @@ visorbus_match(struct device *xdev, struct device_driver *xdrv)
190 dev = to_visor_device(xdev); 192 dev = to_visor_device(xdev);
191 drv = to_visor_driver(xdrv); 193 drv = to_visor_driver(xdrv);
192 channel_type = visorchannel_get_uuid(dev->visorchannel); 194 channel_type = visorchannel_get_uuid(dev->visorchannel);
193 if (visorbus_forcematch) {
194 rc = 1;
195 goto away;
196 }
197 if (visorbus_forcenomatch)
198 goto away;
199 195
196 if (visorbus_forcematch)
197 return 1;
198 if (visorbus_forcenomatch)
199 return 0;
200 if (!drv->channel_types) 200 if (!drv->channel_types)
201 goto away; 201 return 0;
202
202 for (i = 0; 203 for (i = 0;
203 (uuid_le_cmp(drv->channel_types[i].guid, NULL_UUID_LE) != 0) || 204 (uuid_le_cmp(drv->channel_types[i].guid, NULL_UUID_LE) != 0) ||
204 (drv->channel_types[i].name); 205 (drv->channel_types[i].name);
205 i++) 206 i++)
206 if (uuid_le_cmp(drv->channel_types[i].guid, 207 if (uuid_le_cmp(drv->channel_types[i].guid,
207 channel_type) == 0) { 208 channel_type) == 0)
208 rc = i + 1; 209 return i + 1;
209 goto away; 210
210 } 211 return 0;
211away:
212 return rc;
213} 212}
214 213
215/** This is called when device_unregister() is called for the bus device 214/** This is called when device_unregister() is called for the bus device
@@ -243,180 +242,6 @@ visorbus_release_device(struct device *xdev)
243 kfree(dev); 242 kfree(dev);
244} 243}
245 244
246/* Implement publishing of device node attributes under:
247 *
248 * /sys/bus/visorbus<x>/dev<y>/devmajorminor
249 *
250 */
251
252#define to_devmajorminor_attr(_attr) \
253 container_of(_attr, struct devmajorminor_attribute, attr)
254#define to_visor_device_from_kobjdevmajorminor(obj) \
255 container_of(obj, struct visor_device, kobjdevmajorminor)
256
257struct devmajorminor_attribute {
258 struct attribute attr;
259 int slot;
260 ssize_t (*show)(struct visor_device *, int slot, char *buf);
261 ssize_t (*store)(struct visor_device *, int slot, const char *buf,
262 size_t count);
263};
264
265static ssize_t DEVMAJORMINOR_ATTR(struct visor_device *dev, int slot, char *buf)
266{
267 int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
268
269 if (slot < 0 || slot >= maxdevnodes)
270 return 0;
271 return snprintf(buf, PAGE_SIZE, "%d:%d\n",
272 dev->devnodes[slot].major, dev->devnodes[slot].minor);
273}
274
275static ssize_t
276devmajorminor_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
277{
278 struct devmajorminor_attribute *devmajorminor_attr =
279 to_devmajorminor_attr(attr);
280 struct visor_device *dev = to_visor_device_from_kobjdevmajorminor(kobj);
281 ssize_t ret = 0;
282
283 if (devmajorminor_attr->show)
284 ret = devmajorminor_attr->show(dev,
285 devmajorminor_attr->slot, buf);
286 return ret;
287}
288
289static ssize_t
290devmajorminor_attr_store(struct kobject *kobj,
291 struct attribute *attr, const char *buf, size_t count)
292{
293 struct devmajorminor_attribute *devmajorminor_attr =
294 to_devmajorminor_attr(attr);
295 struct visor_device *dev = to_visor_device_from_kobjdevmajorminor(kobj);
296 ssize_t ret = 0;
297
298 if (devmajorminor_attr->store)
299 ret = devmajorminor_attr->store(dev,
300 devmajorminor_attr->slot,
301 buf, count);
302 return ret;
303}
304
305static int register_devmajorminor_attributes(struct visor_device *dev);
306
307static int
308devmajorminor_create_file(struct visor_device *dev, const char *name,
309 int major, int minor)
310{
311 int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
312 struct devmajorminor_attribute *myattr = NULL;
313 int x = -1, rc = 0, slot = -1;
314
315 register_devmajorminor_attributes(dev);
316 for (slot = 0; slot < maxdevnodes; slot++)
317 if (!dev->devnodes[slot].attr)
318 break;
319 if (slot == maxdevnodes) {
320 rc = -ENOMEM;
321 goto away;
322 }
323 myattr = kzalloc(sizeof(*myattr), GFP_KERNEL);
324 if (!myattr) {
325 rc = -ENOMEM;
326 goto away;
327 }
328 myattr->show = DEVMAJORMINOR_ATTR;
329 myattr->store = NULL;
330 myattr->slot = slot;
331 myattr->attr.name = name;
332 myattr->attr.mode = S_IRUGO;
333 dev->devnodes[slot].attr = myattr;
334 dev->devnodes[slot].major = major;
335 dev->devnodes[slot].minor = minor;
336 x = sysfs_create_file(&dev->kobjdevmajorminor, &myattr->attr);
337 if (x < 0) {
338 rc = x;
339 goto away;
340 }
341 kobject_uevent(&dev->device.kobj, KOBJ_ONLINE);
342away:
343 if (rc < 0) {
344 kfree(myattr);
345 myattr = NULL;
346 dev->devnodes[slot].attr = NULL;
347 }
348 return rc;
349}
350
351static void
352devmajorminor_remove_file(struct visor_device *dev, int slot)
353{
354 int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
355 struct devmajorminor_attribute *myattr = NULL;
356
357 if (slot < 0 || slot >= maxdevnodes)
358 return;
359 myattr = (struct devmajorminor_attribute *)(dev->devnodes[slot].attr);
360 if (!myattr)
361 return;
362 sysfs_remove_file(&dev->kobjdevmajorminor, &myattr->attr);
363 kobject_uevent(&dev->device.kobj, KOBJ_OFFLINE);
364 dev->devnodes[slot].attr = NULL;
365 kfree(myattr);
366}
367
368static void
369devmajorminor_remove_all_files(struct visor_device *dev)
370{
371 int i = 0;
372 int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
373
374 for (i = 0; i < maxdevnodes; i++)
375 devmajorminor_remove_file(dev, i);
376}
377
378static const struct sysfs_ops devmajorminor_sysfs_ops = {
379 .show = devmajorminor_attr_show,
380 .store = devmajorminor_attr_store,
381};
382
383static struct kobj_type devmajorminor_kobj_type = {
384 .sysfs_ops = &devmajorminor_sysfs_ops
385};
386
387static int
388register_devmajorminor_attributes(struct visor_device *dev)
389{
390 int rc = 0, x = 0;
391
392 if (dev->kobjdevmajorminor.parent)
393 goto away; /* already registered */
394 x = kobject_init_and_add(&dev->kobjdevmajorminor,
395 &devmajorminor_kobj_type, &dev->device.kobj,
396 "devmajorminor");
397 if (x < 0) {
398 rc = x;
399 goto away;
400 }
401
402 kobject_uevent(&dev->kobjdevmajorminor, KOBJ_ADD);
403
404away:
405 return rc;
406}
407
408static void
409unregister_devmajorminor_attributes(struct visor_device *dev)
410{
411 if (!dev->kobjdevmajorminor.parent)
412 return; /* already unregistered */
413 devmajorminor_remove_all_files(dev);
414
415 kobject_del(&dev->kobjdevmajorminor);
416 kobject_put(&dev->kobjdevmajorminor);
417 dev->kobjdevmajorminor.parent = NULL;
418}
419
420/* begin implementation of specific channel attributes to appear under 245/* begin implementation of specific channel attributes to appear under
421* /sys/bus/visorbus<x>/dev<y>/channel 246* /sys/bus/visorbus<x>/dev<y>/channel
422*/ 247*/
@@ -427,7 +252,7 @@ static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
427 252
428 if (!vdev->visorchannel) 253 if (!vdev->visorchannel)
429 return 0; 254 return 0;
430 return snprintf(buf, PAGE_SIZE, "0x%Lx\n", 255 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
431 visorchannel_get_physaddr(vdev->visorchannel)); 256 visorchannel_get_physaddr(vdev->visorchannel));
432} 257}
433 258
@@ -449,7 +274,7 @@ static ssize_t clientpartition_show(struct device *dev,
449 274
450 if (!vdev->visorchannel) 275 if (!vdev->visorchannel)
451 return 0; 276 return 0;
452 return snprintf(buf, PAGE_SIZE, "0x%Lx\n", 277 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
453 visorchannel_get_clientpartition(vdev->visorchannel)); 278 visorchannel_get_clientpartition(vdev->visorchannel));
454} 279}
455 280
@@ -457,24 +282,24 @@ static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
457 char *buf) 282 char *buf)
458{ 283{
459 struct visor_device *vdev = to_visor_device(dev); 284 struct visor_device *vdev = to_visor_device(dev);
460 char s[99]; 285 char typeid[LINESIZE];
461 286
462 if (!vdev->visorchannel) 287 if (!vdev->visorchannel)
463 return 0; 288 return 0;
464 return snprintf(buf, PAGE_SIZE, "%s\n", 289 return snprintf(buf, PAGE_SIZE, "%s\n",
465 visorchannel_id(vdev->visorchannel, s)); 290 visorchannel_id(vdev->visorchannel, typeid));
466} 291}
467 292
468static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr, 293static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
469 char *buf) 294 char *buf)
470{ 295{
471 struct visor_device *vdev = to_visor_device(dev); 296 struct visor_device *vdev = to_visor_device(dev);
472 char s[99]; 297 char zoneid[LINESIZE];
473 298
474 if (!vdev->visorchannel) 299 if (!vdev->visorchannel)
475 return 0; 300 return 0;
476 return snprintf(buf, PAGE_SIZE, "%s\n", 301 return snprintf(buf, PAGE_SIZE, "%s\n",
477 visorchannel_zoneid(vdev->visorchannel, s)); 302 visorchannel_zoneid(vdev->visorchannel, zoneid));
478} 303}
479 304
480static ssize_t typename_show(struct device *dev, struct device_attribute *attr, 305static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
@@ -541,7 +366,7 @@ static ssize_t partition_handle_show(struct device *dev,
541 struct visor_device *vdev = to_visor_device(dev); 366 struct visor_device *vdev = to_visor_device(dev);
542 u64 handle = visorchannel_get_clientpartition(vdev->visorchannel); 367 u64 handle = visorchannel_get_clientpartition(vdev->visorchannel);
543 368
544 return snprintf(buf, PAGE_SIZE, "0x%Lx\n", handle); 369 return snprintf(buf, PAGE_SIZE, "0x%llx\n", handle);
545} 370}
546 371
547static ssize_t partition_guid_show(struct device *dev, 372static ssize_t partition_guid_show(struct device *dev,
@@ -566,7 +391,7 @@ static ssize_t channel_addr_show(struct device *dev,
566 struct visor_device *vdev = to_visor_device(dev); 391 struct visor_device *vdev = to_visor_device(dev);
567 u64 addr = visorchannel_get_physaddr(vdev->visorchannel); 392 u64 addr = visorchannel_get_physaddr(vdev->visorchannel);
568 393
569 return snprintf(buf, PAGE_SIZE, "0x%Lx\n", addr); 394 return snprintf(buf, PAGE_SIZE, "0x%llx\n", addr);
570} 395}
571 396
572static ssize_t channel_bytes_show(struct device *dev, 397static ssize_t channel_bytes_show(struct device *dev,
@@ -575,7 +400,7 @@ static ssize_t channel_bytes_show(struct device *dev,
575 struct visor_device *vdev = to_visor_device(dev); 400 struct visor_device *vdev = to_visor_device(dev);
576 u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel); 401 u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel);
577 402
578 return snprintf(buf, PAGE_SIZE, "0x%Lx\n", nbytes); 403 return snprintf(buf, PAGE_SIZE, "0x%llx\n", nbytes);
579} 404}
580 405
581static ssize_t channel_id_show(struct device *dev, 406static ssize_t channel_id_show(struct device *dev,
@@ -598,9 +423,9 @@ static ssize_t client_bus_info_show(struct device *dev,
598 struct visor_device *vdev = to_visor_device(dev); 423 struct visor_device *vdev = to_visor_device(dev);
599 struct visorchannel *channel = vdev->visorchannel; 424 struct visorchannel *channel = vdev->visorchannel;
600 425
601 int i, x, remain = PAGE_SIZE; 426 int i, shift, remain = PAGE_SIZE;
602 unsigned long off; 427 unsigned long off;
603 char *p = buf; 428 char *pos = buf;
604 u8 *partition_name; 429 u8 *partition_name;
605 struct ultra_vbus_deviceinfo dev_info; 430 struct ultra_vbus_deviceinfo dev_info;
606 431
@@ -608,44 +433,45 @@ static ssize_t client_bus_info_show(struct device *dev,
608 if (channel) { 433 if (channel) {
609 if (vdev->name) 434 if (vdev->name)
610 partition_name = vdev->name; 435 partition_name = vdev->name;
611 x = snprintf(p, remain, 436 shift = snprintf(pos, remain,
612 "Client device / client driver info for %s partition (vbus #%d):\n", 437 "Client device / client driver info for %s eartition (vbus #%d):\n",
613 partition_name, vdev->chipset_dev_no); 438 partition_name, vdev->chipset_dev_no);
614 p += x; 439 pos += shift;
615 remain -= x; 440 remain -= shift;
616 x = visorchannel_read(channel, 441 shift = visorchannel_read(channel,
617 offsetof(struct 442 offsetof(struct
618 spar_vbus_channel_protocol, 443 spar_vbus_channel_protocol,
619 chp_info), 444 chp_info),
620 &dev_info, sizeof(dev_info)); 445 &dev_info, sizeof(dev_info));
621 if (x >= 0) { 446 if (shift >= 0) {
622 x = vbuschannel_devinfo_to_string(&dev_info, p, 447 shift = vbuschannel_devinfo_to_string(&dev_info, pos,
623 remain, -1); 448 remain, -1);
624 p += x; 449 pos += shift;
625 remain -= x; 450 remain -= shift;
626 } 451 }
627 x = visorchannel_read(channel, 452 shift = visorchannel_read(channel,
628 offsetof(struct 453 offsetof(struct
629 spar_vbus_channel_protocol, 454 spar_vbus_channel_protocol,
630 bus_info), 455 bus_info),
631 &dev_info, sizeof(dev_info)); 456 &dev_info, sizeof(dev_info));
632 if (x >= 0) { 457 if (shift >= 0) {
633 x = vbuschannel_devinfo_to_string(&dev_info, p, 458 shift = vbuschannel_devinfo_to_string(&dev_info, pos,
634 remain, -1); 459 remain, -1);
635 p += x; 460 pos += shift;
636 remain -= x; 461 remain -= shift;
637 } 462 }
638 off = offsetof(struct spar_vbus_channel_protocol, dev_info); 463 off = offsetof(struct spar_vbus_channel_protocol, dev_info);
639 i = 0; 464 i = 0;
640 while (off + sizeof(dev_info) <= 465 while (off + sizeof(dev_info) <=
641 visorchannel_get_nbytes(channel)) { 466 visorchannel_get_nbytes(channel)) {
642 x = visorchannel_read(channel, 467 shift = visorchannel_read(channel,
643 off, &dev_info, sizeof(dev_info)); 468 off, &dev_info,
644 if (x >= 0) { 469 sizeof(dev_info));
645 x = vbuschannel_devinfo_to_string 470 if (shift >= 0) {
646 (&dev_info, p, remain, i); 471 shift = vbuschannel_devinfo_to_string
647 p += x; 472 (&dev_info, pos, remain, i);
648 remain -= x; 473 pos += shift;
474 remain -= shift;
649 } 475 }
650 off += sizeof(dev_info); 476 off += sizeof(dev_info);
651 i++; 477 i++;
@@ -752,36 +578,28 @@ dev_stop_periodic_work(struct visor_device *dev)
752static int 578static int
753visordriver_probe_device(struct device *xdev) 579visordriver_probe_device(struct device *xdev)
754{ 580{
755 int rc; 581 int res;
756 struct visor_driver *drv; 582 struct visor_driver *drv;
757 struct visor_device *dev; 583 struct visor_device *dev;
758 584
759 drv = to_visor_driver(xdev->driver); 585 drv = to_visor_driver(xdev->driver);
760 dev = to_visor_device(xdev); 586 dev = to_visor_device(xdev);
587
588 if (!drv->probe)
589 return -ENODEV;
590
761 down(&dev->visordriver_callback_lock); 591 down(&dev->visordriver_callback_lock);
762 dev->being_removed = false; 592 dev->being_removed = false;
763 /* 593
764 * ensure that the dev->being_removed flag is cleared before 594 res = drv->probe(dev);
765 * we start the probe 595 if (res >= 0) {
766 */ 596 /* success: reference kept via unmatched get_device() */
767 wmb(); 597 get_device(&dev->device);
768 get_device(&dev->device); 598 fix_vbus_dev_info(dev);
769 if (!drv->probe) {
770 up(&dev->visordriver_callback_lock);
771 rc = -ENODEV;
772 goto away;
773 } 599 }
774 rc = drv->probe(dev);
775 if (rc < 0)
776 goto away;
777 600
778 fix_vbus_dev_info(dev);
779 up(&dev->visordriver_callback_lock); 601 up(&dev->visordriver_callback_lock);
780 rc = 0; 602 return res;
781away:
782 if (rc != 0)
783 put_device(&dev->device);
784 return rc;
785} 603}
786 604
787/** This is called when device_unregister() is called for each child device 605/** This is called when device_unregister() is called for each child device
@@ -798,21 +616,12 @@ visordriver_remove_device(struct device *xdev)
798 drv = to_visor_driver(xdev->driver); 616 drv = to_visor_driver(xdev->driver);
799 down(&dev->visordriver_callback_lock); 617 down(&dev->visordriver_callback_lock);
800 dev->being_removed = true; 618 dev->being_removed = true;
801 /* 619 if (drv->remove)
802 * ensure that the dev->being_removed flag is set before we start the 620 drv->remove(dev);
803 * actual removal
804 */
805 wmb();
806 if (drv) {
807 if (drv->remove)
808 drv->remove(dev);
809 }
810 up(&dev->visordriver_callback_lock); 621 up(&dev->visordriver_callback_lock);
811 dev_stop_periodic_work(dev); 622 dev_stop_periodic_work(dev);
812 devmajorminor_remove_all_files(dev);
813 623
814 put_device(&dev->device); 624 put_device(&dev->device);
815
816 return 0; 625 return 0;
817} 626}
818 627
@@ -928,14 +737,6 @@ visorbus_clear_channel(struct visor_device *dev, unsigned long offset, u8 ch,
928} 737}
929EXPORT_SYMBOL_GPL(visorbus_clear_channel); 738EXPORT_SYMBOL_GPL(visorbus_clear_channel);
930 739
931int
932visorbus_registerdevnode(struct visor_device *dev,
933 const char *name, int major, int minor)
934{
935 return devmajorminor_create_file(dev, name, major, minor);
936}
937EXPORT_SYMBOL_GPL(visorbus_registerdevnode);
938
939/** We don't really have a real interrupt, so for now we just call the 740/** We don't really have a real interrupt, so for now we just call the
940 * interrupt function periodically... 741 * interrupt function periodically...
941 */ 742 */
@@ -970,7 +771,7 @@ EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
970static int 771static int
971create_visor_device(struct visor_device *dev) 772create_visor_device(struct visor_device *dev)
972{ 773{
973 int rc; 774 int err;
974 u32 chipset_bus_no = dev->chipset_bus_no; 775 u32 chipset_bus_no = dev->chipset_bus_no;
975 u32 chipset_dev_no = dev->chipset_dev_no; 776 u32 chipset_dev_no = dev->chipset_dev_no;
976 777
@@ -992,8 +793,8 @@ create_visor_device(struct visor_device *dev)
992 if (!dev->periodic_work) { 793 if (!dev->periodic_work) {
993 POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, chipset_dev_no, 794 POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, chipset_dev_no,
994 DIAG_SEVERITY_ERR); 795 DIAG_SEVERITY_ERR);
995 rc = -EINVAL; 796 err = -EINVAL;
996 goto away; 797 goto err_put;
997 } 798 }
998 799
999 /* bus_id must be a unique name with respect to this bus TYPE 800 /* bus_id must be a unique name with respect to this bus TYPE
@@ -1019,36 +820,25 @@ create_visor_device(struct visor_device *dev)
1019 * claim the device. The device will be linked onto 820 * claim the device. The device will be linked onto
1020 * bus_type.klist_devices regardless (use bus_for_each_dev). 821 * bus_type.klist_devices regardless (use bus_for_each_dev).
1021 */ 822 */
1022 rc = device_add(&dev->device); 823 err = device_add(&dev->device);
1023 if (rc < 0) { 824 if (err < 0) {
1024 POSTCODE_LINUX_3(DEVICE_ADD_PC, chipset_bus_no, 825 POSTCODE_LINUX_3(DEVICE_ADD_PC, chipset_bus_no,
1025 DIAG_SEVERITY_ERR); 826 DIAG_SEVERITY_ERR);
1026 goto away; 827 goto err_put;
1027 }
1028
1029 rc = register_devmajorminor_attributes(dev);
1030 if (rc < 0) {
1031 POSTCODE_LINUX_3(DEVICE_REGISTER_FAILURE_PC, chipset_dev_no,
1032 DIAG_SEVERITY_ERR);
1033 goto away_unregister;
1034 } 828 }
1035 829
1036 list_add_tail(&dev->list_all, &list_all_device_instances); 830 list_add_tail(&dev->list_all, &list_all_device_instances);
1037 return 0; 831 return 0; /* success: reference kept via unmatched get_device() */
1038
1039away_unregister:
1040 device_unregister(&dev->device);
1041 832
1042away: 833err_put:
1043 put_device(&dev->device); 834 put_device(&dev->device);
1044 return rc; 835 return err;
1045} 836}
1046 837
1047static void 838static void
1048remove_visor_device(struct visor_device *dev) 839remove_visor_device(struct visor_device *dev)
1049{ 840{
1050 list_del(&dev->list_all); 841 list_del(&dev->list_all);
1051 unregister_devmajorminor_attributes(dev);
1052 put_device(&dev->device); 842 put_device(&dev->device);
1053 device_unregister(&dev->device); 843 device_unregister(&dev->device);
1054} 844}
@@ -1477,24 +1267,24 @@ struct channel_size_info {
1477int 1267int
1478visorbus_init(void) 1268visorbus_init(void)
1479{ 1269{
1480 int rc = 0; 1270 int err;
1481 1271
1482 POSTCODE_LINUX_3(DRIVER_ENTRY_PC, rc, POSTCODE_SEVERITY_INFO); 1272 POSTCODE_LINUX_3(DRIVER_ENTRY_PC, 0, POSTCODE_SEVERITY_INFO);
1483 bus_device_info_init(&clientbus_driverinfo, 1273 bus_device_info_init(&clientbus_driverinfo,
1484 "clientbus", "visorbus", 1274 "clientbus", "visorbus",
1485 VERSION, NULL); 1275 VERSION, NULL);
1486 1276
1487 rc = create_bus_type(); 1277 err = create_bus_type();
1488 if (rc < 0) { 1278 if (err < 0) {
1489 POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, DIAG_SEVERITY_ERR); 1279 POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, DIAG_SEVERITY_ERR);
1490 goto away; 1280 goto error;
1491 } 1281 }
1492 1282
1493 periodic_dev_workqueue = create_singlethread_workqueue("visorbus_dev"); 1283 periodic_dev_workqueue = create_singlethread_workqueue("visorbus_dev");
1494 if (!periodic_dev_workqueue) { 1284 if (!periodic_dev_workqueue) {
1495 POSTCODE_LINUX_2(CREATE_WORKQUEUE_PC, DIAG_SEVERITY_ERR); 1285 POSTCODE_LINUX_2(CREATE_WORKQUEUE_PC, DIAG_SEVERITY_ERR);
1496 rc = -ENOMEM; 1286 err = -ENOMEM;
1497 goto away; 1287 goto error;
1498 } 1288 }
1499 1289
1500 /* This enables us to receive notifications when devices appear for 1290 /* This enables us to receive notifications when devices appear for
@@ -1504,13 +1294,11 @@ visorbus_init(void)
1504 &chipset_responders, 1294 &chipset_responders,
1505 &chipset_driverinfo); 1295 &chipset_driverinfo);
1506 1296
1507 rc = 0; 1297 return 0;
1508 1298
1509away: 1299error:
1510 if (rc) 1300 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
1511 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc, 1301 return err;
1512 POSTCODE_SEVERITY_ERR);
1513 return rc;
1514} 1302}
1515 1303
1516void 1304void
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index b68a904ac617..43373582cf1d 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -40,7 +40,6 @@ struct visorchannel {
40 bool requested; 40 bool requested;
41 struct channel_header chan_hdr; 41 struct channel_header chan_hdr;
42 uuid_le guid; 42 uuid_le guid;
43 ulong size;
44 bool needs_lock; /* channel creator knows if more than one */ 43 bool needs_lock; /* channel creator knows if more than one */
45 /* thread will be inserting or removing */ 44 /* thread will be inserting or removing */
46 spinlock_t insert_lock; /* protect head writes in chan_hdr */ 45 spinlock_t insert_lock; /* protect head writes in chan_hdr */
@@ -134,8 +133,6 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
134 } 133 }
135 134
136 channel->nbytes = channel_bytes; 135 channel->nbytes = channel_bytes;
137
138 channel->size = channel_bytes;
139 channel->guid = guid; 136 channel->guid = guid;
140 return channel; 137 return channel;
141 138
@@ -186,7 +183,7 @@ EXPORT_SYMBOL_GPL(visorchannel_get_physaddr);
186ulong 183ulong
187visorchannel_get_nbytes(struct visorchannel *channel) 184visorchannel_get_nbytes(struct visorchannel *channel)
188{ 185{
189 return channel->size; 186 return channel->nbytes;
190} 187}
191EXPORT_SYMBOL_GPL(visorchannel_get_nbytes); 188EXPORT_SYMBOL_GPL(visorchannel_get_nbytes);
192 189
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 9cf4f8463c4e..5ba5936e2203 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -59,14 +59,13 @@
59 */ 59 */
60static int visorchipset_major; 60static int visorchipset_major;
61static int visorchipset_visorbusregwait = 1; /* default is on */ 61static int visorchipset_visorbusregwait = 1; /* default is on */
62static int visorchipset_holdchipsetready;
63static unsigned long controlvm_payload_bytes_buffered; 62static unsigned long controlvm_payload_bytes_buffered;
64static u32 dump_vhba_bus; 63static u32 dump_vhba_bus;
65 64
66static int 65static int
67visorchipset_open(struct inode *inode, struct file *file) 66visorchipset_open(struct inode *inode, struct file *file)
68{ 67{
69 unsigned minor_number = iminor(inode); 68 unsigned int minor_number = iminor(inode);
70 69
71 if (minor_number) 70 if (minor_number)
72 return -ENODEV; 71 return -ENODEV;
@@ -90,9 +89,6 @@ static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
90static unsigned long most_recent_message_jiffies; 89static unsigned long most_recent_message_jiffies;
91static int visorbusregistered; 90static int visorbusregistered;
92 91
93#define MAX_CHIPSET_EVENTS 2
94static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
95
96struct parser_context { 92struct parser_context {
97 unsigned long allocbytes; 93 unsigned long allocbytes;
98 unsigned long param_bytes; 94 unsigned long param_bytes;
@@ -107,7 +103,6 @@ static DEFINE_SEMAPHORE(notifier_lock);
107 103
108static struct cdev file_cdev; 104static struct cdev file_cdev;
109static struct visorchannel **file_controlvm_channel; 105static struct visorchannel **file_controlvm_channel;
110static struct controlvm_message_header g_chipset_msg_hdr;
111static struct controlvm_message_packet g_devicechangestate_packet; 106static struct controlvm_message_packet g_devicechangestate_packet;
112 107
113static LIST_HEAD(bus_info_list); 108static LIST_HEAD(bus_info_list);
@@ -156,8 +151,6 @@ struct putfile_active_buffer {
156 /* a payload from a controlvm message, containing a file data buffer */ 151 /* a payload from a controlvm message, containing a file data buffer */
157 struct parser_context *parser_ctx; 152 struct parser_context *parser_ctx;
158 /* points within data area of parser_ctx to next byte of data */ 153 /* points within data area of parser_ctx to next byte of data */
159 u8 *pnext;
160 /* # bytes left from <pnext> to the end of this data buffer */
161 size_t bytes_remaining; 154 size_t bytes_remaining;
162}; 155};
163 156
@@ -171,14 +164,10 @@ struct putfile_request {
171 164
172 /* header from original TransmitFile request */ 165 /* header from original TransmitFile request */
173 struct controlvm_message_header controlvm_header; 166 struct controlvm_message_header controlvm_header;
174 u64 file_request_number; /* from original TransmitFile request */
175 167
176 /* link to next struct putfile_request */ 168 /* link to next struct putfile_request */
177 struct list_head next_putfile_request; 169 struct list_head next_putfile_request;
178 170
179 /* most-recent sequence number supplied via a controlvm message */
180 u64 data_sequence_number;
181
182 /* head of putfile_buffer_entry list, which describes the data to be 171 /* head of putfile_buffer_entry list, which describes the data to be
183 * supplied as putfile data; 172 * supplied as putfile data;
184 * - this list is added to when controlvm messages come in that supply 173 * - this list is added to when controlvm messages come in that supply
@@ -274,11 +263,6 @@ static ssize_t remaining_steps_store(struct device *dev,
274 const char *buf, size_t count); 263 const char *buf, size_t count);
275static DEVICE_ATTR_RW(remaining_steps); 264static DEVICE_ATTR_RW(remaining_steps);
276 265
277static ssize_t chipsetready_store(struct device *dev,
278 struct device_attribute *attr,
279 const char *buf, size_t count);
280static DEVICE_ATTR_WO(chipsetready);
281
282static ssize_t devicedisabled_store(struct device *dev, 266static ssize_t devicedisabled_store(struct device *dev,
283 struct device_attribute *attr, 267 struct device_attribute *attr,
284 const char *buf, size_t count); 268 const char *buf, size_t count);
@@ -303,16 +287,6 @@ static struct attribute_group visorchipset_install_group = {
303 .attrs = visorchipset_install_attrs 287 .attrs = visorchipset_install_attrs
304}; 288};
305 289
306static struct attribute *visorchipset_guest_attrs[] = {
307 &dev_attr_chipsetready.attr,
308 NULL
309};
310
311static struct attribute_group visorchipset_guest_group = {
312 .name = "guest",
313 .attrs = visorchipset_guest_attrs
314};
315
316static struct attribute *visorchipset_parahotplug_attrs[] = { 290static struct attribute *visorchipset_parahotplug_attrs[] = {
317 &dev_attr_devicedisabled.attr, 291 &dev_attr_devicedisabled.attr,
318 &dev_attr_deviceenabled.attr, 292 &dev_attr_deviceenabled.attr,
@@ -326,7 +300,6 @@ static struct attribute_group visorchipset_parahotplug_group = {
326 300
327static const struct attribute_group *visorchipset_dev_groups[] = { 301static const struct attribute_group *visorchipset_dev_groups[] = {
328 &visorchipset_install_group, 302 &visorchipset_install_group,
329 &visorchipset_guest_group,
330 &visorchipset_parahotplug_group, 303 &visorchipset_parahotplug_group,
331 NULL 304 NULL
332}; 305};
@@ -359,8 +332,7 @@ static struct parser_context *
359parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry) 332parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
360{ 333{
361 int allocbytes = sizeof(struct parser_context) + bytes; 334 int allocbytes = sizeof(struct parser_context) + bytes;
362 struct parser_context *rc = NULL; 335 struct parser_context *ctx;
363 struct parser_context *ctx = NULL;
364 336
365 if (retry) 337 if (retry)
366 *retry = false; 338 *retry = false;
@@ -374,15 +346,13 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
374 > MAX_CONTROLVM_PAYLOAD_BYTES) { 346 > MAX_CONTROLVM_PAYLOAD_BYTES) {
375 if (retry) 347 if (retry)
376 *retry = true; 348 *retry = true;
377 rc = NULL; 349 return NULL;
378 goto cleanup;
379 } 350 }
380 ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY); 351 ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
381 if (!ctx) { 352 if (!ctx) {
382 if (retry) 353 if (retry)
383 *retry = true; 354 *retry = true;
384 rc = NULL; 355 return NULL;
385 goto cleanup;
386 } 356 }
387 357
388 ctx->allocbytes = allocbytes; 358 ctx->allocbytes = allocbytes;
@@ -393,35 +363,27 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
393 if (local) { 363 if (local) {
394 void *p; 364 void *p;
395 365
396 if (addr > virt_to_phys(high_memory - 1)) { 366 if (addr > virt_to_phys(high_memory - 1))
397 rc = NULL; 367 goto err_finish_ctx;
398 goto cleanup;
399 }
400 p = __va((unsigned long)(addr)); 368 p = __va((unsigned long)(addr));
401 memcpy(ctx->data, p, bytes); 369 memcpy(ctx->data, p, bytes);
402 } else { 370 } else {
403 void *mapping = memremap(addr, bytes, MEMREMAP_WB); 371 void *mapping = memremap(addr, bytes, MEMREMAP_WB);
404 372
405 if (!mapping) { 373 if (!mapping)
406 rc = NULL; 374 goto err_finish_ctx;
407 goto cleanup;
408 }
409 memcpy(ctx->data, mapping, bytes); 375 memcpy(ctx->data, mapping, bytes);
410 memunmap(mapping); 376 memunmap(mapping);
411 } 377 }
412 378
413 ctx->byte_stream = true; 379 ctx->byte_stream = true;
414 rc = ctx; 380 controlvm_payload_bytes_buffered += ctx->param_bytes;
415cleanup: 381
416 if (rc) { 382 return ctx;
417 controlvm_payload_bytes_buffered += ctx->param_bytes; 383
418 } else { 384err_finish_ctx:
419 if (ctx) { 385 parser_done(ctx);
420 parser_done(ctx); 386 return NULL;
421 ctx = NULL;
422 }
423 }
424 return rc;
425} 387}
426 388
427static uuid_le 389static uuid_le
@@ -523,7 +485,7 @@ static ssize_t toolaction_show(struct device *dev,
523 struct device_attribute *attr, 485 struct device_attribute *attr,
524 char *buf) 486 char *buf)
525{ 487{
526 u8 tool_action; 488 u8 tool_action = 0;
527 489
528 visorchannel_read(controlvm_channel, 490 visorchannel_read(controlvm_channel,
529 offsetof(struct spar_controlvm_channel_protocol, 491 offsetof(struct spar_controlvm_channel_protocol,
@@ -541,10 +503,11 @@ static ssize_t toolaction_store(struct device *dev,
541 if (kstrtou8(buf, 10, &tool_action)) 503 if (kstrtou8(buf, 10, &tool_action))
542 return -EINVAL; 504 return -EINVAL;
543 505
544 ret = visorchannel_write(controlvm_channel, 506 ret = visorchannel_write
545 offsetof(struct spar_controlvm_channel_protocol, 507 (controlvm_channel,
546 tool_action), 508 offsetof(struct spar_controlvm_channel_protocol,
547 &tool_action, sizeof(u8)); 509 tool_action),
510 &tool_action, sizeof(u8));
548 511
549 if (ret) 512 if (ret)
550 return ret; 513 return ret;
@@ -576,10 +539,11 @@ static ssize_t boottotool_store(struct device *dev,
576 return -EINVAL; 539 return -EINVAL;
577 540
578 efi_spar_indication.boot_to_tool = val; 541 efi_spar_indication.boot_to_tool = val;
579 ret = visorchannel_write(controlvm_channel, 542 ret = visorchannel_write
580 offsetof(struct spar_controlvm_channel_protocol, 543 (controlvm_channel,
581 efi_spar_ind), &(efi_spar_indication), 544 offsetof(struct spar_controlvm_channel_protocol,
582 sizeof(struct efi_spar_indication)); 545 efi_spar_ind), &(efi_spar_indication),
546 sizeof(struct efi_spar_indication));
583 547
584 if (ret) 548 if (ret)
585 return ret; 549 return ret;
@@ -589,7 +553,7 @@ static ssize_t boottotool_store(struct device *dev,
589static ssize_t error_show(struct device *dev, struct device_attribute *attr, 553static ssize_t error_show(struct device *dev, struct device_attribute *attr,
590 char *buf) 554 char *buf)
591{ 555{
592 u32 error; 556 u32 error = 0;
593 557
594 visorchannel_read(controlvm_channel, 558 visorchannel_read(controlvm_channel,
595 offsetof(struct spar_controlvm_channel_protocol, 559 offsetof(struct spar_controlvm_channel_protocol,
@@ -607,10 +571,11 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
607 if (kstrtou32(buf, 10, &error)) 571 if (kstrtou32(buf, 10, &error))
608 return -EINVAL; 572 return -EINVAL;
609 573
610 ret = visorchannel_write(controlvm_channel, 574 ret = visorchannel_write
611 offsetof(struct spar_controlvm_channel_protocol, 575 (controlvm_channel,
612 installation_error), 576 offsetof(struct spar_controlvm_channel_protocol,
613 &error, sizeof(u32)); 577 installation_error),
578 &error, sizeof(u32));
614 if (ret) 579 if (ret)
615 return ret; 580 return ret;
616 return count; 581 return count;
@@ -619,12 +584,13 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
619static ssize_t textid_show(struct device *dev, struct device_attribute *attr, 584static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
620 char *buf) 585 char *buf)
621{ 586{
622 u32 text_id; 587 u32 text_id = 0;
623 588
624 visorchannel_read(controlvm_channel, 589 visorchannel_read
625 offsetof(struct spar_controlvm_channel_protocol, 590 (controlvm_channel,
626 installation_text_id), 591 offsetof(struct spar_controlvm_channel_protocol,
627 &text_id, sizeof(u32)); 592 installation_text_id),
593 &text_id, sizeof(u32));
628 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id); 594 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
629} 595}
630 596
@@ -637,10 +603,11 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
637 if (kstrtou32(buf, 10, &text_id)) 603 if (kstrtou32(buf, 10, &text_id))
638 return -EINVAL; 604 return -EINVAL;
639 605
640 ret = visorchannel_write(controlvm_channel, 606 ret = visorchannel_write
641 offsetof(struct spar_controlvm_channel_protocol, 607 (controlvm_channel,
642 installation_text_id), 608 offsetof(struct spar_controlvm_channel_protocol,
643 &text_id, sizeof(u32)); 609 installation_text_id),
610 &text_id, sizeof(u32));
644 if (ret) 611 if (ret)
645 return ret; 612 return ret;
646 return count; 613 return count;
@@ -649,7 +616,7 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
649static ssize_t remaining_steps_show(struct device *dev, 616static ssize_t remaining_steps_show(struct device *dev,
650 struct device_attribute *attr, char *buf) 617 struct device_attribute *attr, char *buf)
651{ 618{
652 u16 remaining_steps; 619 u16 remaining_steps = 0;
653 620
654 visorchannel_read(controlvm_channel, 621 visorchannel_read(controlvm_channel,
655 offsetof(struct spar_controlvm_channel_protocol, 622 offsetof(struct spar_controlvm_channel_protocol,
@@ -668,10 +635,11 @@ static ssize_t remaining_steps_store(struct device *dev,
668 if (kstrtou16(buf, 10, &remaining_steps)) 635 if (kstrtou16(buf, 10, &remaining_steps))
669 return -EINVAL; 636 return -EINVAL;
670 637
671 ret = visorchannel_write(controlvm_channel, 638 ret = visorchannel_write
672 offsetof(struct spar_controlvm_channel_protocol, 639 (controlvm_channel,
673 installation_remaining_steps), 640 offsetof(struct spar_controlvm_channel_protocol,
674 &remaining_steps, sizeof(u16)); 641 installation_remaining_steps),
642 &remaining_steps, sizeof(u16));
675 if (ret) 643 if (ret)
676 return ret; 644 return ret;
677 return count; 645 return count;
@@ -717,26 +685,6 @@ struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
717} 685}
718EXPORT_SYMBOL(visorbus_get_device_by_id); 686EXPORT_SYMBOL(visorbus_get_device_by_id);
719 687
720static u8
721check_chipset_events(void)
722{
723 int i;
724 u8 send_msg = 1;
725 /* Check events to determine if response should be sent */
726 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
727 send_msg &= chipset_events[i];
728 return send_msg;
729}
730
731static void
732clear_chipset_events(void)
733{
734 int i;
735 /* Clear chipset_events */
736 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
737 chipset_events[i] = 0;
738}
739
740void 688void
741visorchipset_register_busdev( 689visorchipset_register_busdev(
742 struct visorchipset_busdev_notifiers *notifiers, 690 struct visorchipset_busdev_notifiers *notifiers,
@@ -772,7 +720,7 @@ chipset_init(struct controlvm_message *inmsg)
772 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO); 720 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
773 if (chipset_inited) { 721 if (chipset_inited) {
774 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; 722 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
775 goto cleanup; 723 goto out_respond;
776 } 724 }
777 chipset_inited = 1; 725 chipset_inited = 1;
778 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO); 726 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
@@ -789,7 +737,7 @@ chipset_init(struct controlvm_message *inmsg)
789 */ 737 */
790 features |= ULTRA_CHIPSET_FEATURE_REPLY; 738 features |= ULTRA_CHIPSET_FEATURE_REPLY;
791 739
792cleanup: 740out_respond:
793 if (inmsg->hdr.flags.response_expected) 741 if (inmsg->hdr.flags.response_expected)
794 controlvm_respond_chipset_init(&inmsg->hdr, rc, features); 742 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
795} 743}
@@ -970,28 +918,31 @@ bus_epilog(struct visor_device *bus_info,
970 u32 cmd, struct controlvm_message_header *msg_hdr, 918 u32 cmd, struct controlvm_message_header *msg_hdr,
971 int response, bool need_response) 919 int response, bool need_response)
972{ 920{
973 bool notified = false;
974 struct controlvm_message_header *pmsg_hdr = NULL; 921 struct controlvm_message_header *pmsg_hdr = NULL;
975 922
923 down(&notifier_lock);
924
976 if (!bus_info) { 925 if (!bus_info) {
977 /* relying on a valid passed in response code */ 926 /* relying on a valid passed in response code */
978 /* be lazy and re-use msg_hdr for this failure, is this ok?? */ 927 /* be lazy and re-use msg_hdr for this failure, is this ok?? */
979 pmsg_hdr = msg_hdr; 928 pmsg_hdr = msg_hdr;
980 goto away; 929 goto out_respond_and_unlock;
981 } 930 }
982 931
983 if (bus_info->pending_msg_hdr) { 932 if (bus_info->pending_msg_hdr) {
984 /* only non-NULL if dev is still waiting on a response */ 933 /* only non-NULL if dev is still waiting on a response */
985 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT; 934 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
986 pmsg_hdr = bus_info->pending_msg_hdr; 935 pmsg_hdr = bus_info->pending_msg_hdr;
987 goto away; 936 goto out_respond_and_unlock;
988 } 937 }
989 938
990 if (need_response) { 939 if (need_response) {
991 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); 940 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
992 if (!pmsg_hdr) { 941 if (!pmsg_hdr) {
993 response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; 942 POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
994 goto away; 943 bus_info->chipset_bus_no,
944 POSTCODE_SEVERITY_ERR);
945 goto out_unlock;
995 } 946 }
996 947
997 memcpy(pmsg_hdr, msg_hdr, 948 memcpy(pmsg_hdr, msg_hdr,
@@ -999,37 +950,27 @@ bus_epilog(struct visor_device *bus_info,
999 bus_info->pending_msg_hdr = pmsg_hdr; 950 bus_info->pending_msg_hdr = pmsg_hdr;
1000 } 951 }
1001 952
1002 down(&notifier_lock);
1003 if (response == CONTROLVM_RESP_SUCCESS) { 953 if (response == CONTROLVM_RESP_SUCCESS) {
1004 switch (cmd) { 954 switch (cmd) {
1005 case CONTROLVM_BUS_CREATE: 955 case CONTROLVM_BUS_CREATE:
1006 if (busdev_notifiers.bus_create) { 956 if (busdev_notifiers.bus_create) {
1007 (*busdev_notifiers.bus_create) (bus_info); 957 (*busdev_notifiers.bus_create) (bus_info);
1008 notified = true; 958 goto out_unlock;
1009 } 959 }
1010 break; 960 break;
1011 case CONTROLVM_BUS_DESTROY: 961 case CONTROLVM_BUS_DESTROY:
1012 if (busdev_notifiers.bus_destroy) { 962 if (busdev_notifiers.bus_destroy) {
1013 (*busdev_notifiers.bus_destroy) (bus_info); 963 (*busdev_notifiers.bus_destroy) (bus_info);
1014 notified = true; 964 goto out_unlock;
1015 } 965 }
1016 break; 966 break;
1017 } 967 }
1018 } 968 }
1019away: 969
1020 if (notified) 970out_respond_and_unlock:
1021 /* The callback function just called above is responsible 971 bus_responder(cmd, pmsg_hdr, response);
1022 * for calling the appropriate visorchipset_busdev_responders 972
1023 * function, which will call bus_responder() 973out_unlock:
1024 */
1025 ;
1026 else
1027 /*
1028 * Do not kfree(pmsg_hdr) as this is the failure path.
1029 * The success path ('notified') will call the responder
1030 * directly and kfree() there.
1031 */
1032 bus_responder(cmd, pmsg_hdr, response);
1033 up(&notifier_lock); 974 up(&notifier_lock);
1034} 975}
1035 976
@@ -1040,30 +981,30 @@ device_epilog(struct visor_device *dev_info,
1040 bool need_response, bool for_visorbus) 981 bool need_response, bool for_visorbus)
1041{ 982{
1042 struct visorchipset_busdev_notifiers *notifiers; 983 struct visorchipset_busdev_notifiers *notifiers;
1043 bool notified = false;
1044 struct controlvm_message_header *pmsg_hdr = NULL; 984 struct controlvm_message_header *pmsg_hdr = NULL;
1045 985
1046 notifiers = &busdev_notifiers; 986 notifiers = &busdev_notifiers;
1047 987
988 down(&notifier_lock);
1048 if (!dev_info) { 989 if (!dev_info) {
1049 /* relying on a valid passed in response code */ 990 /* relying on a valid passed in response code */
1050 /* be lazy and re-use msg_hdr for this failure, is this ok?? */ 991 /* be lazy and re-use msg_hdr for this failure, is this ok?? */
1051 pmsg_hdr = msg_hdr; 992 pmsg_hdr = msg_hdr;
1052 goto away; 993 goto out_respond_and_unlock;
1053 } 994 }
1054 995
1055 if (dev_info->pending_msg_hdr) { 996 if (dev_info->pending_msg_hdr) {
1056 /* only non-NULL if dev is still waiting on a response */ 997 /* only non-NULL if dev is still waiting on a response */
1057 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT; 998 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1058 pmsg_hdr = dev_info->pending_msg_hdr; 999 pmsg_hdr = dev_info->pending_msg_hdr;
1059 goto away; 1000 goto out_respond_and_unlock;
1060 } 1001 }
1061 1002
1062 if (need_response) { 1003 if (need_response) {
1063 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); 1004 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
1064 if (!pmsg_hdr) { 1005 if (!pmsg_hdr) {
1065 response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; 1006 response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1066 goto away; 1007 goto out_respond_and_unlock;
1067 } 1008 }
1068 1009
1069 memcpy(pmsg_hdr, msg_hdr, 1010 memcpy(pmsg_hdr, msg_hdr,
@@ -1071,13 +1012,12 @@ device_epilog(struct visor_device *dev_info,
1071 dev_info->pending_msg_hdr = pmsg_hdr; 1012 dev_info->pending_msg_hdr = pmsg_hdr;
1072 } 1013 }
1073 1014
1074 down(&notifier_lock);
1075 if (response >= 0) { 1015 if (response >= 0) {
1076 switch (cmd) { 1016 switch (cmd) {
1077 case CONTROLVM_DEVICE_CREATE: 1017 case CONTROLVM_DEVICE_CREATE:
1078 if (notifiers->device_create) { 1018 if (notifiers->device_create) {
1079 (*notifiers->device_create) (dev_info); 1019 (*notifiers->device_create) (dev_info);
1080 notified = true; 1020 goto out_unlock;
1081 } 1021 }
1082 break; 1022 break;
1083 case CONTROLVM_DEVICE_CHANGESTATE: 1023 case CONTROLVM_DEVICE_CHANGESTATE:
@@ -1087,7 +1027,7 @@ device_epilog(struct visor_device *dev_info,
1087 segment_state_running.operating) { 1027 segment_state_running.operating) {
1088 if (notifiers->device_resume) { 1028 if (notifiers->device_resume) {
1089 (*notifiers->device_resume) (dev_info); 1029 (*notifiers->device_resume) (dev_info);
1090 notified = true; 1030 goto out_unlock;
1091 } 1031 }
1092 } 1032 }
1093 /* ServerNotReady / ServerLost / SegmentStateStandby */ 1033 /* ServerNotReady / ServerLost / SegmentStateStandby */
@@ -1099,32 +1039,23 @@ device_epilog(struct visor_device *dev_info,
1099 */ 1039 */
1100 if (notifiers->device_pause) { 1040 if (notifiers->device_pause) {
1101 (*notifiers->device_pause) (dev_info); 1041 (*notifiers->device_pause) (dev_info);
1102 notified = true; 1042 goto out_unlock;
1103 } 1043 }
1104 } 1044 }
1105 break; 1045 break;
1106 case CONTROLVM_DEVICE_DESTROY: 1046 case CONTROLVM_DEVICE_DESTROY:
1107 if (notifiers->device_destroy) { 1047 if (notifiers->device_destroy) {
1108 (*notifiers->device_destroy) (dev_info); 1048 (*notifiers->device_destroy) (dev_info);
1109 notified = true; 1049 goto out_unlock;
1110 } 1050 }
1111 break; 1051 break;
1112 } 1052 }
1113 } 1053 }
1114away: 1054
1115 if (notified) 1055out_respond_and_unlock:
1116 /* The callback function just called above is responsible 1056 device_responder(cmd, pmsg_hdr, response);
1117 * for calling the appropriate visorchipset_busdev_responders 1057
1118 * function, which will call device_responder() 1058out_unlock:
1119 */
1120 ;
1121 else
1122 /*
1123 * Do not kfree(pmsg_hdr) as this is the failure path.
1124 * The success path ('notified') will call the responder
1125 * directly and kfree() there.
1126 */
1127 device_responder(cmd, pmsg_hdr, response);
1128 up(&notifier_lock); 1059 up(&notifier_lock);
1129} 1060}
1130 1061
@@ -1142,14 +1073,14 @@ bus_create(struct controlvm_message *inmsg)
1142 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, 1073 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1143 POSTCODE_SEVERITY_ERR); 1074 POSTCODE_SEVERITY_ERR);
1144 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; 1075 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1145 goto cleanup; 1076 goto out_bus_epilog;
1146 } 1077 }
1147 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL); 1078 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1148 if (!bus_info) { 1079 if (!bus_info) {
1149 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, 1080 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1150 POSTCODE_SEVERITY_ERR); 1081 POSTCODE_SEVERITY_ERR);
1151 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; 1082 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1152 goto cleanup; 1083 goto out_bus_epilog;
1153 } 1084 }
1154 1085
1155 INIT_LIST_HEAD(&bus_info->list_all); 1086 INIT_LIST_HEAD(&bus_info->list_all);
@@ -1169,7 +1100,7 @@ bus_create(struct controlvm_message *inmsg)
1169 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; 1100 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1170 kfree(bus_info); 1101 kfree(bus_info);
1171 bus_info = NULL; 1102 bus_info = NULL;
1172 goto cleanup; 1103 goto out_bus_epilog;
1173 } 1104 }
1174 bus_info->visorchannel = visorchannel; 1105 bus_info->visorchannel = visorchannel;
1175 if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) { 1106 if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
@@ -1179,7 +1110,7 @@ bus_create(struct controlvm_message *inmsg)
1179 1110
1180 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO); 1111 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
1181 1112
1182cleanup: 1113out_bus_epilog:
1183 bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr, 1114 bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1184 rc, inmsg->hdr.flags.response_expected == 1); 1115 rc, inmsg->hdr.flags.response_expected == 1);
1185} 1116}
@@ -1231,8 +1162,9 @@ bus_configure(struct controlvm_message *inmsg,
1231 POSTCODE_SEVERITY_ERR); 1162 POSTCODE_SEVERITY_ERR);
1232 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT; 1163 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1233 } else { 1164 } else {
1234 visorchannel_set_clientpartition(bus_info->visorchannel, 1165 visorchannel_set_clientpartition
1235 cmd->configure_bus.guest_handle); 1166 (bus_info->visorchannel,
1167 cmd->configure_bus.guest_handle);
1236 bus_info->partition_uuid = parser_id_get(parser_ctx); 1168 bus_info->partition_uuid = parser_id_get(parser_ctx);
1237 parser_param_start(parser_ctx, PARSERSTRING_NAME); 1169 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1238 bus_info->name = parser_string_get(parser_ctx); 1170 bus_info->name = parser_string_get(parser_ctx);
@@ -1260,14 +1192,14 @@ my_device_create(struct controlvm_message *inmsg)
1260 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, 1192 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1261 POSTCODE_SEVERITY_ERR); 1193 POSTCODE_SEVERITY_ERR);
1262 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; 1194 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1263 goto cleanup; 1195 goto out_respond;
1264 } 1196 }
1265 1197
1266 if (bus_info->state.created == 0) { 1198 if (bus_info->state.created == 0) {
1267 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, 1199 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1268 POSTCODE_SEVERITY_ERR); 1200 POSTCODE_SEVERITY_ERR);
1269 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; 1201 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1270 goto cleanup; 1202 goto out_respond;
1271 } 1203 }
1272 1204
1273 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL); 1205 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
@@ -1275,7 +1207,7 @@ my_device_create(struct controlvm_message *inmsg)
1275 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, 1207 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1276 POSTCODE_SEVERITY_ERR); 1208 POSTCODE_SEVERITY_ERR);
1277 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; 1209 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1278 goto cleanup; 1210 goto out_respond;
1279 } 1211 }
1280 1212
1281 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); 1213 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
@@ -1283,7 +1215,7 @@ my_device_create(struct controlvm_message *inmsg)
1283 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, 1215 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1284 POSTCODE_SEVERITY_ERR); 1216 POSTCODE_SEVERITY_ERR);
1285 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; 1217 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1286 goto cleanup; 1218 goto out_respond;
1287 } 1219 }
1288 1220
1289 dev_info->chipset_bus_no = bus_no; 1221 dev_info->chipset_bus_no = bus_no;
@@ -1308,7 +1240,7 @@ my_device_create(struct controlvm_message *inmsg)
1308 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; 1240 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1309 kfree(dev_info); 1241 kfree(dev_info);
1310 dev_info = NULL; 1242 dev_info = NULL;
1311 goto cleanup; 1243 goto out_respond;
1312 } 1244 }
1313 dev_info->visorchannel = visorchannel; 1245 dev_info->visorchannel = visorchannel;
1314 dev_info->channel_type_guid = cmd->create_device.data_type_uuid; 1246 dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
@@ -1318,7 +1250,7 @@ my_device_create(struct controlvm_message *inmsg)
1318 1250
1319 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no, 1251 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
1320 POSTCODE_SEVERITY_INFO); 1252 POSTCODE_SEVERITY_INFO);
1321cleanup: 1253out_respond:
1322 device_epilog(dev_info, segment_state_running, 1254 device_epilog(dev_info, segment_state_running,
1323 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc, 1255 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1324 inmsg->hdr.flags.response_expected == 1, 1); 1256 inmsg->hdr.flags.response_expected == 1, 1);
@@ -1382,35 +1314,23 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
1382 struct visor_controlvm_payload_info *info) 1314 struct visor_controlvm_payload_info *info)
1383{ 1315{
1384 u8 *payload = NULL; 1316 u8 *payload = NULL;
1385 int rc = CONTROLVM_RESP_SUCCESS;
1386 1317
1387 if (!info) { 1318 if (!info)
1388 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID; 1319 return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1389 goto cleanup; 1320
1390 }
1391 memset(info, 0, sizeof(struct visor_controlvm_payload_info)); 1321 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1392 if ((offset == 0) || (bytes == 0)) { 1322 if ((offset == 0) || (bytes == 0))
1393 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID; 1323 return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1394 goto cleanup; 1324
1395 }
1396 payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB); 1325 payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
1397 if (!payload) { 1326 if (!payload)
1398 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED; 1327 return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1399 goto cleanup;
1400 }
1401 1328
1402 info->offset = offset; 1329 info->offset = offset;
1403 info->bytes = bytes; 1330 info->bytes = bytes;
1404 info->ptr = payload; 1331 info->ptr = payload;
1405 1332
1406cleanup: 1333 return CONTROLVM_RESP_SUCCESS;
1407 if (rc < 0) {
1408 if (payload) {
1409 memunmap(payload);
1410 payload = NULL;
1411 }
1412 }
1413 return rc;
1414} 1334}
1415 1335
1416static void 1336static void
@@ -1490,14 +1410,8 @@ chipset_ready(struct controlvm_message_header *msg_hdr)
1490 1410
1491 if (rc != CONTROLVM_RESP_SUCCESS) 1411 if (rc != CONTROLVM_RESP_SUCCESS)
1492 rc = -rc; 1412 rc = -rc;
1493 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready) 1413 if (msg_hdr->flags.response_expected)
1494 controlvm_respond(msg_hdr, rc); 1414 controlvm_respond(msg_hdr, rc);
1495 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
1496 /* Send CHIPSET_READY response when all modules have been loaded
1497 * and disks mounted for the partition
1498 */
1499 g_chipset_msg_hdr = *msg_hdr;
1500 }
1501} 1415}
1502 1416
1503static void 1417static void
@@ -1726,9 +1640,10 @@ parahotplug_process_message(struct controlvm_message *inmsg)
1726 * initialization. 1640 * initialization.
1727 */ 1641 */
1728 parahotplug_request_kickoff(req); 1642 parahotplug_request_kickoff(req);
1729 controlvm_respond_physdev_changestate(&inmsg->hdr, 1643 controlvm_respond_physdev_changestate
1730 CONTROLVM_RESP_SUCCESS, 1644 (&inmsg->hdr,
1731 inmsg->cmd.device_change_state.state); 1645 CONTROLVM_RESP_SUCCESS,
1646 inmsg->cmd.device_change_state.state);
1732 parahotplug_request_destroy(req); 1647 parahotplug_request_destroy(req);
1733 } else { 1648 } else {
1734 /* For disable messages, add the request to the 1649 /* For disable messages, add the request to the
@@ -1840,8 +1755,9 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
1840 break; 1755 break;
1841 default: 1756 default:
1842 if (inmsg.hdr.flags.response_expected) 1757 if (inmsg.hdr.flags.response_expected)
1843 controlvm_respond(&inmsg.hdr, 1758 controlvm_respond
1844 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN); 1759 (&inmsg.hdr,
1760 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1845 break; 1761 break;
1846 } 1762 }
1847 1763
@@ -1885,31 +1801,11 @@ controlvm_periodic_work(struct work_struct *work)
1885 struct controlvm_message inmsg; 1801 struct controlvm_message inmsg;
1886 bool got_command = false; 1802 bool got_command = false;
1887 bool handle_command_failed = false; 1803 bool handle_command_failed = false;
1888 static u64 poll_count;
1889 1804
1890 /* make sure visorbus server is registered for controlvm callbacks */ 1805 /* make sure visorbus server is registered for controlvm callbacks */
1891 if (visorchipset_visorbusregwait && !visorbusregistered) 1806 if (visorchipset_visorbusregwait && !visorbusregistered)
1892 goto cleanup; 1807 goto cleanup;
1893 1808
1894 poll_count++;
1895 if (poll_count >= 250)
1896 ; /* keep going */
1897 else
1898 goto cleanup;
1899
1900 /* Check events to determine if response to CHIPSET_READY
1901 * should be sent
1902 */
1903 if (visorchipset_holdchipsetready &&
1904 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1905 if (check_chipset_events() == 1) {
1906 controlvm_respond(&g_chipset_msg_hdr, 0);
1907 clear_chipset_events();
1908 memset(&g_chipset_msg_hdr, 0,
1909 sizeof(struct controlvm_message_header));
1910 }
1911 }
1912
1913 while (visorchannel_signalremove(controlvm_channel, 1809 while (visorchannel_signalremove(controlvm_channel,
1914 CONTROLVM_QUEUE_RESPONSE, 1810 CONTROLVM_QUEUE_RESPONSE,
1915 &inmsg)) 1811 &inmsg))
@@ -1979,8 +1875,11 @@ setup_crash_devices_work_queue(struct work_struct *work)
1979 u16 local_crash_msg_count; 1875 u16 local_crash_msg_count;
1980 1876
1981 /* make sure visorbus is registered for controlvm callbacks */ 1877 /* make sure visorbus is registered for controlvm callbacks */
1982 if (visorchipset_visorbusregwait && !visorbusregistered) 1878 if (visorchipset_visorbusregwait && !visorbusregistered) {
1983 goto cleanup; 1879 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1880 schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
1881 return;
1882 }
1984 1883
1985 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO); 1884 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1986 1885
@@ -2057,13 +1956,6 @@ setup_crash_devices_work_queue(struct work_struct *work)
2057 return; 1956 return;
2058 } 1957 }
2059 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO); 1958 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2060 return;
2061
2062cleanup:
2063
2064 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2065
2066 schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
2067} 1959}
2068 1960
2069static void 1961static void
@@ -2135,25 +2027,6 @@ device_resume_response(struct visor_device *dev_info, int response)
2135 dev_info->pending_msg_hdr = NULL; 2027 dev_info->pending_msg_hdr = NULL;
2136} 2028}
2137 2029
2138static ssize_t chipsetready_store(struct device *dev,
2139 struct device_attribute *attr,
2140 const char *buf, size_t count)
2141{
2142 char msgtype[64];
2143
2144 if (sscanf(buf, "%63s", msgtype) != 1)
2145 return -EINVAL;
2146
2147 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
2148 chipset_events[0] = 1;
2149 return count;
2150 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
2151 chipset_events[1] = 1;
2152 return count;
2153 }
2154 return -EINVAL;
2155}
2156
2157/* The parahotplug/devicedisabled interface gets called by our support script 2030/* The parahotplug/devicedisabled interface gets called by our support script
2158 * when an SR-IOV device has been shut down. The ID is passed to the script 2031 * when an SR-IOV device has been shut down. The ID is passed to the script
2159 * and then passed back when the device has been removed. 2032 * and then passed back when the device has been removed.
@@ -2205,10 +2078,11 @@ visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2205 if (!*file_controlvm_channel) 2078 if (!*file_controlvm_channel)
2206 return -ENXIO; 2079 return -ENXIO;
2207 2080
2208 visorchannel_read(*file_controlvm_channel, 2081 visorchannel_read
2209 offsetof(struct spar_controlvm_channel_protocol, 2082 (*file_controlvm_channel,
2210 gp_control_channel), 2083 offsetof(struct spar_controlvm_channel_protocol,
2211 &addr, sizeof(addr)); 2084 gp_control_channel),
2085 &addr, sizeof(addr));
2212 if (!addr) 2086 if (!addr)
2213 return -ENXIO; 2087 return -ENXIO;
2214 2088
@@ -2308,16 +2182,25 @@ visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2308 return 0; 2182 return 0;
2309} 2183}
2310 2184
2185static void
2186visorchipset_file_cleanup(dev_t major_dev)
2187{
2188 if (file_cdev.ops)
2189 cdev_del(&file_cdev);
2190 file_cdev.ops = NULL;
2191 unregister_chrdev_region(major_dev, 1);
2192}
2193
2311static int 2194static int
2312visorchipset_init(struct acpi_device *acpi_device) 2195visorchipset_init(struct acpi_device *acpi_device)
2313{ 2196{
2314 int rc = 0; 2197 int err = -ENODEV;
2315 u64 addr; 2198 u64 addr;
2316 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID; 2199 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2317 2200
2318 addr = controlvm_get_channel_address(); 2201 addr = controlvm_get_channel_address();
2319 if (!addr) 2202 if (!addr)
2320 return -ENODEV; 2203 goto error;
2321 2204
2322 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers)); 2205 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
2323 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info)); 2206 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
@@ -2325,24 +2208,19 @@ visorchipset_init(struct acpi_device *acpi_device)
2325 controlvm_channel = visorchannel_create_with_lock(addr, 0, 2208 controlvm_channel = visorchannel_create_with_lock(addr, 0,
2326 GFP_KERNEL, uuid); 2209 GFP_KERNEL, uuid);
2327 if (!controlvm_channel) 2210 if (!controlvm_channel)
2328 return -ENODEV; 2211 goto error;
2212
2329 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT( 2213 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2330 visorchannel_get_header(controlvm_channel))) { 2214 visorchannel_get_header(controlvm_channel))) {
2331 initialize_controlvm_payload(); 2215 initialize_controlvm_payload();
2332 } else { 2216 } else {
2333 visorchannel_destroy(controlvm_channel); 2217 goto error_destroy_channel;
2334 controlvm_channel = NULL;
2335 return -ENODEV;
2336 } 2218 }
2337 2219
2338 major_dev = MKDEV(visorchipset_major, 0); 2220 major_dev = MKDEV(visorchipset_major, 0);
2339 rc = visorchipset_file_init(major_dev, &controlvm_channel); 2221 err = visorchipset_file_init(major_dev, &controlvm_channel);
2340 if (rc < 0) { 2222 if (err < 0)
2341 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR); 2223 goto error_destroy_payload;
2342 goto cleanup;
2343 }
2344
2345 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2346 2224
2347 /* if booting in a crash kernel */ 2225 /* if booting in a crash kernel */
2348 if (is_kdump_kernel()) 2226 if (is_kdump_kernel())
@@ -2359,27 +2237,33 @@ visorchipset_init(struct acpi_device *acpi_device)
2359 visorchipset_platform_device.dev.devt = major_dev; 2237 visorchipset_platform_device.dev.devt = major_dev;
2360 if (platform_device_register(&visorchipset_platform_device) < 0) { 2238 if (platform_device_register(&visorchipset_platform_device) < 0) {
2361 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR); 2239 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2362 rc = -ENODEV; 2240 err = -ENODEV;
2363 goto cleanup; 2241 goto error_cancel_work;
2364 } 2242 }
2365 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO); 2243 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2366 2244
2367 rc = visorbus_init(); 2245 err = visorbus_init();
2368cleanup: 2246 if (err < 0)
2369 if (rc) { 2247 goto error_unregister;
2370 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2371 POSTCODE_SEVERITY_ERR);
2372 }
2373 return rc;
2374}
2375 2248
2376static void 2249 return 0;
2377visorchipset_file_cleanup(dev_t major_dev) 2250
2378{ 2251error_unregister:
2379 if (file_cdev.ops) 2252 platform_device_unregister(&visorchipset_platform_device);
2380 cdev_del(&file_cdev); 2253
2381 file_cdev.ops = NULL; 2254error_cancel_work:
2382 unregister_chrdev_region(major_dev, 1); 2255 cancel_delayed_work_sync(&periodic_controlvm_work);
2256 visorchipset_file_cleanup(major_dev);
2257
2258error_destroy_payload:
2259 destroy_controlvm_payload_info(&controlvm_payload_info);
2260
2261error_destroy_channel:
2262 visorchannel_destroy(controlvm_channel);
2263
2264error:
2265 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
2266 return err;
2383} 2267}
2384 2268
2385static int 2269static int
@@ -2392,8 +2276,6 @@ visorchipset_exit(struct acpi_device *acpi_device)
2392 cancel_delayed_work_sync(&periodic_controlvm_work); 2276 cancel_delayed_work_sync(&periodic_controlvm_work);
2393 destroy_controlvm_payload_info(&controlvm_payload_info); 2277 destroy_controlvm_payload_info(&controlvm_payload_info);
2394 2278
2395 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2396
2397 visorchannel_destroy(controlvm_channel); 2279 visorchannel_destroy(controlvm_channel);
2398 2280
2399 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt); 2281 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
@@ -2460,12 +2342,8 @@ module_param_named(major, visorchipset_major, int, S_IRUGO);
2460MODULE_PARM_DESC(visorchipset_major, 2342MODULE_PARM_DESC(visorchipset_major,
2461 "major device number to use for the device node"); 2343 "major device number to use for the device node");
2462module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO); 2344module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
2463MODULE_PARM_DESC(visorchipset_visorbusreqwait, 2345MODULE_PARM_DESC(visorchipset_visorbusregwait,
2464 "1 to have the module wait for the visor bus to register"); 2346 "1 to have the module wait for the visor bus to register");
2465module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2466 int, S_IRUGO);
2467MODULE_PARM_DESC(visorchipset_holdchipsetready,
2468 "1 to hold response to CHIPSET_READY");
2469 2347
2470module_init(init_unisys); 2348module_init(init_unisys);
2471module_exit(exit_unisys); 2349module_exit(exit_unisys);
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index e93bb1dbfd97..6a4570d10642 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -52,6 +52,8 @@ static int visorhba_resume(struct visor_device *dev,
52 52
53static ssize_t info_debugfs_read(struct file *file, char __user *buf, 53static ssize_t info_debugfs_read(struct file *file, char __user *buf,
54 size_t len, loff_t *offset); 54 size_t len, loff_t *offset);
55static int set_no_disk_inquiry_result(unsigned char *buf,
56 size_t len, bool is_lun0);
55static struct dentry *visorhba_debugfs_dir; 57static struct dentry *visorhba_debugfs_dir;
56static const struct file_operations debugfs_info_fops = { 58static const struct file_operations debugfs_info_fops = {
57 .read = info_debugfs_read, 59 .read = info_debugfs_read,
@@ -83,12 +85,6 @@ static struct visor_driver visorhba_driver = {
83MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types); 85MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
84MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR); 86MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);
85 87
86struct visor_thread_info {
87 struct task_struct *task;
88 struct completion has_stopped;
89 int id;
90};
91
92struct visordisk_info { 88struct visordisk_info {
93 u32 valid; 89 u32 valid;
94 u32 channel, id, lun; /* Disk Path */ 90 u32 channel, id, lun; /* Disk Path */
@@ -135,7 +131,7 @@ struct visorhba_devdata {
135 struct visordisk_info head; 131 struct visordisk_info head;
136 unsigned int max_buff_len; 132 unsigned int max_buff_len;
137 int devnum; 133 int devnum;
138 struct visor_thread_info threadinfo; 134 struct task_struct *thread;
139 int thread_wait_ms; 135 int thread_wait_ms;
140}; 136};
141 137
@@ -152,28 +148,36 @@ static struct visorhba_devices_open visorhbas_open[VISORHBA_OPEN_MAX];
152 (iter->lun == match->lun)) 148 (iter->lun == match->lun))
153/** 149/**
154 * visor_thread_start - starts a thread for the device 150 * visor_thread_start - starts a thread for the device
155 * @thrinfo: The thread to start
156 * @threadfn: Function the thread starts 151 * @threadfn: Function the thread starts
157 * @thrcontext: Context to pass to the thread, i.e. devdata 152 * @thrcontext: Context to pass to the thread, i.e. devdata
158 * @name: string describing name of thread 153 * @name: string describing name of thread
159 * 154 *
160 * Starts a thread for the device. 155 * Starts a thread for the device.
161 * 156 *
162 * Return 0 on success; 157 * Return the task_struct * denoting the thread on success,
158 * or NULL on failure
163 */ 159 */
164static int visor_thread_start(struct visor_thread_info *thrinfo, 160static struct task_struct *visor_thread_start
165 int (*threadfn)(void *), 161(int (*threadfn)(void *), void *thrcontext, char *name)
166 void *thrcontext, char *name)
167{ 162{
168 /* used to stop the thread */ 163 struct task_struct *task;
169 init_completion(&thrinfo->has_stopped); 164
170 thrinfo->task = kthread_run(threadfn, thrcontext, "%s", name); 165 task = kthread_run(threadfn, thrcontext, "%s", name);
171 if (IS_ERR(thrinfo->task)) { 166 if (IS_ERR(task)) {
172 thrinfo->id = 0; 167 pr_err("visorbus failed to start thread\n");
173 return PTR_ERR(thrinfo->task); 168 return NULL;
174 } 169 }
175 thrinfo->id = thrinfo->task->pid; 170 return task;
176 return 0; 171}
172
173/**
174 * visor_thread_stop - stops the thread if it is running
175 */
176static void visor_thread_stop(struct task_struct *task)
177{
178 if (!task)
179 return; /* no thread running */
180 kthread_stop(task);
177} 181}
178 182
179/** 183/**
@@ -231,16 +235,17 @@ static void *del_scsipending_ent(struct visorhba_devdata *devdata,
231 int del) 235 int del)
232{ 236{
233 unsigned long flags; 237 unsigned long flags;
234 void *sent = NULL; 238 void *sent;
235 239
236 if (del < MAX_PENDING_REQUESTS) { 240 if (del >= MAX_PENDING_REQUESTS)
237 spin_lock_irqsave(&devdata->privlock, flags); 241 return NULL;
238 sent = devdata->pending[del].sent;
239 242
240 devdata->pending[del].cmdtype = 0; 243 spin_lock_irqsave(&devdata->privlock, flags);
241 devdata->pending[del].sent = NULL; 244 sent = devdata->pending[del].sent;
242 spin_unlock_irqrestore(&devdata->privlock, flags); 245
243 } 246 devdata->pending[del].cmdtype = 0;
247 devdata->pending[del].sent = NULL;
248 spin_unlock_irqrestore(&devdata->privlock, flags);
244 249
245 return sent; 250 return sent;
246} 251}
@@ -681,7 +686,7 @@ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
681 /* Stop using the IOVM response queue (queue should be drained 686 /* Stop using the IOVM response queue (queue should be drained
682 * by the end) 687 * by the end)
683 */ 688 */
684 kthread_stop(devdata->threadinfo.task); 689 visor_thread_stop(devdata->thread);
685 690
686 /* Fail commands that weren't completed */ 691 /* Fail commands that weren't completed */
687 spin_lock_irqsave(&devdata->privlock, flags); 692 spin_lock_irqsave(&devdata->privlock, flags);
@@ -772,6 +777,24 @@ do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
772 } 777 }
773} 778}
774 779
780static int set_no_disk_inquiry_result(unsigned char *buf,
781 size_t len, bool is_lun0)
782{
783 if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN)
784 return -EINVAL;
785 memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
786 buf[2] = SCSI_SPC2_VER;
787 if (is_lun0) {
788 buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
789 buf[3] = DEV_HISUPPORT;
790 } else {
791 buf[0] = DEV_NOT_CAPABLE;
792 }
793 buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
794 strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
795 return 0;
796}
797
775/** 798/**
776 * do_scsi_nolinuxstat - scsi command didn't have linuxstat 799 * do_scsi_nolinuxstat - scsi command didn't have linuxstat
777 * @cmdrsp: response from IOVM 800 * @cmdrsp: response from IOVM
@@ -804,10 +827,8 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
804 * a disk there so we'll present a processor 827 * a disk there so we'll present a processor
805 * there. 828 * there.
806 */ 829 */
807 SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen, 830 set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
808 scsidev->lun, 831 scsidev->lun == 0);
809 DEV_DISK_CAPABLE_NOT_PRESENT,
810 DEV_NOT_CAPABLE);
811 832
812 if (scsi_sg_count(scsicmd) == 0) { 833 if (scsi_sg_count(scsicmd) == 0) {
813 memcpy(scsi_sglist(scsicmd), buf, 834 memcpy(scsi_sglist(scsicmd), buf,
@@ -929,14 +950,15 @@ static void process_disk_notify(struct Scsi_Host *shost,
929 struct diskaddremove *dar; 950 struct diskaddremove *dar;
930 951
931 dar = kzalloc(sizeof(*dar), GFP_ATOMIC); 952 dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
932 if (dar) { 953 if (!dar)
933 dar->add = cmdrsp->disknotify.add; 954 return;
934 dar->shost = shost; 955
935 dar->channel = cmdrsp->disknotify.channel; 956 dar->add = cmdrsp->disknotify.add;
936 dar->id = cmdrsp->disknotify.id; 957 dar->shost = shost;
937 dar->lun = cmdrsp->disknotify.lun; 958 dar->channel = cmdrsp->disknotify.channel;
938 queue_disk_add_remove(dar); 959 dar->id = cmdrsp->disknotify.id;
939 } 960 dar->lun = cmdrsp->disknotify.lun;
961 queue_disk_add_remove(dar);
940} 962}
941 963
942/** 964/**
@@ -1064,8 +1086,8 @@ static int visorhba_resume(struct visor_device *dev,
1064 if (devdata->serverdown && !devdata->serverchangingstate) 1086 if (devdata->serverdown && !devdata->serverchangingstate)
1065 devdata->serverchangingstate = true; 1087 devdata->serverchangingstate = true;
1066 1088
1067 visor_thread_start(&devdata->threadinfo, process_incoming_rsps, 1089 devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1068 devdata, "vhba_incming"); 1090 "vhba_incming");
1069 1091
1070 devdata->serverdown = false; 1092 devdata->serverdown = false;
1071 devdata->serverchangingstate = false; 1093 devdata->serverchangingstate = false;
@@ -1141,8 +1163,8 @@ static int visorhba_probe(struct visor_device *dev)
1141 goto err_scsi_remove_host; 1163 goto err_scsi_remove_host;
1142 1164
1143 devdata->thread_wait_ms = 2; 1165 devdata->thread_wait_ms = 2;
1144 visor_thread_start(&devdata->threadinfo, process_incoming_rsps, 1166 devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1145 devdata, "vhba_incoming"); 1167 "vhba_incoming");
1146 1168
1147 scsi_scan_host(scsihost); 1169 scsi_scan_host(scsihost);
1148 1170
@@ -1172,7 +1194,7 @@ static void visorhba_remove(struct visor_device *dev)
1172 return; 1194 return;
1173 1195
1174 scsihost = devdata->scsihost; 1196 scsihost = devdata->scsihost;
1175 kthread_stop(devdata->threadinfo.task); 1197 visor_thread_stop(devdata->thread);
1176 scsi_remove_host(scsihost); 1198 scsi_remove_host(scsihost);
1177 scsi_host_put(scsihost); 1199 scsi_host_put(scsihost);
1178 1200
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 13c0316112ac..12a3570780fc 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -123,9 +123,9 @@ static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
123 [38] = KEY_L, 123 [38] = KEY_L,
124 [39] = KEY_SEMICOLON, 124 [39] = KEY_SEMICOLON,
125 [40] = KEY_APOSTROPHE, 125 [40] = KEY_APOSTROPHE,
126 [41] = KEY_GRAVE, /* FIXME, '#' */ 126 [41] = KEY_GRAVE,
127 [42] = KEY_LEFTSHIFT, 127 [42] = KEY_LEFTSHIFT,
128 [43] = KEY_BACKSLASH, /* FIXME, '~' */ 128 [43] = KEY_BACKSLASH,
129 [44] = KEY_Z, 129 [44] = KEY_Z,
130 [45] = KEY_X, 130 [45] = KEY_X,
131 [46] = KEY_C, 131 [46] = KEY_C,
@@ -173,7 +173,7 @@ static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
173 [88] = KEY_F12, 173 [88] = KEY_F12,
174 [90] = KEY_KPLEFTPAREN, 174 [90] = KEY_KPLEFTPAREN,
175 [91] = KEY_KPRIGHTPAREN, 175 [91] = KEY_KPRIGHTPAREN,
176 [92] = KEY_KPASTERISK, /* FIXME */ 176 [92] = KEY_KPASTERISK,
177 [93] = KEY_KPASTERISK, 177 [93] = KEY_KPASTERISK,
178 [94] = KEY_KPPLUS, 178 [94] = KEY_KPPLUS,
179 [95] = KEY_HELP, 179 [95] = KEY_HELP,
@@ -467,18 +467,14 @@ handle_locking_key(struct input_dev *visorinput_dev,
467 break; 467 break;
468 default: 468 default:
469 led = -1; 469 led = -1;
470 break; 470 return;
471 } 471 }
472 if (led >= 0) { 472 if (test_bit(led, visorinput_dev->led) != desired_state) {
473 int old_state = (test_bit(led, visorinput_dev->led) != 0); 473 input_report_key(visorinput_dev, keycode, 1);
474 474 input_sync(visorinput_dev);
475 if (old_state != desired_state) { 475 input_report_key(visorinput_dev, keycode, 0);
476 input_report_key(visorinput_dev, keycode, 1); 476 input_sync(visorinput_dev);
477 input_sync(visorinput_dev); 477 __change_bit(led, visorinput_dev->led);
478 input_report_key(visorinput_dev, keycode, 0);
479 input_sync(visorinput_dev);
480 __change_bit(led, visorinput_dev->led);
481 }
482 } 478 }
483} 479}
484 480
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index be0d057346c3..fd7c9a6cb6f3 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -109,51 +109,46 @@ struct chanstat {
109}; 109};
110 110
111struct visornic_devdata { 111struct visornic_devdata {
112 unsigned short enabled; /* 0 disabled 1 enabled to receive */ 112 /* 0 disabled 1 enabled to receive */
113 unsigned short enab_dis_acked; /* NET_RCV_ENABLE/DISABLE acked by 113 unsigned short enabled;
114 * IOPART 114 /* NET_RCV_ENABLE/DISABLE acked by IOPART */
115 */ 115 unsigned short enab_dis_acked;
116
116 struct visor_device *dev; 117 struct visor_device *dev;
117 struct net_device *netdev; 118 struct net_device *netdev;
118 struct net_device_stats net_stats; 119 struct net_device_stats net_stats;
119 atomic_t interrupt_rcvd; 120 atomic_t interrupt_rcvd;
120 wait_queue_head_t rsp_queue; 121 wait_queue_head_t rsp_queue;
121 struct sk_buff **rcvbuf; 122 struct sk_buff **rcvbuf;
122 u64 incarnation_id; /* lets IOPART know about re-birth */ 123 /* incarnation_id lets IOPART know about re-birth */
123 unsigned short old_flags; /* flags as they were prior to 124 u64 incarnation_id;
124 * set_multicast_list 125 /* flags as they were prior to set_multicast_list */
125 */ 126 unsigned short old_flags;
126 atomic_t usage; /* count of users */ 127 atomic_t usage; /* count of users */
127 int num_rcv_bufs; /* indicates how many rcv buffers 128
128 * the vnic will post 129 /* number of rcv buffers the vnic will post */
129 */ 130 int num_rcv_bufs;
130 int num_rcv_bufs_could_not_alloc; 131 int num_rcv_bufs_could_not_alloc;
131 atomic_t num_rcvbuf_in_iovm; 132 atomic_t num_rcvbuf_in_iovm;
132 unsigned long alloc_failed_in_if_needed_cnt; 133 unsigned long alloc_failed_in_if_needed_cnt;
133 unsigned long alloc_failed_in_repost_rtn_cnt; 134 unsigned long alloc_failed_in_repost_rtn_cnt;
134 unsigned long max_outstanding_net_xmits; /* absolute max number of 135
135 * outstanding xmits - should 136 /* absolute max number of outstanding xmits - should never hit this */
136 * never hit this 137 unsigned long max_outstanding_net_xmits;
137 */ 138 /* high water mark for calling netif_stop_queue() */
138 unsigned long upper_threshold_net_xmits; /* high water mark for 139 unsigned long upper_threshold_net_xmits;
139 * calling netif_stop_queue() 140 /* high water mark for calling netif_wake_queue() */
140 */ 141 unsigned long lower_threshold_net_xmits;
141 unsigned long lower_threshold_net_xmits; /* high water mark for calling 142 /* xmitbufhead - head of the xmit buffer list sent to the IOPART end */
142 * netif_wake_queue() 143 struct sk_buff_head xmitbufhead;
143 */ 144
144 struct sk_buff_head xmitbufhead; /* xmitbufhead is the head of the
145 * xmit buffer list that have been
146 * sent to the IOPART end
147 */
148 visorbus_state_complete_func server_down_complete_func; 145 visorbus_state_complete_func server_down_complete_func;
149 struct work_struct timeout_reset; 146 struct work_struct timeout_reset;
150 struct uiscmdrsp *cmdrsp_rcv; /* cmdrsp_rcv is used for 147 /* cmdrsp_rcv is used for posting/unposting rcv buffers */
151 * posting/unposting rcv buffers 148 struct uiscmdrsp *cmdrsp_rcv;
152 */ 149 /* xmit_cmdrsp - issues NET_XMIT - only one active xmit at a time */
153 struct uiscmdrsp *xmit_cmdrsp; /* used to issue NET_XMIT - there is 150 struct uiscmdrsp *xmit_cmdrsp;
154 * never more that one xmit in 151
155 * progress at a time
156 */
157 bool server_down; /* IOPART is down */ 152 bool server_down; /* IOPART is down */
158 bool server_change_state; /* Processing SERVER_CHANGESTATE msg */ 153 bool server_change_state; /* Processing SERVER_CHANGESTATE msg */
159 bool going_away; /* device is being torn down */ 154 bool going_away; /* device is being torn down */
@@ -173,18 +168,10 @@ struct visornic_devdata {
173 unsigned long n_rcv1; /* # rcvs of 1 buffers */ 168 unsigned long n_rcv1; /* # rcvs of 1 buffers */
174 unsigned long n_rcv2; /* # rcvs of 2 buffers */ 169 unsigned long n_rcv2; /* # rcvs of 2 buffers */
175 unsigned long n_rcvx; /* # rcvs of >2 buffers */ 170 unsigned long n_rcvx; /* # rcvs of >2 buffers */
176 unsigned long found_repost_rcvbuf_cnt; /* # times we called 171 unsigned long found_repost_rcvbuf_cnt; /* # repost_rcvbuf_cnt */
177 * repost_rcvbuf_cnt 172 unsigned long repost_found_skb_cnt; /* # of found the skb */
178 */ 173 unsigned long n_repost_deficit; /* # of lost rcv buffers */
179 unsigned long repost_found_skb_cnt; /* # times found the skb */ 174 unsigned long bad_rcv_buf; /* # of unknown rcv skb not freed */
180 unsigned long n_repost_deficit; /* # times we couldn't find
181 * all of the rcv buffers
182 */
183 unsigned long bad_rcv_buf; /* # times we negleted to
184 * free the rcv skb because
185 * we didn't know where it
186 * came from
187 */
188 unsigned long n_rcv_packets_not_accepted;/* # bogs rcv packets */ 175 unsigned long n_rcv_packets_not_accepted;/* # bogs rcv packets */
189 176
190 int queuefullmsg_logged; 177 int queuefullmsg_logged;
@@ -209,18 +196,17 @@ static void poll_for_irq(unsigned long v);
209 * Return value indicates number of entries filled in frags 196 * Return value indicates number of entries filled in frags
210 * Negative values indicate an error. 197 * Negative values indicate an error.
211 */ 198 */
212static unsigned int 199static int
213visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen, 200visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
214 unsigned int frags_max, 201 unsigned int frags_max,
215 struct phys_info frags[]) 202 struct phys_info frags[])
216{ 203{
217 unsigned int count = 0, ii, size, offset = 0, numfrags; 204 unsigned int count = 0, frag, size, offset = 0, numfrags;
218 unsigned int total_count; 205 unsigned int total_count;
219 206
220 numfrags = skb_shinfo(skb)->nr_frags; 207 numfrags = skb_shinfo(skb)->nr_frags;
221 208
222 /* 209 /* Compute the number of fragments this skb has, and if its more than
223 * Compute the number of fragments this skb has, and if its more than
224 * frag array can hold, linearize the skb 210 * frag array can hold, linearize the skb
225 */ 211 */
226 total_count = numfrags + (firstfraglen / PI_PAGE_SIZE); 212 total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
@@ -257,23 +243,20 @@ visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
257 if ((count + numfrags) > frags_max) 243 if ((count + numfrags) > frags_max)
258 return -EINVAL; 244 return -EINVAL;
259 245
260 for (ii = 0; ii < numfrags; ii++) { 246 for (frag = 0; frag < numfrags; frag++) {
261 count = add_physinfo_entries(page_to_pfn( 247 count = add_physinfo_entries(page_to_pfn(
262 skb_frag_page(&skb_shinfo(skb)->frags[ii])), 248 skb_frag_page(&skb_shinfo(skb)->frags[frag])),
263 skb_shinfo(skb)->frags[ii]. 249 skb_shinfo(skb)->frags[frag].
264 page_offset, 250 page_offset,
265 skb_shinfo(skb)->frags[ii]. 251 skb_shinfo(skb)->frags[frag].
266 size, count, frags_max, frags); 252 size, count, frags_max, frags);
267 /* 253 /* add_physinfo_entries only returns
268 * add_physinfo_entries only returns
269 * zero if the frags array is out of room 254 * zero if the frags array is out of room
270 * That should never happen because we 255 * That should never happen because we
271 * fail above, if count+numfrags > frags_max. 256 * fail above, if count+numfrags > frags_max.
272 * Given that theres no recovery mechanism from putting
273 * half a packet in the I/O channel, panic here as this
274 * should never happen
275 */ 257 */
276 BUG_ON(!count); 258 if (!count)
259 return -EINVAL;
277 } 260 }
278 } 261 }
279 if (skb_shinfo(skb)->frag_list) { 262 if (skb_shinfo(skb)->frag_list) {
@@ -299,8 +282,7 @@ static ssize_t enable_ints_write(struct file *file,
299 const char __user *buffer, 282 const char __user *buffer,
300 size_t count, loff_t *ppos) 283 size_t count, loff_t *ppos)
301{ 284{
302 /* 285 /* Don't want to break ABI here by having a debugfs
303 * Don't want to break ABI here by having a debugfs
304 * file that no longer exists or is writable, so 286 * file that no longer exists or is writable, so
305 * lets just make this a vestigual function 287 * lets just make this a vestigual function
306 */ 288 */
@@ -308,8 +290,7 @@ static ssize_t enable_ints_write(struct file *file,
308} 290}
309 291
310/** 292/**
311 * visornic_serverdown_complete - IOPART went down, need to pause 293 * visornic_serverdown_complete - IOPART went down, pause device
312 * device
313 * @work: Work queue it was scheduled on 294 * @work: Work queue it was scheduled on
314 * 295 *
315 * The IO partition has gone down and we need to do some cleanup 296 * The IO partition has gone down and we need to do some cleanup
@@ -344,7 +325,7 @@ visornic_serverdown_complete(struct visornic_devdata *devdata)
344} 325}
345 326
346/** 327/**
347 * visornic_serverdown - Command has notified us that IOPARt is down 328 * visornic_serverdown - Command has notified us that IOPART is down
348 * @devdata: device that is being managed by IOPART 329 * @devdata: device that is being managed by IOPART
349 * 330 *
350 * Schedule the work needed to handle the server down request. Make 331 * Schedule the work needed to handle the server down request. Make
@@ -356,28 +337,38 @@ visornic_serverdown(struct visornic_devdata *devdata,
356 visorbus_state_complete_func complete_func) 337 visorbus_state_complete_func complete_func)
357{ 338{
358 unsigned long flags; 339 unsigned long flags;
340 int err;
359 341
360 spin_lock_irqsave(&devdata->priv_lock, flags); 342 spin_lock_irqsave(&devdata->priv_lock, flags);
361 if (!devdata->server_down && !devdata->server_change_state) { 343 if (devdata->server_change_state) {
362 if (devdata->going_away) {
363 spin_unlock_irqrestore(&devdata->priv_lock, flags);
364 dev_dbg(&devdata->dev->device,
365 "%s aborting because device removal pending\n",
366 __func__);
367 return -ENODEV;
368 }
369 devdata->server_change_state = true;
370 devdata->server_down_complete_func = complete_func;
371 spin_unlock_irqrestore(&devdata->priv_lock, flags);
372 visornic_serverdown_complete(devdata);
373 } else if (devdata->server_change_state) {
374 dev_dbg(&devdata->dev->device, "%s changing state\n", 344 dev_dbg(&devdata->dev->device, "%s changing state\n",
375 __func__); 345 __func__);
376 spin_unlock_irqrestore(&devdata->priv_lock, flags); 346 err = -EINVAL;
377 return -EINVAL; 347 goto err_unlock;
348 }
349 if (devdata->server_down) {
350 dev_dbg(&devdata->dev->device, "%s already down\n",
351 __func__);
352 err = -EINVAL;
353 goto err_unlock;
354 }
355 if (devdata->going_away) {
356 dev_dbg(&devdata->dev->device,
357 "%s aborting because device removal pending\n",
358 __func__);
359 err = -ENODEV;
360 goto err_unlock;
378 } 361 }
362 devdata->server_change_state = true;
363 devdata->server_down_complete_func = complete_func;
379 spin_unlock_irqrestore(&devdata->priv_lock, flags); 364 spin_unlock_irqrestore(&devdata->priv_lock, flags);
365
366 visornic_serverdown_complete(devdata);
380 return 0; 367 return 0;
368
369err_unlock:
370 spin_unlock_irqrestore(&devdata->priv_lock, flags);
371 return err;
381} 372}
382 373
383/** 374/**
@@ -395,20 +386,19 @@ alloc_rcv_buf(struct net_device *netdev)
395 386
396 /* NOTE: the first fragment in each rcv buffer is pointed to by 387 /* NOTE: the first fragment in each rcv buffer is pointed to by
397 * rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE 388 * rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
398 * in length, so the firstfrag is large enough to hold 1514. 389 * in length, so the first frag is large enough to hold 1514.
399 */ 390 */
400 skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC); 391 skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
401 if (!skb) 392 if (!skb)
402 return NULL; 393 return NULL;
403 skb->dev = netdev; 394 skb->dev = netdev;
404 skb->len = RCVPOST_BUF_SIZE;
405 /* current value of mtu doesn't come into play here; large 395 /* current value of mtu doesn't come into play here; large
406 * packets will just end up using multiple rcv buffers all of 396 * packets will just end up using multiple rcv buffers all of
407 * same size 397 * same size.
408 */ 398 */
409 skb->data_len = 0; /* dev_alloc_skb already zeroes it out 399 skb->len = RCVPOST_BUF_SIZE;
410 * for clarification. 400 /* alloc_skb already zeroes it out for clarification. */
411 */ 401 skb->data_len = 0;
412 return skb; 402 return skb;
413} 403}
414 404
@@ -436,8 +426,8 @@ post_skb(struct uiscmdrsp *cmdrsp,
436 cmdrsp->net.type = NET_RCV_POST; 426 cmdrsp->net.type = NET_RCV_POST;
437 cmdrsp->cmdtype = CMD_NET_TYPE; 427 cmdrsp->cmdtype = CMD_NET_TYPE;
438 if (visorchannel_signalinsert(devdata->dev->visorchannel, 428 if (visorchannel_signalinsert(devdata->dev->visorchannel,
439 IOCHAN_TO_IOPART, 429 IOCHAN_TO_IOPART,
440 cmdrsp)) { 430 cmdrsp)) {
441 atomic_inc(&devdata->num_rcvbuf_in_iovm); 431 atomic_inc(&devdata->num_rcvbuf_in_iovm);
442 devdata->chstat.sent_post++; 432 devdata->chstat.sent_post++;
443 } else { 433 } else {
@@ -465,8 +455,8 @@ send_enbdis(struct net_device *netdev, int state,
465 devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS; 455 devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
466 devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE; 456 devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
467 if (visorchannel_signalinsert(devdata->dev->visorchannel, 457 if (visorchannel_signalinsert(devdata->dev->visorchannel,
468 IOCHAN_TO_IOPART, 458 IOCHAN_TO_IOPART,
469 devdata->cmdrsp_rcv)) 459 devdata->cmdrsp_rcv))
470 devdata->chstat.sent_enbdis++; 460 devdata->chstat.sent_enbdis++;
471} 461}
472 462
@@ -872,8 +862,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
872 862
873 if (vnic_hit_high_watermark(devdata, 863 if (vnic_hit_high_watermark(devdata,
874 devdata->max_outstanding_net_xmits)) { 864 devdata->max_outstanding_net_xmits)) {
875 /* too many NET_XMITs queued over to IOVM - need to wait 865 /* extra NET_XMITs queued over to IOVM - need to wait */
876 */
877 devdata->chstat.reject_count++; 866 devdata->chstat.reject_count++;
878 if (!devdata->queuefullmsg_logged && 867 if (!devdata->queuefullmsg_logged &&
879 ((devdata->chstat.reject_count & 0x3ff) == 1)) 868 ((devdata->chstat.reject_count & 0x3ff) == 1))
@@ -950,16 +939,12 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
950 devdata->net_stats.tx_bytes += skb->len; 939 devdata->net_stats.tx_bytes += skb->len;
951 devdata->chstat.sent_xmit++; 940 devdata->chstat.sent_xmit++;
952 941
953 /* check to see if we have hit the high watermark for 942 /* check if we have hit the high watermark for netif_stop_queue() */
954 * netif_stop_queue()
955 */
956 if (vnic_hit_high_watermark(devdata, 943 if (vnic_hit_high_watermark(devdata,
957 devdata->upper_threshold_net_xmits)) { 944 devdata->upper_threshold_net_xmits)) {
958 /* too many NET_XMITs queued over to IOVM - need to wait */ 945 /* extra NET_XMITs queued over to IOVM - need to wait */
959 netif_stop_queue(netdev); /* calling stop queue - call 946 /* stop queue - call netif_wake_queue() after lower threshold */
960 * netif_wake_queue() after lower 947 netif_stop_queue(netdev);
961 * threshold
962 */
963 dev_dbg(&netdev->dev, 948 dev_dbg(&netdev->dev,
964 "%s busy - invoking iovm flow control\n", 949 "%s busy - invoking iovm flow control\n",
965 __func__); 950 __func__);
@@ -1312,16 +1297,13 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
1312 break; 1297 break;
1313 } 1298 }
1314 } 1299 }
1300 /* accept pkt, dest matches a multicast addr */
1315 if (found_mc) 1301 if (found_mc)
1316 break; /* accept packet, dest 1302 break;
1317 * matches a multicast
1318 * address
1319 */
1320 } 1303 }
1304 /* accept packet, h_dest must match vnic mac address */
1321 } else if (skb->pkt_type == PACKET_HOST) { 1305 } else if (skb->pkt_type == PACKET_HOST) {
1322 break; /* accept packet, h_dest must match vnic 1306 break;
1323 * mac address
1324 */
1325 } else if (skb->pkt_type == PACKET_OTHERHOST) { 1307 } else if (skb->pkt_type == PACKET_OTHERHOST) {
1326 /* something is not right */ 1308 /* something is not right */
1327 dev_err(&devdata->netdev->dev, 1309 dev_err(&devdata->netdev->dev,
@@ -1409,14 +1391,10 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
1409 if (!vbuf) 1391 if (!vbuf)
1410 return -ENOMEM; 1392 return -ENOMEM;
1411 1393
1412 /* for each vnic channel 1394 /* for each vnic channel dump out channel specific data */
1413 * dump out channel specific data
1414 */
1415 rcu_read_lock(); 1395 rcu_read_lock();
1416 for_each_netdev_rcu(current->nsproxy->net_ns, dev) { 1396 for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
1417 /* 1397 /* Only consider netdevs that are visornic, and are open */
1418 * Only consider netdevs that are visornic, and are open
1419 */
1420 if ((dev->netdev_ops != &visornic_dev_ops) || 1398 if ((dev->netdev_ops != &visornic_dev_ops) ||
1421 (!netif_queue_stopped(dev))) 1399 (!netif_queue_stopped(dev)))
1422 continue; 1400 continue;
@@ -1643,12 +1621,12 @@ service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
1643 /* ASSERT netdev == vnicinfo->netdev; */ 1621 /* ASSERT netdev == vnicinfo->netdev; */
1644 if ((netdev == devdata->netdev) && 1622 if ((netdev == devdata->netdev) &&
1645 netif_queue_stopped(netdev)) { 1623 netif_queue_stopped(netdev)) {
1646 /* check to see if we have crossed 1624 /* check if we have crossed the lower watermark
1647 * the lower watermark for 1625 * for netif_wake_queue()
1648 * netif_wake_queue()
1649 */ 1626 */
1650 if (vnic_hit_low_watermark(devdata, 1627 if (vnic_hit_low_watermark
1651 devdata->lower_threshold_net_xmits)) { 1628 (devdata,
1629 devdata->lower_threshold_net_xmits)) {
1652 /* enough NET_XMITs completed 1630 /* enough NET_XMITs completed
1653 * so can restart netif queue 1631 * so can restart netif queue
1654 */ 1632 */
@@ -1712,10 +1690,7 @@ static int visornic_poll(struct napi_struct *napi, int budget)
1712 send_rcv_posts_if_needed(devdata); 1690 send_rcv_posts_if_needed(devdata);
1713 service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget); 1691 service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
1714 1692
1715 /* 1693 /* If there aren't any more packets to receive stop the poll */
1716 * If there aren't any more packets to receive
1717 * stop the poll
1718 */
1719 if (rx_count < budget) 1694 if (rx_count < budget)
1720 napi_complete(napi); 1695 napi_complete(napi);
1721 1696
@@ -1867,8 +1842,7 @@ static int visornic_probe(struct visor_device *dev)
1867 1842
1868 setup_timer(&devdata->irq_poll_timer, poll_for_irq, 1843 setup_timer(&devdata->irq_poll_timer, poll_for_irq,
1869 (unsigned long)devdata); 1844 (unsigned long)devdata);
1870 /* 1845 /* Note: This time has to start running before the while
1871 * Note: This time has to start running before the while
1872 * loop below because the napi routine is responsible for 1846 * loop below because the napi routine is responsible for
1873 * setting enab_dis_acked 1847 * setting enab_dis_acked
1874 */ 1848 */
@@ -1897,8 +1871,7 @@ static int visornic_probe(struct visor_device *dev)
1897 /* Let's start our threads to get responses */ 1871 /* Let's start our threads to get responses */
1898 netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT); 1872 netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
1899 1873
1900 /* 1874 /* Note: Interupts have to be enable before the while
1901 * Note: Interupts have to be enable before the while
1902 * loop below because the napi routine is responsible for 1875 * loop below because the napi routine is responsible for
1903 * setting enab_dis_acked 1876 * setting enab_dis_acked
1904 */ 1877 */
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
index 6d361201d98c..ba9fe3bc2642 100644
--- a/drivers/staging/vme/devices/vme_pio2_gpio.c
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -92,7 +92,7 @@ static void pio2_gpio_set(struct gpio_chip *chip,
92} 92}
93 93
94/* Directionality configured at board build - send appropriate response */ 94/* Directionality configured at board build - send appropriate response */
95static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned offset) 95static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
96{ 96{
97 int data; 97 int data;
98 struct pio2_card *card = gpiochip_get_data(chip); 98 struct pio2_card *card = gpiochip_get_data(chip);
@@ -111,7 +111,8 @@ static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
111} 111}
112 112
113/* Directionality configured at board build - send appropriate response */ 113/* Directionality configured at board build - send appropriate response */
114static int pio2_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value) 114static int pio2_gpio_dir_out(struct gpio_chip *chip,
115 unsigned int offset, int value)
115{ 116{
116 int data; 117 int data;
117 struct pio2_card *card = gpiochip_get_data(chip); 118 struct pio2_card *card = gpiochip_get_data(chip);
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 1e6c0c4a0307..654d072bdc28 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -36,8 +36,10 @@
36 * Revision History: 36 * Revision History:
37 * 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec. 37 * 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
38 * 08-07-2003 Bryan YC Fan: Add MAXIM2827/2825 and RFMD2959 support. 38 * 08-07-2003 Bryan YC Fan: Add MAXIM2827/2825 and RFMD2959 support.
39 * 08-26-2003 Kyle Hsu : Modify BBuGetFrameTime() and BBvCalculateParameter(). 39 * 08-26-2003 Kyle Hsu : Modify BBuGetFrameTime() and
40 * cancel the setting of MAC_REG_SOFTPWRCTL on BBbVT3253Init(). 40 * BBvCalculateParameter().
41 * cancel the setting of MAC_REG_SOFTPWRCTL on
42 * BBbVT3253Init().
41 * Add the comments. 43 * Add the comments.
42 * 09-01-2003 Bryan YC Fan: RF & BB tables updated. 44 * 09-01-2003 Bryan YC Fan: RF & BB tables updated.
43 * Modified BBvLoopbackOn & BBvLoopbackOff(). 45 * Modified BBvLoopbackOn & BBvLoopbackOff().
@@ -66,7 +68,7 @@
66/*--------------------- Static Variables --------------------------*/ 68/*--------------------- Static Variables --------------------------*/
67 69
68#define CB_VT3253_INIT_FOR_RFMD 446 70#define CB_VT3253_INIT_FOR_RFMD 446
69static unsigned char byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = { 71static const unsigned char byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
70 {0x00, 0x30}, 72 {0x00, 0x30},
71 {0x01, 0x00}, 73 {0x01, 0x00},
72 {0x02, 0x00}, 74 {0x02, 0x00},
@@ -516,7 +518,7 @@ static unsigned char byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
516}; 518};
517 519
518#define CB_VT3253B0_INIT_FOR_RFMD 256 520#define CB_VT3253B0_INIT_FOR_RFMD 256
519static unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = { 521static const unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
520 {0x00, 0x31}, 522 {0x00, 0x31},
521 {0x01, 0x00}, 523 {0x01, 0x00},
522 {0x02, 0x00}, 524 {0x02, 0x00},
@@ -777,7 +779,8 @@ static unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
777 779
778#define CB_VT3253B0_AGC_FOR_RFMD2959 195 780#define CB_VT3253B0_AGC_FOR_RFMD2959 195
779/* For RFMD2959 */ 781/* For RFMD2959 */
780static unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = { 782static
783unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
781 {0xF0, 0x00}, 784 {0xF0, 0x00},
782 {0xF1, 0x3E}, 785 {0xF1, 0x3E},
783 {0xF0, 0x80}, 786 {0xF0, 0x80},
@@ -977,7 +980,8 @@ static unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] =
977 980
978#define CB_VT3253B0_INIT_FOR_AIROHA2230 256 981#define CB_VT3253B0_INIT_FOR_AIROHA2230 256
979/* For AIROHA */ 982/* For AIROHA */
980static unsigned char byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = { 983static
984unsigned char byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
981 {0x00, 0x31}, 985 {0x00, 0x31},
982 {0x01, 0x00}, 986 {0x01, 0x00},
983 {0x02, 0x00}, 987 {0x02, 0x00},
@@ -2160,9 +2164,13 @@ bool BBbVT3253Init(struct vnt_private *priv)
2160 2164
2161 2165
2162 /* {{ RobertYu:20050223, request by JerryChung */ 2166 /* {{ RobertYu:20050223, request by JerryChung */
2163 /* Init ANT B select,TX Config CR09 = 0x61->0x45, 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted) */ 2167 /* Init ANT B select,TX Config CR09 = 0x61->0x45,
2168 * 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
2169 */
2164 /*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/ 2170 /*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/
2165 /* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */ 2171 /* Init ANT B select,RX Config CR10 = 0x28->0x2A,
2172 * 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted)
2173 */
2166 /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/ 2174 /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
2167 /* Select VC1/VC2, CR215 = 0x02->0x06 */ 2175 /* Select VC1/VC2, CR215 = 0x02->0x06 */
2168 bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06); 2176 bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index 43a4fb1f3570..b4e8c43180ec 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -77,8 +77,10 @@ BBuGetFrameTime(
77void vnt_get_phy_field(struct vnt_private *, u32 frame_length, 77void vnt_get_phy_field(struct vnt_private *, u32 frame_length,
78 u16 tx_rate, u8 pkt_type, struct vnt_phy_field *); 78 u16 tx_rate, u8 pkt_type, struct vnt_phy_field *);
79 79
80bool BBbReadEmbedded(struct vnt_private *, unsigned char byBBAddr, unsigned char *pbyData); 80bool BBbReadEmbedded(struct vnt_private *, unsigned char byBBAddr,
81bool BBbWriteEmbedded(struct vnt_private *, unsigned char byBBAddr, unsigned char byData); 81 unsigned char *pbyData);
82bool BBbWriteEmbedded(struct vnt_private *, unsigned char byBBAddr,
83 unsigned char byData);
82 84
83void BBvSetShortSlotTime(struct vnt_private *); 85void BBvSetShortSlotTime(struct vnt_private *);
84void BBvSetVGAGainOffset(struct vnt_private *, unsigned char byData); 86void BBvSetVGAGainOffset(struct vnt_private *, unsigned char byData);
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 3d338122b590..afb1e8bde975 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -336,7 +336,8 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
336 } 336 }
337 if (priv->byCWMaxMin != byCWMaxMin) { 337 if (priv->byCWMaxMin != byCWMaxMin) {
338 priv->byCWMaxMin = byCWMaxMin; 338 priv->byCWMaxMin = byCWMaxMin;
339 VNSvOutPortB(priv->PortOffset + MAC_REG_CWMAXMIN0, priv->byCWMaxMin); 339 VNSvOutPortB(priv->PortOffset + MAC_REG_CWMAXMIN0,
340 priv->byCWMaxMin);
340 } 341 }
341 342
342 priv->byPacketType = CARDbyGetPktType(priv); 343 priv->byPacketType = CARDbyGetPktType(priv);
@@ -373,9 +374,12 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
373 qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp, 374 qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp,
374 local_tsf); 375 local_tsf);
375 /* adjust TSF, HW's TSF add TSF Offset reg */ 376 /* adjust TSF, HW's TSF add TSF Offset reg */
376 VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST, (u32)qwTSFOffset); 377 VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST,
377 VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST + 4, (u32)(qwTSFOffset >> 32)); 378 (u32)qwTSFOffset);
378 MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_TSFSYNCEN); 379 VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST + 4,
380 (u32)(qwTSFOffset >> 32));
381 MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL,
382 TFTCTL_TSFSYNCEN);
379 } 383 }
380 return true; 384 return true;
381} 385}
@@ -407,7 +411,8 @@ bool CARDbSetBeaconPeriod(struct vnt_private *priv,
407 priv->wBeaconInterval = wBeaconInterval; 411 priv->wBeaconInterval = wBeaconInterval;
408 /* Set NextTBTT */ 412 /* Set NextTBTT */
409 VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT, (u32)qwNextTBTT); 413 VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
410 VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32)); 414 VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT + 4,
415 (u32)(qwNextTBTT >> 32));
411 MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); 416 MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
412 417
413 return true; 418 return true;
@@ -433,15 +438,19 @@ bool CARDbRadioPowerOff(struct vnt_private *priv)
433 438
434 switch (priv->byRFType) { 439 switch (priv->byRFType) {
435 case RF_RFMD2959: 440 case RF_RFMD2959:
436 MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_TXPEINV); 441 MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
437 MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE1); 442 SOFTPWRCTL_TXPEINV);
443 MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
444 SOFTPWRCTL_SWPE1);
438 break; 445 break;
439 446
440 case RF_AIROHA: 447 case RF_AIROHA:
441 case RF_AL2230S: 448 case RF_AL2230S:
442 case RF_AIROHA7230: 449 case RF_AIROHA7230:
443 MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE2); 450 MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
444 MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); 451 SOFTPWRCTL_SWPE2);
452 MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
453 SOFTPWRCTL_SWPE3);
445 break; 454 break;
446 } 455 }
447 456
@@ -451,7 +460,8 @@ bool CARDbRadioPowerOff(struct vnt_private *priv)
451 460
452 priv->bRadioOff = true; 461 priv->bRadioOff = true;
453 pr_debug("chester power off\n"); 462 pr_debug("chester power off\n");
454 MACvRegBitsOn(priv->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */ 463 MACvRegBitsOn(priv->PortOffset, MAC_REG_GPIOCTL0,
464 LED_ACTSET); /* LED issue */
455 return bResult; 465 return bResult;
456} 466}
457 467
@@ -488,21 +498,24 @@ bool CARDbRadioPowerOn(struct vnt_private *priv)
488 498
489 switch (priv->byRFType) { 499 switch (priv->byRFType) {
490 case RF_RFMD2959: 500 case RF_RFMD2959:
491 MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_TXPEINV); 501 MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
492 MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE1); 502 SOFTPWRCTL_TXPEINV);
503 MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
504 SOFTPWRCTL_SWPE1);
493 break; 505 break;
494 506
495 case RF_AIROHA: 507 case RF_AIROHA:
496 case RF_AL2230S: 508 case RF_AL2230S:
497 case RF_AIROHA7230: 509 case RF_AIROHA7230:
498 MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE2 | 510 MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
499 SOFTPWRCTL_SWPE3)); 511 (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3));
500 break; 512 break;
501 } 513 }
502 514
503 priv->bRadioOff = false; 515 priv->bRadioOff = false;
504 pr_debug("chester power on\n"); 516 pr_debug("chester power on\n");
505 MACvRegBitsOff(priv->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */ 517 MACvRegBitsOff(priv->PortOffset, MAC_REG_GPIOCTL0,
518 LED_ACTSET); /* LED issue */
506 return bResult; 519 return bResult;
507} 520}
508 521
@@ -717,55 +730,72 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
717 bb_type, 730 bb_type,
718 &byTxRate, 731 &byTxRate,
719 &byRsvTime); 732 &byRsvTime);
720 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_6, MAKEWORD(byTxRate, byRsvTime)); 733 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_6,
734 MAKEWORD(byTxRate, byRsvTime));
721 /* RSPINF_a_9 */ 735 /* RSPINF_a_9 */
722 s_vCalculateOFDMRParameter(RATE_9M, 736 s_vCalculateOFDMRParameter(RATE_9M,
723 bb_type, 737 bb_type,
724 &byTxRate, 738 &byTxRate,
725 &byRsvTime); 739 &byRsvTime);
726 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_9, MAKEWORD(byTxRate, byRsvTime)); 740 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_9,
741 MAKEWORD(byTxRate, byRsvTime));
727 /* RSPINF_a_12 */ 742 /* RSPINF_a_12 */
728 s_vCalculateOFDMRParameter(RATE_12M, 743 s_vCalculateOFDMRParameter(RATE_12M,
729 bb_type, 744 bb_type,
730 &byTxRate, 745 &byTxRate,
731 &byRsvTime); 746 &byRsvTime);
732 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_12, MAKEWORD(byTxRate, byRsvTime)); 747 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_12,
748 MAKEWORD(byTxRate, byRsvTime));
733 /* RSPINF_a_18 */ 749 /* RSPINF_a_18 */
734 s_vCalculateOFDMRParameter(RATE_18M, 750 s_vCalculateOFDMRParameter(RATE_18M,
735 bb_type, 751 bb_type,
736 &byTxRate, 752 &byTxRate,
737 &byRsvTime); 753 &byRsvTime);
738 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_18, MAKEWORD(byTxRate, byRsvTime)); 754 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_18,
755 MAKEWORD(byTxRate, byRsvTime));
739 /* RSPINF_a_24 */ 756 /* RSPINF_a_24 */
740 s_vCalculateOFDMRParameter(RATE_24M, 757 s_vCalculateOFDMRParameter(RATE_24M,
741 bb_type, 758 bb_type,
742 &byTxRate, 759 &byTxRate,
743 &byRsvTime); 760 &byRsvTime);
744 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_24, MAKEWORD(byTxRate, byRsvTime)); 761 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_24,
762 MAKEWORD(byTxRate, byRsvTime));
745 /* RSPINF_a_36 */ 763 /* RSPINF_a_36 */
746 s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_36M), 764 s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
765 (void *)priv,
766 RATE_36M),
747 bb_type, 767 bb_type,
748 &byTxRate, 768 &byTxRate,
749 &byRsvTime); 769 &byRsvTime);
750 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_36, MAKEWORD(byTxRate, byRsvTime)); 770 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_36,
771 MAKEWORD(byTxRate, byRsvTime));
751 /* RSPINF_a_48 */ 772 /* RSPINF_a_48 */
752 s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_48M), 773 s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
774 (void *)priv,
775 RATE_48M),
753 bb_type, 776 bb_type,
754 &byTxRate, 777 &byTxRate,
755 &byRsvTime); 778 &byRsvTime);
756 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_48, MAKEWORD(byTxRate, byRsvTime)); 779 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_48,
780 MAKEWORD(byTxRate, byRsvTime));
757 /* RSPINF_a_54 */ 781 /* RSPINF_a_54 */
758 s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_54M), 782 s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
783 (void *)priv,
784 RATE_54M),
759 bb_type, 785 bb_type,
760 &byTxRate, 786 &byTxRate,
761 &byRsvTime); 787 &byRsvTime);
762 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_54, MAKEWORD(byTxRate, byRsvTime)); 788 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_54,
789 MAKEWORD(byTxRate, byRsvTime));
763 /* RSPINF_a_72 */ 790 /* RSPINF_a_72 */
764 s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_54M), 791 s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
792 (void *)priv,
793 RATE_54M),
765 bb_type, 794 bb_type,
766 &byTxRate, 795 &byTxRate,
767 &byRsvTime); 796 &byRsvTime);
768 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_72, MAKEWORD(byTxRate, byRsvTime)); 797 VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_72,
798 MAKEWORD(byTxRate, byRsvTime));
769 /* Set to Page0 */ 799 /* Set to Page0 */
770 MACvSelectPage0(priv->PortOffset); 800 MACvSelectPage0(priv->PortOffset);
771 801
@@ -830,7 +860,8 @@ unsigned char CARDbyGetPktType(struct vnt_private *priv)
830 * 860 *
831 * Return Value: none 861 * Return Value: none
832 */ 862 */
833void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode) 863void CARDvSetLoopbackMode(struct vnt_private *priv,
864 unsigned short wLoopbackMode)
834{ 865{
835 switch (wLoopbackMode) { 866 switch (wLoopbackMode) {
836 case CARD_LB_NONE: 867 case CARD_LB_NONE:
@@ -965,7 +996,8 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
965 * 996 *
966 * Return Value: none 997 * Return Value: none
967 */ 998 */
968void CARDvSetFirstNextTBTT(struct vnt_private *priv, unsigned short wBeaconInterval) 999void CARDvSetFirstNextTBTT(struct vnt_private *priv,
1000 unsigned short wBeaconInterval)
969{ 1001{
970 void __iomem *dwIoBase = priv->PortOffset; 1002 void __iomem *dwIoBase = priv->PortOffset;
971 u64 qwNextTBTT = 0; 1003 u64 qwNextTBTT = 0;
@@ -993,7 +1025,8 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv, unsigned short wBeaconInter
993 * 1025 *
994 * Return Value: none 1026 * Return Value: none
995 */ 1027 */
996void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF, unsigned short wBeaconInterval) 1028void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
1029 unsigned short wBeaconInterval)
997{ 1030{
998 void __iomem *dwIoBase = priv->PortOffset; 1031 void __iomem *dwIoBase = priv->PortOffset;
999 1032
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 16cca49e680a..0203c7fd91a2 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -38,7 +38,8 @@
38 * LOBYTE is MAC LB mode, HIBYTE is MII LB mode 38 * LOBYTE is MAC LB mode, HIBYTE is MII LB mode
39 */ 39 */
40#define CARD_LB_NONE MAKEWORD(MAC_LB_NONE, 0) 40#define CARD_LB_NONE MAKEWORD(MAC_LB_NONE, 0)
41#define CARD_LB_MAC MAKEWORD(MAC_LB_INTERNAL, 0) /* PHY must ISO, avoid MAC loopback packet go out */ 41/* PHY must ISO, avoid MAC loopback packet go out */
42#define CARD_LB_MAC MAKEWORD(MAC_LB_INTERNAL, 0)
42#define CARD_LB_PHY MAKEWORD(MAC_LB_EXT, 0) 43#define CARD_LB_PHY MAKEWORD(MAC_LB_EXT, 0)
43 44
44#define DEFAULT_MSDU_LIFETIME 512 /* ms */ 45#define DEFAULT_MSDU_LIFETIME 512 /* ms */
@@ -71,8 +72,10 @@ void CARDvUpdateBasicTopRate(struct vnt_private *);
71bool CARDbIsOFDMinBasicRate(struct vnt_private *); 72bool CARDbIsOFDMinBasicRate(struct vnt_private *);
72void CARDvSetLoopbackMode(struct vnt_private *, unsigned short wLoopbackMode); 73void CARDvSetLoopbackMode(struct vnt_private *, unsigned short wLoopbackMode);
73bool CARDbSoftwareReset(struct vnt_private *); 74bool CARDbSoftwareReset(struct vnt_private *);
74void CARDvSetFirstNextTBTT(struct vnt_private *, unsigned short wBeaconInterval); 75void CARDvSetFirstNextTBTT(struct vnt_private *,
75void CARDvUpdateNextTBTT(struct vnt_private *, u64 qwTSF, unsigned short wBeaconInterval); 76 unsigned short wBeaconInterval);
77void CARDvUpdateNextTBTT(struct vnt_private *, u64 qwTSF,
78 unsigned short wBeaconInterval);
76bool CARDbGetCurrentTSF(struct vnt_private *, u64 *pqwCurrTSF); 79bool CARDbGetCurrentTSF(struct vnt_private *, u64 *pqwCurrTSF);
77u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval); 80u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval);
78u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2); 81u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2);
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 9fbc7172484e..2d7f6ae89164 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -157,7 +157,8 @@
157 157
158/* TD_INFO flags control bit */ 158/* TD_INFO flags control bit */
159#define TD_FLAGS_NETIF_SKB 0x01 /* check if need release skb */ 159#define TD_FLAGS_NETIF_SKB 0x01 /* check if need release skb */
160#define TD_FLAGS_PRIV_SKB 0x02 /* check if called from private skb (hostap) */ 160/* check if called from private skb (hostap) */
161#define TD_FLAGS_PRIV_SKB 0x02
161#define TD_FLAGS_PS_RETRY 0x04 /* check if PS STA frame re-transmit */ 162#define TD_FLAGS_PS_RETRY 0x04 /* check if PS STA frame re-transmit */
162 163
163/* 164/*
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 45196c6e9e12..8e13f7f41415 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -47,7 +47,8 @@
47 * 47 *
48 * Revision History: 48 * Revision History:
49 * 08-22-2003 Kyle Hsu : Porting MAC functions from sim53 49 * 08-22-2003 Kyle Hsu : Porting MAC functions from sim53
50 * 09-03-2003 Bryan YC Fan : Add MACvClearBusSusInd()& MACvEnableBusSusEn() 50 * 09-03-2003 Bryan YC Fan : Add MACvClearBusSusInd()&
51 * MACvEnableBusSusEn()
51 * 09-18-2003 Jerry Chen : Add MACvSetKeyEntry & MACvDisableKeyEntry 52 * 09-18-2003 Jerry Chen : Add MACvSetKeyEntry & MACvDisableKeyEntry
52 * 53 *
53 */ 54 */
@@ -138,7 +139,8 @@ bool MACbIsIntDisable(struct vnt_private *priv)
138 * Return Value: none 139 * Return Value: none
139 * 140 *
140 */ 141 */
141void MACvSetShortRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit) 142void MACvSetShortRetryLimit(struct vnt_private *priv,
143 unsigned char byRetryLimit)
142{ 144{
143 void __iomem *io_base = priv->PortOffset; 145 void __iomem *io_base = priv->PortOffset;
144 /* set SRT */ 146 /* set SRT */
@@ -160,7 +162,8 @@ void MACvSetShortRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit
160 * Return Value: none 162 * Return Value: none
161 * 163 *
162 */ 164 */
163void MACvSetLongRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit) 165void MACvSetLongRetryLimit(struct vnt_private *priv,
166 unsigned char byRetryLimit)
164{ 167{
165 void __iomem *io_base = priv->PortOffset; 168 void __iomem *io_base = priv->PortOffset;
166 /* set LRT */ 169 /* set LRT */
@@ -304,7 +307,8 @@ bool MACbSoftwareReset(struct vnt_private *priv)
304 307
305/* 308/*
306 * Description: 309 * Description:
307 * save some important register's value, then do reset, then restore register's value 310 * save some important register's value, then do reset, then restore
311 * register's value
308 * 312 *
309 * Parameters: 313 * Parameters:
310 * In: 314 * In:
@@ -738,7 +742,8 @@ void MACvTimer0MicroSDelay(struct vnt_private *priv, unsigned int uDelay)
738 * Return Value: none 742 * Return Value: none
739 * 743 *
740 */ 744 */
741void MACvOneShotTimer1MicroSec(struct vnt_private *priv, unsigned int uDelayTime) 745void MACvOneShotTimer1MicroSec(struct vnt_private *priv,
746 unsigned int uDelayTime)
742{ 747{
743 void __iomem *io_base = priv->PortOffset; 748 void __iomem *io_base = priv->PortOffset;
744 749
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index 9ec49e653b61..ee992772066f 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -72,7 +72,8 @@
72 * Return Value: data read 72 * Return Value: data read
73 * 73 *
74 */ 74 */
75unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, unsigned char byContntOffset) 75unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
76 unsigned char byContntOffset)
76{ 77{
77 unsigned short wDelay, wNoACK; 78 unsigned short wDelay, wNoACK;
78 unsigned char byWait; 79 unsigned char byWait;
@@ -124,7 +125,8 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
124 125
125 /* ii = Rom Address */ 126 /* ii = Rom Address */
126 for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) { 127 for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
127 *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase, (unsigned char)ii); 128 *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase,
129 (unsigned char)ii);
128 pbyEepromRegs++; 130 pbyEepromRegs++;
129 } 131 }
130} 132}
@@ -141,7 +143,8 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
141 * Return Value: none 143 * Return Value: none
142 * 144 *
143 */ 145 */
144void SROMvReadEtherAddress(void __iomem *dwIoBase, unsigned char *pbyEtherAddress) 146void SROMvReadEtherAddress(void __iomem *dwIoBase,
147 unsigned char *pbyEtherAddress)
145{ 148{
146 unsigned char ii; 149 unsigned char ii;
147 150
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 9417c935fc30..882fe54ce41d 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -138,7 +138,7 @@ static const u16 vnt_frame_time[MAX_RATE] = {
138 * 138 *
139 */ 139 */
140unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type, 140unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
141 unsigned int frame_length, u16 tx_rate) 141 unsigned int frame_length, u16 tx_rate)
142{ 142{
143 unsigned int frame_time; 143 unsigned int frame_time;
144 unsigned int preamble; 144 unsigned int preamble;
@@ -195,7 +195,7 @@ unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
195 * 195 *
196 */ 196 */
197void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length, 197void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
198 u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy) 198 u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy)
199{ 199{
200 u32 bit_count; 200 u32 bit_count;
201 u32 count = 0; 201 u32 count = 0;
@@ -355,7 +355,7 @@ void vnt_set_antenna_mode(struct vnt_private *priv, u8 antenna_mode)
355 } 355 }
356 356
357 vnt_control_out(priv, MESSAGE_TYPE_SET_ANTMD, 357 vnt_control_out(priv, MESSAGE_TYPE_SET_ANTMD,
358 (u16)antenna_mode, 0, 0, NULL); 358 (u16)antenna_mode, 0, 0, NULL);
359} 359}
360 360
361/* 361/*
@@ -383,7 +383,7 @@ int vnt_vt3184_init(struct vnt_private *priv)
383 u8 data; 383 u8 data;
384 384
385 status = vnt_control_in(priv, MESSAGE_TYPE_READ, 0, 385 status = vnt_control_in(priv, MESSAGE_TYPE_READ, 0,
386 MESSAGE_REQUEST_EEPROM, EEP_MAX_CONTEXT_SIZE, 386 MESSAGE_REQUEST_EEPROM, EEP_MAX_CONTEXT_SIZE,
387 priv->eeprom); 387 priv->eeprom);
388 if (status != STATUS_SUCCESS) 388 if (status != STATUS_SUCCESS)
389 return false; 389 return false;
@@ -393,7 +393,7 @@ int vnt_vt3184_init(struct vnt_private *priv)
393 dev_dbg(&priv->usb->dev, "RF Type %d\n", priv->rf_type); 393 dev_dbg(&priv->usb->dev, "RF Type %d\n", priv->rf_type);
394 394
395 if ((priv->rf_type == RF_AL2230) || 395 if ((priv->rf_type == RF_AL2230) ||
396 (priv->rf_type == RF_AL2230S)) { 396 (priv->rf_type == RF_AL2230S)) {
397 priv->bb_rx_conf = vnt_vt3184_al2230[10]; 397 priv->bb_rx_conf = vnt_vt3184_al2230[10];
398 length = sizeof(vnt_vt3184_al2230); 398 length = sizeof(vnt_vt3184_al2230);
399 addr = vnt_vt3184_al2230; 399 addr = vnt_vt3184_al2230;
@@ -457,21 +457,21 @@ int vnt_vt3184_init(struct vnt_private *priv)
457 memcpy(array, addr, length); 457 memcpy(array, addr, length);
458 458
459 vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0, 459 vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
460 MESSAGE_REQUEST_BBREG, length, array); 460 MESSAGE_REQUEST_BBREG, length, array);
461 461
462 memcpy(array, agc, length_agc); 462 memcpy(array, agc, length_agc);
463 463
464 vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0, 464 vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
465 MESSAGE_REQUEST_BBAGC, length_agc, array); 465 MESSAGE_REQUEST_BBAGC, length_agc, array);
466 466
467 if ((priv->rf_type == RF_VT3226) || 467 if ((priv->rf_type == RF_VT3226) ||
468 (priv->rf_type == RF_VT3342A0)) { 468 (priv->rf_type == RF_VT3342A0)) {
469 vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG, 469 vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
470 MAC_REG_ITRTMSET, 0x23); 470 MAC_REG_ITRTMSET, 0x23);
471 vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01); 471 vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01);
472 } else if (priv->rf_type == RF_VT3226D0) { 472 } else if (priv->rf_type == RF_VT3226D0) {
473 vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG, 473 vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
474 MAC_REG_ITRTMSET, 0x11); 474 MAC_REG_ITRTMSET, 0x11);
475 vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01); 475 vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01);
476 } 476 }
477 477
@@ -482,12 +482,12 @@ int vnt_vt3184_init(struct vnt_private *priv)
482 482
483 /* Fix for TX USB resets from vendors driver */ 483 /* Fix for TX USB resets from vendors driver */
484 vnt_control_in(priv, MESSAGE_TYPE_READ, USB_REG4, 484 vnt_control_in(priv, MESSAGE_TYPE_READ, USB_REG4,
485 MESSAGE_REQUEST_MEM, sizeof(data), &data); 485 MESSAGE_REQUEST_MEM, sizeof(data), &data);
486 486
487 data |= 0x2; 487 data |= 0x2;
488 488
489 vnt_control_out(priv, MESSAGE_TYPE_WRITE, USB_REG4, 489 vnt_control_out(priv, MESSAGE_TYPE_WRITE, USB_REG4,
490 MESSAGE_REQUEST_MEM, sizeof(data), &data); 490 MESSAGE_REQUEST_MEM, sizeof(data), &data);
491 491
492 return true; 492 return true;
493} 493}
@@ -814,7 +814,7 @@ void vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning)
814 priv->bb_pre_ed_index = ed_inx; 814 priv->bb_pre_ed_index = ed_inx;
815 815
816 dev_dbg(&priv->usb->dev, "%s bb_pre_ed_rssi %d\n", 816 dev_dbg(&priv->usb->dev, "%s bb_pre_ed_rssi %d\n",
817 __func__, priv->bb_pre_ed_rssi); 817 __func__, priv->bb_pre_ed_rssi);
818 818
819 if (!cr_201 && !cr_206) 819 if (!cr_201 && !cr_206)
820 return; 820 return;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index fc5fe4ec6d05..ac4fecb30d0e 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -238,7 +238,7 @@ static int vnt_init_registers(struct vnt_private *priv)
238 priv->tx_antenna_mode = ANT_B; 238 priv->tx_antenna_mode = ANT_B;
239 priv->rx_antenna_sel = 1; 239 priv->rx_antenna_sel = 1;
240 240
241 if (priv->tx_rx_ant_inv == true) 241 if (priv->tx_rx_ant_inv)
242 priv->rx_antenna_mode = ANT_A; 242 priv->rx_antenna_mode = ANT_A;
243 else 243 else
244 priv->rx_antenna_mode = ANT_B; 244 priv->rx_antenna_mode = ANT_B;
@@ -248,14 +248,14 @@ static int vnt_init_registers(struct vnt_private *priv)
248 if (antenna & EEP_ANTENNA_AUX) { 248 if (antenna & EEP_ANTENNA_AUX) {
249 priv->tx_antenna_mode = ANT_A; 249 priv->tx_antenna_mode = ANT_A;
250 250
251 if (priv->tx_rx_ant_inv == true) 251 if (priv->tx_rx_ant_inv)
252 priv->rx_antenna_mode = ANT_B; 252 priv->rx_antenna_mode = ANT_B;
253 else 253 else
254 priv->rx_antenna_mode = ANT_A; 254 priv->rx_antenna_mode = ANT_A;
255 } else { 255 } else {
256 priv->tx_antenna_mode = ANT_B; 256 priv->tx_antenna_mode = ANT_B;
257 257
258 if (priv->tx_rx_ant_inv == true) 258 if (priv->tx_rx_ant_inv)
259 priv->rx_antenna_mode = ANT_A; 259 priv->rx_antenna_mode = ANT_A;
260 else 260 else
261 priv->rx_antenna_mode = ANT_B; 261 priv->rx_antenna_mode = ANT_B;
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 4846a898d39b..95faaeb7432a 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -97,7 +97,7 @@ void vnt_run_command(struct work_struct *work)
97 if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) 97 if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
98 return; 98 return;
99 99
100 if (priv->cmd_running != true) 100 if (!priv->cmd_running)
101 return; 101 return;
102 102
103 switch (priv->command_state) { 103 switch (priv->command_state) {
@@ -143,13 +143,13 @@ void vnt_run_command(struct work_struct *work)
143 143
144 if (priv->rx_antenna_sel == 0) { 144 if (priv->rx_antenna_sel == 0) {
145 priv->rx_antenna_sel = 1; 145 priv->rx_antenna_sel = 1;
146 if (priv->tx_rx_ant_inv == true) 146 if (priv->tx_rx_ant_inv)
147 vnt_set_antenna_mode(priv, ANT_RXA); 147 vnt_set_antenna_mode(priv, ANT_RXA);
148 else 148 else
149 vnt_set_antenna_mode(priv, ANT_RXB); 149 vnt_set_antenna_mode(priv, ANT_RXB);
150 } else { 150 } else {
151 priv->rx_antenna_sel = 0; 151 priv->rx_antenna_sel = 0;
152 if (priv->tx_rx_ant_inv == true) 152 if (priv->tx_rx_ant_inv)
153 vnt_set_antenna_mode(priv, ANT_RXB); 153 vnt_set_antenna_mode(priv, ANT_RXB);
154 else 154 else
155 vnt_set_antenna_mode(priv, ANT_RXA); 155 vnt_set_antenna_mode(priv, ANT_RXA);
@@ -174,7 +174,7 @@ int vnt_schedule_command(struct vnt_private *priv, enum vnt_cmd command)
174 ADD_ONE_WITH_WRAP_AROUND(priv->cmd_enqueue_idx, CMD_Q_SIZE); 174 ADD_ONE_WITH_WRAP_AROUND(priv->cmd_enqueue_idx, CMD_Q_SIZE);
175 priv->free_cmd_queue--; 175 priv->free_cmd_queue--;
176 176
177 if (priv->cmd_running == false) 177 if (!priv->cmd_running)
178 vnt_cmd_complete(priv); 178 vnt_cmd_complete(priv);
179 179
180 return true; 180 return true;
diff --git a/drivers/staging/wilc1000/Kconfig b/drivers/staging/wilc1000/Kconfig
index dce9cee9134a..73f7fefd3bc3 100644
--- a/drivers/staging/wilc1000/Kconfig
+++ b/drivers/staging/wilc1000/Kconfig
@@ -1,6 +1,5 @@
1config WILC1000 1config WILC1000
2 tristate 2 tristate
3 select WIRELESS_EXT
4 ---help--- 3 ---help---
5 This module only support IEEE 802.11n WiFi. 4 This module only support IEEE 802.11n WiFi.
6 5
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 0a922c7c7cbf..953584248e63 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -2,6 +2,7 @@
2#include <linux/time.h> 2#include <linux/time.h>
3#include <linux/kthread.h> 3#include <linux/kthread.h>
4#include <linux/delay.h> 4#include <linux/delay.h>
5#include <linux/completion.h>
5#include "host_interface.h" 6#include "host_interface.h"
6#include "coreconfigurator.h" 7#include "coreconfigurator.h"
7#include "wilc_wlan.h" 8#include "wilc_wlan.h"
@@ -230,10 +231,10 @@ bool wilc_optaining_ip;
230static u8 P2P_LISTEN_STATE; 231static u8 P2P_LISTEN_STATE;
231static struct task_struct *hif_thread_handler; 232static struct task_struct *hif_thread_handler;
232static struct message_queue hif_msg_q; 233static struct message_queue hif_msg_q;
233static struct semaphore hif_sema_thread; 234static struct completion hif_thread_comp;
234static struct semaphore hif_sema_driver; 235static struct completion hif_driver_comp;
235static struct semaphore hif_sema_wait_response; 236static struct completion hif_wait_response;
236static struct semaphore hif_sema_deinit; 237static struct mutex hif_deinit_lock;
237static struct timer_list periodic_rssi; 238static struct timer_list periodic_rssi;
238 239
239u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN]; 240u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
@@ -262,6 +263,7 @@ static struct wilc_vif *join_req_vif;
262 263
263static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo); 264static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo);
264static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx); 265static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx);
266static s32 Handle_ScanDone(struct wilc_vif *vif, enum scan_event enuEvent);
265 267
266/* The u8IfIdx starts from 0 to NUM_CONCURRENT_IFC -1, but 0 index used as 268/* The u8IfIdx starts from 0 to NUM_CONCURRENT_IFC -1, but 0 index used as
267 * special purpose in wilc device, so we add 1 to the index to starts from 1. 269 * special purpose in wilc device, so we add 1 to the index to starts from 1.
@@ -305,10 +307,10 @@ static void handle_set_channel(struct wilc_vif *vif,
305 netdev_err(vif->ndev, "Failed to set channel\n"); 307 netdev_err(vif->ndev, "Failed to set channel\n");
306} 308}
307 309
308static s32 handle_set_wfi_drv_handler(struct wilc_vif *vif, 310static void handle_set_wfi_drv_handler(struct wilc_vif *vif,
309 struct drv_handler *hif_drv_handler) 311 struct drv_handler *hif_drv_handler)
310{ 312{
311 s32 result = 0; 313 int ret = 0;
312 struct wid wid; 314 struct wid wid;
313 315
314 wid.id = (u16)WID_SET_DRV_HANDLER; 316 wid.id = (u16)WID_SET_DRV_HANDLER;
@@ -316,24 +318,20 @@ static s32 handle_set_wfi_drv_handler(struct wilc_vif *vif,
316 wid.val = (s8 *)hif_drv_handler; 318 wid.val = (s8 *)hif_drv_handler;
317 wid.size = sizeof(*hif_drv_handler); 319 wid.size = sizeof(*hif_drv_handler);
318 320
319 result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, 321 ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
320 hif_drv_handler->handler); 322 hif_drv_handler->handler);
321 323
322 if (!hif_drv_handler->handler) 324 if (!hif_drv_handler->handler)
323 up(&hif_sema_driver); 325 complete(&hif_driver_comp);
324 326
325 if (result) { 327 if (ret)
326 netdev_err(vif->ndev, "Failed to set driver handler\n"); 328 netdev_err(vif->ndev, "Failed to set driver handler\n");
327 return -EINVAL;
328 }
329
330 return result;
331} 329}
332 330
333static s32 handle_set_operation_mode(struct wilc_vif *vif, 331static void handle_set_operation_mode(struct wilc_vif *vif,
334 struct op_mode *hif_op_mode) 332 struct op_mode *hif_op_mode)
335{ 333{
336 s32 result = 0; 334 int ret = 0;
337 struct wid wid; 335 struct wid wid;
338 336
339 wid.id = (u16)WID_SET_OPERATION_MODE; 337 wid.id = (u16)WID_SET_OPERATION_MODE;
@@ -341,23 +339,19 @@ static s32 handle_set_operation_mode(struct wilc_vif *vif,
341 wid.val = (s8 *)&hif_op_mode->mode; 339 wid.val = (s8 *)&hif_op_mode->mode;
342 wid.size = sizeof(u32); 340 wid.size = sizeof(u32);
343 341
344 result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, 342 ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
345 wilc_get_vif_idx(vif)); 343 wilc_get_vif_idx(vif));
346 344
347 if ((hif_op_mode->mode) == IDLE_MODE) 345 if ((hif_op_mode->mode) == IDLE_MODE)
348 up(&hif_sema_driver); 346 complete(&hif_driver_comp);
349 347
350 if (result) { 348 if (ret)
351 netdev_err(vif->ndev, "Failed to set driver handler\n"); 349 netdev_err(vif->ndev, "Failed to set driver handler\n");
352 return -EINVAL;
353 }
354
355 return result;
356} 350}
357 351
358static s32 handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx) 352static void handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
359{ 353{
360 s32 result = 0; 354 int ret = 0;
361 struct wid wid; 355 struct wid wid;
362 char firmware_ip_addr[4] = {0}; 356 char firmware_ip_addr[4] = {0};
363 357
@@ -371,22 +365,18 @@ static s32 handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
371 wid.val = (u8 *)ip_addr; 365 wid.val = (u8 *)ip_addr;
372 wid.size = IP_ALEN; 366 wid.size = IP_ALEN;
373 367
374 result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, 368 ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
375 wilc_get_vif_idx(vif)); 369 wilc_get_vif_idx(vif));
376 370
377 host_int_get_ipaddress(vif, firmware_ip_addr, idx); 371 host_int_get_ipaddress(vif, firmware_ip_addr, idx);
378 372
379 if (result) { 373 if (ret)
380 netdev_err(vif->ndev, "Failed to set IP address\n"); 374 netdev_err(vif->ndev, "Failed to set IP address\n");
381 return -EINVAL;
382 }
383
384 return result;
385} 375}
386 376
387static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx) 377static void handle_get_ip_address(struct wilc_vif *vif, u8 idx)
388{ 378{
389 s32 result = 0; 379 int ret = 0;
390 struct wid wid; 380 struct wid wid;
391 381
392 wid.id = (u16)WID_IP_ADDRESS; 382 wid.id = (u16)WID_IP_ADDRESS;
@@ -394,8 +384,8 @@ static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx)
394 wid.val = kmalloc(IP_ALEN, GFP_KERNEL); 384 wid.val = kmalloc(IP_ALEN, GFP_KERNEL);
395 wid.size = IP_ALEN; 385 wid.size = IP_ALEN;
396 386
397 result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1, 387 ret = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
398 wilc_get_vif_idx(vif)); 388 wilc_get_vif_idx(vif));
399 389
400 memcpy(get_ip[idx], wid.val, IP_ALEN); 390 memcpy(get_ip[idx], wid.val, IP_ALEN);
401 391
@@ -404,18 +394,14 @@ static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx)
404 if (memcmp(get_ip[idx], set_ip[idx], IP_ALEN) != 0) 394 if (memcmp(get_ip[idx], set_ip[idx], IP_ALEN) != 0)
405 wilc_setup_ipaddress(vif, set_ip[idx], idx); 395 wilc_setup_ipaddress(vif, set_ip[idx], idx);
406 396
407 if (result != 0) { 397 if (ret)
408 netdev_err(vif->ndev, "Failed to get IP address\n"); 398 netdev_err(vif->ndev, "Failed to get IP address\n");
409 return -EINVAL;
410 }
411
412 return result;
413} 399}
414 400
415static s32 handle_get_mac_address(struct wilc_vif *vif, 401static void handle_get_mac_address(struct wilc_vif *vif,
416 struct get_mac_addr *get_mac_addr) 402 struct get_mac_addr *get_mac_addr)
417{ 403{
418 s32 result = 0; 404 int ret = 0;
419 struct wid wid; 405 struct wid wid;
420 406
421 wid.id = (u16)WID_MAC_ADDR; 407 wid.id = (u16)WID_MAC_ADDR;
@@ -423,16 +409,12 @@ static s32 handle_get_mac_address(struct wilc_vif *vif,
423 wid.val = get_mac_addr->mac_addr; 409 wid.val = get_mac_addr->mac_addr;
424 wid.size = ETH_ALEN; 410 wid.size = ETH_ALEN;
425 411
426 result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1, 412 ret = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
427 wilc_get_vif_idx(vif)); 413 wilc_get_vif_idx(vif));
428 414
429 if (result) { 415 if (ret)
430 netdev_err(vif->ndev, "Failed to get mac address\n"); 416 netdev_err(vif->ndev, "Failed to get mac address\n");
431 result = -EFAULT; 417 complete(&hif_wait_response);
432 }
433 up(&hif_sema_wait_response);
434
435 return result;
436} 418}
437 419
438static s32 handle_cfg_param(struct wilc_vif *vif, 420static s32 handle_cfg_param(struct wilc_vif *vif,
@@ -455,7 +437,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
455 } else { 437 } else {
456 netdev_err(vif->ndev, "check value 6 over\n"); 438 netdev_err(vif->ndev, "check value 6 over\n");
457 result = -EINVAL; 439 result = -EINVAL;
458 goto ERRORHANDLER; 440 goto unlock;
459 } 441 }
460 i++; 442 i++;
461 } 443 }
@@ -471,7 +453,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
471 } else { 453 } else {
472 netdev_err(vif->ndev, "Impossible value\n"); 454 netdev_err(vif->ndev, "Impossible value\n");
473 result = -EINVAL; 455 result = -EINVAL;
474 goto ERRORHANDLER; 456 goto unlock;
475 } 457 }
476 i++; 458 i++;
477 } 459 }
@@ -486,7 +468,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
486 } else { 468 } else {
487 netdev_err(vif->ndev, "Range(1 ~ 65535) over\n"); 469 netdev_err(vif->ndev, "Range(1 ~ 65535) over\n");
488 result = -EINVAL; 470 result = -EINVAL;
489 goto ERRORHANDLER; 471 goto unlock;
490 } 472 }
491 i++; 473 i++;
492 } 474 }
@@ -500,7 +482,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
500 } else { 482 } else {
501 netdev_err(vif->ndev, "Invalid power mode\n"); 483 netdev_err(vif->ndev, "Invalid power mode\n");
502 result = -EINVAL; 484 result = -EINVAL;
503 goto ERRORHANDLER; 485 goto unlock;
504 } 486 }
505 i++; 487 i++;
506 } 488 }
@@ -515,7 +497,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
515 } else { 497 } else {
516 netdev_err(vif->ndev, "Range(1~256) over\n"); 498 netdev_err(vif->ndev, "Range(1~256) over\n");
517 result = -EINVAL; 499 result = -EINVAL;
518 goto ERRORHANDLER; 500 goto unlock;
519 } 501 }
520 i++; 502 i++;
521 } 503 }
@@ -530,7 +512,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
530 } else { 512 } else {
531 netdev_err(vif->ndev, "Range(1~256) over\n"); 513 netdev_err(vif->ndev, "Range(1~256) over\n");
532 result = -EINVAL; 514 result = -EINVAL;
533 goto ERRORHANDLER; 515 goto unlock;
534 } 516 }
535 i++; 517 i++;
536 } 518 }
@@ -545,7 +527,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
545 } else { 527 } else {
546 netdev_err(vif->ndev, "Threshold Range fail\n"); 528 netdev_err(vif->ndev, "Threshold Range fail\n");
547 result = -EINVAL; 529 result = -EINVAL;
548 goto ERRORHANDLER; 530 goto unlock;
549 } 531 }
550 i++; 532 i++;
551 } 533 }
@@ -560,7 +542,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
560 } else { 542 } else {
561 netdev_err(vif->ndev, "Threshold Range fail\n"); 543 netdev_err(vif->ndev, "Threshold Range fail\n");
562 result = -EINVAL; 544 result = -EINVAL;
563 goto ERRORHANDLER; 545 goto unlock;
564 } 546 }
565 i++; 547 i++;
566 } 548 }
@@ -574,7 +556,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
574 } else { 556 } else {
575 netdev_err(vif->ndev, "Preamle Range(0~2) over\n"); 557 netdev_err(vif->ndev, "Preamle Range(0~2) over\n");
576 result = -EINVAL; 558 result = -EINVAL;
577 goto ERRORHANDLER; 559 goto unlock;
578 } 560 }
579 i++; 561 i++;
580 } 562 }
@@ -588,7 +570,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
588 } else { 570 } else {
589 netdev_err(vif->ndev, "Short slot(2) over\n"); 571 netdev_err(vif->ndev, "Short slot(2) over\n");
590 result = -EINVAL; 572 result = -EINVAL;
591 goto ERRORHANDLER; 573 goto unlock;
592 } 574 }
593 i++; 575 i++;
594 } 576 }
@@ -602,7 +584,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
602 } else { 584 } else {
603 netdev_err(vif->ndev, "TXOP prot disable\n"); 585 netdev_err(vif->ndev, "TXOP prot disable\n");
604 result = -EINVAL; 586 result = -EINVAL;
605 goto ERRORHANDLER; 587 goto unlock;
606 } 588 }
607 i++; 589 i++;
608 } 590 }
@@ -617,7 +599,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
617 } else { 599 } else {
618 netdev_err(vif->ndev, "Beacon interval(1~65535)fail\n"); 600 netdev_err(vif->ndev, "Beacon interval(1~65535)fail\n");
619 result = -EINVAL; 601 result = -EINVAL;
620 goto ERRORHANDLER; 602 goto unlock;
621 } 603 }
622 i++; 604 i++;
623 } 605 }
@@ -632,7 +614,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
632 } else { 614 } else {
633 netdev_err(vif->ndev, "DTIM range(1~255) fail\n"); 615 netdev_err(vif->ndev, "DTIM range(1~255) fail\n");
634 result = -EINVAL; 616 result = -EINVAL;
635 goto ERRORHANDLER; 617 goto unlock;
636 } 618 }
637 i++; 619 i++;
638 } 620 }
@@ -646,7 +628,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
646 } else { 628 } else {
647 netdev_err(vif->ndev, "Site survey disable\n"); 629 netdev_err(vif->ndev, "Site survey disable\n");
648 result = -EINVAL; 630 result = -EINVAL;
649 goto ERRORHANDLER; 631 goto unlock;
650 } 632 }
651 i++; 633 i++;
652 } 634 }
@@ -661,7 +643,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
661 } else { 643 } else {
662 netdev_err(vif->ndev, "Site scan time(1~65535) over\n"); 644 netdev_err(vif->ndev, "Site scan time(1~65535) over\n");
663 result = -EINVAL; 645 result = -EINVAL;
664 goto ERRORHANDLER; 646 goto unlock;
665 } 647 }
666 i++; 648 i++;
667 } 649 }
@@ -676,7 +658,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
676 } else { 658 } else {
677 netdev_err(vif->ndev, "Active time(1~65535) over\n"); 659 netdev_err(vif->ndev, "Active time(1~65535) over\n");
678 result = -EINVAL; 660 result = -EINVAL;
679 goto ERRORHANDLER; 661 goto unlock;
680 } 662 }
681 i++; 663 i++;
682 } 664 }
@@ -691,7 +673,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
691 } else { 673 } else {
692 netdev_err(vif->ndev, "Passive time(1~65535) over\n"); 674 netdev_err(vif->ndev, "Passive time(1~65535) over\n");
693 result = -EINVAL; 675 result = -EINVAL;
694 goto ERRORHANDLER; 676 goto unlock;
695 } 677 }
696 i++; 678 i++;
697 } 679 }
@@ -713,7 +695,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
713 } else { 695 } else {
714 netdev_err(vif->ndev, "out of TX rate\n"); 696 netdev_err(vif->ndev, "out of TX rate\n");
715 result = -EINVAL; 697 result = -EINVAL;
716 goto ERRORHANDLER; 698 goto unlock;
717 } 699 }
718 i++; 700 i++;
719 } 701 }
@@ -724,28 +706,24 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
724 if (result) 706 if (result)
725 netdev_err(vif->ndev, "Error in setting CFG params\n"); 707 netdev_err(vif->ndev, "Error in setting CFG params\n");
726 708
727ERRORHANDLER: 709unlock:
728 mutex_unlock(&hif_drv->cfg_values_lock); 710 mutex_unlock(&hif_drv->cfg_values_lock);
729 return result; 711 return result;
730} 712}
731 713
732static s32 Handle_ScanDone(struct wilc_vif *vif, 714static s32 handle_scan(struct wilc_vif *vif, struct scan_attr *scan_info)
733 enum scan_event enuEvent);
734
735static s32 Handle_Scan(struct wilc_vif *vif,
736 struct scan_attr *pstrHostIFscanAttr)
737{ 715{
738 s32 result = 0; 716 s32 result = 0;
739 struct wid strWIDList[5]; 717 struct wid wid_list[5];
740 u32 u32WidsCount = 0; 718 u32 index = 0;
741 u32 i; 719 u32 i;
742 u8 *pu8Buffer; 720 u8 *buffer;
743 u8 valuesize = 0; 721 u8 valuesize = 0;
744 u8 *pu8HdnNtwrksWidVal = NULL; 722 u8 *pu8HdnNtwrksWidVal = NULL;
745 struct host_if_drv *hif_drv = vif->hif_drv; 723 struct host_if_drv *hif_drv = vif->hif_drv;
746 724
747 hif_drv->usr_scan_req.scan_result = pstrHostIFscanAttr->result; 725 hif_drv->usr_scan_req.scan_result = scan_info->result;
748 hif_drv->usr_scan_req.arg = pstrHostIFscanAttr->arg; 726 hif_drv->usr_scan_req.arg = scan_info->arg;
749 727
750 if ((hif_drv->hif_state >= HOST_IF_SCANNING) && 728 if ((hif_drv->hif_state >= HOST_IF_SCANNING) &&
751 (hif_drv->hif_state < HOST_IF_CONNECTED)) { 729 (hif_drv->hif_state < HOST_IF_CONNECTED)) {
@@ -762,72 +740,70 @@ static s32 Handle_Scan(struct wilc_vif *vif,
762 740
763 hif_drv->usr_scan_req.rcvd_ch_cnt = 0; 741 hif_drv->usr_scan_req.rcvd_ch_cnt = 0;
764 742
765 strWIDList[u32WidsCount].id = (u16)WID_SSID_PROBE_REQ; 743 wid_list[index].id = (u16)WID_SSID_PROBE_REQ;
766 strWIDList[u32WidsCount].type = WID_STR; 744 wid_list[index].type = WID_STR;
767 745
768 for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++) 746 for (i = 0; i < scan_info->hidden_network.n_ssids; i++)
769 valuesize += ((pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len) + 1); 747 valuesize += ((scan_info->hidden_network.net_info[i].ssid_len) + 1);
770 pu8HdnNtwrksWidVal = kmalloc(valuesize + 1, GFP_KERNEL); 748 pu8HdnNtwrksWidVal = kmalloc(valuesize + 1, GFP_KERNEL);
771 strWIDList[u32WidsCount].val = pu8HdnNtwrksWidVal; 749 wid_list[index].val = pu8HdnNtwrksWidVal;
772 if (strWIDList[u32WidsCount].val) { 750 if (wid_list[index].val) {
773 pu8Buffer = strWIDList[u32WidsCount].val; 751 buffer = wid_list[index].val;
774 752
775 *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.n_ssids; 753 *buffer++ = scan_info->hidden_network.n_ssids;
776 754
777 for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++) { 755 for (i = 0; i < scan_info->hidden_network.n_ssids; i++) {
778 *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len; 756 *buffer++ = scan_info->hidden_network.net_info[i].ssid_len;
779 memcpy(pu8Buffer, pstrHostIFscanAttr->hidden_network.net_info[i].ssid, pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len); 757 memcpy(buffer, scan_info->hidden_network.net_info[i].ssid, scan_info->hidden_network.net_info[i].ssid_len);
780 pu8Buffer += pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len; 758 buffer += scan_info->hidden_network.net_info[i].ssid_len;
781 } 759 }
782 760
783 strWIDList[u32WidsCount].size = (s32)(valuesize + 1); 761 wid_list[index].size = (s32)(valuesize + 1);
784 u32WidsCount++; 762 index++;
785 } 763 }
786 764
787 { 765 wid_list[index].id = WID_INFO_ELEMENT_PROBE;
788 strWIDList[u32WidsCount].id = WID_INFO_ELEMENT_PROBE; 766 wid_list[index].type = WID_BIN_DATA;
789 strWIDList[u32WidsCount].type = WID_BIN_DATA; 767 wid_list[index].val = scan_info->ies;
790 strWIDList[u32WidsCount].val = pstrHostIFscanAttr->ies; 768 wid_list[index].size = scan_info->ies_len;
791 strWIDList[u32WidsCount].size = pstrHostIFscanAttr->ies_len; 769 index++;
792 u32WidsCount++;
793 }
794 770
795 strWIDList[u32WidsCount].id = WID_SCAN_TYPE; 771 wid_list[index].id = WID_SCAN_TYPE;
796 strWIDList[u32WidsCount].type = WID_CHAR; 772 wid_list[index].type = WID_CHAR;
797 strWIDList[u32WidsCount].size = sizeof(char); 773 wid_list[index].size = sizeof(char);
798 strWIDList[u32WidsCount].val = (s8 *)&pstrHostIFscanAttr->type; 774 wid_list[index].val = (s8 *)&scan_info->type;
799 u32WidsCount++; 775 index++;
800 776
801 strWIDList[u32WidsCount].id = WID_SCAN_CHANNEL_LIST; 777 wid_list[index].id = WID_SCAN_CHANNEL_LIST;
802 strWIDList[u32WidsCount].type = WID_BIN_DATA; 778 wid_list[index].type = WID_BIN_DATA;
803 779
804 if (pstrHostIFscanAttr->ch_freq_list && 780 if (scan_info->ch_freq_list &&
805 pstrHostIFscanAttr->ch_list_len > 0) { 781 scan_info->ch_list_len > 0) {
806 int i; 782 int i;
807 783
808 for (i = 0; i < pstrHostIFscanAttr->ch_list_len; i++) { 784 for (i = 0; i < scan_info->ch_list_len; i++) {
809 if (pstrHostIFscanAttr->ch_freq_list[i] > 0) 785 if (scan_info->ch_freq_list[i] > 0)
810 pstrHostIFscanAttr->ch_freq_list[i] = pstrHostIFscanAttr->ch_freq_list[i] - 1; 786 scan_info->ch_freq_list[i] = scan_info->ch_freq_list[i] - 1;
811 } 787 }
812 } 788 }
813 789
814 strWIDList[u32WidsCount].val = pstrHostIFscanAttr->ch_freq_list; 790 wid_list[index].val = scan_info->ch_freq_list;
815 strWIDList[u32WidsCount].size = pstrHostIFscanAttr->ch_list_len; 791 wid_list[index].size = scan_info->ch_list_len;
816 u32WidsCount++; 792 index++;
817 793
818 strWIDList[u32WidsCount].id = WID_START_SCAN_REQ; 794 wid_list[index].id = WID_START_SCAN_REQ;
819 strWIDList[u32WidsCount].type = WID_CHAR; 795 wid_list[index].type = WID_CHAR;
820 strWIDList[u32WidsCount].size = sizeof(char); 796 wid_list[index].size = sizeof(char);
821 strWIDList[u32WidsCount].val = (s8 *)&pstrHostIFscanAttr->src; 797 wid_list[index].val = (s8 *)&scan_info->src;
822 u32WidsCount++; 798 index++;
823 799
824 if (hif_drv->hif_state == HOST_IF_CONNECTED) 800 if (hif_drv->hif_state == HOST_IF_CONNECTED)
825 scan_while_connected = true; 801 scan_while_connected = true;
826 else if (hif_drv->hif_state == HOST_IF_IDLE) 802 else if (hif_drv->hif_state == HOST_IF_IDLE)
827 scan_while_connected = false; 803 scan_while_connected = false;
828 804
829 result = wilc_send_config_pkt(vif, SET_CFG, strWIDList, 805 result = wilc_send_config_pkt(vif, SET_CFG, wid_list,
830 u32WidsCount, 806 index,
831 wilc_get_vif_idx(vif)); 807 wilc_get_vif_idx(vif));
832 808
833 if (result) 809 if (result)
@@ -839,13 +815,13 @@ ERRORHANDLER:
839 Handle_ScanDone(vif, SCAN_EVENT_ABORTED); 815 Handle_ScanDone(vif, SCAN_EVENT_ABORTED);
840 } 816 }
841 817
842 kfree(pstrHostIFscanAttr->ch_freq_list); 818 kfree(scan_info->ch_freq_list);
843 pstrHostIFscanAttr->ch_freq_list = NULL; 819 scan_info->ch_freq_list = NULL;
844 820
845 kfree(pstrHostIFscanAttr->ies); 821 kfree(scan_info->ies);
846 pstrHostIFscanAttr->ies = NULL; 822 scan_info->ies = NULL;
847 kfree(pstrHostIFscanAttr->hidden_network.net_info); 823 kfree(scan_info->hidden_network.net_info);
848 pstrHostIFscanAttr->hidden_network.net_info = NULL; 824 scan_info->hidden_network.net_info = NULL;
849 825
850 kfree(pu8HdnNtwrksWidVal); 826 kfree(pu8HdnNtwrksWidVal);
851 827
@@ -1610,7 +1586,7 @@ static int Handle_Key(struct wilc_vif *vif,
1610 &wid, 1, 1586 &wid, 1,
1611 wilc_get_vif_idx(vif)); 1587 wilc_get_vif_idx(vif));
1612 } 1588 }
1613 up(&hif_drv->sem_test_key_block); 1589 complete(&hif_drv->comp_test_key_block);
1614 break; 1590 break;
1615 1591
1616 case WPA_RX_GTK: 1592 case WPA_RX_GTK:
@@ -1644,10 +1620,10 @@ static int Handle_Key(struct wilc_vif *vif,
1644 wilc_get_vif_idx(vif)); 1620 wilc_get_vif_idx(vif));
1645 1621
1646 kfree(pu8keybuf); 1622 kfree(pu8keybuf);
1647 up(&hif_drv->sem_test_key_block); 1623 complete(&hif_drv->comp_test_key_block);
1648 } else if (pstrHostIFkeyAttr->action & ADDKEY) { 1624 } else if (pstrHostIFkeyAttr->action & ADDKEY) {
1649 pu8keybuf = kzalloc(RX_MIC_KEY_MSG_LEN, GFP_KERNEL); 1625 pu8keybuf = kzalloc(RX_MIC_KEY_MSG_LEN, GFP_KERNEL);
1650 if (pu8keybuf == NULL) { 1626 if (!pu8keybuf) {
1651 ret = -ENOMEM; 1627 ret = -ENOMEM;
1652 goto _WPARxGtk_end_case_; 1628 goto _WPARxGtk_end_case_;
1653 } 1629 }
@@ -1673,7 +1649,7 @@ static int Handle_Key(struct wilc_vif *vif,
1673 wilc_get_vif_idx(vif)); 1649 wilc_get_vif_idx(vif));
1674 1650
1675 kfree(pu8keybuf); 1651 kfree(pu8keybuf);
1676 up(&hif_drv->sem_test_key_block); 1652 complete(&hif_drv->comp_test_key_block);
1677 } 1653 }
1678_WPARxGtk_end_case_: 1654_WPARxGtk_end_case_:
1679 kfree(pstrHostIFkeyAttr->attr.wpa.key); 1655 kfree(pstrHostIFkeyAttr->attr.wpa.key);
@@ -1711,7 +1687,7 @@ _WPARxGtk_end_case_:
1711 strWIDList, 2, 1687 strWIDList, 2,
1712 wilc_get_vif_idx(vif)); 1688 wilc_get_vif_idx(vif));
1713 kfree(pu8keybuf); 1689 kfree(pu8keybuf);
1714 up(&hif_drv->sem_test_key_block); 1690 complete(&hif_drv->comp_test_key_block);
1715 } else if (pstrHostIFkeyAttr->action & ADDKEY) { 1691 } else if (pstrHostIFkeyAttr->action & ADDKEY) {
1716 pu8keybuf = kmalloc(PTK_KEY_MSG_LEN, GFP_KERNEL); 1692 pu8keybuf = kmalloc(PTK_KEY_MSG_LEN, GFP_KERNEL);
1717 if (!pu8keybuf) { 1693 if (!pu8keybuf) {
@@ -1734,7 +1710,7 @@ _WPARxGtk_end_case_:
1734 &wid, 1, 1710 &wid, 1,
1735 wilc_get_vif_idx(vif)); 1711 wilc_get_vif_idx(vif));
1736 kfree(pu8keybuf); 1712 kfree(pu8keybuf);
1737 up(&hif_drv->sem_test_key_block); 1713 complete(&hif_drv->comp_test_key_block);
1738 } 1714 }
1739 1715
1740_WPAPtk_end_case_: 1716_WPAPtk_end_case_:
@@ -1856,7 +1832,7 @@ static void Handle_Disconnect(struct wilc_vif *vif)
1856 } 1832 }
1857 } 1833 }
1858 1834
1859 up(&hif_drv->sem_test_disconn_block); 1835 complete(&hif_drv->comp_test_disconn_block);
1860} 1836}
1861 1837
1862void wilc_resolve_disconnect_aberration(struct wilc_vif *vif) 1838void wilc_resolve_disconnect_aberration(struct wilc_vif *vif)
@@ -1885,7 +1861,7 @@ static void Handle_GetRssi(struct wilc_vif *vif)
1885 result = -EFAULT; 1861 result = -EFAULT;
1886 } 1862 }
1887 1863
1888 up(&vif->hif_drv->sem_get_rssi); 1864 complete(&vif->hif_drv->comp_get_rssi);
1889} 1865}
1890 1866
1891static s32 Handle_GetStatistics(struct wilc_vif *vif, 1867static s32 Handle_GetStatistics(struct wilc_vif *vif,
@@ -1938,7 +1914,7 @@ static s32 Handle_GetStatistics(struct wilc_vif *vif,
1938 wilc_enable_tcp_ack_filter(false); 1914 wilc_enable_tcp_ack_filter(false);
1939 1915
1940 if (pstrStatistics != &vif->wilc->dummy_statistics) 1916 if (pstrStatistics != &vif->wilc->dummy_statistics)
1941 up(&hif_sema_wait_response); 1917 complete(&hif_wait_response);
1942 return 0; 1918 return 0;
1943} 1919}
1944 1920
@@ -1979,7 +1955,7 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
1979 return -EFAULT; 1955 return -EFAULT;
1980 } 1956 }
1981 1957
1982 up(&hif_drv->sem_inactive_time); 1958 complete(&hif_drv->comp_inactive_time);
1983 1959
1984 return result; 1960 return result;
1985} 1961}
@@ -2172,7 +2148,7 @@ static void Handle_DelAllSta(struct wilc_vif *vif,
2172ERRORHANDLER: 2148ERRORHANDLER:
2173 kfree(wid.val); 2149 kfree(wid.val);
2174 2150
2175 up(&hif_sema_wait_response); 2151 complete(&hif_wait_response);
2176} 2152}
2177 2153
2178static void Handle_DelStation(struct wilc_vif *vif, 2154static void Handle_DelStation(struct wilc_vif *vif,
@@ -2472,7 +2448,7 @@ static void handle_set_tx_pwr(struct wilc_vif *vif, u8 tx_pwr)
2472 2448
2473static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr) 2449static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
2474{ 2450{
2475 s32 ret = 0; 2451 int ret = 0;
2476 struct wid wid; 2452 struct wid wid;
2477 2453
2478 wid.id = (u16)WID_TX_POWER; 2454 wid.id = (u16)WID_TX_POWER;
@@ -2485,7 +2461,7 @@ static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
2485 if (ret) 2461 if (ret)
2486 netdev_err(vif->ndev, "Failed to get TX PWR\n"); 2462 netdev_err(vif->ndev, "Failed to get TX PWR\n");
2487 2463
2488 up(&hif_sema_wait_response); 2464 complete(&hif_wait_response);
2489} 2465}
2490 2466
2491static int hostIFthread(void *pvArg) 2467static int hostIFthread(void *pvArg)
@@ -2518,7 +2494,7 @@ static int hostIFthread(void *pvArg)
2518 2494
2519 switch (msg.id) { 2495 switch (msg.id) {
2520 case HOST_IF_MSG_SCAN: 2496 case HOST_IF_MSG_SCAN:
2521 Handle_Scan(msg.vif, &msg.body.scan_info); 2497 handle_scan(msg.vif, &msg.body.scan_info);
2522 break; 2498 break;
2523 2499
2524 case HOST_IF_MSG_CONNECT: 2500 case HOST_IF_MSG_CONNECT:
@@ -2667,7 +2643,7 @@ static int hostIFthread(void *pvArg)
2667 } 2643 }
2668 } 2644 }
2669 2645
2670 up(&hif_sema_thread); 2646 complete(&hif_thread_comp);
2671 return 0; 2647 return 0;
2672} 2648}
2673 2649
@@ -2730,7 +2706,8 @@ int wilc_remove_wep_key(struct wilc_vif *vif, u8 index)
2730 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); 2706 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
2731 if (result) 2707 if (result)
2732 netdev_err(vif->ndev, "Request to remove WEP key\n"); 2708 netdev_err(vif->ndev, "Request to remove WEP key\n");
2733 down(&hif_drv->sem_test_key_block); 2709 else
2710 wait_for_completion(&hif_drv->comp_test_key_block);
2734 2711
2735 return result; 2712 return result;
2736} 2713}
@@ -2758,7 +2735,8 @@ int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index)
2758 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); 2735 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
2759 if (result) 2736 if (result)
2760 netdev_err(vif->ndev, "Default key index\n"); 2737 netdev_err(vif->ndev, "Default key index\n");
2761 down(&hif_drv->sem_test_key_block); 2738 else
2739 wait_for_completion(&hif_drv->comp_test_key_block);
2762 2740
2763 return result; 2741 return result;
2764} 2742}
@@ -2791,7 +2769,7 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
2791 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); 2769 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
2792 if (result) 2770 if (result)
2793 netdev_err(vif->ndev, "STA - WEP Key\n"); 2771 netdev_err(vif->ndev, "STA - WEP Key\n");
2794 down(&hif_drv->sem_test_key_block); 2772 wait_for_completion(&hif_drv->comp_test_key_block);
2795 2773
2796 return result; 2774 return result;
2797} 2775}
@@ -2827,7 +2805,8 @@ int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
2827 2805
2828 if (result) 2806 if (result)
2829 netdev_err(vif->ndev, "AP - WEP Key\n"); 2807 netdev_err(vif->ndev, "AP - WEP Key\n");
2830 down(&hif_drv->sem_test_key_block); 2808 else
2809 wait_for_completion(&hif_drv->comp_test_key_block);
2831 2810
2832 return result; 2811 return result;
2833} 2812}
@@ -2882,8 +2861,8 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
2882 2861
2883 if (result) 2862 if (result)
2884 netdev_err(vif->ndev, "PTK Key\n"); 2863 netdev_err(vif->ndev, "PTK Key\n");
2885 2864 else
2886 down(&hif_drv->sem_test_key_block); 2865 wait_for_completion(&hif_drv->comp_test_key_block);
2887 2866
2888 return result; 2867 return result;
2889} 2868}
@@ -2950,8 +2929,8 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
2950 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); 2929 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
2951 if (result) 2930 if (result)
2952 netdev_err(vif->ndev, "RX GTK\n"); 2931 netdev_err(vif->ndev, "RX GTK\n");
2953 2932 else
2954 down(&hif_drv->sem_test_key_block); 2933 wait_for_completion(&hif_drv->comp_test_key_block);
2955 2934
2956 return result; 2935 return result;
2957} 2936}
@@ -2961,14 +2940,8 @@ int wilc_set_pmkid_info(struct wilc_vif *vif,
2961{ 2940{
2962 int result = 0; 2941 int result = 0;
2963 struct host_if_msg msg; 2942 struct host_if_msg msg;
2964 struct host_if_drv *hif_drv = vif->hif_drv;
2965 int i; 2943 int i;
2966 2944
2967 if (!hif_drv) {
2968 netdev_err(vif->ndev, "driver is null\n");
2969 return -EFAULT;
2970 }
2971
2972 memset(&msg, 0, sizeof(struct host_if_msg)); 2945 memset(&msg, 0, sizeof(struct host_if_msg));
2973 2946
2974 msg.id = HOST_IF_MSG_KEY; 2947 msg.id = HOST_IF_MSG_KEY;
@@ -3007,7 +2980,7 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
3007 return -EFAULT; 2980 return -EFAULT;
3008 } 2981 }
3009 2982
3010 down(&hif_sema_wait_response); 2983 wait_for_completion(&hif_wait_response);
3011 return result; 2984 return result;
3012} 2985}
3013 2986
@@ -3097,8 +3070,8 @@ int wilc_disconnect(struct wilc_vif *vif, u16 reason_code)
3097 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); 3070 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
3098 if (result) 3071 if (result)
3099 netdev_err(vif->ndev, "Failed to send message: disconnect\n"); 3072 netdev_err(vif->ndev, "Failed to send message: disconnect\n");
3100 3073 else
3101 down(&hif_drv->sem_test_disconn_block); 3074 wait_for_completion(&hif_drv->comp_test_disconn_block);
3102 3075
3103 return result; 3076 return result;
3104} 3077}
@@ -3110,12 +3083,6 @@ static s32 host_int_get_assoc_res_info(struct wilc_vif *vif,
3110{ 3083{
3111 s32 result = 0; 3084 s32 result = 0;
3112 struct wid wid; 3085 struct wid wid;
3113 struct host_if_drv *hif_drv = vif->hif_drv;
3114
3115 if (!hif_drv) {
3116 netdev_err(vif->ndev, "Driver is null\n");
3117 return -EFAULT;
3118 }
3119 3086
3120 wid.id = (u16)WID_ASSOC_RES_INFO; 3087 wid.id = (u16)WID_ASSOC_RES_INFO;
3121 wid.type = WID_STR; 3088 wid.type = WID_STR;
@@ -3138,12 +3105,6 @@ int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel)
3138{ 3105{
3139 int result; 3106 int result;
3140 struct host_if_msg msg; 3107 struct host_if_msg msg;
3141 struct host_if_drv *hif_drv = vif->hif_drv;
3142
3143 if (!hif_drv) {
3144 netdev_err(vif->ndev, "driver is null\n");
3145 return -EFAULT;
3146 }
3147 3108
3148 memset(&msg, 0, sizeof(struct host_if_msg)); 3109 memset(&msg, 0, sizeof(struct host_if_msg));
3149 msg.id = HOST_IF_MSG_SET_CHANNEL; 3110 msg.id = HOST_IF_MSG_SET_CHANNEL;
@@ -3219,8 +3180,8 @@ s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
3219 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); 3180 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
3220 if (result) 3181 if (result)
3221 netdev_err(vif->ndev, "Failed to send get host ch param\n"); 3182 netdev_err(vif->ndev, "Failed to send get host ch param\n");
3222 3183 else
3223 down(&hif_drv->sem_inactive_time); 3184 wait_for_completion(&hif_drv->comp_inactive_time);
3224 3185
3225 *pu32InactiveTime = inactive_time; 3186 *pu32InactiveTime = inactive_time;
3226 3187
@@ -3243,7 +3204,7 @@ int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level)
3243 return -EFAULT; 3204 return -EFAULT;
3244 } 3205 }
3245 3206
3246 down(&hif_drv->sem_get_rssi); 3207 wait_for_completion(&hif_drv->comp_get_rssi);
3247 3208
3248 if (!rssi_level) { 3209 if (!rssi_level) {
3249 netdev_err(vif->ndev, "RSS pointer value is null\n"); 3210 netdev_err(vif->ndev, "RSS pointer value is null\n");
@@ -3272,7 +3233,7 @@ int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats)
3272 } 3233 }
3273 3234
3274 if (stats != &vif->wilc->dummy_statistics) 3235 if (stats != &vif->wilc->dummy_statistics)
3275 down(&hif_sema_wait_response); 3236 wait_for_completion(&hif_wait_response);
3276 return result; 3237 return result;
3277} 3238}
3278 3239
@@ -3382,7 +3343,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
3382 3343
3383 scan_while_connected = false; 3344 scan_while_connected = false;
3384 3345
3385 sema_init(&hif_sema_wait_response, 0); 3346 init_completion(&hif_wait_response);
3386 3347
3387 hif_drv = kzalloc(sizeof(struct host_if_drv), GFP_KERNEL); 3348 hif_drv = kzalloc(sizeof(struct host_if_drv), GFP_KERNEL);
3388 if (!hif_drv) { 3349 if (!hif_drv) {
@@ -3399,15 +3360,15 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
3399 wilc_optaining_ip = false; 3360 wilc_optaining_ip = false;
3400 3361
3401 if (clients_count == 0) { 3362 if (clients_count == 0) {
3402 sema_init(&hif_sema_thread, 0); 3363 init_completion(&hif_thread_comp);
3403 sema_init(&hif_sema_driver, 0); 3364 init_completion(&hif_driver_comp);
3404 sema_init(&hif_sema_deinit, 1); 3365 mutex_init(&hif_deinit_lock);
3405 } 3366 }
3406 3367
3407 sema_init(&hif_drv->sem_test_key_block, 0); 3368 init_completion(&hif_drv->comp_test_key_block);
3408 sema_init(&hif_drv->sem_test_disconn_block, 0); 3369 init_completion(&hif_drv->comp_test_disconn_block);
3409 sema_init(&hif_drv->sem_get_rssi, 0); 3370 init_completion(&hif_drv->comp_get_rssi);
3410 sema_init(&hif_drv->sem_inactive_time, 0); 3371 init_completion(&hif_drv->comp_inactive_time);
3411 3372
3412 if (clients_count == 0) { 3373 if (clients_count == 0) {
3413 result = wilc_mq_create(&hif_msg_q); 3374 result = wilc_mq_create(&hif_msg_q);
@@ -3469,7 +3430,7 @@ int wilc_deinit(struct wilc_vif *vif)
3469 return -EFAULT; 3430 return -EFAULT;
3470 } 3431 }
3471 3432
3472 down(&hif_sema_deinit); 3433 mutex_lock(&hif_deinit_lock);
3473 3434
3474 terminated_handle = hif_drv; 3435 terminated_handle = hif_drv;
3475 3436
@@ -3479,7 +3440,7 @@ int wilc_deinit(struct wilc_vif *vif)
3479 del_timer_sync(&hif_drv->remain_on_ch_timer); 3440 del_timer_sync(&hif_drv->remain_on_ch_timer);
3480 3441
3481 wilc_set_wfi_drv_handler(vif, 0, 0); 3442 wilc_set_wfi_drv_handler(vif, 0, 0);
3482 down(&hif_sema_driver); 3443 wait_for_completion(&hif_driver_comp);
3483 3444
3484 if (hif_drv->usr_scan_req.scan_result) { 3445 if (hif_drv->usr_scan_req.scan_result) {
3485 hif_drv->usr_scan_req.scan_result(SCAN_EVENT_ABORTED, NULL, 3446 hif_drv->usr_scan_req.scan_result(SCAN_EVENT_ABORTED, NULL,
@@ -3494,15 +3455,14 @@ int wilc_deinit(struct wilc_vif *vif)
3494 memset(&msg, 0, sizeof(struct host_if_msg)); 3455 memset(&msg, 0, sizeof(struct host_if_msg));
3495 3456
3496 if (clients_count == 1) { 3457 if (clients_count == 1) {
3497 del_timer_sync(&periodic_rssi);
3498 msg.id = HOST_IF_MSG_EXIT; 3458 msg.id = HOST_IF_MSG_EXIT;
3499 msg.vif = vif; 3459 msg.vif = vif;
3500 3460
3501 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); 3461 result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
3502 if (result != 0) 3462 if (result != 0)
3503 netdev_err(vif->ndev, "deinit : Error(%d)\n", result); 3463 netdev_err(vif->ndev, "deinit : Error(%d)\n", result);
3504 3464 else
3505 down(&hif_sema_thread); 3465 wait_for_completion(&hif_thread_comp);
3506 3466
3507 wilc_mq_destroy(&hif_msg_q); 3467 wilc_mq_destroy(&hif_msg_q);
3508 } 3468 }
@@ -3511,7 +3471,7 @@ int wilc_deinit(struct wilc_vif *vif)
3511 3471
3512 clients_count--; 3472 clients_count--;
3513 terminated_handle = NULL; 3473 terminated_handle = NULL;
3514 up(&hif_sema_deinit); 3474 mutex_unlock(&hif_deinit_lock);
3515 return result; 3475 return result;
3516} 3476}
3517 3477
@@ -3558,25 +3518,25 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
3558 struct host_if_drv *hif_drv = NULL; 3518 struct host_if_drv *hif_drv = NULL;
3559 struct wilc_vif *vif; 3519 struct wilc_vif *vif;
3560 3520
3561 down(&hif_sema_deinit); 3521 mutex_lock(&hif_deinit_lock);
3562 3522
3563 id = ((pu8Buffer[u32Length - 4]) | (pu8Buffer[u32Length - 3] << 8) | (pu8Buffer[u32Length - 2] << 16) | (pu8Buffer[u32Length - 1] << 24)); 3523 id = ((pu8Buffer[u32Length - 4]) | (pu8Buffer[u32Length - 3] << 8) | (pu8Buffer[u32Length - 2] << 16) | (pu8Buffer[u32Length - 1] << 24));
3564 vif = wilc_get_vif_from_idx(wilc, id); 3524 vif = wilc_get_vif_from_idx(wilc, id);
3565 if (!vif) { 3525 if (!vif) {
3566 up(&hif_sema_deinit); 3526 mutex_unlock(&hif_deinit_lock);
3567 return; 3527 return;
3568 } 3528 }
3569 3529
3570 hif_drv = vif->hif_drv; 3530 hif_drv = vif->hif_drv;
3571 3531
3572 if (!hif_drv || hif_drv == terminated_handle) { 3532 if (!hif_drv || hif_drv == terminated_handle) {
3573 up(&hif_sema_deinit); 3533 mutex_unlock(&hif_deinit_lock);
3574 return; 3534 return;
3575 } 3535 }
3576 3536
3577 if (!hif_drv->usr_conn_req.conn_result) { 3537 if (!hif_drv->usr_conn_req.conn_result) {
3578 netdev_err(vif->ndev, "there is no current Connect Request\n"); 3538 netdev_err(vif->ndev, "there is no current Connect Request\n");
3579 up(&hif_sema_deinit); 3539 mutex_unlock(&hif_deinit_lock);
3580 return; 3540 return;
3581 } 3541 }
3582 3542
@@ -3593,7 +3553,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
3593 if (result) 3553 if (result)
3594 netdev_err(vif->ndev, "synchronous info (%d)\n", result); 3554 netdev_err(vif->ndev, "synchronous info (%d)\n", result);
3595 3555
3596 up(&hif_sema_deinit); 3556 mutex_unlock(&hif_deinit_lock);
3597} 3557}
3598 3558
3599void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer, 3559void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
@@ -3634,12 +3594,6 @@ int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
3634{ 3594{
3635 int result = 0; 3595 int result = 0;
3636 struct host_if_msg msg; 3596 struct host_if_msg msg;
3637 struct host_if_drv *hif_drv = vif->hif_drv;
3638
3639 if (!hif_drv) {
3640 netdev_err(vif->ndev, "driver is null\n");
3641 return -EFAULT;
3642 }
3643 3597
3644 memset(&msg, 0, sizeof(struct host_if_msg)); 3598 memset(&msg, 0, sizeof(struct host_if_msg));
3645 3599
@@ -3688,12 +3642,6 @@ int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
3688{ 3642{
3689 int result = 0; 3643 int result = 0;
3690 struct host_if_msg msg; 3644 struct host_if_msg msg;
3691 struct host_if_drv *hif_drv = vif->hif_drv;
3692
3693 if (!hif_drv) {
3694 netdev_err(vif->ndev, "driver is null\n");
3695 return -EFAULT;
3696 }
3697 3645
3698 memset(&msg, 0, sizeof(struct host_if_msg)); 3646 memset(&msg, 0, sizeof(struct host_if_msg));
3699 3647
@@ -3727,12 +3675,6 @@ int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period,
3727 int result = 0; 3675 int result = 0;
3728 struct host_if_msg msg; 3676 struct host_if_msg msg;
3729 struct beacon_attr *beacon_info = &msg.body.beacon_info; 3677 struct beacon_attr *beacon_info = &msg.body.beacon_info;
3730 struct host_if_drv *hif_drv = vif->hif_drv;
3731
3732 if (!hif_drv) {
3733 netdev_err(vif->ndev, "driver is null\n");
3734 return -EFAULT;
3735 }
3736 3678
3737 memset(&msg, 0, sizeof(struct host_if_msg)); 3679 memset(&msg, 0, sizeof(struct host_if_msg));
3738 3680
@@ -3776,12 +3718,6 @@ int wilc_del_beacon(struct wilc_vif *vif)
3776{ 3718{
3777 int result = 0; 3719 int result = 0;
3778 struct host_if_msg msg; 3720 struct host_if_msg msg;
3779 struct host_if_drv *hif_drv = vif->hif_drv;
3780
3781 if (!hif_drv) {
3782 netdev_err(vif->ndev, "driver is null\n");
3783 return -EFAULT;
3784 }
3785 3721
3786 msg.id = HOST_IF_MSG_DEL_BEACON; 3722 msg.id = HOST_IF_MSG_DEL_BEACON;
3787 msg.vif = vif; 3723 msg.vif = vif;
@@ -3798,12 +3734,6 @@ int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param)
3798 int result = 0; 3734 int result = 0;
3799 struct host_if_msg msg; 3735 struct host_if_msg msg;
3800 struct add_sta_param *add_sta_info = &msg.body.add_sta_info; 3736 struct add_sta_param *add_sta_info = &msg.body.add_sta_info;
3801 struct host_if_drv *hif_drv = vif->hif_drv;
3802
3803 if (!hif_drv) {
3804 netdev_err(vif->ndev, "driver is null\n");
3805 return -EFAULT;
3806 }
3807 3737
3808 memset(&msg, 0, sizeof(struct host_if_msg)); 3738 memset(&msg, 0, sizeof(struct host_if_msg));
3809 3739
@@ -3830,12 +3760,6 @@ int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr)
3830 int result = 0; 3760 int result = 0;
3831 struct host_if_msg msg; 3761 struct host_if_msg msg;
3832 struct del_sta *del_sta_info = &msg.body.del_sta_info; 3762 struct del_sta *del_sta_info = &msg.body.del_sta_info;
3833 struct host_if_drv *hif_drv = vif->hif_drv;
3834
3835 if (!hif_drv) {
3836 netdev_err(vif->ndev, "driver is null\n");
3837 return -EFAULT;
3838 }
3839 3763
3840 memset(&msg, 0, sizeof(struct host_if_msg)); 3764 memset(&msg, 0, sizeof(struct host_if_msg));
3841 3765
@@ -3858,16 +3782,10 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
3858 int result = 0; 3782 int result = 0;
3859 struct host_if_msg msg; 3783 struct host_if_msg msg;
3860 struct del_all_sta *del_all_sta_info = &msg.body.del_all_sta_info; 3784 struct del_all_sta *del_all_sta_info = &msg.body.del_all_sta_info;
3861 struct host_if_drv *hif_drv = vif->hif_drv;
3862 u8 zero_addr[ETH_ALEN] = {0}; 3785 u8 zero_addr[ETH_ALEN] = {0};
3863 int i; 3786 int i;
3864 u8 assoc_sta = 0; 3787 u8 assoc_sta = 0;
3865 3788
3866 if (!hif_drv) {
3867 netdev_err(vif->ndev, "driver is null\n");
3868 return -EFAULT;
3869 }
3870
3871 memset(&msg, 0, sizeof(struct host_if_msg)); 3789 memset(&msg, 0, sizeof(struct host_if_msg));
3872 3790
3873 msg.id = HOST_IF_MSG_DEL_ALL_STA; 3791 msg.id = HOST_IF_MSG_DEL_ALL_STA;
@@ -3887,8 +3805,8 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
3887 3805
3888 if (result) 3806 if (result)
3889 netdev_err(vif->ndev, "wilc_mq_send fail\n"); 3807 netdev_err(vif->ndev, "wilc_mq_send fail\n");
3890 3808 else
3891 down(&hif_sema_wait_response); 3809 wait_for_completion(&hif_wait_response);
3892 3810
3893 return result; 3811 return result;
3894} 3812}
@@ -3899,12 +3817,6 @@ int wilc_edit_station(struct wilc_vif *vif,
3899 int result = 0; 3817 int result = 0;
3900 struct host_if_msg msg; 3818 struct host_if_msg msg;
3901 struct add_sta_param *add_sta_info = &msg.body.add_sta_info; 3819 struct add_sta_param *add_sta_info = &msg.body.add_sta_info;
3902 struct host_if_drv *hif_drv = vif->hif_drv;
3903
3904 if (!hif_drv) {
3905 netdev_err(vif->ndev, "driver is null\n");
3906 return -EFAULT;
3907 }
3908 3820
3909 memset(&msg, 0, sizeof(struct host_if_msg)); 3821 memset(&msg, 0, sizeof(struct host_if_msg));
3910 3822
@@ -3932,12 +3844,6 @@ int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
3932 int result = 0; 3844 int result = 0;
3933 struct host_if_msg msg; 3845 struct host_if_msg msg;
3934 struct power_mgmt_param *pwr_mgmt_info = &msg.body.pwr_mgmt_info; 3846 struct power_mgmt_param *pwr_mgmt_info = &msg.body.pwr_mgmt_info;
3935 struct host_if_drv *hif_drv = vif->hif_drv;
3936
3937 if (!hif_drv) {
3938 netdev_err(vif->ndev, "driver is null\n");
3939 return -EFAULT;
3940 }
3941 3847
3942 if (wilc_wlan_get_num_conn_ifcs(vif->wilc) == 2 && enabled) 3848 if (wilc_wlan_get_num_conn_ifcs(vif->wilc) == 2 && enabled)
3943 return 0; 3849 return 0;
@@ -3962,12 +3868,6 @@ int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled,
3962 int result = 0; 3868 int result = 0;
3963 struct host_if_msg msg; 3869 struct host_if_msg msg;
3964 struct set_multicast *multicast_filter_param = &msg.body.multicast_info; 3870 struct set_multicast *multicast_filter_param = &msg.body.multicast_info;
3965 struct host_if_drv *hif_drv = vif->hif_drv;
3966
3967 if (!hif_drv) {
3968 netdev_err(vif->ndev, "driver is null\n");
3969 return -EFAULT;
3970 }
3971 3871
3972 memset(&msg, 0, sizeof(struct host_if_msg)); 3872 memset(&msg, 0, sizeof(struct host_if_msg));
3973 3873
@@ -4141,12 +4041,6 @@ int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
4141{ 4041{
4142 int result = 0; 4042 int result = 0;
4143 struct host_if_msg msg; 4043 struct host_if_msg msg;
4144 struct host_if_drv *hif_drv = vif->hif_drv;
4145
4146 if (!hif_drv) {
4147 netdev_err(vif->ndev, "driver is null\n");
4148 return -EFAULT;
4149 }
4150 4044
4151 memset(&msg, 0, sizeof(struct host_if_msg)); 4045 memset(&msg, 0, sizeof(struct host_if_msg));
4152 4046
@@ -4167,12 +4061,6 @@ static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
4167{ 4061{
4168 int result = 0; 4062 int result = 0;
4169 struct host_if_msg msg; 4063 struct host_if_msg msg;
4170 struct host_if_drv *hif_drv = vif->hif_drv;
4171
4172 if (!hif_drv) {
4173 netdev_err(vif->ndev, "driver is null\n");
4174 return -EFAULT;
4175 }
4176 4064
4177 memset(&msg, 0, sizeof(struct host_if_msg)); 4065 memset(&msg, 0, sizeof(struct host_if_msg));
4178 4066
@@ -4221,7 +4109,7 @@ int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
4221 if (ret) 4109 if (ret)
4222 netdev_err(vif->ndev, "Failed to get TX PWR\n"); 4110 netdev_err(vif->ndev, "Failed to get TX PWR\n");
4223 4111
4224 down(&hif_sema_wait_response); 4112 wait_for_completion(&hif_wait_response);
4225 *tx_power = msg.body.tx_power.tx_pwr; 4113 *tx_power = msg.body.tx_power.tx_pwr;
4226 4114
4227 return ret; 4115 return ret;
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 01f3222a4231..8d2dd0db0bed 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -275,10 +275,10 @@ struct host_if_drv {
275 struct cfg_param_attr cfg_values; 275 struct cfg_param_attr cfg_values;
276 276
277 struct mutex cfg_values_lock; 277 struct mutex cfg_values_lock;
278 struct semaphore sem_test_key_block; 278 struct completion comp_test_key_block;
279 struct semaphore sem_test_disconn_block; 279 struct completion comp_test_disconn_block;
280 struct semaphore sem_get_rssi; 280 struct completion comp_get_rssi;
281 struct semaphore sem_inactive_time; 281 struct completion comp_inactive_time;
282 282
283 struct timer_list scan_timer; 283 struct timer_list scan_timer;
284 struct timer_list connect_timer; 284 struct timer_list connect_timer;
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index 7d9e5ded8ff4..242f82f4d24f 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -24,7 +24,7 @@ struct wilc_wfi_radiotap_cb_hdr {
24 24
25static struct net_device *wilc_wfi_mon; /* global monitor netdev */ 25static struct net_device *wilc_wfi_mon; /* global monitor netdev */
26 26
27static u8 srcAdd[6]; 27static u8 srcadd[6];
28static u8 bssid[6]; 28static u8 bssid[6];
29static u8 broadcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 29static u8 broadcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
30/** 30/**
@@ -59,9 +59,10 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
59 59
60 /* Get WILC header */ 60 /* Get WILC header */
61 memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET); 61 memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
62 62 /*
63 /* The packet offset field conain info about what type of managment frame */ 63 * The packet offset field contain info about what type of management
64 /* we are dealing with and ack status */ 64 * the frame we are dealing with and ack status
65 */
65 pkt_offset = GET_PKT_OFFSET(header); 66 pkt_offset = GET_PKT_OFFSET(header);
66 67
67 if (pkt_offset & IS_MANAGMEMENT_CALLBACK) { 68 if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
@@ -105,7 +106,7 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
105 hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */ 106 hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
106 hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_hdr)); 107 hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_hdr));
107 hdr->hdr.it_present = cpu_to_le32 108 hdr->hdr.it_present = cpu_to_le32
108 (1 << IEEE80211_RADIOTAP_RATE); /* | */ 109 (1 << IEEE80211_RADIOTAP_RATE); /* | */
109 hdr->rate = 5; /* txrate->bitrate / 5; */ 110 hdr->rate = 5; /* txrate->bitrate / 5; */
110 } 111 }
111 112
@@ -127,8 +128,10 @@ struct tx_complete_mon_data {
127static void mgmt_tx_complete(void *priv, int status) 128static void mgmt_tx_complete(void *priv, int status)
128{ 129{
129 struct tx_complete_mon_data *pv_data = priv; 130 struct tx_complete_mon_data *pv_data = priv;
130 131 /*
131 /* incase of fully hosting mode, the freeing will be done in response to the cfg packet */ 132 * in case of fully hosting mode, the freeing will be done
133 * in response to the cfg packet
134 */
132 kfree(pv_data->buff); 135 kfree(pv_data->buff);
133 136
134 kfree(pv_data); 137 kfree(pv_data);
@@ -225,11 +228,11 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
225 skb->dev = mon_priv->real_ndev; 228 skb->dev = mon_priv->real_ndev;
226 229
227 /* Identify if Ethernet or MAC header (data or mgmt) */ 230 /* Identify if Ethernet or MAC header (data or mgmt) */
228 memcpy(srcAdd, &skb->data[10], 6); 231 memcpy(srcadd, &skb->data[10], 6);
229 memcpy(bssid, &skb->data[16], 6); 232 memcpy(bssid, &skb->data[16], 6);
230 /* if source address and bssid fields are equal>>Mac header */ 233 /* if source address and bssid fields are equal>>Mac header */
231 /*send it to mgmt frames handler */ 234 /*send it to mgmt frames handler */
232 if (!(memcmp(srcAdd, bssid, 6))) { 235 if (!(memcmp(srcadd, bssid, 6))) {
233 ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len); 236 ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len);
234 if (ret) 237 if (ret)
235 netdev_err(dev, "fail to mgmt tx\n"); 238 netdev_err(dev, "fail to mgmt tx\n");
@@ -255,7 +258,8 @@ static const struct net_device_ops wilc_wfi_netdev_ops = {
255 * @date 12 JUL 2012 258 * @date 12 JUL 2012
256 * @version 1.0 259 * @version 1.0
257 */ 260 */
258struct net_device *WILC_WFI_init_mon_interface(const char *name, struct net_device *real_dev) 261struct net_device *WILC_WFI_init_mon_interface(const char *name,
262 struct net_device *real_dev)
259{ 263{
260 u32 ret = 0; 264 u32 ret = 0;
261 struct WILC_WFI_mon_priv *priv; 265 struct WILC_WFI_mon_priv *priv;
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index bfa754bb022d..4f93c11e73c0 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -22,6 +22,7 @@
22#include <linux/skbuff.h> 22#include <linux/skbuff.h>
23 23
24#include <linux/semaphore.h> 24#include <linux/semaphore.h>
25#include <linux/completion.h>
25 26
26static int dev_state_ev_handler(struct notifier_block *this, 27static int dev_state_ev_handler(struct notifier_block *this,
27 unsigned long event, void *ptr); 28 unsigned long event, void *ptr);
@@ -30,8 +31,6 @@ static struct notifier_block g_dev_notifier = {
30 .notifier_call = dev_state_ev_handler 31 .notifier_call = dev_state_ev_handler
31}; 32};
32 33
33#define IRQ_WAIT 1
34#define IRQ_NO_WAIT 0
35static struct semaphore close_exit_sync; 34static struct semaphore close_exit_sync;
36 35
37static int wlan_deinit_locks(struct net_device *dev); 36static int wlan_deinit_locks(struct net_device *dev);
@@ -259,10 +258,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
259 258
260 for (i = 0; i < wilc->vif_num; i++) { 259 for (i = 0; i < wilc->vif_num; i++) {
261 if (wilc->vif[i]->mode == STATION_MODE) 260 if (wilc->vif[i]->mode == STATION_MODE)
262 if (!memcmp(bssid, wilc->vif[i]->bssid, ETH_ALEN)) 261 if (ether_addr_equal_unaligned(bssid,
262 wilc->vif[i]->bssid))
263 return wilc->vif[i]->ndev; 263 return wilc->vif[i]->ndev;
264 if (wilc->vif[i]->mode == AP_MODE) 264 if (wilc->vif[i]->mode == AP_MODE)
265 if (!memcmp(bssid1, wilc->vif[i]->bssid, ETH_ALEN)) 265 if (ether_addr_equal_unaligned(bssid1,
266 wilc->vif[i]->bssid))
266 return wilc->vif[i]->ndev; 267 return wilc->vif[i]->ndev;
267 } 268 }
268 269
@@ -303,40 +304,27 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
303 return ret_val; 304 return ret_val;
304} 305}
305 306
306#define USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
307
308static int linux_wlan_txq_task(void *vp) 307static int linux_wlan_txq_task(void *vp)
309{ 308{
310 int ret, txq_count; 309 int ret, txq_count;
311 struct wilc_vif *vif; 310 struct wilc_vif *vif;
312 struct wilc *wl; 311 struct wilc *wl;
313 struct net_device *dev = vp; 312 struct net_device *dev = vp;
314#if defined USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
315#define TX_BACKOFF_WEIGHT_INCR_STEP (1)
316#define TX_BACKOFF_WEIGHT_DECR_STEP (1)
317#define TX_BACKOFF_WEIGHT_MAX (7)
318#define TX_BACKOFF_WEIGHT_MIN (0)
319#define TX_BACKOFF_WEIGHT_UNIT_MS (10)
320 int backoff_weight = TX_BACKOFF_WEIGHT_MIN;
321#endif
322 313
323 vif = netdev_priv(dev); 314 vif = netdev_priv(dev);
324 wl = vif->wilc; 315 wl = vif->wilc;
325 316
326 up(&wl->txq_thread_started); 317 complete(&wl->txq_thread_started);
327 while (1) { 318 while (1) {
328 down(&wl->txq_event); 319 down(&wl->txq_event);
329 320
330 if (wl->close) { 321 if (wl->close) {
331 up(&wl->txq_thread_started); 322 complete(&wl->txq_thread_started);
332 323
333 while (!kthread_should_stop()) 324 while (!kthread_should_stop())
334 schedule(); 325 schedule();
335 break; 326 break;
336 } 327 }
337#if !defined USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
338 ret = wilc_wlan_handle_txq(dev, &txq_count);
339#else
340 do { 328 do {
341 ret = wilc_wlan_handle_txq(dev, &txq_count); 329 ret = wilc_wlan_handle_txq(dev, &txq_count);
342 if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) { 330 if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
@@ -345,20 +333,7 @@ static int linux_wlan_txq_task(void *vp)
345 if (netif_queue_stopped(wl->vif[1]->ndev)) 333 if (netif_queue_stopped(wl->vif[1]->ndev))
346 netif_wake_queue(wl->vif[1]->ndev); 334 netif_wake_queue(wl->vif[1]->ndev);
347 } 335 }
348
349 if (ret == WILC_TX_ERR_NO_BUF) {
350 backoff_weight += TX_BACKOFF_WEIGHT_INCR_STEP;
351 if (backoff_weight > TX_BACKOFF_WEIGHT_MAX)
352 backoff_weight = TX_BACKOFF_WEIGHT_MAX;
353 } else {
354 if (backoff_weight > TX_BACKOFF_WEIGHT_MIN) {
355 backoff_weight -= TX_BACKOFF_WEIGHT_DECR_STEP;
356 if (backoff_weight < TX_BACKOFF_WEIGHT_MIN)
357 backoff_weight = TX_BACKOFF_WEIGHT_MIN;
358 }
359 }
360 } while (ret == WILC_TX_ERR_NO_BUF && !wl->close); 336 } while (ret == WILC_TX_ERR_NO_BUF && !wl->close);
361#endif
362 } 337 }
363 return 0; 338 return 0;
364} 339}
@@ -449,7 +424,6 @@ static int linux_wlan_init_test_config(struct net_device *dev,
449 struct wilc_vif *vif) 424 struct wilc_vif *vif)
450{ 425{
451 unsigned char c_val[64]; 426 unsigned char c_val[64];
452 unsigned char mac_add[] = {0x00, 0x80, 0xC2, 0x5E, 0xa2, 0xff};
453 struct wilc *wilc = vif->wilc; 427 struct wilc *wilc = vif->wilc;
454 struct wilc_priv *priv; 428 struct wilc_priv *priv;
455 struct host_if_drv *hif_drv; 429 struct host_if_drv *hif_drv;
@@ -458,9 +432,6 @@ static int linux_wlan_init_test_config(struct net_device *dev,
458 priv = wiphy_priv(dev->ieee80211_ptr->wiphy); 432 priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
459 hif_drv = (struct host_if_drv *)priv->hif_drv; 433 hif_drv = (struct host_if_drv *)priv->hif_drv;
460 netdev_dbg(dev, "Host = %p\n", hif_drv); 434 netdev_dbg(dev, "Host = %p\n", hif_drv);
461 wilc_get_mac_address(vif, mac_add);
462
463 netdev_dbg(dev, "MAC address is : %pM\n", mac_add);
464 wilc_get_chipid(wilc, false); 435 wilc_get_chipid(wilc, false);
465 436
466 *(int *)c_val = 1; 437 *(int *)c_val = 1;
@@ -622,11 +593,6 @@ static int linux_wlan_init_test_config(struct net_device *dev,
622 0)) 593 0))
623 goto _fail_; 594 goto _fail_;
624 595
625 memcpy(c_val, mac_add, 6);
626
627 if (!wilc_wlan_cfg_set(vif, 0, WID_MAC_ADDR, c_val, 6, 0, 0))
628 goto _fail_;
629
630 c_val[0] = DETECT_PROTECT_REPORT; 596 c_val[0] = DETECT_PROTECT_REPORT;
631 if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OBSS_NONHT_DETECTION, c_val, 1, 597 if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OBSS_NONHT_DETECTION, c_val, 1,
632 0, 0)) 598 0, 0))
@@ -691,14 +657,6 @@ void wilc1000_wlan_deinit(struct net_device *dev)
691 657
692 wilc_wlan_stop(wl); 658 wilc_wlan_stop(wl);
693 wilc_wlan_cleanup(dev); 659 wilc_wlan_cleanup(dev);
694#if defined(PLAT_ALLWINNER_A20) || defined(PLAT_ALLWINNER_A23) || defined(PLAT_ALLWINNER_A31)
695 if (!wl->dev_irq_num &&
696 wl->hif_func->disable_interrupt) {
697 mutex_lock(&wl->hif_cs);
698 wl->hif_func->disable_interrupt(wl);
699 mutex_unlock(&wl->hif_cs);
700 }
701#endif
702 wlan_deinit_locks(dev); 660 wlan_deinit_locks(dev);
703 661
704 wl->initialized = false; 662 wl->initialized = false;
@@ -727,8 +685,7 @@ static int wlan_init_locks(struct net_device *dev)
727 685
728 sema_init(&wl->cfg_event, 0); 686 sema_init(&wl->cfg_event, 0);
729 sema_init(&wl->sync_event, 0); 687 sema_init(&wl->sync_event, 0);
730 688 init_completion(&wl->txq_thread_started);
731 sema_init(&wl->txq_thread_started, 0);
732 689
733 return 0; 690 return 0;
734} 691}
@@ -765,7 +722,7 @@ static int wlan_initialize_threads(struct net_device *dev)
765 wilc->close = 0; 722 wilc->close = 0;
766 return -ENOBUFS; 723 return -ENOBUFS;
767 } 724 }
768 down(&wilc->txq_thread_started); 725 wait_for_completion(&wilc->txq_thread_started);
769 726
770 return 0; 727 return 0;
771} 728}
@@ -896,25 +853,20 @@ static int mac_init_fn(struct net_device *ndev)
896int wilc_mac_open(struct net_device *ndev) 853int wilc_mac_open(struct net_device *ndev)
897{ 854{
898 struct wilc_vif *vif; 855 struct wilc_vif *vif;
899 struct wilc *wilc;
900 856
901 unsigned char mac_add[ETH_ALEN] = {0}; 857 unsigned char mac_add[ETH_ALEN] = {0};
902 int ret = 0; 858 int ret = 0;
903 int i = 0; 859 int i = 0;
904 struct wilc_priv *priv;
905 struct wilc *wl; 860 struct wilc *wl;
906 861
907 vif = netdev_priv(ndev); 862 vif = netdev_priv(ndev);
908 wl = vif->wilc; 863 wl = vif->wilc;
909 864
910 if (!wl || !wl->dev) { 865 if (!wl || !wl->dev) {
911 netdev_err(ndev, "wilc1000: SPI device not ready\n"); 866 netdev_err(ndev, "device not ready\n");
912 return -ENODEV; 867 return -ENODEV;
913 } 868 }
914 869
915 vif = netdev_priv(ndev);
916 wilc = vif->wilc;
917 priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
918 netdev_dbg(ndev, "MAC OPEN[%p]\n", ndev); 870 netdev_dbg(ndev, "MAC OPEN[%p]\n", ndev);
919 871
920 ret = wilc_init_host_int(ndev); 872 ret = wilc_init_host_int(ndev);
@@ -933,13 +885,13 @@ int wilc_mac_open(struct net_device *ndev)
933 wilc_set_wfi_drv_handler(vif, 885 wilc_set_wfi_drv_handler(vif,
934 wilc_get_vif_idx(vif), 886 wilc_get_vif_idx(vif),
935 0); 887 0);
936 } else if (!wilc_wlan_get_num_conn_ifcs(wilc)) { 888 } else if (!wilc_wlan_get_num_conn_ifcs(wl)) {
937 wilc_set_wfi_drv_handler(vif, 889 wilc_set_wfi_drv_handler(vif,
938 wilc_get_vif_idx(vif), 890 wilc_get_vif_idx(vif),
939 wilc->open_ifcs); 891 wl->open_ifcs);
940 } else { 892 } else {
941 if (memcmp(wilc->vif[i ^ 1]->bssid, 893 if (memcmp(wl->vif[i ^ 1]->bssid,
942 wilc->vif[i ^ 1]->src_addr, 6)) 894 wl->vif[i ^ 1]->src_addr, 6))
943 wilc_set_wfi_drv_handler(vif, 895 wilc_set_wfi_drv_handler(vif,
944 wilc_get_vif_idx(vif), 896 wilc_get_vif_idx(vif),
945 0); 897 0);
@@ -969,12 +921,12 @@ int wilc_mac_open(struct net_device *ndev)
969 921
970 wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy, 922 wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy,
971 vif->ndev->ieee80211_ptr, 923 vif->ndev->ieee80211_ptr,
972 vif->g_struct_frame_reg[0].frame_type, 924 vif->frame_reg[0].type,
973 vif->g_struct_frame_reg[0].reg); 925 vif->frame_reg[0].reg);
974 wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy, 926 wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy,
975 vif->ndev->ieee80211_ptr, 927 vif->ndev->ieee80211_ptr,
976 vif->g_struct_frame_reg[1].frame_type, 928 vif->frame_reg[1].type,
977 vif->g_struct_frame_reg[1].reg); 929 vif->frame_reg[1].reg);
978 netif_wake_queue(ndev); 930 netif_wake_queue(ndev);
979 wl->open_ifcs++; 931 wl->open_ifcs++;
980 vif->mac_opened = 1; 932 vif->mac_opened = 1;
@@ -991,14 +943,10 @@ static struct net_device_stats *mac_stats(struct net_device *dev)
991static void wilc_set_multicast_list(struct net_device *dev) 943static void wilc_set_multicast_list(struct net_device *dev)
992{ 944{
993 struct netdev_hw_addr *ha; 945 struct netdev_hw_addr *ha;
994 struct wilc_priv *priv;
995 struct host_if_drv *hif_drv;
996 struct wilc_vif *vif; 946 struct wilc_vif *vif;
997 int i = 0; 947 int i = 0;
998 948
999 priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
1000 vif = netdev_priv(dev); 949 vif = netdev_priv(dev);
1001 hif_drv = (struct host_if_drv *)priv->hif_drv;
1002 950
1003 if (dev->flags & IFF_PROMISC) 951 if (dev->flags & IFF_PROMISC)
1004 return; 952 return;
@@ -1152,7 +1100,6 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1152 s8 rssi; 1100 s8 rssi;
1153 u32 size = 0, length = 0; 1101 u32 size = 0, length = 0;
1154 struct wilc_vif *vif; 1102 struct wilc_vif *vif;
1155 struct wilc_priv *priv;
1156 s32 ret = 0; 1103 s32 ret = 0;
1157 struct wilc *wilc; 1104 struct wilc *wilc;
1158 1105
@@ -1176,7 +1123,6 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1176 return PTR_ERR(buff); 1123 return PTR_ERR(buff);
1177 1124
1178 if (strncasecmp(buff, "RSSI", length) == 0) { 1125 if (strncasecmp(buff, "RSSI", length) == 0) {
1179 priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
1180 ret = wilc_get_rssi(vif, &rssi); 1126 ret = wilc_get_rssi(vif, &rssi);
1181 netdev_info(ndev, "RSSI :%d\n", rssi); 1127 netdev_info(ndev, "RSSI :%d\n", rssi);
1182 1128
@@ -1263,8 +1209,8 @@ void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
1263 } 1209 }
1264 1210
1265 vif = netdev_priv(wilc->vif[1]->ndev); 1211 vif = netdev_priv(wilc->vif[1]->ndev);
1266 if ((buff[0] == vif->g_struct_frame_reg[0].frame_type && vif->g_struct_frame_reg[0].reg) || 1212 if ((buff[0] == vif->frame_reg[0].type && vif->frame_reg[0].reg) ||
1267 (buff[0] == vif->g_struct_frame_reg[1].frame_type && vif->g_struct_frame_reg[1].reg)) 1213 (buff[0] == vif->frame_reg[1].type && vif->frame_reg[1].reg))
1268 WILC_WFI_p2p_rx(wilc->vif[1]->ndev, buff, size); 1214 WILC_WFI_p2p_rx(wilc->vif[1]->ndev, buff, size);
1269} 1215}
1270 1216
@@ -1280,8 +1226,10 @@ void wilc_netdev_cleanup(struct wilc *wilc)
1280 vif[i] = netdev_priv(wilc->vif[i]->ndev); 1226 vif[i] = netdev_priv(wilc->vif[i]->ndev);
1281 } 1227 }
1282 1228
1283 if (wilc && wilc->firmware) 1229 if (wilc && wilc->firmware) {
1284 release_firmware(wilc->firmware); 1230 release_firmware(wilc->firmware);
1231 wilc->firmware = NULL;
1232 }
1285 1233
1286 if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) { 1234 if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
1287 wilc_lock_timeout(wilc, &close_exit_sync, 5 * 1000); 1235 wilc_lock_timeout(wilc, &close_exit_sync, 5 * 1000);
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index d41b8b6790af..4268e2f29307 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -196,9 +196,6 @@ static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
196 dev_err(&spi->dev, 196 dev_err(&spi->dev,
197 "can't write data with the following length: %d\n", 197 "can't write data with the following length: %d\n",
198 len); 198 len);
199 dev_err(&spi->dev,
200 "FAILED due to NULL buffer or ZERO length check the following length: %d\n",
201 len);
202 ret = -EINVAL; 199 ret = -EINVAL;
203 } 200 }
204 201
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 544917d8b2df..51aff4ff7d7c 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -451,7 +451,7 @@ static void CfgScanResult(enum scan_event scan_event,
451 } else if (scan_event == SCAN_EVENT_DONE) { 451 } else if (scan_event == SCAN_EVENT_DONE) {
452 refresh_scan(priv, 1, false); 452 refresh_scan(priv, 1, false);
453 453
454 down(&(priv->hSemScanReq)); 454 mutex_lock(&priv->scan_req_lock);
455 455
456 if (priv->pstrScanReq) { 456 if (priv->pstrScanReq) {
457 cfg80211_scan_done(priv->pstrScanReq, false); 457 cfg80211_scan_done(priv->pstrScanReq, false);
@@ -459,9 +459,9 @@ static void CfgScanResult(enum scan_event scan_event,
459 priv->bCfgScanning = false; 459 priv->bCfgScanning = false;
460 priv->pstrScanReq = NULL; 460 priv->pstrScanReq = NULL;
461 } 461 }
462 up(&(priv->hSemScanReq)); 462 mutex_unlock(&priv->scan_req_lock);
463 } else if (scan_event == SCAN_EVENT_ABORTED) { 463 } else if (scan_event == SCAN_EVENT_ABORTED) {
464 down(&(priv->hSemScanReq)); 464 mutex_lock(&priv->scan_req_lock);
465 465
466 if (priv->pstrScanReq) { 466 if (priv->pstrScanReq) {
467 update_scan_time(); 467 update_scan_time();
@@ -471,7 +471,7 @@ static void CfgScanResult(enum scan_event scan_event,
471 priv->bCfgScanning = false; 471 priv->bCfgScanning = false;
472 priv->pstrScanReq = NULL; 472 priv->pstrScanReq = NULL;
473 } 473 }
474 up(&(priv->hSemScanReq)); 474 mutex_unlock(&priv->scan_req_lock);
475 } 475 }
476 } 476 }
477} 477}
@@ -558,11 +558,11 @@ static void CfgConnectResult(enum conn_event enuConnDisconnEvent,
558 558
559 if (!pstrWFIDrv->p2p_connect) 559 if (!pstrWFIDrv->p2p_connect)
560 wlan_channel = INVALID_CHANNEL; 560 wlan_channel = INVALID_CHANNEL;
561 if ((pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) { 561 if ((pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev))
562 pstrDisconnectNotifInfo->reason = 3; 562 pstrDisconnectNotifInfo->reason = 3;
563 } else if ((!pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) { 563 else if ((!pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev))
564 pstrDisconnectNotifInfo->reason = 1; 564 pstrDisconnectNotifInfo->reason = 1;
565 } 565
566 cfg80211_disconnected(dev, pstrDisconnectNotifInfo->reason, pstrDisconnectNotifInfo->ie, 566 cfg80211_disconnected(dev, pstrDisconnectNotifInfo->reason, pstrDisconnectNotifInfo->ie,
567 pstrDisconnectNotifInfo->ie_len, false, 567 pstrDisconnectNotifInfo->ie_len, false,
568 GFP_KERNEL); 568 GFP_KERNEL);
@@ -739,18 +739,15 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
739 wilc_add_wep_key_bss_sta(vif, sme->key, sme->key_len, 739 wilc_add_wep_key_bss_sta(vif, sme->key, sme->key_len,
740 sme->key_idx); 740 sme->key_idx);
741 } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) { 741 } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
742 if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP) { 742 if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP)
743 u8security = ENCRYPT_ENABLED | WPA2 | TKIP; 743 u8security = ENCRYPT_ENABLED | WPA2 | TKIP;
744 } else { 744 else
745 u8security = ENCRYPT_ENABLED | WPA2 | AES; 745 u8security = ENCRYPT_ENABLED | WPA2 | AES;
746 }
747 } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) { 746 } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) {
748 if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP) { 747 if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP)
749 u8security = ENCRYPT_ENABLED | WPA | TKIP; 748 u8security = ENCRYPT_ENABLED | WPA | TKIP;
750 } else { 749 else
751 u8security = ENCRYPT_ENABLED | WPA | AES; 750 u8security = ENCRYPT_ENABLED | WPA | AES;
752 }
753
754 } else { 751 } else {
755 s32Error = -ENOTSUPP; 752 s32Error = -ENOTSUPP;
756 netdev_err(dev, "Not supported cipher\n"); 753 netdev_err(dev, "Not supported cipher\n");
@@ -762,11 +759,10 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
762 if ((sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) 759 if ((sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
763 || (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) { 760 || (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) {
764 for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++) { 761 for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++) {
765 if (sme->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP) { 762 if (sme->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP)
766 u8security = u8security | TKIP; 763 u8security = u8security | TKIP;
767 } else { 764 else
768 u8security = u8security | AES; 765 u8security = u8security | AES;
769 }
770 } 766 }
771 } 767 }
772 768
@@ -823,11 +819,22 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_co
823 struct wilc_priv *priv; 819 struct wilc_priv *priv;
824 struct host_if_drv *pstrWFIDrv; 820 struct host_if_drv *pstrWFIDrv;
825 struct wilc_vif *vif; 821 struct wilc_vif *vif;
822 struct wilc *wilc;
826 u8 NullBssid[ETH_ALEN] = {0}; 823 u8 NullBssid[ETH_ALEN] = {0};
827 824
828 wilc_connecting = 0; 825 wilc_connecting = 0;
829 priv = wiphy_priv(wiphy); 826 priv = wiphy_priv(wiphy);
830 vif = netdev_priv(priv->dev); 827 vif = netdev_priv(priv->dev);
828 wilc = vif->wilc;
829
830 if (!wilc)
831 return -EIO;
832
833 if (wilc->close) {
834 /* already disconnected done */
835 cfg80211_disconnected(dev, 0, NULL, 0, true, GFP_KERNEL);
836 return 0;
837 }
831 838
832 pstrWFIDrv = (struct host_if_drv *)priv->hif_drv; 839 pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
833 if (!pstrWFIDrv->p2p_connect) 840 if (!pstrWFIDrv->p2p_connect)
@@ -1115,9 +1122,12 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
1115 } 1122 }
1116 1123
1117 if (key_index >= 0 && key_index <= 3) { 1124 if (key_index >= 0 && key_index <= 3) {
1118 memset(priv->WILC_WFI_wep_key[key_index], 0, priv->WILC_WFI_wep_key_len[key_index]); 1125 if (priv->WILC_WFI_wep_key_len[key_index]) {
1119 priv->WILC_WFI_wep_key_len[key_index] = 0; 1126 memset(priv->WILC_WFI_wep_key[key_index], 0,
1120 wilc_remove_wep_key(vif, key_index); 1127 priv->WILC_WFI_wep_key_len[key_index]);
1128 priv->WILC_WFI_wep_key_len[key_index] = 0;
1129 wilc_remove_wep_key(vif, key_index);
1130 }
1121 } else { 1131 } else {
1122 wilc_remove_key(priv->hif_drv, mac_addr); 1132 wilc_remove_key(priv->hif_drv, mac_addr);
1123 } 1133 }
@@ -1355,9 +1365,8 @@ static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len)
1355 u8 channel_list_attr_index = 0; 1365 u8 channel_list_attr_index = 0;
1356 1366
1357 while (index < len) { 1367 while (index < len) {
1358 if (buf[index] == GO_INTENT_ATTR_ID) { 1368 if (buf[index] == GO_INTENT_ATTR_ID)
1359 buf[index + 3] = (buf[index + 3] & 0x01) | (0x00 << 1); 1369 buf[index + 3] = (buf[index + 3] & 0x01) | (0x00 << 1);
1360 }
1361 1370
1362 if (buf[index] == CHANLIST_ATTR_ID) 1371 if (buf[index] == CHANLIST_ATTR_ID)
1363 channel_list_attr_index = index; 1372 channel_list_attr_index = index;
@@ -1369,9 +1378,8 @@ static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len)
1369 if (channel_list_attr_index) { 1378 if (channel_list_attr_index) {
1370 for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) { 1379 for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) {
1371 if (buf[i] == 0x51) { 1380 if (buf[i] == 0x51) {
1372 for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) { 1381 for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++)
1373 buf[j] = wlan_channel; 1382 buf[j] = wlan_channel;
1374 }
1375 break; 1383 break;
1376 } 1384 }
1377 } 1385 }
@@ -1409,9 +1417,8 @@ static void WILC_WFI_CfgParseTxAction(u8 *buf, u32 len, bool bOperChan, u8 iftyp
1409 if (channel_list_attr_index) { 1417 if (channel_list_attr_index) {
1410 for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) { 1418 for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) {
1411 if (buf[i] == 0x51) { 1419 if (buf[i] == 0x51) {
1412 for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) { 1420 for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++)
1413 buf[j] = wlan_channel; 1421 buf[j] = wlan_channel;
1414 }
1415 break; 1422 break;
1416 } 1423 }
1417 } 1424 }
@@ -1752,15 +1759,15 @@ void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
1752 switch (frame_type) { 1759 switch (frame_type) {
1753 case PROBE_REQ: 1760 case PROBE_REQ:
1754 { 1761 {
1755 vif->g_struct_frame_reg[0].frame_type = frame_type; 1762 vif->frame_reg[0].type = frame_type;
1756 vif->g_struct_frame_reg[0].reg = reg; 1763 vif->frame_reg[0].reg = reg;
1757 } 1764 }
1758 break; 1765 break;
1759 1766
1760 case ACTION: 1767 case ACTION:
1761 { 1768 {
1762 vif->g_struct_frame_reg[1].frame_type = frame_type; 1769 vif->frame_reg[1].type = frame_type;
1763 vif->g_struct_frame_reg[1].reg = reg; 1770 vif->frame_reg[1].reg = reg;
1764 } 1771 }
1765 break; 1772 break;
1766 1773
@@ -1797,6 +1804,7 @@ static int dump_station(struct wiphy *wiphy, struct net_device *dev,
1797 1804
1798 wilc_get_rssi(vif, &sinfo->signal); 1805 wilc_get_rssi(vif, &sinfo->signal);
1799 1806
1807 memcpy(mac, priv->au8AssociatedBss, ETH_ALEN);
1800 return 0; 1808 return 0;
1801} 1809}
1802 1810
@@ -2269,7 +2277,6 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *de
2269 } 2277 }
2270 2278
2271 priv = wdev_priv(wdev); 2279 priv = wdev_priv(wdev);
2272 sema_init(&(priv->SemHandleUpdateStats), 1);
2273 priv->wdev = wdev; 2280 priv->wdev = wdev;
2274 wdev->wiphy->max_scan_ssids = MAX_NUM_PROBED_SSID; 2281 wdev->wiphy->max_scan_ssids = MAX_NUM_PROBED_SSID;
2275#ifdef CONFIG_PM 2282#ifdef CONFIG_PM
@@ -2315,7 +2322,7 @@ int wilc_init_host_int(struct net_device *net)
2315 2322
2316 priv->bInP2PlistenState = false; 2323 priv->bInP2PlistenState = false;
2317 2324
2318 sema_init(&(priv->hSemScanReq), 1); 2325 mutex_init(&priv->scan_req_lock);
2319 s32Error = wilc_init(net, &priv->hif_drv); 2326 s32Error = wilc_init(net, &priv->hif_drv);
2320 if (s32Error) 2327 if (s32Error)
2321 netdev_err(net, "Error while initializing hostinterface\n"); 2328 netdev_err(net, "Error while initializing hostinterface\n");
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index 4123cffe3a6e..3a561df6d370 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -130,8 +130,7 @@ struct wilc_priv {
130 struct wilc_wfi_key *wilc_ptk[MAX_NUM_STA]; 130 struct wilc_wfi_key *wilc_ptk[MAX_NUM_STA];
131 u8 wilc_groupkey; 131 u8 wilc_groupkey;
132 /* semaphores */ 132 /* semaphores */
133 struct semaphore SemHandleUpdateStats; 133 struct mutex scan_req_lock;
134 struct semaphore hSemScanReq;
135 /* */ 134 /* */
136 bool gbAutoRateAdjusted; 135 bool gbAutoRateAdjusted;
137 136
@@ -139,18 +138,17 @@ struct wilc_priv {
139 138
140}; 139};
141 140
142typedef struct { 141struct frame_reg {
143 u16 frame_type; 142 u16 type;
144 bool reg; 143 bool reg;
145 144};
146} struct_frame_reg;
147 145
148struct wilc_vif { 146struct wilc_vif {
149 u8 idx; 147 u8 idx;
150 u8 iftype; 148 u8 iftype;
151 int monitor_flag; 149 int monitor_flag;
152 int mac_opened; 150 int mac_opened;
153 struct_frame_reg g_struct_frame_reg[num_reg_frame]; 151 struct frame_reg frame_reg[num_reg_frame];
154 struct net_device_stats netstats; 152 struct net_device_stats netstats;
155 struct wilc *wilc; 153 struct wilc *wilc;
156 u8 src_addr[ETH_ALEN]; 154 u8 src_addr[ETH_ALEN];
@@ -181,8 +179,7 @@ struct wilc {
181 struct semaphore cfg_event; 179 struct semaphore cfg_event;
182 struct semaphore sync_event; 180 struct semaphore sync_event;
183 struct semaphore txq_event; 181 struct semaphore txq_event;
184 182 struct completion txq_thread_started;
185 struct semaphore txq_thread_started;
186 183
187 struct task_struct *txq_thread; 184 struct task_struct *txq_thread;
188 185
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index fd938fb43dd3..11e16d56ace7 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -150,11 +150,6 @@ static u32 pending_base;
150static u32 tcp_session; 150static u32 tcp_session;
151static u32 pending_acks; 151static u32 pending_acks;
152 152
153static inline int init_tcp_tracking(void)
154{
155 return 0;
156}
157
158static inline int add_tcp_session(u32 src_prt, u32 dst_prt, u32 seq) 153static inline int add_tcp_session(u32 src_prt, u32 dst_prt, u32 seq)
159{ 154{
160 if (tcp_session < 2 * MAX_TCP_SESSION) { 155 if (tcp_session < 2 * MAX_TCP_SESSION) {
@@ -330,8 +325,11 @@ static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer,
330 tqe->priv = NULL; 325 tqe->priv = NULL;
331 tqe->tcp_pending_ack_idx = NOT_TCP_ACK; 326 tqe->tcp_pending_ack_idx = NOT_TCP_ACK;
332 327
333 if (wilc_wlan_txq_add_to_head(vif, tqe)) 328 if (wilc_wlan_txq_add_to_head(vif, tqe)) {
329 kfree(tqe);
334 return 0; 330 return 0;
331 }
332
335 return 1; 333 return 1;
336} 334}
337 335
@@ -626,13 +624,12 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
626 624
627 if ((reg & 0x1) == 0) { 625 if ((reg & 0x1) == 0) {
628 break; 626 break;
629 } else { 627 }
630 counter++; 628 counter++;
631 if (counter > 200) { 629 if (counter > 200) {
632 counter = 0; 630 counter = 0;
633 ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, 0); 631 ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, 0);
634 break; 632 break;
635 }
636 } 633 }
637 } while (!wilc->quit); 634 } while (!wilc->quit);
638 635
@@ -658,9 +655,8 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
658 if ((reg >> 2) & 0x1) { 655 if ((reg >> 2) & 0x1) {
659 entries = ((reg >> 3) & 0x3f); 656 entries = ((reg >> 3) & 0x3f);
660 break; 657 break;
661 } else {
662 release_bus(wilc, RELEASE_ALLOW_SLEEP);
663 } 658 }
659 release_bus(wilc, RELEASE_ALLOW_SLEEP);
664 } while (--timeout); 660 } while (--timeout);
665 if (timeout <= 0) { 661 if (timeout <= 0) {
666 ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x0); 662 ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x0);
@@ -679,9 +675,8 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
679 if (!ret) 675 if (!ret)
680 break; 676 break;
681 break; 677 break;
682 } else {
683 break;
684 } 678 }
679 break;
685 } while (1); 680 } while (1);
686 681
687 if (!ret) 682 if (!ret)
@@ -900,8 +895,6 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
900 DATA_INT_CLR | ENABLE_RX_VMM); 895 DATA_INT_CLR | ENABLE_RX_VMM);
901 ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size); 896 ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size);
902 897
903 if (!ret)
904 goto _end_;
905_end_: 898_end_:
906 if (ret) { 899 if (ret) {
907 offset += size; 900 offset += size;
@@ -951,10 +944,8 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
951 blksz = BIT(12); 944 blksz = BIT(12);
952 945
953 dma_buffer = kmalloc(blksz, GFP_KERNEL); 946 dma_buffer = kmalloc(blksz, GFP_KERNEL);
954 if (!dma_buffer) { 947 if (!dma_buffer)
955 ret = -EIO; 948 return -EIO;
956 goto _fail_1;
957 }
958 949
959 offset = 0; 950 offset = 0;
960 do { 951 do {
@@ -992,8 +983,6 @@ _fail_:
992 983
993 kfree(dma_buffer); 984 kfree(dma_buffer);
994 985
995_fail_1:
996
997 return (ret < 0) ? ret : 0; 986 return (ret < 0) ? ret : 0;
998} 987}
999 988
@@ -1211,7 +1200,7 @@ static int wilc_wlan_cfg_commit(struct wilc_vif *vif, int type,
1211 return 0; 1200 return 0;
1212} 1201}
1213 1202
1214int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer, 1203int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
1215 u32 buffer_size, int commit, u32 drv_handler) 1204 u32 buffer_size, int commit, u32 drv_handler)
1216{ 1205{
1217 u32 offset; 1206 u32 offset;
@@ -1226,7 +1215,7 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
1226 1215
1227 offset = wilc->cfg_frame_offset; 1216 offset = wilc->cfg_frame_offset;
1228 ret_size = wilc_wlan_cfg_set_wid(wilc->cfg_frame.frame, offset, 1217 ret_size = wilc_wlan_cfg_set_wid(wilc->cfg_frame.frame, offset,
1229 (u16)wid, buffer, buffer_size); 1218 wid, buffer, buffer_size);
1230 offset += ret_size; 1219 offset += ret_size;
1231 wilc->cfg_frame_offset = offset; 1220 wilc->cfg_frame_offset = offset;
1232 1221
@@ -1253,7 +1242,7 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
1253 return ret_size; 1242 return ret_size;
1254} 1243}
1255 1244
1256int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit, 1245int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
1257 u32 drv_handler) 1246 u32 drv_handler)
1258{ 1247{
1259 u32 offset; 1248 u32 offset;
@@ -1267,8 +1256,7 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
1267 wilc->cfg_frame_offset = 0; 1256 wilc->cfg_frame_offset = 0;
1268 1257
1269 offset = wilc->cfg_frame_offset; 1258 offset = wilc->cfg_frame_offset;
1270 ret_size = wilc_wlan_cfg_get_wid(wilc->cfg_frame.frame, offset, 1259 ret_size = wilc_wlan_cfg_get_wid(wilc->cfg_frame.frame, offset, wid);
1271 (u16)wid);
1272 offset += ret_size; 1260 offset += ret_size;
1273 wilc->cfg_frame_offset = offset; 1261 wilc->cfg_frame_offset = offset;
1274 1262
@@ -1291,9 +1279,9 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
1291 return ret_size; 1279 return ret_size;
1292} 1280}
1293 1281
1294int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size) 1282int wilc_wlan_cfg_get_val(u16 wid, u8 *buffer, u32 buffer_size)
1295{ 1283{
1296 return wilc_wlan_cfg_get_wid_value((u16)wid, buffer, buffer_size); 1284 return wilc_wlan_cfg_get_wid_value(wid, buffer, buffer_size);
1297} 1285}
1298 1286
1299int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids, 1287int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
@@ -1440,7 +1428,6 @@ int wilc_wlan_init(struct net_device *dev)
1440 ret = -EIO; 1428 ret = -EIO;
1441 goto _fail_; 1429 goto _fail_;
1442 } 1430 }
1443 init_tcp_tracking();
1444 1431
1445 return 1; 1432 return 1;
1446 1433
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index bcd4bfa5accc..30e5312ee87e 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -284,11 +284,11 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
284int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count); 284int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count);
285void wilc_handle_isr(struct wilc *wilc); 285void wilc_handle_isr(struct wilc *wilc);
286void wilc_wlan_cleanup(struct net_device *dev); 286void wilc_wlan_cleanup(struct net_device *dev);
287int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer, 287int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
288 u32 buffer_size, int commit, u32 drv_handler); 288 u32 buffer_size, int commit, u32 drv_handler);
289int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit, 289int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
290 u32 drv_handler); 290 u32 drv_handler);
291int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size); 291int wilc_wlan_cfg_get_val(u16 wid, u8 *buffer, u32 buffer_size);
292int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer, 292int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
293 u32 buffer_size, wilc_tx_complete_func_t func); 293 u32 buffer_size, wilc_tx_complete_func_t func);
294void wilc_chip_sleep_manually(struct wilc *wilc); 294void wilc_chip_sleep_manually(struct wilc *wilc);
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index b3425b9cec94..926fc16319b6 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -230,7 +230,7 @@ static int wilc_wlan_cfg_set_str(u8 *frame, u32 offset, u16 id, u8 *str, u32 siz
230 buf[1] = (u8)(id >> 8); 230 buf[1] = (u8)(id >> 8);
231 buf[2] = (u8)size; 231 buf[2] = (u8)size;
232 232
233 if ((str != NULL) && (size != 0)) 233 if ((str) && (size != 0))
234 memcpy(&buf[3], str, size); 234 memcpy(&buf[3], str, size);
235 235
236 return (size + 3); 236 return (size + 3);
@@ -251,11 +251,10 @@ static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
251 buf[2] = (u8)size; 251 buf[2] = (u8)size;
252 buf[3] = (u8)(size >> 8); 252 buf[3] = (u8)(size >> 8);
253 253
254 if ((b != NULL) && (size != 0)) { 254 if ((b) && (size != 0)) {
255 memcpy(&buf[4], b, size); 255 memcpy(&buf[4], b, size);
256 for (i = 0; i < size; i++) { 256 for (i = 0; i < size; i++)
257 checksum += buf[i + 4]; 257 checksum += buf[i + 4];
258 }
259 } 258 }
260 259
261 buf[size + 4] = checksum; 260 buf[size + 4] = checksum;
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 83cf84dd63b5..410bfc034319 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -15,18 +15,6 @@
15 15
16/******************************************** 16/********************************************
17 * 17 *
18 * Debug Flags
19 *
20 ********************************************/
21
22#define N_INIT 0x00000001
23#define N_ERR 0x00000002
24#define N_TXQ 0x00000004
25#define N_INTR 0x00000008
26#define N_RXQ 0x00000010
27
28/********************************************
29 *
30 * Host Interface Defines 18 * Host Interface Defines
31 * 19 *
32 ********************************************/ 20 ********************************************/
@@ -37,15 +25,6 @@
37 25
38/******************************************** 26/********************************************
39 * 27 *
40 * Tx/Rx Buffer Size Defines
41 *
42 ********************************************/
43
44#define CE_TX_BUFFER_SIZE (64 * 1024)
45#define CE_RX_BUFFER_SIZE (384 * 1024)
46
47/********************************************
48 *
49 * Wlan Interface Defines 28 * Wlan Interface Defines
50 * 29 *
51 ********************************************/ 30 ********************************************/
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 2438cf7cc695..a6e6fb9f42e1 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -771,8 +771,10 @@ static struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev
771 wiphy->n_cipher_suites = PRISM2_NUM_CIPHER_SUITES; 771 wiphy->n_cipher_suites = PRISM2_NUM_CIPHER_SUITES;
772 wiphy->cipher_suites = prism2_cipher_suites; 772 wiphy->cipher_suites = prism2_cipher_suites;
773 773
774 if (wiphy_register(wiphy) < 0) 774 if (wiphy_register(wiphy) < 0) {
775 wiphy_free(wiphy);
775 return NULL; 776 return NULL;
777 }
776 778
777 return wiphy; 779 return wiphy;
778} 780}
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 21a92df85931..337810750f2b 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -614,7 +614,7 @@ static hfa384x_usbctlx_t *usbctlx_alloc(void)
614 614
615 ctlx = kzalloc(sizeof(*ctlx), 615 ctlx = kzalloc(sizeof(*ctlx),
616 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); 616 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
617 if (ctlx != NULL) 617 if (ctlx)
618 init_completion(&ctlx->done); 618 init_completion(&ctlx->done);
619 619
620 return ctlx; 620 return ctlx;
@@ -797,7 +797,7 @@ static inline struct usbctlx_completor *init_rmem_completor(
797----------------------------------------------------------------*/ 797----------------------------------------------------------------*/
798static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx) 798static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx)
799{ 799{
800 if (ctlx->usercb != NULL) { 800 if (ctlx->usercb) {
801 hfa384x_cmdresult_t cmdresult; 801 hfa384x_cmdresult_t cmdresult;
802 802
803 if (ctlx->state != CTLX_COMPLETE) { 803 if (ctlx->state != CTLX_COMPLETE) {
@@ -2738,7 +2738,7 @@ static void hfa384x_usbctlx_completion_task(unsigned long data)
2738 /* Call the completion function that this 2738 /* Call the completion function that this
2739 * command was assigned, assuming it has one. 2739 * command was assigned, assuming it has one.
2740 */ 2740 */
2741 if (ctlx->cmdcb != NULL) { 2741 if (ctlx->cmdcb) {
2742 spin_unlock_irqrestore(&hw->ctlxq.lock, flags); 2742 spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
2743 ctlx->cmdcb(hw, ctlx); 2743 ctlx->cmdcb(hw, ctlx);
2744 spin_lock_irqsave(&hw->ctlxq.lock, flags); 2744 spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -3629,7 +3629,7 @@ static void hfa384x_ctlxout_callback(struct urb *urb)
3629 dbprint_urb(urb); 3629 dbprint_urb(urb);
3630#endif 3630#endif
3631 if ((urb->status == -ESHUTDOWN) || 3631 if ((urb->status == -ESHUTDOWN) ||
3632 (urb->status == -ENODEV) || (hw == NULL)) 3632 (urb->status == -ENODEV) || !hw)
3633 return; 3633 return;
3634 3634
3635retry: 3635retry:
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 0a8f3960d465..6354036ffb42 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -75,8 +75,8 @@
75#include "p80211ioctl.h" 75#include "p80211ioctl.h"
76#include "p80211req.h" 76#include "p80211req.h"
77 77
78static u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 }; 78static const u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 };
79static u8 oui_8021h[] = { 0x00, 0x00, 0xf8 }; 79static const u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
80 80
81/*---------------------------------------------------------------- 81/*----------------------------------------------------------------
82* p80211pb_ether_to_80211 82* p80211pb_ether_to_80211
@@ -243,7 +243,6 @@ static void orinoco_spy_gather(wlandevice_t *wlandev, char *mac,
243 243
244 for (i = 0; i < wlandev->spy_number; i++) { 244 for (i = 0; i < wlandev->spy_number; i++) {
245 if (!memcmp(wlandev->spy_address[i], mac, ETH_ALEN)) { 245 if (!memcmp(wlandev->spy_address[i], mac, ETH_ALEN)) {
246 memcpy(wlandev->spy_address[i], mac, ETH_ALEN);
247 wlandev->spy_stat[i].level = rxmeta->signal; 246 wlandev->spy_stat[i].level = rxmeta->signal;
248 wlandev->spy_stat[i].noise = rxmeta->noise; 247 wlandev->spy_stat[i].noise = rxmeta->noise;
249 wlandev->spy_stat[i].qual = 248 wlandev->spy_stat[i].qual =
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 1f9dfba5dbb3..90cc8cdcf969 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -156,7 +156,7 @@ static int p80211knetdev_open(netdevice_t *netdev)
156 return -ENODEV; 156 return -ENODEV;
157 157
158 /* Tell the MSD to open */ 158 /* Tell the MSD to open */
159 if (wlandev->open != NULL) { 159 if (wlandev->open) {
160 result = wlandev->open(wlandev); 160 result = wlandev->open(wlandev);
161 if (result == 0) { 161 if (result == 0) {
162 netif_start_queue(wlandev->netdev); 162 netif_start_queue(wlandev->netdev);
@@ -186,7 +186,7 @@ static int p80211knetdev_stop(netdevice_t *netdev)
186 int result = 0; 186 int result = 0;
187 wlandevice_t *wlandev = netdev->ml_priv; 187 wlandevice_t *wlandev = netdev->ml_priv;
188 188
189 if (wlandev->close != NULL) 189 if (wlandev->close)
190 result = wlandev->close(wlandev); 190 result = wlandev->close(wlandev);
191 191
192 netif_stop_queue(wlandev->netdev); 192 netif_stop_queue(wlandev->netdev);
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 810ee68aa18e..820a0e20a941 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -158,7 +158,6 @@ extern int wlan_wext_write;
158 158
159/* WLAN device type */ 159/* WLAN device type */
160typedef struct wlandevice { 160typedef struct wlandevice {
161 struct wlandevice *next; /* link for list of devices */
162 void *priv; /* private data for MSD */ 161 void *priv; /* private data for MSD */
163 162
164 /* Subsystem State */ 163 /* Subsystem State */
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 8564d9eb918f..56bffd93c982 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -278,7 +278,8 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
278 /* Build the PDA we're going to use. */ 278 /* Build the PDA we're going to use. */
279 if (read_cardpda(&pda, wlandev)) { 279 if (read_cardpda(&pda, wlandev)) {
280 netdev_err(wlandev->netdev, "load_cardpda failed, exiting.\n"); 280 netdev_err(wlandev->netdev, "load_cardpda failed, exiting.\n");
281 return 1; 281 result = 1;
282 goto out;
282 } 283 }
283 284
284 /* read the card's PRI-SUP */ 285 /* read the card's PRI-SUP */
@@ -315,55 +316,58 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
315 if (result) { 316 if (result) {
316 netdev_err(wlandev->netdev, 317 netdev_err(wlandev->netdev,
317 "Failed to read the data exiting.\n"); 318 "Failed to read the data exiting.\n");
318 return 1; 319 goto out;
319 } 320 }
320 321
321 result = validate_identity(); 322 result = validate_identity();
322
323 if (result) { 323 if (result) {
324 netdev_err(wlandev->netdev, "Incompatible firmware image.\n"); 324 netdev_err(wlandev->netdev, "Incompatible firmware image.\n");
325 return 1; 325 goto out;
326 } 326 }
327 327
328 if (startaddr == 0x00000000) { 328 if (startaddr == 0x00000000) {
329 netdev_err(wlandev->netdev, 329 netdev_err(wlandev->netdev,
330 "Can't RAM download a Flash image!\n"); 330 "Can't RAM download a Flash image!\n");
331 return 1; 331 result = 1;
332 goto out;
332 } 333 }
333 334
334 /* Make the image chunks */ 335 /* Make the image chunks */
335 result = mkimage(fchunk, &nfchunks); 336 result = mkimage(fchunk, &nfchunks);
336 if (result) { 337 if (result) {
337 netdev_err(wlandev->netdev, "Failed to make image chunk.\n"); 338 netdev_err(wlandev->netdev, "Failed to make image chunk.\n");
338 return 1; 339 goto free_chunks;
339 } 340 }
340 341
341 /* Do any plugging */ 342 /* Do any plugging */
342 result = plugimage(fchunk, nfchunks, s3plug, ns3plug, &pda); 343 result = plugimage(fchunk, nfchunks, s3plug, ns3plug, &pda);
343 if (result) { 344 if (result) {
344 netdev_err(wlandev->netdev, "Failed to plug data.\n"); 345 netdev_err(wlandev->netdev, "Failed to plug data.\n");
345 return 1; 346 goto free_chunks;
346 } 347 }
347 348
348 /* Insert any CRCs */ 349 /* Insert any CRCs */
349 if (crcimage(fchunk, nfchunks, s3crc, ns3crc)) { 350 result = crcimage(fchunk, nfchunks, s3crc, ns3crc);
351 if (result) {
350 netdev_err(wlandev->netdev, "Failed to insert all CRCs\n"); 352 netdev_err(wlandev->netdev, "Failed to insert all CRCs\n");
351 return 1; 353 goto free_chunks;
352 } 354 }
353 355
354 /* Write the image */ 356 /* Write the image */
355 result = writeimage(wlandev, fchunk, nfchunks); 357 result = writeimage(wlandev, fchunk, nfchunks);
356 if (result) { 358 if (result) {
357 netdev_err(wlandev->netdev, "Failed to ramwrite image data.\n"); 359 netdev_err(wlandev->netdev, "Failed to ramwrite image data.\n");
358 return 1; 360 goto free_chunks;
359 } 361 }
360 362
363 netdev_info(wlandev->netdev, "prism2_usb: firmware loading finished.\n");
364
365free_chunks:
361 /* clear any allocated memory */ 366 /* clear any allocated memory */
362 free_chunks(fchunk, &nfchunks); 367 free_chunks(fchunk, &nfchunks);
363 free_srecs(); 368 free_srecs();
364 369
365 netdev_info(wlandev->netdev, "prism2_usb: firmware loading finished.\n"); 370out:
366
367 return result; 371 return result;
368} 372}
369 373
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 41358bbc6246..b26d09ff840c 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -8,7 +8,7 @@
8 { USB_DEVICE(vid, pid), \ 8 { USB_DEVICE(vid, pid), \
9 .driver_info = (unsigned long)name } 9 .driver_info = (unsigned long)name }
10 10
11static struct usb_device_id usb_prism_tbl[] = { 11static const struct usb_device_id usb_prism_tbl[] = {
12 PRISM_DEV(0x04bb, 0x0922, "IOData AirPort WN-B11/USBS"), 12 PRISM_DEV(0x04bb, 0x0922, "IOData AirPort WN-B11/USBS"),
13 PRISM_DEV(0x07aa, 0x0012, "Corega Wireless LAN USB Stick-11"), 13 PRISM_DEV(0x07aa, 0x0012, "Corega Wireless LAN USB Stick-11"),
14 PRISM_DEV(0x09aa, 0x3642, "Prism2.x 11Mbps WLAN USB Adapter"), 14 PRISM_DEV(0x09aa, 0x3642, "Prism2.x 11Mbps WLAN USB Adapter"),
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 7eadf922b21f..d56ef1425f6b 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -1130,8 +1130,9 @@ static int XGIfb_get_cmap_len(const struct fb_var_screeninfo *var)
1130 return (var->bits_per_pixel == 8) ? 256 : 16; 1130 return (var->bits_per_pixel == 8) ? 256 : 16;
1131} 1131}
1132 1132
1133static int XGIfb_setcolreg(unsigned regno, unsigned red, unsigned green, 1133static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
1134 unsigned blue, unsigned transp, struct fb_info *info) 1134 unsigned int green, unsigned int blue,
1135 unsigned int transp, struct fb_info *info)
1135{ 1136{
1136 struct xgifb_video_info *xgifb_info = info->par; 1137 struct xgifb_video_info *xgifb_info = info->par;
1137 1138
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 26b539bc6faf..062ece22ed84 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -355,7 +355,8 @@ static void XGINew_DDR2_DefaultRegister(
355 unsigned long P3d4 = Port, P3c4 = Port - 0x10; 355 unsigned long P3d4 = Port, P3c4 = Port - 0x10;
356 356
357 /* keep following setting sequence, each setting in 357 /* keep following setting sequence, each setting in
358 * the same reg insert idle */ 358 * the same reg insert idle
359 */
359 xgifb_reg_set(P3d4, 0x82, 0x77); 360 xgifb_reg_set(P3d4, 0x82, 0x77);
360 xgifb_reg_set(P3d4, 0x86, 0x00); 361 xgifb_reg_set(P3d4, 0x86, 0x00);
361 xgifb_reg_get(P3d4, 0x86); /* Insert read command for delay */ 362 xgifb_reg_get(P3d4, 0x86); /* Insert read command for delay */
@@ -551,7 +552,8 @@ static int XGINew_ReadWriteRest(unsigned short StopAddr,
551 writel(Position, fbaddr + Position); 552 writel(Position, fbaddr + Position);
552 } 553 }
553 554
554 usleep_range(500, 1500); /* Fix #1759 Memory Size error in Multi-Adapter. */ 555 /* Fix #1759 Memory Size error in Multi-Adapter. */
556 usleep_range(500, 1500);
555 557
556 Position = 0; 558 Position = 0;
557 559
@@ -699,11 +701,11 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
699 break; 701 break;
700 case XG42: 702 case XG42:
701 /* 703 /*
702 XG42 SR14 D[3] Reserve 704 * XG42 SR14 D[3] Reserve
703 D[2] = 1, Dual Channel 705 * D[2] = 1, Dual Channel
704 = 0, Single Channel 706 * = 0, Single Channel
705 707 *
706 It's Different from Other XG40 Series. 708 * It's Different from Other XG40 Series.
707 */ 709 */
708 if (XGINew_CheckFrequence(pVBInfo) == 1) { /* DDRII, DDR2x */ 710 if (XGINew_CheckFrequence(pVBInfo) == 1) { /* DDRII, DDR2x */
709 pVBInfo->ram_bus = 32; /* 32 bits */ 711 pVBInfo->ram_bus = 32; /* 32 bits */
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index f97c77d88173..50c8ea4f5ab7 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -108,9 +108,9 @@ static void XGI_SetATTRegs(unsigned short ModeIdIndex,
108 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) { 108 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
109 ARdata = 0; 109 ARdata = 0;
110 } else if ((pVBInfo->VBInfo & 110 } else if ((pVBInfo->VBInfo &
111 (SetCRT2ToTV | SetCRT2ToLCD)) && 111 (SetCRT2ToTV | SetCRT2ToLCD)) &&
112 (pVBInfo->VBInfo & SetInSlaveMode)) { 112 (pVBInfo->VBInfo & SetInSlaveMode)) {
113 ARdata = 0; 113 ARdata = 0;
114 } 114 }
115 } 115 }
116 116
@@ -1992,7 +1992,8 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
1992 } 1992 }
1993 1993
1994 /* LCD+TV can't support in slave mode 1994 /* LCD+TV can't support in slave mode
1995 * (Force LCDA+TV->LCDB) */ 1995 * (Force LCDA+TV->LCDB)
1996 */
1996 if ((tempbx & SetInSlaveMode) && (tempbx & XGI_SetCRT2ToLCDA)) { 1997 if ((tempbx & SetInSlaveMode) && (tempbx & XGI_SetCRT2ToLCDA)) {
1997 tempbx ^= (SetCRT2ToLCD | XGI_SetCRT2ToLCDA | 1998 tempbx ^= (SetCRT2ToLCD | XGI_SetCRT2ToLCDA |
1998 SetCRT2ToDualEdge); 1999 SetCRT2ToDualEdge);
@@ -2983,7 +2984,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
2983 2984
2984 if ((pVBInfo->VBInfo & SetCRT2ToHiVision) && 2985 if ((pVBInfo->VBInfo & SetCRT2ToHiVision) &&
2985 !(pVBInfo->VBType & VB_SIS301LV) && (resinfo == 7)) 2986 !(pVBInfo->VBType & VB_SIS301LV) && (resinfo == 7))
2986 temp -= 2; 2987 temp -= 2;
2987 } 2988 }
2988 2989
2989 /* 0x05 Horizontal Display Start */ 2990 /* 0x05 Horizontal Display Start */
@@ -3450,8 +3451,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
3450 if (!(pVBInfo->TVInfo & 3451 if (!(pVBInfo->TVInfo &
3451 (TVSetYPbPr525p | TVSetYPbPr750p))) 3452 (TVSetYPbPr525p | TVSetYPbPr750p)))
3452 tempbx >>= 1; 3453 tempbx >>= 1;
3453 } else 3454 } else {
3454 tempbx >>= 1; 3455 tempbx >>= 1;
3456 }
3455 } 3457 }
3456 3458
3457 tempbx -= 2; 3459 tempbx -= 2;
@@ -3839,9 +3841,9 @@ static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
3839 if (pVBInfo->VGAVDE == 525) { 3841 if (pVBInfo->VGAVDE == 525) {
3840 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B 3842 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
3841 | VB_SIS301LV | VB_SIS302LV 3843 | VB_SIS301LV | VB_SIS302LV
3842 | VB_XGI301C)) { 3844 | VB_XGI301C))
3843 temp = 0xC6; 3845 temp = 0xC6;
3844 } else 3846 else
3845 temp = 0xC4; 3847 temp = 0xC4;
3846 3848
3847 xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp); 3849 xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp);
@@ -3851,9 +3853,9 @@ static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
3851 if (pVBInfo->VGAVDE == 420) { 3853 if (pVBInfo->VGAVDE == 420) {
3852 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B 3854 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
3853 | VB_SIS301LV | VB_SIS302LV 3855 | VB_SIS301LV | VB_SIS302LV
3854 | VB_XGI301C)) { 3856 | VB_XGI301C))
3855 temp = 0x4F; 3857 temp = 0x4F;
3856 } else 3858 else
3857 temp = 0x4E; 3859 temp = 0x4E;
3858 xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp); 3860 xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp);
3859 } 3861 }
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index 45f2c992cd44..c801deb142f6 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -58,8 +58,9 @@ static const unsigned char XGI27_cr41[24][3] = {
58 {0xC4, 0x40, 0x84}, /* 1 CR8A */ 58 {0xC4, 0x40, 0x84}, /* 1 CR8A */
59 {0xC4, 0x40, 0x84}, /* 2 CR8B */ 59 {0xC4, 0x40, 0x84}, /* 2 CR8B */
60 {0xB3, 0x13, 0xa4}, /* 3 CR40[7], 60 {0xB3, 0x13, 0xa4}, /* 3 CR40[7],
61 CR99[2:0], 61 * CR99[2:0],
62 CR45[3:0]*/ 62 * CR45[3:0]
63 */
63 {0xf0, 0xf5, 0xf0}, /* 4 CR59 */ 64 {0xf0, 0xf5, 0xf0}, /* 4 CR59 */
64 {0x90, 0x90, 0x24}, /* 5 CR68 */ 65 {0x90, 0x90, 0x24}, /* 5 CR68 */
65 {0x77, 0x67, 0x44}, /* 6 CR69 */ 66 {0x77, 0x67, 0x44}, /* 6 CR69 */
@@ -101,9 +102,11 @@ const struct XGI_ExtStruct XGI330_EModeIDTable[] = {
101 {0x38, 0x0a1b, 0x0508, 0x08, 0x00, 0x16}, 102 {0x38, 0x0a1b, 0x0508, 0x08, 0x00, 0x16},
102 {0x3a, 0x0e3b, 0x0609, 0x09, 0x00, 0x1e}, 103 {0x3a, 0x0e3b, 0x0609, 0x09, 0x00, 0x1e},
103 {0x3c, 0x0e3b, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200 104 {0x3c, 0x0e3b, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
104 add CRT2MODE [2003/10/07] */ 105 * add CRT2MODE [2003/10/07]
106 */
105 {0x3d, 0x0e7d, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200 107 {0x3d, 0x0e7d, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
106 add CRT2MODE */ 108 * add CRT2MODE
109 */
107 {0x40, 0x9a1c, 0x0000, 0x00, 0x04, 0x00}, 110 {0x40, 0x9a1c, 0x0000, 0x00, 0x04, 0x00},
108 {0x41, 0x9a1d, 0x0000, 0x00, 0x04, 0x00}, 111 {0x41, 0x9a1d, 0x0000, 0x00, 0x04, 0x00},
109 {0x43, 0x0a1c, 0x0306, 0x06, 0x05, 0x06}, 112 {0x43, 0x0a1c, 0x0306, 0x06, 0x05, 0x06},
@@ -129,7 +132,8 @@ const struct XGI_ExtStruct XGI330_EModeIDTable[] = {
129 {0x64, 0x0a7f, 0x0508, 0x08, 0x00, 0x16}, 132 {0x64, 0x0a7f, 0x0508, 0x08, 0x00, 0x16},
130 {0x65, 0x0eff, 0x0609, 0x09, 0x00, 0x1e}, 133 {0x65, 0x0eff, 0x0609, 0x09, 0x00, 0x1e},
131 {0x66, 0x0eff, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200 134 {0x66, 0x0eff, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
132 add CRT2MODE */ 135 * add CRT2MODE
136 */
133 {0x68, 0x067b, 0x080b, 0x0b, 0x00, 0x29}, 137 {0x68, 0x067b, 0x080b, 0x0b, 0x00, 0x29},
134 {0x69, 0x06fd, 0x080b, 0x0b, 0x00, 0x29}, 138 {0x69, 0x06fd, 0x080b, 0x0b, 0x00, 0x29},
135 {0x6b, 0x07ff, 0x080b, 0x0b, 0x00, 0x29}, 139 {0x6b, 0x07ff, 0x080b, 0x0b, 0x00, 0x29},
@@ -223,38 +227,38 @@ const struct XGI_CRT1TableStruct XGI_CRT1Table[] = {
223 0x0D, 0x3E, 0xE0, 0x83, 0xDF, 0x0E, 0x90} }, /* 0xb */ 227 0x0D, 0x3E, 0xE0, 0x83, 0xDF, 0x0E, 0x90} }, /* 0xb */
224 { {0x65, 0x4F, 0x89, 0x57, 0x9F, 0x00, 0x01, 0x00, 228 { {0x65, 0x4F, 0x89, 0x57, 0x9F, 0x00, 0x01, 0x00,
225 0xFB, 0x1F, 0xE6, 0x8A, 0xDF, 0xFC, 0x10} }, /* 0xc */ 229 0xFB, 0x1F, 0xE6, 0x8A, 0xDF, 0xFC, 0x10} }, /* 0xc */
226 { {0x7B, 0x63, 0x9F, 0x6A, 0x93, 0x00, 0x05, 0x00, /* ; 230 /* 0D (800x600,56Hz) */
227 0D (800x600,56Hz) */ 231 { {0x7B, 0x63, 0x9F, 0x6A, 0x93, 0x00, 0x05, 0x00,
228 0x6F, 0xF0, 0x58, 0x8A, 0x57, 0x70, 0xA0} }, /* ; 232 /* (VCLK 36.0MHz) */
229 (VCLK 36.0MHz) */ 233 0x6F, 0xF0, 0x58, 0x8A, 0x57, 0x70, 0xA0} },
230 { {0x7F, 0x63, 0x83, 0x6C, 0x1C, 0x00, 0x06, 0x00, /* ; 234 /* 0E (800x600,60Hz) */
231 0E (800x600,60Hz) */ 235 { {0x7F, 0x63, 0x83, 0x6C, 0x1C, 0x00, 0x06, 0x00,
232 0x72, 0xF0, 0x58, 0x8C, 0x57, 0x73, 0xA0} }, /* ; 236 /* (VCLK 40.0MHz) */
233 (VCLK 40.0MHz) */ 237 0x72, 0xF0, 0x58, 0x8C, 0x57, 0x73, 0xA0} },
234 { {0x7D, 0x63, 0x81, 0x6E, 0x1D, 0x00, 0x06, 0x00, /* ; 238 /* 0F (800x600,72Hz) */
235 0F (800x600,72Hz) */ 239 { {0x7D, 0x63, 0x81, 0x6E, 0x1D, 0x00, 0x06, 0x00,
236 0x98, 0xF0, 0x7C, 0x82, 0x57, 0x99, 0x80} }, /* ; 240 /* (VCLK 50.0MHz) */
237 (VCLK 50.0MHz) */ 241 0x98, 0xF0, 0x7C, 0x82, 0x57, 0x99, 0x80} },
238 { {0x7F, 0x63, 0x83, 0x69, 0x13, 0x00, 0x06, 0x00, /* ; 242 /* 10 (800x600,75Hz) */
239 10 (800x600,75Hz) */ 243 { {0x7F, 0x63, 0x83, 0x69, 0x13, 0x00, 0x06, 0x00,
240 0x6F, 0xF0, 0x58, 0x8B, 0x57, 0x70, 0xA0} }, /* ; 244 /* (VCLK 49.5MHz) */
241 (VCLK 49.5MHz) */ 245 0x6F, 0xF0, 0x58, 0x8B, 0x57, 0x70, 0xA0} },
242 { {0x7E, 0x63, 0x82, 0x6B, 0x13, 0x00, 0x06, 0x00, /* ; 246 /* 11 (800x600,85Hz) */
243 11 (800x600,85Hz) */ 247 { {0x7E, 0x63, 0x82, 0x6B, 0x13, 0x00, 0x06, 0x00,
244 0x75, 0xF0, 0x58, 0x8B, 0x57, 0x76, 0xA0} }, /* ; 248 /* (VCLK 56.25MHz) */
245 (VCLK 56.25MHz) */ 249 0x75, 0xF0, 0x58, 0x8B, 0x57, 0x76, 0xA0} },
246 { {0x81, 0x63, 0x85, 0x6D, 0x18, 0x00, 0x06, 0x60, /* ; 250 /* 12 (800x600,100Hz) */
247 12 (800x600,100Hz) */ 251 { {0x81, 0x63, 0x85, 0x6D, 0x18, 0x00, 0x06, 0x60,
248 0x7A, 0xF0, 0x58, 0x8B, 0x57, 0x7B, 0xA0} }, /* ; 252 /* (VCLK 75.8MHz) */
249 (VCLK 75.8MHz) */ 253 0x7A, 0xF0, 0x58, 0x8B, 0x57, 0x7B, 0xA0} },
250 { {0x83, 0x63, 0x87, 0x6E, 0x19, 0x00, 0x06, 0x60, /* ; 254 /* 13 (800x600,120Hz) */
251 13 (800x600,120Hz) */ 255 { {0x83, 0x63, 0x87, 0x6E, 0x19, 0x00, 0x06, 0x60,
252 0x81, 0xF0, 0x58, 0x8B, 0x57, 0x82, 0xA0} }, /* ; 256 /* (VCLK 79.411MHz) */
253 (VCLK 79.411MHz) */ 257 0x81, 0xF0, 0x58, 0x8B, 0x57, 0x82, 0xA0} },
254 { {0x85, 0x63, 0x89, 0x6F, 0x1A, 0x00, 0x06, 0x60, /* ; 258 /* 14 (800x600,160Hz) */
255 14 (800x600,160Hz) */ 259 { {0x85, 0x63, 0x89, 0x6F, 0x1A, 0x00, 0x06, 0x60,
256 0x91, 0xF0, 0x58, 0x8B, 0x57, 0x92, 0xA0} }, /* ; 260 /* (VCLK 105.822MHz) */
257 (VCLK 105.822MHz) */ 261 0x91, 0xF0, 0x58, 0x8B, 0x57, 0x92, 0xA0} },
258 { {0x99, 0x7F, 0x9D, 0x84, 0x1A, 0x00, 0x02, 0x00, 262 { {0x99, 0x7F, 0x9D, 0x84, 0x1A, 0x00, 0x02, 0x00,
259 0x96, 0x1F, 0x7F, 0x83, 0x7F, 0x97, 0x10} }, /* 0x15 */ 263 0x96, 0x1F, 0x7F, 0x83, 0x7F, 0x97, 0x10} }, /* 0x15 */
260 { {0xA3, 0x7F, 0x87, 0x86, 0x97, 0x00, 0x02, 0x00, 264 { {0xA3, 0x7F, 0x87, 0x86, 0x97, 0x00, 0x02, 0x00,
@@ -388,7 +392,8 @@ static const struct SiS_LCDData XGI_ExtLCD1024x768Data[] = {
388 392
389static const struct SiS_LCDData XGI_CetLCD1024x768Data[] = { 393static const struct SiS_LCDData XGI_CetLCD1024x768Data[] = {
390 {1, 1, 1344, 806, 1344, 806}, /* ; 00 (320x200,320x400, 394 {1, 1, 1344, 806, 1344, 806}, /* ; 00 (320x200,320x400,
391 640x200,640x400) */ 395 * 640x200,640x400)
396 */
392 {1, 1, 1344, 806, 1344, 806}, /* 01 (320x350,640x350) */ 397 {1, 1, 1344, 806, 1344, 806}, /* 01 (320x350,640x350) */
393 {1, 1, 1344, 806, 1344, 806}, /* 02 (360x400,720x400) */ 398 {1, 1, 1344, 806, 1344, 806}, /* 02 (360x400,720x400) */
394 {1, 1, 1344, 806, 1344, 806}, /* 03 (720x350) */ 399 {1, 1, 1344, 806, 1344, 806}, /* 03 (720x350) */
@@ -421,7 +426,8 @@ static const struct SiS_LCDData XGI_ExtLCD1280x1024Data[] = {
421 426
422static const struct SiS_LCDData XGI_CetLCD1280x1024Data[] = { 427static const struct SiS_LCDData XGI_CetLCD1280x1024Data[] = {
423 {1, 1, 1688, 1066, 1688, 1066}, /* 00 (320x200,320x400, 428 {1, 1, 1688, 1066, 1688, 1066}, /* 00 (320x200,320x400,
424 640x200,640x400) */ 429 * 640x200,640x400)
430 */
425 {1, 1, 1688, 1066, 1688, 1066}, /* 01 (320x350,640x350) */ 431 {1, 1, 1688, 1066, 1688, 1066}, /* 01 (320x350,640x350) */
426 {1, 1, 1688, 1066, 1688, 1066}, /* 02 (360x400,720x400) */ 432 {1, 1, 1688, 1066, 1688, 1066}, /* 02 (360x400,720x400) */
427 {1, 1, 1688, 1066, 1688, 1066}, /* 03 (720x350) */ 433 {1, 1, 1688, 1066, 1688, 1066}, /* 03 (720x350) */
@@ -434,7 +440,8 @@ static const struct SiS_LCDData XGI_CetLCD1280x1024Data[] = {
434 440
435static const struct SiS_LCDData xgifb_lcd_1400x1050[] = { 441static const struct SiS_LCDData xgifb_lcd_1400x1050[] = {
436 {211, 100, 2100, 408, 1688, 1066}, /* 00 (320x200,320x400, 442 {211, 100, 2100, 408, 1688, 1066}, /* 00 (320x200,320x400,
437 640x200,640x400) */ 443 * 640x200,640x400)
444 */
438 {211, 64, 1536, 358, 1688, 1066}, /* 01 (320x350,640x350) */ 445 {211, 64, 1536, 358, 1688, 1066}, /* 01 (320x350,640x350) */
439 {211, 100, 2100, 408, 1688, 1066}, /* 02 (360x400,720x400) */ 446 {211, 100, 2100, 408, 1688, 1066}, /* 02 (360x400,720x400) */
440 {211, 64, 1536, 358, 1688, 1066}, /* 03 (720x350) */ 447 {211, 64, 1536, 358, 1688, 1066}, /* 03 (720x350) */
@@ -442,13 +449,15 @@ static const struct SiS_LCDData xgifb_lcd_1400x1050[] = {
442 {211, 72, 1008, 609, 1688, 1066}, /* 05 (800x600x60Hz) */ 449 {211, 72, 1008, 609, 1688, 1066}, /* 05 (800x600x60Hz) */
443 {211, 128, 1400, 776, 1688, 1066}, /* 06 (1024x768x60Hz) */ 450 {211, 128, 1400, 776, 1688, 1066}, /* 06 (1024x768x60Hz) */
444 {1, 1, 1688, 1066, 1688, 1066}, /* 07 (1280x1024x60Hz 451 {1, 1, 1688, 1066, 1688, 1066}, /* 07 (1280x1024x60Hz
445 w/o Scaling) */ 452 * w/o Scaling)
453 */
446 {1, 1, 1688, 1066, 1688, 1066} /* 08 (1400x1050x60Hz) */ 454 {1, 1, 1688, 1066, 1688, 1066} /* 08 (1400x1050x60Hz) */
447}; 455};
448 456
449static const struct SiS_LCDData XGI_ExtLCD1600x1200Data[] = { 457static const struct SiS_LCDData XGI_ExtLCD1600x1200Data[] = {
450 {4, 1, 1620, 420, 2160, 1250}, /* 00 (320x200,320x400, 458 {4, 1, 1620, 420, 2160, 1250}, /* 00 (320x200,320x400,
451 640x200,640x400)*/ 459 * 640x200,640x400)
460 */
452 {27, 7, 1920, 375, 2160, 1250}, /* 01 (320x350,640x350) */ 461 {27, 7, 1920, 375, 2160, 1250}, /* 01 (320x350,640x350) */
453 {4, 1, 1620, 420, 2160, 1250}, /* 02 (360x400,720x400)*/ 462 {4, 1, 1620, 420, 2160, 1250}, /* 02 (360x400,720x400)*/
454 {27, 7, 1920, 375, 2160, 1250}, /* 03 (720x350) */ 463 {27, 7, 1920, 375, 2160, 1250}, /* 03 (720x350) */
@@ -462,7 +471,8 @@ static const struct SiS_LCDData XGI_ExtLCD1600x1200Data[] = {
462 471
463static const struct SiS_LCDData XGI_StLCD1600x1200Data[] = { 472static const struct SiS_LCDData XGI_StLCD1600x1200Data[] = {
464 {27, 4, 800, 500, 2160, 1250}, /* 00 (320x200,320x400, 473 {27, 4, 800, 500, 2160, 1250}, /* 00 (320x200,320x400,
465 640x200,640x400) */ 474 * 640x200,640x400)
475 */
466 {27, 4, 800, 500, 2160, 1250}, /* 01 (320x350,640x350) */ 476 {27, 4, 800, 500, 2160, 1250}, /* 01 (320x350,640x350) */
467 {27, 4, 800, 500, 2160, 1250}, /* 02 (360x400,720x400) */ 477 {27, 4, 800, 500, 2160, 1250}, /* 02 (360x400,720x400) */
468 {27, 4, 800, 500, 2160, 1250}, /* 03 (720x350) */ 478 {27, 4, 800, 500, 2160, 1250}, /* 03 (720x350) */
@@ -489,7 +499,8 @@ static const struct SiS_LCDData XGI_NoScalingData[] = {
489 499
490static const struct SiS_LCDData XGI_ExtLCD1024x768x75Data[] = { 500static const struct SiS_LCDData XGI_ExtLCD1024x768x75Data[] = {
491 {42, 25, 1536, 419, 1344, 806}, /* ; 00 (320x200,320x400, 501 {42, 25, 1536, 419, 1344, 806}, /* ; 00 (320x200,320x400,
492 640x200,640x400) */ 502 * 640x200,640x400)
503 */
493 {48, 25, 1536, 369, 1344, 806}, /* ; 01 (320x350,640x350) */ 504 {48, 25, 1536, 369, 1344, 806}, /* ; 01 (320x350,640x350) */
494 {42, 25, 1536, 419, 1344, 806}, /* ; 02 (360x400,720x400) */ 505 {42, 25, 1536, 419, 1344, 806}, /* ; 02 (360x400,720x400) */
495 {48, 25, 1536, 369, 1344, 806}, /* ; 03 (720x350) */ 506 {48, 25, 1536, 369, 1344, 806}, /* ; 03 (720x350) */
@@ -500,7 +511,8 @@ static const struct SiS_LCDData XGI_ExtLCD1024x768x75Data[] = {
500 511
501static const struct SiS_LCDData XGI_CetLCD1024x768x75Data[] = { 512static const struct SiS_LCDData XGI_CetLCD1024x768x75Data[] = {
502 {1, 1, 1312, 800, 1312, 800}, /* ; 00 (320x200,320x400, 513 {1, 1, 1312, 800, 1312, 800}, /* ; 00 (320x200,320x400,
503 640x200,640x400) */ 514 * 640x200,640x400)
515 */
504 {1, 1, 1312, 800, 1312, 800}, /* ; 01 (320x350,640x350) */ 516 {1, 1, 1312, 800, 1312, 800}, /* ; 01 (320x350,640x350) */
505 {1, 1, 1312, 800, 1312, 800}, /* ; 02 (360x400,720x400) */ 517 {1, 1, 1312, 800, 1312, 800}, /* ; 02 (360x400,720x400) */
506 {1, 1, 1312, 800, 1312, 800}, /* ; 03 (720x350) */ 518 {1, 1, 1312, 800, 1312, 800}, /* ; 03 (720x350) */
@@ -511,7 +523,8 @@ static const struct SiS_LCDData XGI_CetLCD1024x768x75Data[] = {
511 523
512static const struct SiS_LCDData xgifb_lcd_1280x1024x75[] = { 524static const struct SiS_LCDData xgifb_lcd_1280x1024x75[] = {
513 {211, 60, 1024, 501, 1688, 1066}, /* ; 00 (320x200,320x400, 525 {211, 60, 1024, 501, 1688, 1066}, /* ; 00 (320x200,320x400,
514 640x200,640x400) */ 526 * 640x200,640x400)
527 */
515 {211, 60, 1024, 508, 1688, 1066}, /* ; 01 (320x350,640x350) */ 528 {211, 60, 1024, 508, 1688, 1066}, /* ; 01 (320x350,640x350) */
516 {211, 60, 1024, 501, 1688, 1066}, /* ; 02 (360x400,720x400) */ 529 {211, 60, 1024, 501, 1688, 1066}, /* ; 02 (360x400,720x400) */
517 {211, 60, 1024, 508, 1688, 1066}, /* ; 03 (720x350) */ 530 {211, 60, 1024, 508, 1688, 1066}, /* ; 03 (720x350) */
@@ -525,7 +538,8 @@ static const struct SiS_LCDData xgifb_lcd_1280x1024x75[] = {
525 538
526static const struct SiS_LCDData XGI_NoScalingDatax75[] = { 539static const struct SiS_LCDData XGI_NoScalingDatax75[] = {
527 {1, 1, 800, 449, 800, 449}, /* ; 00 (320x200, 320x400, 540 {1, 1, 800, 449, 800, 449}, /* ; 00 (320x200, 320x400,
528 640x200, 640x400) */ 541 * 640x200, 640x400)
542 */
529 {1, 1, 800, 449, 800, 449}, /* ; 01 (320x350, 640x350) */ 543 {1, 1, 800, 449, 800, 449}, /* ; 01 (320x350, 640x350) */
530 {1, 1, 900, 449, 900, 449}, /* ; 02 (360x400, 720x400) */ 544 {1, 1, 900, 449, 900, 449}, /* ; 02 (360x400, 720x400) */
531 {1, 1, 900, 449, 900, 449}, /* ; 03 (720x350) */ 545 {1, 1, 900, 449, 900, 449}, /* ; 03 (720x350) */
@@ -732,7 +746,8 @@ static const struct XGI_LCDDesStruct XGI_StLCDDes1600x1200Data[] = {
732 746
733static const struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesData[] = { 747static const struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesData[] = {
734 {9, 657, 448, 405, 96, 2}, /* 00 (320x200,320x400, 748 {9, 657, 448, 405, 96, 2}, /* 00 (320x200,320x400,
735 640x200,640x400) */ 749 * 640x200,640x400)
750 */
736 {9, 657, 448, 355, 96, 2}, /* 01 (320x350,640x350) */ 751 {9, 657, 448, 355, 96, 2}, /* 01 (320x350,640x350) */
737 {9, 657, 448, 405, 96, 2}, /* 02 (360x400,720x400) */ 752 {9, 657, 448, 405, 96, 2}, /* 02 (360x400,720x400) */
738 {9, 657, 448, 355, 96, 2}, /* 03 (720x350) */ 753 {9, 657, 448, 355, 96, 2}, /* 03 (720x350) */
@@ -818,7 +833,8 @@ static const struct XGI_LCDDesStruct XGI_CetLCDDes1280x1024x75Data[] = {
818/* Scaling LCD 75Hz */ 833/* Scaling LCD 75Hz */
819static const struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesDatax75[] = { 834static const struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesDatax75[] = {
820 {9, 657, 448, 405, 96, 2}, /* ; 00 (320x200,320x400, 835 {9, 657, 448, 405, 96, 2}, /* ; 00 (320x200,320x400,
821 640x200,640x400) */ 836 * 640x200,640x400)
837 */
822 {9, 657, 448, 355, 96, 2}, /* ; 01 (320x350,640x350) */ 838 {9, 657, 448, 355, 96, 2}, /* ; 01 (320x350,640x350) */
823 {9, 738, 448, 405, 108, 2}, /* ; 02 (360x400,720x400) */ 839 {9, 738, 448, 405, 108, 2}, /* ; 02 (360x400,720x400) */
824 {9, 738, 448, 355, 108, 2}, /* ; 03 (720x350) */ 840 {9, 738, 448, 355, 108, 2}, /* ; 03 (720x350) */
@@ -873,7 +889,8 @@ static const struct SiS_TVData XGI_ExtNTSCData[] = {
873 889
874static const struct SiS_TVData XGI_St1HiTVData[] = { 890static const struct SiS_TVData XGI_St1HiTVData[] = {
875 {1, 1, 892, 563, 690, 800, 0, 0, 0}, /* 00 (320x200,320x400, 891 {1, 1, 892, 563, 690, 800, 0, 0, 0}, /* 00 (320x200,320x400,
876 640x200,640x400) */ 892 * 640x200,640x400)
893 */
877 {1, 1, 892, 563, 690, 700, 0, 0, 0}, /* 01 (320x350,640x350) */ 894 {1, 1, 892, 563, 690, 700, 0, 0, 0}, /* 01 (320x350,640x350) */
878 {1, 1, 1000, 563, 785, 800, 0, 0, 0}, /* 02 (360x400,720x400) */ 895 {1, 1, 1000, 563, 785, 800, 0, 0, 0}, /* 02 (360x400,720x400) */
879 {1, 1, 1000, 563, 785, 700, 0, 0, 0}, /* 03 (720x350) */ 896 {1, 1, 1000, 563, 785, 700, 0, 0, 0}, /* 03 (720x350) */
@@ -883,7 +900,8 @@ static const struct SiS_TVData XGI_St1HiTVData[] = {
883 900
884static const struct SiS_TVData XGI_St2HiTVData[] = { 901static const struct SiS_TVData XGI_St2HiTVData[] = {
885 {3, 1, 840, 483, 1648, 960, 0x032, 0, 0}, /* 00 (320x200,320x400, 902 {3, 1, 840, 483, 1648, 960, 0x032, 0, 0}, /* 00 (320x200,320x400,
886 640x200,640x400) */ 903 * 640x200,640x400)
904 */
887 {1, 1, 892, 563, 690, 700, 0, 0, 0}, /* 01 (320x350,640x350) */ 905 {1, 1, 892, 563, 690, 700, 0, 0, 0}, /* 01 (320x350,640x350) */
888 {3, 1, 840, 483, 1648, 960, 0x032, 0, 0}, /* 02 (360x400,720x400) */ 906 {3, 1, 840, 483, 1648, 960, 0x032, 0, 0}, /* 02 (360x400,720x400) */
889 {1, 1, 1000, 563, 785, 700, 0, 0, 0}, /* 03 (720x350) */ 907 {1, 1, 1000, 563, 785, 700, 0, 0, 0}, /* 03 (720x350) */
@@ -893,7 +911,8 @@ static const struct SiS_TVData XGI_St2HiTVData[] = {
893 911
894static const struct SiS_TVData XGI_ExtHiTVData[] = { 912static const struct SiS_TVData XGI_ExtHiTVData[] = {
895 {6, 1, 840, 563, 1632, 960, 0, 0, 0}, /* 00 (320x200,320x400, 913 {6, 1, 840, 563, 1632, 960, 0, 0, 0}, /* 00 (320x200,320x400,
896 640x200,640x400) */ 914 * 640x200,640x400)
915 */
897 {3, 1, 960, 563, 1632, 960, 0, 0, 0}, /* 01 (320x350,640x350) */ 916 {3, 1, 960, 563, 1632, 960, 0, 0, 0}, /* 01 (320x350,640x350) */
898 {3, 1, 840, 483, 1632, 960, 0, 0, 0}, /* 02 (360x400,720x400) */ 917 {3, 1, 840, 483, 1632, 960, 0, 0, 0}, /* 02 (360x400,720x400) */
899 {3, 1, 960, 563, 1632, 960, 0, 0, 0}, /* 03 (720x350) */ 918 {3, 1, 960, 563, 1632, 960, 0, 0, 0}, /* 03 (720x350) */
@@ -948,7 +967,8 @@ static const struct SiS_TVData XGI_StYPbPr525pData[] = {
948 967
949static const struct SiS_TVData XGI_ExtYPbPr750pData[] = { 968static const struct SiS_TVData XGI_ExtYPbPr750pData[] = {
950 { 3, 1, 935, 470, 1130, 680, 50, 0, 0}, /* 00 (320x200,320x400, 969 { 3, 1, 935, 470, 1130, 680, 50, 0, 0}, /* 00 (320x200,320x400,
951 640x200,640x400) */ 970 * 640x200,640x400)
971 */
952 {24, 7, 935, 420, 1130, 680, 50, 0, 0}, /* 01 (320x350,640x350) */ 972 {24, 7, 935, 420, 1130, 680, 50, 0, 0}, /* 01 (320x350,640x350) */
953 { 3, 1, 935, 470, 1130, 680, 50, 0, 0}, /* 02 (360x400,720x400) */ 973 { 3, 1, 935, 470, 1130, 680, 50, 0, 0}, /* 02 (360x400,720x400) */
954 {24, 7, 935, 420, 1130, 680, 50, 0, 0}, /* 03 (720x350) */ 974 {24, 7, 935, 420, 1130, 680, 50, 0, 0}, /* 03 (720x350) */
@@ -1269,7 +1289,8 @@ static const struct SiS_LVDSData XGI_LVDSNoScalingDatax75[] = {
1269 {1312, 800, 1312, 800}, /* ; 06 (1024x768x75Hz) */ 1289 {1312, 800, 1312, 800}, /* ; 06 (1024x768x75Hz) */
1270 {1688, 1066, 1688, 1066}, /* ; 07 (1280x1024x75Hz) */ 1290 {1688, 1066, 1688, 1066}, /* ; 07 (1280x1024x75Hz) */
1271 {1688, 1066, 1688, 1066}, /* ; 08 (1400x1050x75Hz) 1291 {1688, 1066, 1688, 1066}, /* ; 08 (1400x1050x75Hz)
1272 ;;[ycchen] 12/19/02 */ 1292 * ;;[ycchen] 12/19/02
1293 */
1273 {2160, 1250, 2160, 1250}, /* ; 09 (1600x1200x75Hz) */ 1294 {2160, 1250, 2160, 1250}, /* ; 09 (1600x1200x75Hz) */
1274 {1688, 806, 1688, 806}, /* ; 0A (1280x768x75Hz) */ 1295 {1688, 806, 1688, 806}, /* ; 0A (1280x768x75Hz) */
1275}; 1296};
@@ -1364,7 +1385,8 @@ static const struct SiS_LVDSData XGI_LVDS1600x1200Des_1[] = {
1364 1385
1365static const struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesData[] = { 1386static const struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesData[] = {
1366 {0, 648, 448, 405, 96, 2}, /* 00 (320x200,320x400, 1387 {0, 648, 448, 405, 96, 2}, /* 00 (320x200,320x400,
1367 640x200,640x400) */ 1388 * 640x200,640x400)
1389 */
1368 {0, 648, 448, 355, 96, 2}, /* 01 (320x350,640x350) */ 1390 {0, 648, 448, 355, 96, 2}, /* 01 (320x350,640x350) */
1369 {0, 648, 448, 405, 96, 2}, /* 02 (360x400,720x400) */ 1391 {0, 648, 448, 405, 96, 2}, /* 02 (360x400,720x400) */
1370 {0, 648, 448, 355, 96, 2}, /* 03 (720x350) */ 1392 {0, 648, 448, 355, 96, 2}, /* 03 (720x350) */
@@ -1435,7 +1457,8 @@ static const struct SiS_LVDSData XGI_LVDS1280x1024Des_2x75[] = {
1435/* Scaling LCD 75Hz */ 1457/* Scaling LCD 75Hz */
1436static const struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesDatax75[] = { 1458static const struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesDatax75[] = {
1437 {0, 648, 448, 405, 96, 2}, /* ; 00 (320x200,320x400, 1459 {0, 648, 448, 405, 96, 2}, /* ; 00 (320x200,320x400,
1438 640x200,640x400) */ 1460 * 640x200,640x400)
1461 */
1439 {0, 648, 448, 355, 96, 2}, /* ; 01 (320x350,640x350) */ 1462 {0, 648, 448, 355, 96, 2}, /* ; 01 (320x350,640x350) */
1440 {0, 729, 448, 405, 108, 2}, /* ; 02 (360x400,720x400) */ 1463 {0, 729, 448, 405, 108, 2}, /* ; 02 (360x400,720x400) */
1441 {0, 729, 448, 355, 108, 2}, /* ; 03 (720x350) */ 1464 {0, 729, 448, 355, 108, 2}, /* ; 03 (720x350) */
diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h
index f613f54d522f..08db58b396b2 100644
--- a/drivers/staging/xgifb/vb_util.h
+++ b/drivers/staging/xgifb/vb_util.h
@@ -13,7 +13,7 @@ static inline u8 xgifb_reg_get(unsigned long port, u8 index)
13} 13}
14 14
15static inline void xgifb_reg_and_or(unsigned long port, u8 index, 15static inline void xgifb_reg_and_or(unsigned long port, u8 index,
16 unsigned data_and, unsigned data_or) 16 unsigned int data_and, unsigned int data_or)
17{ 17{
18 u8 temp; 18 u8 temp;
19 19
@@ -22,7 +22,8 @@ static inline void xgifb_reg_and_or(unsigned long port, u8 index,
22 xgifb_reg_set(port, index, temp); 22 xgifb_reg_set(port, index, temp);
23} 23}
24 24
25static inline void xgifb_reg_and(unsigned long port, u8 index, unsigned data_and) 25static inline void xgifb_reg_and(unsigned long port, u8 index,
26 unsigned int data_and)
26{ 27{
27 u8 temp; 28 u8 temp;
28 29
@@ -31,7 +32,8 @@ static inline void xgifb_reg_and(unsigned long port, u8 index, unsigned data_and
31 xgifb_reg_set(port, index, temp); 32 xgifb_reg_set(port, index, temp);
32} 33}
33 34
34static inline void xgifb_reg_or(unsigned long port, u8 index, unsigned data_or) 35static inline void xgifb_reg_or(unsigned long port, u8 index,
36 unsigned int data_or)
35{ 37{
36 u8 temp; 38 u8 temp;
37 39
diff --git a/include/dt-bindings/iio/adi,ad5592r.h b/include/dt-bindings/iio/adi,ad5592r.h
new file mode 100644
index 000000000000..c48aca1dcade
--- /dev/null
+++ b/include/dt-bindings/iio/adi,ad5592r.h
@@ -0,0 +1,16 @@
1
2#ifndef _DT_BINDINGS_ADI_AD5592R_H
3#define _DT_BINDINGS_ADI_AD5592R_H
4
5#define CH_MODE_UNUSED 0
6#define CH_MODE_ADC 1
7#define CH_MODE_DAC 2
8#define CH_MODE_DAC_AND_ADC 3
9#define CH_MODE_GPIO 8
10
11#define CH_OFFSTATE_PULLDOWN 0
12#define CH_OFFSTATE_OUT_LOW 1
13#define CH_OFFSTATE_OUT_HIGH 2
14#define CH_OFFSTATE_OUT_TRISTATE 3
15
16#endif /* _DT_BINDINGS_ADI_AD5592R_H */
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 2ec3ad58e8a0..70a5164f4728 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -83,10 +83,12 @@ struct iio_buffer_access_funcs {
83 * @access: [DRIVER] buffer access functions associated with the 83 * @access: [DRIVER] buffer access functions associated with the
84 * implementation. 84 * implementation.
85 * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes. 85 * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes.
86 * @buffer_group: [INTERN] attributes of the buffer group
86 * @scan_el_group: [DRIVER] attribute group for those attributes not 87 * @scan_el_group: [DRIVER] attribute group for those attributes not
87 * created from the iio_chan_info array. 88 * created from the iio_chan_info array.
88 * @pollq: [INTERN] wait queue to allow for polling on the buffer. 89 * @pollq: [INTERN] wait queue to allow for polling on the buffer.
89 * @stufftoread: [INTERN] flag to indicate new data. 90 * @stufftoread: [INTERN] flag to indicate new data.
91 * @attrs: [INTERN] standard attributes of the buffer
90 * @demux_list: [INTERN] list of operations required to demux the scan. 92 * @demux_list: [INTERN] list of operations required to demux the scan.
91 * @demux_bounce: [INTERN] buffer for doing gather from incoming scan. 93 * @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
92 * @buffer_list: [INTERN] entry in the devices list of current buffers. 94 * @buffer_list: [INTERN] entry in the devices list of current buffers.
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 6670c3d25c58..d029ffac0d69 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -37,6 +37,7 @@
37#define ST_SENSORS_DEFAULT_AXIS_ADDR 0x20 37#define ST_SENSORS_DEFAULT_AXIS_ADDR 0x20
38#define ST_SENSORS_DEFAULT_AXIS_MASK 0x07 38#define ST_SENSORS_DEFAULT_AXIS_MASK 0x07
39#define ST_SENSORS_DEFAULT_AXIS_N_BIT 3 39#define ST_SENSORS_DEFAULT_AXIS_N_BIT 3
40#define ST_SENSORS_DEFAULT_STAT_ADDR 0x27
40 41
41#define ST_SENSORS_MAX_NAME 17 42#define ST_SENSORS_MAX_NAME 17
42#define ST_SENSORS_MAX_4WAI 7 43#define ST_SENSORS_MAX_4WAI 7
@@ -121,6 +122,9 @@ struct st_sensor_bdu {
121 * @mask_int2: mask to enable/disable IRQ on INT2 pin. 122 * @mask_int2: mask to enable/disable IRQ on INT2 pin.
122 * @addr_ihl: address to enable/disable active low on the INT lines. 123 * @addr_ihl: address to enable/disable active low on the INT lines.
123 * @mask_ihl: mask to enable/disable active low on the INT lines. 124 * @mask_ihl: mask to enable/disable active low on the INT lines.
125 * @addr_od: address to enable/disable Open Drain on the INT lines.
126 * @mask_od: mask to enable/disable Open Drain on the INT lines.
127 * @addr_stat_drdy: address to read status of DRDY (data ready) interrupt
124 * struct ig1 - represents the Interrupt Generator 1 of sensors. 128 * struct ig1 - represents the Interrupt Generator 1 of sensors.
125 * @en_addr: address of the enable ig1 register. 129 * @en_addr: address of the enable ig1 register.
126 * @en_mask: mask to write the on/off value for enable. 130 * @en_mask: mask to write the on/off value for enable.
@@ -131,6 +135,9 @@ struct st_sensor_data_ready_irq {
131 u8 mask_int2; 135 u8 mask_int2;
132 u8 addr_ihl; 136 u8 addr_ihl;
133 u8 mask_ihl; 137 u8 mask_ihl;
138 u8 addr_od;
139 u8 mask_od;
140 u8 addr_stat_drdy;
134 struct { 141 struct {
135 u8 en_addr; 142 u8 en_addr;
136 u8 en_mask; 143 u8 en_mask;
@@ -212,6 +219,7 @@ struct st_sensor_settings {
212 * @odr: Output data rate of the sensor [Hz]. 219 * @odr: Output data rate of the sensor [Hz].
213 * num_data_channels: Number of data channels used in buffer. 220 * num_data_channels: Number of data channels used in buffer.
214 * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). 221 * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
222 * @int_pin_open_drain: Set the interrupt/DRDY to open drain.
215 * @get_irq_data_ready: Function to get the IRQ used for data ready signal. 223 * @get_irq_data_ready: Function to get the IRQ used for data ready signal.
216 * @tf: Transfer function structure used by I/O operations. 224 * @tf: Transfer function structure used by I/O operations.
217 * @tb: Transfer buffers and mutex used by I/O operations. 225 * @tb: Transfer buffers and mutex used by I/O operations.
@@ -233,6 +241,7 @@ struct st_sensor_data {
233 unsigned int num_data_channels; 241 unsigned int num_data_channels;
234 242
235 u8 drdy_int_pin; 243 u8 drdy_int_pin;
244 bool int_pin_open_drain;
236 245
237 unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev); 246 unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev);
238 247
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index fad58671c49e..3d672f72e7ec 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -49,6 +49,33 @@ struct iio_channel *iio_channel_get(struct device *dev,
49void iio_channel_release(struct iio_channel *chan); 49void iio_channel_release(struct iio_channel *chan);
50 50
51/** 51/**
52 * devm_iio_channel_get() - Resource managed version of iio_channel_get().
53 * @dev: Pointer to consumer device. Device name must match
54 * the name of the device as provided in the iio_map
55 * with which the desired provider to consumer mapping
56 * was registered.
57 * @consumer_channel: Unique name to identify the channel on the consumer
58 * side. This typically describes the channels use within
59 * the consumer. E.g. 'battery_voltage'
60 *
61 * Returns a pointer to negative errno if it is not able to get the iio channel
62 * otherwise returns valid pointer for iio channel.
63 *
64 * The allocated iio channel is automatically released when the device is
65 * unbound.
66 */
67struct iio_channel *devm_iio_channel_get(struct device *dev,
68 const char *consumer_channel);
69/**
70 * devm_iio_channel_release() - Resource managed version of
71 * iio_channel_release().
72 * @dev: Pointer to consumer device for which resource
73 * is allocared.
74 * @chan: The channel to be released.
75 */
76void devm_iio_channel_release(struct device *dev, struct iio_channel *chan);
77
78/**
52 * iio_channel_get_all() - get all channels associated with a client 79 * iio_channel_get_all() - get all channels associated with a client
53 * @dev: Pointer to consumer device. 80 * @dev: Pointer to consumer device.
54 * 81 *
@@ -65,6 +92,32 @@ struct iio_channel *iio_channel_get_all(struct device *dev);
65 */ 92 */
66void iio_channel_release_all(struct iio_channel *chan); 93void iio_channel_release_all(struct iio_channel *chan);
67 94
95/**
96 * devm_iio_channel_get_all() - Resource managed version of
97 * iio_channel_get_all().
98 * @dev: Pointer to consumer device.
99 *
100 * Returns a pointer to negative errno if it is not able to get the iio channel
101 * otherwise returns an array of iio_channel structures terminated with one with
102 * null iio_dev pointer.
103 *
104 * This function is used by fairly generic consumers to get all the
105 * channels registered as having this consumer.
106 *
107 * The allocated iio channels are automatically released when the device is
108 * unbounded.
109 */
110struct iio_channel *devm_iio_channel_get_all(struct device *dev);
111
112/**
113 * devm_iio_channel_release_all() - Resource managed version of
114 * iio_channel_release_all().
115 * @dev: Pointer to consumer device for which resource
116 * is allocared.
117 * @chan: Array channel to be released.
118 */
119void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan);
120
68struct iio_cb_buffer; 121struct iio_cb_buffer;
69/** 122/**
70 * iio_channel_get_all_cb() - register callback for triggered capture 123 * iio_channel_get_all_cb() - register callback for triggered capture
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index b2b16772c651..7c29cb0124ae 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -148,6 +148,37 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
148} 148}
149 149
150/** 150/**
151 * struct iio_mount_matrix - iio mounting matrix
152 * @rotation: 3 dimensional space rotation matrix defining sensor alignment with
153 * main hardware
154 */
155struct iio_mount_matrix {
156 const char *rotation[9];
157};
158
159ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
160 const struct iio_chan_spec *chan, char *buf);
161int of_iio_read_mount_matrix(const struct device *dev, const char *propname,
162 struct iio_mount_matrix *matrix);
163
164typedef const struct iio_mount_matrix *
165 (iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
166 const struct iio_chan_spec *chan);
167
168/**
169 * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute
170 * @_shared: Whether the attribute is shared between all channels
171 * @_get: Pointer to an iio_get_mount_matrix_t accessor
172 */
173#define IIO_MOUNT_MATRIX(_shared, _get) \
174{ \
175 .name = "mount_matrix", \
176 .shared = (_shared), \
177 .read = iio_show_mount_matrix, \
178 .private = (uintptr_t)(_get), \
179}
180
181/**
151 * struct iio_event_spec - specification for a channel event 182 * struct iio_event_spec - specification for a channel event
152 * @type: Type of the event 183 * @type: Type of the event
153 * @dir: Direction of the event 184 * @dir: Direction of the event
@@ -527,6 +558,8 @@ void iio_device_unregister(struct iio_dev *indio_dev);
527int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev); 558int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev);
528void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev); 559void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
529int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp); 560int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
561int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
562void iio_device_release_direct_mode(struct iio_dev *indio_dev);
530 563
531extern struct bus_type iio_bus_type; 564extern struct bus_type iio_bus_type;
532 565
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index fa2d01ef8f55..360da7d18a3d 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -41,6 +41,7 @@ struct adis_data {
41 unsigned int diag_stat_reg; 41 unsigned int diag_stat_reg;
42 42
43 unsigned int self_test_mask; 43 unsigned int self_test_mask;
44 bool self_test_no_autoclear;
44 unsigned int startup_delay; 45 unsigned int startup_delay;
45 46
46 const char * const *status_error_msgs; 47 const char * const *status_error_msgs;
diff --git a/include/linux/iio/magnetometer/ak8975.h b/include/linux/iio/magnetometer/ak8975.h
new file mode 100644
index 000000000000..c8400959d197
--- /dev/null
+++ b/include/linux/iio/magnetometer/ak8975.h
@@ -0,0 +1,16 @@
1#ifndef __IIO_MAGNETOMETER_AK8975_H__
2#define __IIO_MAGNETOMETER_AK8975_H__
3
4#include <linux/iio/iio.h>
5
6/**
7 * struct ak8975_platform_data - AK8975 magnetometer driver platform data
8 * @eoc_gpio: data ready event gpio
9 * @orientation: mounting matrix relative to main hardware
10 */
11struct ak8975_platform_data {
12 int eoc_gpio;
13 struct iio_mount_matrix orientation;
14};
15
16#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cc7398287fdd..94aa10ffe156 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -53,6 +53,13 @@
53 53
54#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) 54#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
55 55
56#define u64_to_user_ptr(x) ( \
57{ \
58 typecheck(u64, x); \
59 (void __user *)(uintptr_t)x; \
60} \
61)
62
56/* 63/*
57 * This looks more complex than it should be. But we need to 64 * This looks more complex than it should be. But we need to
58 * get the type for the ~ right in round_down (it needs to be 65 * get the type for the ~ right in round_down (it needs to be
diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h
index ad3aa7b95f35..554b59801aa8 100644
--- a/include/linux/platform_data/invensense_mpu6050.h
+++ b/include/linux/platform_data/invensense_mpu6050.h
@@ -16,13 +16,16 @@
16 16
17/** 17/**
18 * struct inv_mpu6050_platform_data - Platform data for the mpu driver 18 * struct inv_mpu6050_platform_data - Platform data for the mpu driver
19 * @orientation: Orientation matrix of the chip 19 * @orientation: Orientation matrix of the chip (deprecated in favor of
20 * mounting matrix retrieved from device-tree)
20 * 21 *
21 * Contains platform specific information on how to configure the MPU6050 to 22 * Contains platform specific information on how to configure the MPU6050 to
22 * work on this platform. The orientation matricies are 3x3 rotation matricies 23 * work on this platform. The orientation matricies are 3x3 rotation matricies
23 * that are applied to the data to rotate from the mounting orientation to the 24 * that are applied to the data to rotate from the mounting orientation to the
24 * platform orientation. The values must be one of 0, 1, or -1 and each row and 25 * platform orientation. The values must be one of 0, 1, or -1 and each row and
25 * column should have exactly 1 non-zero value. 26 * column should have exactly 1 non-zero value.
27 *
28 * Deprecated in favor of mounting matrix retrieved from device-tree.
26 */ 29 */
27struct inv_mpu6050_platform_data { 30struct inv_mpu6050_platform_data {
28 __s8 orientation[9]; 31 __s8 orientation[9];
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index 753839187ba0..79b0e4cdb814 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -16,9 +16,11 @@
16 * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). 16 * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
17 * Available only for accelerometer and pressure sensors. 17 * Available only for accelerometer and pressure sensors.
18 * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). 18 * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
19 * @open_drain: set the interrupt line to be open drain if possible.
19 */ 20 */
20struct st_sensors_platform_data { 21struct st_sensors_platform_data {
21 u8 drdy_int_pin; 22 u8 drdy_int_pin;
23 bool open_drain;
22}; 24};
23 25
24#endif /* ST_SENSORS_PDATA_H */ 26#endif /* ST_SENSORS_PDATA_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 31bd0d97d178..0fc28e45c142 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2248,6 +2248,7 @@ static inline void memalloc_noio_restore(unsigned int flags)
2248#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ 2248#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
2249#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ 2249#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
2250#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 2250#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
2251#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
2251 2252
2252 2253
2253#define TASK_PFA_TEST(name, func) \ 2254#define TASK_PFA_TEST(name, func) \
@@ -2271,6 +2272,9 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2271TASK_PFA_SET(SPREAD_SLAB, spread_slab) 2272TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2272TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) 2273TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
2273 2274
2275TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
2276TASK_PFA_SET(LMK_WAITING, lmk_waiting)
2277
2274/* 2278/*
2275 * task->jobctl flags 2279 * task->jobctl flags
2276 */ 2280 */
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
new file mode 100644
index 000000000000..c6ffe8b0725c
--- /dev/null
+++ b/include/linux/sync_file.h
@@ -0,0 +1,57 @@
1/*
2 * include/linux/sync_file.h
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 */
12
13#ifndef _LINUX_SYNC_FILE_H
14#define _LINUX_SYNC_FILE_H
15
16#include <linux/types.h>
17#include <linux/kref.h>
18#include <linux/ktime.h>
19#include <linux/list.h>
20#include <linux/spinlock.h>
21#include <linux/fence.h>
22
23struct sync_file_cb {
24 struct fence_cb cb;
25 struct fence *fence;
26 struct sync_file *sync_file;
27};
28
29/**
30 * struct sync_file - sync file to export to the userspace
31 * @file: file representing this fence
32 * @kref: reference count on fence.
33 * @name: name of sync_file. Useful for debugging
34 * @sync_file_list: membership in global file list
35 * @num_fences: number of sync_pts in the fence
36 * @wq: wait queue for fence signaling
37 * @status: 0: signaled, >0:active, <0: error
38 * @cbs: sync_pts callback information
39 */
40struct sync_file {
41 struct file *file;
42 struct kref kref;
43 char name[32];
44#ifdef CONFIG_DEBUG_FS
45 struct list_head sync_file_list;
46#endif
47 int num_fences;
48
49 wait_queue_head_t wq;
50 atomic_t status;
51
52 struct sync_file_cb cbs[];
53};
54
55struct sync_file *sync_file_create(struct fence *fence);
56
57#endif /* _LINUX_SYNC_H */
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index c077617f3304..b0916fc72cce 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -38,6 +38,7 @@ enum iio_chan_type {
38 IIO_CONCENTRATION, 38 IIO_CONCENTRATION,
39 IIO_RESISTANCE, 39 IIO_RESISTANCE,
40 IIO_PH, 40 IIO_PH,
41 IIO_UVINDEX,
41}; 42};
42 43
43enum iio_modifier { 44enum iio_modifier {
@@ -77,6 +78,7 @@ enum iio_modifier {
77 IIO_MOD_Q, 78 IIO_MOD_Q,
78 IIO_MOD_CO2, 79 IIO_MOD_CO2,
79 IIO_MOD_VOC, 80 IIO_MOD_VOC,
81 IIO_MOD_LIGHT_UV,
80}; 82};
81 83
82enum iio_event_type { 84enum iio_event_type {
diff --git a/drivers/staging/android/uapi/sync.h b/include/uapi/linux/sync_file.h
index a0cf357e598d..413303d37b56 100644
--- a/drivers/staging/android/uapi/sync.h
+++ b/include/uapi/linux/sync_file.h
@@ -16,57 +16,73 @@
16 16
17/** 17/**
18 * struct sync_merge_data - data passed to merge ioctl 18 * struct sync_merge_data - data passed to merge ioctl
19 * @fd2: file descriptor of second fence
20 * @name: name of new fence 19 * @name: name of new fence
20 * @fd2: file descriptor of second fence
21 * @fence: returns the fd of the new fence to userspace 21 * @fence: returns the fd of the new fence to userspace
22 * @flags: merge_data flags
23 * @pad: padding for 64-bit alignment, should always be zero
22 */ 24 */
23struct sync_merge_data { 25struct sync_merge_data {
24 __s32 fd2; /* fd of second fence */ 26 char name[32];
25 char name[32]; /* name of new fence */ 27 __s32 fd2;
26 __s32 fence; /* fd on newly created fence */ 28 __s32 fence;
29 __u32 flags;
30 __u32 pad;
27}; 31};
28 32
29/** 33/**
30 * struct sync_fence_info - detailed fence information 34 * struct sync_fence_info - detailed fence information
31 * @obj_name: name of parent sync_timeline 35 * @obj_name: name of parent sync_timeline
32 * @driver_name: name of driver implementing the parent 36* @driver_name: name of driver implementing the parent
33 * @status: status of the fence 0:active 1:signaled <0:error 37* @status: status of the fence 0:active 1:signaled <0:error
38 * @flags: fence_info flags
34 * @timestamp_ns: timestamp of status change in nanoseconds 39 * @timestamp_ns: timestamp of status change in nanoseconds
35 */ 40 */
36struct sync_fence_info { 41struct sync_fence_info {
37 char obj_name[32]; 42 char obj_name[32];
38 char driver_name[32]; 43 char driver_name[32];
39 __s32 status; 44 __s32 status;
45 __u32 flags;
40 __u64 timestamp_ns; 46 __u64 timestamp_ns;
41}; 47};
42 48
43/** 49/**
44 * struct sync_file_info - data returned from fence info ioctl 50 * struct sync_file_info - data returned from fence info ioctl
45 * @len: ioctl caller writes the size of the buffer its passing in.
46 * ioctl returns length of sync_file_info returned to
47 * userspace including pt_info.
48 * @name: name of fence 51 * @name: name of fence
49 * @status: status of fence. 1: signaled 0:active <0:error 52 * @status: status of fence. 1: signaled 0:active <0:error
50 * @sync_fence_info: array of sync_fence_info for every fence in the sync_file 53 * @flags: sync_file_info flags
54 * @num_fences number of fences in the sync_file
55 * @pad: padding for 64-bit alignment, should always be zero
56 * @sync_fence_info: pointer to array of structs sync_fence_info with all
57 * fences in the sync_file
51 */ 58 */
52struct sync_file_info { 59struct sync_file_info {
53 __u32 len;
54 char name[32]; 60 char name[32];
55 __s32 status; 61 __s32 status;
62 __u32 flags;
63 __u32 num_fences;
64 __u32 pad;
56 65
57 __u8 sync_fence_info[0]; 66 __u64 sync_fence_info;
58}; 67};
59 68
60#define SYNC_IOC_MAGIC '>' 69#define SYNC_IOC_MAGIC '>'
61 70
62/** 71/**
72 * Opcodes 0, 1 and 2 were burned during a API change to avoid users of the
73 * old API to get weird errors when trying to handling sync_files. The API
74 * change happened during the de-stage of the Sync Framework when there was
75 * no upstream users available.
76 */
77
78/**
63 * DOC: SYNC_IOC_MERGE - merge two fences 79 * DOC: SYNC_IOC_MERGE - merge two fences
64 * 80 *
65 * Takes a struct sync_merge_data. Creates a new fence containing copies of 81 * Takes a struct sync_merge_data. Creates a new fence containing copies of
66 * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the 82 * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
67 * new fence's fd in sync_merge_data.fence 83 * new fence's fd in sync_merge_data.fence
68 */ 84 */
69#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data) 85#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
70 86
71/** 87/**
72 * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence 88 * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
@@ -79,6 +95,6 @@ struct sync_file_info {
79 * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence. 95 * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
80 * To iterate over the sync_pt_infos, use the sync_pt_info.len field. 96 * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
81 */ 97 */
82#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2, struct sync_file_info) 98#define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
83 99
84#endif /* _UAPI_LINUX_SYNC_H */ 100#endif /* _UAPI_LINUX_SYNC_H */
diff --git a/tools/iio/generic_buffer.c b/tools/iio/generic_buffer.c
index 01c4f67801e0..2429c78de940 100644
--- a/tools/iio/generic_buffer.c
+++ b/tools/iio/generic_buffer.c
@@ -35,6 +35,15 @@
35#include "iio_utils.h" 35#include "iio_utils.h"
36 36
37/** 37/**
38 * enum autochan - state for the automatic channel enabling mechanism
39 */
40enum autochan {
41 AUTOCHANNELS_DISABLED,
42 AUTOCHANNELS_ENABLED,
43 AUTOCHANNELS_ACTIVE,
44};
45
46/**
38 * size_from_channelarray() - calculate the storage size of a scan 47 * size_from_channelarray() - calculate the storage size of a scan
39 * @channels: the channel info array 48 * @channels: the channel info array
40 * @num_channels: number of channels 49 * @num_channels: number of channels
@@ -191,10 +200,51 @@ void process_scan(char *data,
191 printf("\n"); 200 printf("\n");
192} 201}
193 202
203static int enable_disable_all_channels(char *dev_dir_name, int enable)
204{
205 const struct dirent *ent;
206 char scanelemdir[256];
207 DIR *dp;
208 int ret;
209
210 snprintf(scanelemdir, sizeof(scanelemdir),
211 FORMAT_SCAN_ELEMENTS_DIR, dev_dir_name);
212 scanelemdir[sizeof(scanelemdir)-1] = '\0';
213
214 dp = opendir(scanelemdir);
215 if (!dp) {
216 fprintf(stderr, "Enabling/disabling channels: can't open %s\n",
217 scanelemdir);
218 return -EIO;
219 }
220
221 ret = -ENOENT;
222 while (ent = readdir(dp), ent) {
223 if (iioutils_check_suffix(ent->d_name, "_en")) {
224 printf("%sabling: %s\n",
225 enable ? "En" : "Dis",
226 ent->d_name);
227 ret = write_sysfs_int(ent->d_name, scanelemdir,
228 enable);
229 if (ret < 0)
230 fprintf(stderr, "Failed to enable/disable %s\n",
231 ent->d_name);
232 }
233 }
234
235 if (closedir(dp) == -1) {
236 perror("Enabling/disabling channels: "
237 "Failed to close directory");
238 return -errno;
239 }
240 return 0;
241}
242
194void print_usage(void) 243void print_usage(void)
195{ 244{
196 fprintf(stderr, "Usage: generic_buffer [options]...\n" 245 fprintf(stderr, "Usage: generic_buffer [options]...\n"
197 "Capture, convert and output data from IIO device buffer\n" 246 "Capture, convert and output data from IIO device buffer\n"
247 " -a Auto-activate all available channels\n"
198 " -c <n> Do n conversions\n" 248 " -c <n> Do n conversions\n"
199 " -e Disable wait for event (new data)\n" 249 " -e Disable wait for event (new data)\n"
200 " -g Use trigger-less mode\n" 250 " -g Use trigger-less mode\n"
@@ -225,12 +275,16 @@ int main(int argc, char **argv)
225 int scan_size; 275 int scan_size;
226 int noevents = 0; 276 int noevents = 0;
227 int notrigger = 0; 277 int notrigger = 0;
278 enum autochan autochannels = AUTOCHANNELS_DISABLED;
228 char *dummy; 279 char *dummy;
229 280
230 struct iio_channel_info *channels; 281 struct iio_channel_info *channels;
231 282
232 while ((c = getopt(argc, argv, "c:egl:n:t:w:")) != -1) { 283 while ((c = getopt(argc, argv, "ac:egl:n:t:w:")) != -1) {
233 switch (c) { 284 switch (c) {
285 case 'a':
286 autochannels = AUTOCHANNELS_ENABLED;
287 break;
234 case 'c': 288 case 'c':
235 errno = 0; 289 errno = 0;
236 num_loops = strtoul(optarg, &dummy, 10); 290 num_loops = strtoul(optarg, &dummy, 10);
@@ -304,7 +358,19 @@ int main(int argc, char **argv)
304 } 358 }
305 } 359 }
306 360
307 /* Verify the trigger exists */ 361 /* Look for this "-devN" trigger */
362 trig_num = find_type_by_name(trigger_name, "trigger");
363 if (trig_num < 0) {
364 /* OK try the simpler "-trigger" suffix instead */
365 free(trigger_name);
366 ret = asprintf(&trigger_name,
367 "%s-trigger", device_name);
368 if (ret < 0) {
369 ret = -ENOMEM;
370 goto error_free_dev_dir_name;
371 }
372 }
373
308 trig_num = find_type_by_name(trigger_name, "trigger"); 374 trig_num = find_type_by_name(trigger_name, "trigger");
309 if (trig_num < 0) { 375 if (trig_num < 0) {
310 fprintf(stderr, "Failed to find the trigger %s\n", 376 fprintf(stderr, "Failed to find the trigger %s\n",
@@ -328,12 +394,47 @@ int main(int argc, char **argv)
328 "diag %s\n", dev_dir_name); 394 "diag %s\n", dev_dir_name);
329 goto error_free_triggername; 395 goto error_free_triggername;
330 } 396 }
331 if (!num_channels) { 397 if (num_channels && autochannels == AUTOCHANNELS_ENABLED) {
398 fprintf(stderr, "Auto-channels selected but some channels "
399 "are already activated in sysfs\n");
400 fprintf(stderr, "Proceeding without activating any channels\n");
401 }
402
403 if (!num_channels && autochannels == AUTOCHANNELS_ENABLED) {
404 fprintf(stderr,
405 "No channels are enabled, enabling all channels\n");
406
407 ret = enable_disable_all_channels(dev_dir_name, 1);
408 if (ret) {
409 fprintf(stderr, "Failed to enable all channels\n");
410 goto error_free_triggername;
411 }
412
413 /* This flags that we need to disable the channels again */
414 autochannels = AUTOCHANNELS_ACTIVE;
415
416 ret = build_channel_array(dev_dir_name, &channels,
417 &num_channels);
418 if (ret) {
419 fprintf(stderr, "Problem reading scan element "
420 "information\n"
421 "diag %s\n", dev_dir_name);
422 goto error_disable_channels;
423 }
424 if (!num_channels) {
425 fprintf(stderr, "Still no channels after "
426 "auto-enabling, giving up\n");
427 goto error_disable_channels;
428 }
429 }
430
431 if (!num_channels && autochannels == AUTOCHANNELS_DISABLED) {
332 fprintf(stderr, 432 fprintf(stderr,
333 "No channels are enabled, we have nothing to scan.\n"); 433 "No channels are enabled, we have nothing to scan.\n");
334 fprintf(stderr, "Enable channels manually in " 434 fprintf(stderr, "Enable channels manually in "
335 FORMAT_SCAN_ELEMENTS_DIR 435 FORMAT_SCAN_ELEMENTS_DIR
336 "/*_en and try again.\n", dev_dir_name); 436 "/*_en or pass -a to autoenable channels and "
437 "try again.\n", dev_dir_name);
337 ret = -ENOENT; 438 ret = -ENOENT;
338 goto error_free_triggername; 439 goto error_free_triggername;
339 } 440 }
@@ -467,7 +568,12 @@ error_free_channels:
467error_free_triggername: 568error_free_triggername:
468 if (datardytrigger) 569 if (datardytrigger)
469 free(trigger_name); 570 free(trigger_name);
470 571error_disable_channels:
572 if (autochannels == AUTOCHANNELS_ACTIVE) {
573 ret = enable_disable_all_channels(dev_dir_name, 0);
574 if (ret)
575 fprintf(stderr, "Failed to disable all channels\n");
576 }
471error_free_dev_dir_name: 577error_free_dev_dir_name:
472 free(dev_dir_name); 578 free(dev_dir_name);
473 579
diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c
index d51eb04202e9..d9b7e0f306c6 100644
--- a/tools/iio/iio_event_monitor.c
+++ b/tools/iio/iio_event_monitor.c
@@ -53,6 +53,10 @@ static const char * const iio_chan_type_name_spec[] = {
53 [IIO_ENERGY] = "energy", 53 [IIO_ENERGY] = "energy",
54 [IIO_DISTANCE] = "distance", 54 [IIO_DISTANCE] = "distance",
55 [IIO_VELOCITY] = "velocity", 55 [IIO_VELOCITY] = "velocity",
56 [IIO_CONCENTRATION] = "concentration",
57 [IIO_RESISTANCE] = "resistance",
58 [IIO_PH] = "ph",
59 [IIO_UVINDEX] = "uvindex",
56}; 60};
57 61
58static const char * const iio_ev_type_text[] = { 62static const char * const iio_ev_type_text[] = {
@@ -90,6 +94,7 @@ static const char * const iio_modifier_names[] = {
90 [IIO_MOD_LIGHT_RED] = "red", 94 [IIO_MOD_LIGHT_RED] = "red",
91 [IIO_MOD_LIGHT_GREEN] = "green", 95 [IIO_MOD_LIGHT_GREEN] = "green",
92 [IIO_MOD_LIGHT_BLUE] = "blue", 96 [IIO_MOD_LIGHT_BLUE] = "blue",
97 [IIO_MOD_LIGHT_UV] = "uv",
93 [IIO_MOD_QUATERNION] = "quaternion", 98 [IIO_MOD_QUATERNION] = "quaternion",
94 [IIO_MOD_TEMP_AMBIENT] = "ambient", 99 [IIO_MOD_TEMP_AMBIENT] = "ambient",
95 [IIO_MOD_TEMP_OBJECT] = "object", 100 [IIO_MOD_TEMP_OBJECT] = "object",
@@ -102,6 +107,10 @@ static const char * const iio_modifier_names[] = {
102 [IIO_MOD_WALKING] = "walking", 107 [IIO_MOD_WALKING] = "walking",
103 [IIO_MOD_STILL] = "still", 108 [IIO_MOD_STILL] = "still",
104 [IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)", 109 [IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
110 [IIO_MOD_I] = "i",
111 [IIO_MOD_Q] = "q",
112 [IIO_MOD_CO2] = "co2",
113 [IIO_MOD_VOC] = "voc",
105}; 114};
106 115
107static bool event_is_known(struct iio_event_data *event) 116static bool event_is_known(struct iio_event_data *event)
@@ -136,6 +145,10 @@ static bool event_is_known(struct iio_event_data *event)
136 case IIO_ENERGY: 145 case IIO_ENERGY:
137 case IIO_DISTANCE: 146 case IIO_DISTANCE:
138 case IIO_VELOCITY: 147 case IIO_VELOCITY:
148 case IIO_CONCENTRATION:
149 case IIO_RESISTANCE:
150 case IIO_PH:
151 case IIO_UVINDEX:
139 break; 152 break;
140 default: 153 default:
141 return false; 154 return false;
@@ -162,6 +175,7 @@ static bool event_is_known(struct iio_event_data *event)
162 case IIO_MOD_LIGHT_RED: 175 case IIO_MOD_LIGHT_RED:
163 case IIO_MOD_LIGHT_GREEN: 176 case IIO_MOD_LIGHT_GREEN:
164 case IIO_MOD_LIGHT_BLUE: 177 case IIO_MOD_LIGHT_BLUE:
178 case IIO_MOD_LIGHT_UV:
165 case IIO_MOD_QUATERNION: 179 case IIO_MOD_QUATERNION:
166 case IIO_MOD_TEMP_AMBIENT: 180 case IIO_MOD_TEMP_AMBIENT:
167 case IIO_MOD_TEMP_OBJECT: 181 case IIO_MOD_TEMP_OBJECT:
@@ -174,6 +188,10 @@ static bool event_is_known(struct iio_event_data *event)
174 case IIO_MOD_WALKING: 188 case IIO_MOD_WALKING:
175 case IIO_MOD_STILL: 189 case IIO_MOD_STILL:
176 case IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z: 190 case IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z:
191 case IIO_MOD_I:
192 case IIO_MOD_Q:
193 case IIO_MOD_CO2:
194 case IIO_MOD_VOC:
177 break; 195 break;
178 default: 196 default:
179 return false; 197 return false;
diff --git a/tools/iio/iio_utils.h b/tools/iio/iio_utils.h
index e3503bfe538b..780f2014f8fa 100644
--- a/tools/iio/iio_utils.h
+++ b/tools/iio/iio_utils.h
@@ -52,6 +52,13 @@ struct iio_channel_info {
52 unsigned location; 52 unsigned location;
53}; 53};
54 54
55static inline int iioutils_check_suffix(const char *str, const char *suffix)
56{
57 return strlen(str) >= strlen(suffix) &&
58 strncmp(str+strlen(str)-strlen(suffix),
59 suffix, strlen(suffix)) == 0;
60}
61
55int iioutils_break_up_name(const char *full_name, char **generic_name); 62int iioutils_break_up_name(const char *full_name, char **generic_name);
56int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used, 63int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
57 unsigned *shift, uint64_t *mask, unsigned *be, 64 unsigned *shift, uint64_t *mask, unsigned *be,