author	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 14:23:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 14:23:18 -0400
commit	71e7ff2578c3bc67fd893a9ba7f69fd563f271de (patch)
tree	07f8f6e950f54f745857c6be07a5186fd68a74d2 /drivers
parent	4a52246302f01596f0edf7b4a3e6425e23479192 (diff)
parent	bc01caf53da4de53361376734707336de8fff839 (diff)
Merge tag 'staging-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging
Pull big staging driver updates from Greg KH:
 "Here is the big drivers/staging/ merge for 3.4-rc1.

  Lots of new driver updates here, with the addition of a few new ones,
  and only one moving out of the staging tree to the "real" part of the
  kernel (the hyperv scsi driver, acked by the scsi maintainer).

  There are also loads of cleanups, fixes, and other minor things in
  here, all self-contained in the drivers/staging/ tree.

  Overall we reversed the recent trend by adding more lines than we
  removed: 379 files changed, 37952 insertions(+), 14153 deletions(-)"

* tag 'staging-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (360 commits)
  staging/zmem: Use lockdep_assert_held instead of spin_is_locked
  Staging: rtl8187se: r8180_wx.c: Cleaned up comments
  Staging: rtl8187se: r8180_wx.c: Removed old comments
  Staging: rtl8187se: r8180_dm.c: Removed old comments
  Staging: android: ram_console.c:
  Staging: rtl8187se: r8180_dm.c: Fix comments
  Staging: rtl8187se: r8180_dm.c: Fix spacing issues
  Staging: rtl8187se: r8180_dm.c: Fixed indentation issues
  Staging: rtl8187se: r8180_dm.c: Fix brackets
  Staging: rtl8187se: r8180_dm.c: Removed spaces before tab stop
  staging: vme: fix section mismatches in linux-next 20120314
  Staging: rtl8187se: r8180_core.c: Fix some long line issues
  Staging: rtl8187se: r8180_core.c: Fix some spacing issues
  Staging: rtl8187se: r8180_core.c: Removed trailing spaces
  staging: mei: remove driver internal versioning
  Staging: rtl8187se: r8180_core.c: Cleaned up if statement
  staging: ozwpan depends on NET
  staging: ozwpan: added maintainer for ozwpan driver
  staging/mei: propagate error codes up in the write flow
  drivers:staging:mei Fix some typos in staging/mei
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/scsi/Kconfig | 7
-rw-r--r--  drivers/scsi/Makefile | 3
-rw-r--r--  drivers/scsi/storvsc_drv.c (renamed from drivers/staging/hv/storvsc_drv.c) | 1020
-rw-r--r--  drivers/staging/Kconfig | 10
-rw-r--r--  drivers/staging/Makefile | 6
-rw-r--r--  drivers/staging/android/Kconfig | 86
-rw-r--r--  drivers/staging/android/Makefile | 3
-rw-r--r--  drivers/staging/android/TODO | 2
-rw-r--r--  drivers/staging/android/alarm-dev.c | 297
-rw-r--r--  drivers/staging/android/alarm.c | 601
-rw-r--r--  drivers/staging/android/android_alarm.h | 121
-rw-r--r--  drivers/staging/android/ashmem.c | 4
-rw-r--r--  drivers/staging/android/binder.c | 9
-rw-r--r--  drivers/staging/android/logger.c | 78
-rw-r--r--  drivers/staging/android/lowmemorykiller.c | 91
-rw-r--r--  drivers/staging/android/persistent_ram.c | 470
-rw-r--r--  drivers/staging/android/persistent_ram.h | 78
-rw-r--r--  drivers/staging/android/ram_console.c | 420
-rw-r--r--  drivers/staging/android/timed_gpio.c | 6
-rw-r--r--  drivers/staging/android/timed_gpio.h | 6
-rw-r--r--  drivers/staging/asus_oled/asus_oled.c | 19
-rw-r--r--  drivers/staging/bcm/Bcmchar.c | 41
-rw-r--r--  drivers/staging/bcm/CmHost.c | 3113
-rw-r--r--  drivers/staging/bcm/led_control.h | 80
-rw-r--r--  drivers/staging/comedi/Kconfig | 5
-rw-r--r--  drivers/staging/comedi/drivers/adv_pci_dio.c | 29
-rw-r--r--  drivers/staging/comedi/drivers/dt2801.c | 12
-rw-r--r--  drivers/staging/comedi/drivers/dt9812.c | 4
-rw-r--r--  drivers/staging/comedi/drivers/me4000.c | 12
-rw-r--r--  drivers/staging/comedi/drivers/ni_pcidio.c | 61
-rw-r--r--  drivers/staging/comedi/drivers/ni_pcimio.c | 27
-rw-r--r--  drivers/staging/comedi/drivers/unioxx5.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/usbduxsigma.c | 42
-rw-r--r--  drivers/staging/crystalhd/bc_dts_glob_lnx.h | 3
-rw-r--r--  drivers/staging/crystalhd/bc_dts_types.h | 40
-rw-r--r--  drivers/staging/crystalhd/crystalhd.h | 14
-rw-r--r--  drivers/staging/crystalhd/crystalhd_cmds.c | 3
-rw-r--r--  drivers/staging/crystalhd/crystalhd_cmds.h | 4
-rw-r--r--  drivers/staging/crystalhd/crystalhd_hw.c | 11
-rw-r--r--  drivers/staging/crystalhd/crystalhd_hw.h | 3
-rw-r--r--  drivers/staging/crystalhd/crystalhd_lnx.c | 7
-rw-r--r--  drivers/staging/crystalhd/crystalhd_lnx.h | 5
-rw-r--r--  drivers/staging/crystalhd/crystalhd_misc.c | 5
-rw-r--r--  drivers/staging/crystalhd/crystalhd_misc.h | 34
-rw-r--r--  drivers/staging/et131x/README | 2
-rw-r--r--  drivers/staging/et131x/et131x.c | 10
-rw-r--r--  drivers/staging/et131x/et131x.h | 4
-rw-r--r--  drivers/staging/frontier/alphatrack.c | 2
-rw-r--r--  drivers/staging/frontier/tranzport.c | 2
-rw-r--r--  drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c | 6
-rw-r--r--  drivers/staging/hv/Kconfig | 5
-rw-r--r--  drivers/staging/hv/Makefile | 3
-rw-r--r--  drivers/staging/hv/TODO | 5
-rw-r--r--  drivers/staging/iio/Documentation/device.txt | 2
-rw-r--r--  drivers/staging/iio/Documentation/iio_event_monitor.c | 241
-rw-r--r--  drivers/staging/iio/Documentation/inkernel.txt | 58
-rw-r--r--  drivers/staging/iio/Kconfig | 9
-rw-r--r--  drivers/staging/iio/Makefile | 4
-rw-r--r--  drivers/staging/iio/accel/adis16201_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/adis16203_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/adis16204_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/adis16209_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/adis16240_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/lis3l02dq.h | 2
-rw-r--r--  drivers/staging/iio/accel/lis3l02dq_ring.c | 4
-rw-r--r--  drivers/staging/iio/accel/sca3000.h | 2
-rw-r--r--  drivers/staging/iio/adc/Kconfig | 9
-rw-r--r--  drivers/staging/iio/adc/Makefile | 1
-rw-r--r--  drivers/staging/iio/adc/ad7192.c | 45
-rw-r--r--  drivers/staging/iio/adc/ad7291.c | 14
-rw-r--r--  drivers/staging/iio/adc/ad7298_ring.c | 3
-rw-r--r--  drivers/staging/iio/adc/ad7476_ring.c | 4
-rw-r--r--  drivers/staging/iio/adc/ad7606_core.c | 83
-rw-r--r--  drivers/staging/iio/adc/ad7606_par.c | 13
-rw-r--r--  drivers/staging/iio/adc/ad7606_ring.c | 2
-rw-r--r--  drivers/staging/iio/adc/ad7793.c | 2
-rw-r--r--  drivers/staging/iio/adc/ad7887_ring.c | 2
-rw-r--r--  drivers/staging/iio/adc/ad799x_core.c | 4
-rw-r--r--  drivers/staging/iio/adc/ad799x_ring.c | 4
-rw-r--r--  drivers/staging/iio/adc/adt7310.c | 21
-rw-r--r--  drivers/staging/iio/adc/adt7410.c | 21
-rw-r--r--  drivers/staging/iio/adc/lpc32xx_adc.c | 237
-rw-r--r--  drivers/staging/iio/adc/max1363_core.c | 50
-rw-r--r--  drivers/staging/iio/adc/max1363_ring.c | 2
-rw-r--r--  drivers/staging/iio/addac/adt7316-i2c.c | 18
-rw-r--r--  drivers/staging/iio/addac/adt7316-spi.c | 18
-rw-r--r--  drivers/staging/iio/addac/adt7316.c | 11
-rw-r--r--  drivers/staging/iio/addac/adt7316.h | 9
-rw-r--r--  drivers/staging/iio/buffer.h | 2
-rw-r--r--  drivers/staging/iio/cdc/ad7150.c | 10
-rw-r--r--  drivers/staging/iio/consumer.h | 96
-rw-r--r--  drivers/staging/iio/dac/Kconfig | 7
-rw-r--r--  drivers/staging/iio/dac/ad5064.c | 369
-rw-r--r--  drivers/staging/iio/dac/ad5360.c | 4
-rw-r--r--  drivers/staging/iio/dac/ad5380.c | 4
-rw-r--r--  drivers/staging/iio/dac/ad5421.c | 13
-rw-r--r--  drivers/staging/iio/dac/ad5446.c | 35
-rw-r--r--  drivers/staging/iio/dac/ad5764.c | 13
-rw-r--r--  drivers/staging/iio/dac/max517.c | 18
-rw-r--r--  drivers/staging/iio/dds/ad9834.c | 53
-rw-r--r--  drivers/staging/iio/driver.h | 34
-rw-r--r--  drivers/staging/iio/events.h | 4
-rw-r--r--  drivers/staging/iio/gyro/adis16260_ring.c | 2
-rw-r--r--  drivers/staging/iio/iio.h | 70
-rw-r--r--  drivers/staging/iio/iio_core.h | 4
-rw-r--r--  drivers/staging/iio/iio_dummy_evgen.c | 2
-rw-r--r--  drivers/staging/iio/iio_hwmon.c | 232
-rw-r--r--  drivers/staging/iio/iio_simple_dummy_buffer.c | 2
-rw-r--r--  drivers/staging/iio/impedance-analyzer/ad5933.c | 3
-rw-r--r--  drivers/staging/iio/imu/adis16400_ring.c | 2
-rw-r--r--  drivers/staging/iio/industrialio-buffer.c | 6
-rw-r--r--  drivers/staging/iio/industrialio-core.c | 658
-rw-r--r--  drivers/staging/iio/industrialio-event.c | 453
-rw-r--r--  drivers/staging/iio/inkern.c | 292
-rw-r--r--  drivers/staging/iio/kfifo_buf.c | 46
-rw-r--r--  drivers/staging/iio/kfifo_buf.h | 2
-rw-r--r--  drivers/staging/iio/light/isl29018.c | 7
-rw-r--r--  drivers/staging/iio/light/tsl2563.c | 65
-rw-r--r--  drivers/staging/iio/light/tsl2583.c | 19
-rw-r--r--  drivers/staging/iio/machine.h | 24
-rw-r--r--  drivers/staging/iio/magnetometer/ak8975.c | 8
-rw-r--r--  drivers/staging/iio/magnetometer/hmc5843.c | 26
-rw-r--r--  drivers/staging/iio/meter/ade7758_ring.c | 4
-rw-r--r--  drivers/staging/iio/meter/meter.h | 2
-rw-r--r--  drivers/staging/iio/ring_sw.c | 26
-rw-r--r--  drivers/staging/iio/ring_sw.h | 5
-rw-r--r--  drivers/staging/iio/trigger/iio-trig-bfin-timer.c | 12
-rw-r--r--  drivers/staging/iio/trigger/iio-trig-gpio.c | 12
-rw-r--r--  drivers/staging/iio/trigger/iio-trig-periodic-rtc.c | 12
-rw-r--r--  drivers/staging/iio/types.h | 4
-rw-r--r--  drivers/staging/keucr/TODO | 2
-rw-r--r--  drivers/staging/line6/capture.c | 54
-rw-r--r--  drivers/staging/line6/capture.h | 2
-rw-r--r--  drivers/staging/line6/driver.c | 2
-rw-r--r--  drivers/staging/line6/pcm.c | 109
-rw-r--r--  drivers/staging/line6/pcm.h | 167
-rw-r--r--  drivers/staging/line6/playback.c | 68
-rw-r--r--  drivers/staging/line6/playback.h | 2
-rw-r--r--  drivers/staging/line6/toneport.c | 12
-rw-r--r--  drivers/staging/line6/usbdefs.h | 44
-rw-r--r--  drivers/staging/media/easycap/easycap_main.c | 1
-rw-r--r--  drivers/staging/media/lirc/lirc_sasem.c | 17
-rw-r--r--  drivers/staging/mei/TODO | 3
-rw-r--r--  drivers/staging/mei/hw.h | 47
-rw-r--r--  drivers/staging/mei/init.c | 24
-rw-r--r--  drivers/staging/mei/interface.c | 72
-rw-r--r--  drivers/staging/mei/interface.h | 7
-rw-r--r--  drivers/staging/mei/interrupt.c | 106
-rw-r--r--  drivers/staging/mei/iorw.c | 17
-rw-r--r--  drivers/staging/mei/main.c | 14
-rw-r--r--  drivers/staging/mei/mei-amt-version.c | 481
-rw-r--r--  drivers/staging/mei/mei.h | 127
-rw-r--r--  drivers/staging/mei/mei.txt | 6
-rw-r--r--  drivers/staging/mei/mei_dev.h | 10
-rw-r--r--  drivers/staging/mei/mei_version.h | 31
-rw-r--r--  drivers/staging/mei/wd.c | 8
-rw-r--r--  drivers/staging/nvec/Kconfig | 6
-rw-r--r--  drivers/staging/nvec/nvec.c | 19
-rw-r--r--  drivers/staging/nvec/nvec_ps2.c | 53
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.c | 4
-rw-r--r--  drivers/staging/omapdrm/omap_crtc.c | 37
-rw-r--r--  drivers/staging/omapdrm/omap_debugfs.c | 97
-rw-r--r--  drivers/staging/omapdrm/omap_dmm_tiler.c | 91
-rw-r--r--  drivers/staging/omapdrm/omap_dmm_tiler.h | 15
-rw-r--r--  drivers/staging/omapdrm/omap_drv.c | 16
-rw-r--r--  drivers/staging/omapdrm/omap_drv.h | 19
-rw-r--r--  drivers/staging/omapdrm/omap_fb.c | 124
-rw-r--r--  drivers/staging/omapdrm/omap_fbdev.c | 26
-rw-r--r--  drivers/staging/omapdrm/omap_gem.c | 172
-rw-r--r--  drivers/staging/omapdrm/omap_gem_helpers.c | 2
-rw-r--r--  drivers/staging/omapdrm/omap_plane.c | 197
-rw-r--r--  drivers/staging/ozwpan/Kbuild | 19
-rw-r--r--  drivers/staging/ozwpan/Kconfig | 9
-rw-r--r--  drivers/staging/ozwpan/README | 25
-rw-r--r--  drivers/staging/ozwpan/TODO | 12
-rw-r--r--  drivers/staging/ozwpan/ozappif.h | 46
-rw-r--r--  drivers/staging/ozwpan/ozcdev.c | 521
-rw-r--r--  drivers/staging/ozwpan/ozcdev.h | 18
-rw-r--r--  drivers/staging/ozwpan/ozconfig.h | 27
-rw-r--r--  drivers/staging/ozwpan/ozeltbuf.c | 339
-rw-r--r--  drivers/staging/ozwpan/ozeltbuf.h | 70
-rw-r--r--  drivers/staging/ozwpan/ozevent.c | 116
-rw-r--r--  drivers/staging/ozwpan/ozevent.h | 31
-rw-r--r--  drivers/staging/ozwpan/ozeventdef.h | 47
-rw-r--r--  drivers/staging/ozwpan/ozhcd.c | 2256
-rw-r--r--  drivers/staging/ozwpan/ozhcd.h | 15
-rw-r--r--  drivers/staging/ozwpan/ozmain.c | 58
-rw-r--r--  drivers/staging/ozwpan/ozpd.c | 832
-rw-r--r--  drivers/staging/ozwpan/ozpd.h | 121
-rw-r--r--  drivers/staging/ozwpan/ozproto.c | 957
-rw-r--r--  drivers/staging/ozwpan/ozproto.h | 69
-rw-r--r--  drivers/staging/ozwpan/ozprotocol.h | 372
-rw-r--r--  drivers/staging/ozwpan/oztrace.c | 36
-rw-r--r--  drivers/staging/ozwpan/oztrace.h | 35
-rw-r--r--  drivers/staging/ozwpan/ozurbparanoia.c | 53
-rw-r--r--  drivers/staging/ozwpan/ozurbparanoia.h | 19
-rw-r--r--  drivers/staging/ozwpan/ozusbif.h | 43
-rw-r--r--  drivers/staging/ozwpan/ozusbsvc.c | 245
-rw-r--r--  drivers/staging/ozwpan/ozusbsvc.h | 32
-rw-r--r--  drivers/staging/ozwpan/ozusbsvc1.c | 437
-rw-r--r--  drivers/staging/quickstart/quickstart.c | 370
-rw-r--r--  drivers/staging/ramster/Kconfig | 17
-rw-r--r--  drivers/staging/ramster/Makefile | 1
-rw-r--r--  drivers/staging/ramster/TODO | 13
-rw-r--r--  drivers/staging/ramster/cluster/Makefile | 3
-rw-r--r--  drivers/staging/ramster/cluster/heartbeat.c | 464
-rw-r--r--  drivers/staging/ramster/cluster/heartbeat.h | 87
-rw-r--r--  drivers/staging/ramster/cluster/masklog.c | 155
-rw-r--r--  drivers/staging/ramster/cluster/masklog.h | 220
-rw-r--r--  drivers/staging/ramster/cluster/nodemanager.c | 992
-rw-r--r--  drivers/staging/ramster/cluster/nodemanager.h | 88
-rw-r--r--  drivers/staging/ramster/cluster/ramster_nodemanager.h | 39
-rw-r--r--  drivers/staging/ramster/cluster/tcp.c | 2256
-rw-r--r--  drivers/staging/ramster/cluster/tcp.h | 159
-rw-r--r--  drivers/staging/ramster/cluster/tcp_internal.h | 248
-rw-r--r--  drivers/staging/ramster/r2net.c | 401
-rw-r--r--  drivers/staging/ramster/ramster.h | 118
-rw-r--r--  drivers/staging/ramster/tmem.c | 851
-rw-r--r--  drivers/staging/ramster/tmem.h | 244
-rw-r--r--  drivers/staging/ramster/xvmalloc.c (renamed from drivers/staging/zram/xvmalloc.c) | 0
-rw-r--r--  drivers/staging/ramster/xvmalloc.h (renamed from drivers/staging/zram/xvmalloc.h) | 0
-rw-r--r--  drivers/staging/ramster/xvmalloc_int.h (renamed from drivers/staging/zram/xvmalloc_int.h) | 0
-rw-r--r--  drivers/staging/ramster/zcache-main.c | 3320
-rw-r--r--  drivers/staging/ramster/zcache.h | 22
-rw-r--r--  drivers/staging/rtl8187se/r8180_core.c | 111
-rw-r--r--  drivers/staging/rtl8187se/r8180_dm.c | 1792
-rw-r--r--  drivers/staging/rtl8187se/r8180_wx.c | 286
-rw-r--r--  drivers/staging/rtl8192e/rtllib_rx.c | 2
-rw-r--r--  drivers/staging/rtl8192e/rtllib_softmac.c | 3
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c | 2
-rw-r--r--  drivers/staging/rtl8192u/r8192U_core.c | 1
-rw-r--r--  drivers/staging/rtl8712/Kconfig | 7
-rw-r--r--  drivers/staging/rtl8712/drv_types.h | 1
-rw-r--r--  drivers/staging/rtl8712/os_intfs.c | 6
-rw-r--r--  drivers/staging/rtl8712/osdep_service.h | 17
-rw-r--r--  drivers/staging/rtl8712/rtl8712_recv.c | 2
-rw-r--r--  drivers/staging/rtl8712/rtl871x_io.c | 1
-rw-r--r--  drivers/staging/rtl8712/rtl871x_io.h | 1
-rw-r--r--  drivers/staging/rtl8712/rtl871x_ioctl_linux.c | 8
-rw-r--r--  drivers/staging/rtl8712/rtl871x_pwrctrl.c | 11
-rw-r--r--  drivers/staging/rtl8712/rtl871x_pwrctrl.h | 1
-rw-r--r--  drivers/staging/rtl8712/rtl871x_recv.c | 1
-rw-r--r--  drivers/staging/rtl8712/rtl871x_recv.h | 3
-rw-r--r--  drivers/staging/rtl8712/rtl871x_sta_mgt.c | 4
-rw-r--r--  drivers/staging/rtl8712/rtl871x_xmit.c | 3
-rw-r--r--  drivers/staging/rtl8712/rtl871x_xmit.h | 3
-rw-r--r--  drivers/staging/rtl8712/sta_info.h | 4
-rw-r--r--  drivers/staging/rtl8712/usb_intf.c | 9
-rw-r--r--  drivers/staging/rts5139/TODO | 6
-rw-r--r--  drivers/staging/rts5139/ms.h | 4
-rw-r--r--  drivers/staging/rts5139/rts51x_chip.c | 14
-rw-r--r--  drivers/staging/rts5139/rts51x_chip.h | 6
-rw-r--r--  drivers/staging/rts5139/rts51x_fop.h | 2
-rw-r--r--  drivers/staging/rts5139/rts51x_transport.c | 2
-rw-r--r--  drivers/staging/rts5139/rts51x_transport.h | 2
-rw-r--r--  drivers/staging/rts5139/sd_cprm.c | 2
-rw-r--r--  drivers/staging/rts_pstor/TODO | 6
-rw-r--r--  drivers/staging/sbe-2t3e3/intr.c | 2
-rw-r--r--  drivers/staging/sep/Kconfig | 3
-rw-r--r--  drivers/staging/sep/Makefile | 5
-rw-r--r--  drivers/staging/sep/TODO | 5
-rw-r--r--  drivers/staging/sep/sep_crypto.c | 4058
-rw-r--r--  drivers/staging/sep/sep_crypto.h | 359
-rw-r--r--  drivers/staging/sep/sep_dev.h | 98
-rw-r--r--  drivers/staging/sep/sep_driver.c | 2932
-rw-r--r--  drivers/staging/sep/sep_driver_api.h | 293
-rw-r--r--  drivers/staging/sep/sep_driver_config.h | 79
-rw-r--r--  drivers/staging/sep/sep_driver_hw_defs.h | 185
-rw-r--r--  drivers/staging/sep/sep_main.c | 4518
-rw-r--r--  drivers/staging/sep/sep_trace_events.h | 188
-rw-r--r--  drivers/staging/slicoss/README | 2
-rw-r--r--  drivers/staging/sm7xx/smtcfb.c | 3
-rw-r--r--  drivers/staging/sm7xx/smtcfb.h | 2
-rw-r--r--  drivers/staging/telephony/Kconfig (renamed from drivers/telephony/Kconfig) | 0
-rw-r--r--  drivers/staging/telephony/Makefile (renamed from drivers/telephony/Makefile) | 0
-rw-r--r--  drivers/staging/telephony/TODO | 10
-rw-r--r--  drivers/staging/telephony/ixj-ver.h (renamed from drivers/telephony/ixj-ver.h) | 0
-rw-r--r--  drivers/staging/telephony/ixj.c (renamed from drivers/telephony/ixj.c) | 0
-rw-r--r--  drivers/staging/telephony/ixj.h (renamed from drivers/telephony/ixj.h) | 0
-rw-r--r--  drivers/staging/telephony/ixj_pcmcia.c (renamed from drivers/telephony/ixj_pcmcia.c) | 0
-rw-r--r--  drivers/staging/telephony/phonedev.c (renamed from drivers/telephony/phonedev.c) | 0
-rw-r--r--  drivers/staging/tidspbridge/Kconfig | 22
-rw-r--r--  drivers/staging/tidspbridge/Makefile | 4
-rw-r--r--  drivers/staging/tidspbridge/core/chnl_sm.c | 34
-rw-r--r--  drivers/staging/tidspbridge/core/dsp-clock.c | 3
-rw-r--r--  drivers/staging/tidspbridge/core/io_sm.c | 29
-rw-r--r--  drivers/staging/tidspbridge/core/msg_sm.c | 3
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430.c | 19
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430_pwr.c | 1
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap_io.c | 18
-rw-r--r--  drivers/staging/tidspbridge/core/wdt.c | 24
-rw-r--r--  drivers/staging/tidspbridge/gen/gh.c | 18
-rw-r--r--  drivers/staging/tidspbridge/gen/uuidutil.c | 7
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h | 4
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/chnl.h | 29
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/cmm.h | 30
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/cod.h | 29
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/dbc.h | 46
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/dev.h | 27
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/disp.h | 31
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/dmm.h | 4
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/drv.h | 23
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/gh.h | 2
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/io.h | 29
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/io_sm.h | 2
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/msg.h | 27
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/nldr.h | 2
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h | 34
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/node.h | 41
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/nodepriv.h | 1
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/proc.h | 28
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/rmm.h | 25
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/strm.h | 38
-rw-r--r--  drivers/staging/tidspbridge/pmgr/chnl.c | 47
-rw-r--r--  drivers/staging/tidspbridge/pmgr/cmm.c | 97
-rw-r--r--  drivers/staging/tidspbridge/pmgr/cod.c | 103
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dbll.c | 125
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dev.c | 182
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dmm.c | 46
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dspapi.c | 82
-rw-r--r--  drivers/staging/tidspbridge/pmgr/io.c | 45
-rw-r--r--  drivers/staging/tidspbridge/pmgr/msg.c | 38
-rw-r--r--  drivers/staging/tidspbridge/rmgr/dbdcd.c | 103
-rw-r--r--  drivers/staging/tidspbridge/rmgr/disp.c | 69
-rw-r--r--  drivers/staging/tidspbridge/rmgr/drv.c | 74
-rw-r--r--  drivers/staging/tidspbridge/rmgr/drv_interface.c | 366
-rw-r--r--  drivers/staging/tidspbridge/rmgr/drv_interface.h | 28
-rw-r--r--  drivers/staging/tidspbridge/rmgr/dspdrv.c | 5
-rw-r--r--  drivers/staging/tidspbridge/rmgr/mgr.c | 45
-rw-r--r--  drivers/staging/tidspbridge/rmgr/nldr.c | 99
-rw-r--r--  drivers/staging/tidspbridge/rmgr/node.c | 129
-rw-r--r--  drivers/staging/tidspbridge/rmgr/proc.c | 119
-rw-r--r--  drivers/staging/tidspbridge/rmgr/rmm.c | 56
-rw-r--r--  drivers/staging/tidspbridge/rmgr/strm.c | 114
-rw-r--r--  drivers/staging/usbip/stub.h | 1
-rw-r--r--  drivers/staging/usbip/stub_dev.c | 2
-rw-r--r--  drivers/staging/usbip/stub_rx.c | 9
-rw-r--r--  drivers/staging/usbip/usbip_common.c | 11
-rw-r--r--  drivers/staging/usbip/usbip_common.h | 2
-rw-r--r--  drivers/staging/usbip/vhci_hcd.c | 41
-rw-r--r--  drivers/staging/usbip/vhci_rx.c | 3
-rw-r--r--  drivers/staging/vme/devices/vme_pio2.h | 4
-rw-r--r--  drivers/staging/vme/devices/vme_pio2_gpio.c | 4
-rw-r--r--  drivers/staging/vme/vme.h | 2
-rw-r--r--  drivers/staging/vt6655/bssdb.c | 4
-rw-r--r--  drivers/staging/vt6655/ioctl.c | 23
-rw-r--r--  drivers/staging/vt6656/bssdb.c | 4
-rw-r--r--  drivers/staging/vt6656/iwctl.c | 230
-rw-r--r--  drivers/staging/vt6656/iwctl.h | 13
-rw-r--r--  drivers/staging/vt6656/main_usb.c | 13
-rw-r--r--  drivers/staging/vt6656/wpactl.c | 937
-rw-r--r--  drivers/staging/wlan-ng/cfg80211.c | 2
-rw-r--r--  drivers/staging/wlan-ng/p80211netdev.c | 2
-rw-r--r--  drivers/staging/wlan-ng/prism2mgmt.c | 1
-rw-r--r--  drivers/staging/xgifb/XGI_main.h | 78
-rw-r--r--  drivers/staging/xgifb/XGI_main_26.c | 208
-rw-r--r--  drivers/staging/xgifb/XGIfb.h | 2
-rw-r--r--  drivers/staging/xgifb/vb_def.h | 178
-rw-r--r--  drivers/staging/xgifb/vb_init.c | 20
-rw-r--r--  drivers/staging/xgifb/vb_setmode.c | 836
-rw-r--r--  drivers/staging/xgifb/vb_struct.h | 79
-rw-r--r--  drivers/staging/xgifb/vb_table.h | 346
-rw-r--r--  drivers/staging/xgifb/vgatypes.h | 9
-rw-r--r--  drivers/staging/zcache/Kconfig | 13
-rw-r--r--  drivers/staging/zcache/tmem.h | 2
-rw-r--r--  drivers/staging/zcache/zcache-main.c | 237
-rw-r--r--  drivers/staging/zram/Kconfig | 10
-rw-r--r--  drivers/staging/zram/Makefile | 1
-rw-r--r--  drivers/staging/zram/zram_drv.c | 116
-rw-r--r--  drivers/staging/zram/zram_drv.h | 12
-rw-r--r--  drivers/staging/zram/zram_sysfs.c | 4
-rw-r--r--  drivers/staging/zsmalloc/Kconfig | 14
-rw-r--r--  drivers/staging/zsmalloc/Makefile | 3
-rw-r--r--  drivers/staging/zsmalloc/zsmalloc-main.c | 745
-rw-r--r--  drivers/staging/zsmalloc/zsmalloc.h | 31
-rw-r--r--  drivers/staging/zsmalloc/zsmalloc_int.h | 155
378 files changed, 37807 insertions, 14013 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 5afe5d1f199b..decf8e420856 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -40,8 +40,6 @@ source "drivers/net/Kconfig"
 
 source "drivers/isdn/Kconfig"
 
-source "drivers/telephony/Kconfig"
-
 # input before char - char/joystick depends on it. As does USB.
 
 source "drivers/input/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index c07be024b962..932e8bf20356 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -86,7 +86,6 @@ obj-$(CONFIG_POWER_SUPPLY) += power/
 obj-$(CONFIG_HWMON)		+= hwmon/
 obj-$(CONFIG_THERMAL)		+= thermal/
 obj-$(CONFIG_WATCHDOG)		+= watchdog/
-obj-$(CONFIG_PHONE)		+= telephony/
 obj-$(CONFIG_MD)		+= md/
 obj-$(CONFIG_BT)		+= bluetooth/
 obj-$(CONFIG_ACCESSIBILITY)	+= accessibility/
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 16570aa84aac..d3d18e89cb57 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -662,6 +662,13 @@ config VMWARE_PVSCSI
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmw_pvscsi.
 
+config HYPERV_STORAGE
+	tristate "Microsoft Hyper-V virtual storage driver"
+	depends on SCSI && HYPERV
+	default HYPERV
+	help
+	  Select this option to enable the Hyper-V virtual storage driver.
+
 config LIBFC
 	tristate "LibFC module"
 	select SCSI_FC_ATTRS
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2b887498be50..e4c1a69f8fab 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -142,6 +142,7 @@ obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
 obj-$(CONFIG_BE2ISCSI)		+= libiscsi.o be2iscsi/
 obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
 obj-$(CONFIG_VMWARE_PVSCSI)	+= vmw_pvscsi.o
+obj-$(CONFIG_HYPERV_STORAGE)	+= hv_storvsc.o
 
 obj-$(CONFIG_ARM)		+= arm/
 
@@ -170,6 +171,8 @@ scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
 scsi_mod-y			+= scsi_trace.o
 scsi_mod-$(CONFIG_PM)		+= scsi_pm.o
 
+hv_storvsc-y			:= storvsc_drv.o
+
 scsi_tgt-y			+= scsi_tgt_lib.o scsi_tgt_if.o
 
 sd_mod-objs			:= sd.o
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index eb853f71089a..695ffc36e02d 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -42,56 +42,23 @@
 #include <scsi/scsi_devinfo.h>
 #include <scsi/scsi_dbg.h>
 
+/*
+ * All wire protocol details (storage protocol between the guest and the host)
+ * are consolidated here.
+ *
+ * Begin protocol definitions.
+ */
 
-#define STORVSC_MIN_BUF_NR 64
-#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
-static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
-
-module_param(storvsc_ringbuffer_size, int, S_IRUGO);
-MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
-
-/* to alert the user that structure sizes may be mismatched even though the */
-/* protocol versions match. */
-
-
-#define REVISION_STRING(REVISION_) #REVISION_
-#define FILL_VMSTOR_REVISION(RESULT_LVALUE_) \
-	do { \
-		char *revision_string \
-		    = REVISION_STRING($Rev : 6 $) + 6; \
-		RESULT_LVALUE_ = 0; \
-		while (*revision_string >= '0' \
-			&& *revision_string <= '9') { \
-			RESULT_LVALUE_ *= 10; \
-			RESULT_LVALUE_ += *revision_string - '0'; \
-			revision_string++; \
-		} \
-	} while (0)
-
-/* Major/minor macros. Minor version is in LSB, meaning that earlier flat */
-/* version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1). */
-#define VMSTOR_PROTOCOL_MAJOR(VERSION_) (((VERSION_) >> 8) & 0xff)
-#define VMSTOR_PROTOCOL_MINOR(VERSION_) (((VERSION_)) & 0xff)
-#define VMSTOR_PROTOCOL_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
-						 (((MINOR_) & 0xff)))
-#define VMSTOR_INVALID_PROTOCOL_VERSION (-1)
-
-/* Version history: */
-/* V1 Beta 0.1 */
-/* V1 RC < 2008/1/31 1.0 */
-/* V1 RC > 2008/1/31 2.0 */
-#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(4, 2)
-
-
-
+/*
+ * Version history:
+ * V1 Beta: 0.1
+ * V1 RC < 2008/1/31: 1.0
+ * V1 RC > 2008/1/31: 2.0
+ * Win7: 4.2
+ */
 
-/* This will get replaced with the max transfer length that is possible on */
-/* the host adapter. */
-/* The max transfer length will be published when we offer a vmbus channel. */
-#define MAX_TRANSFER_LENGTH 0x40000
-#define DEFAULT_PACKET_SIZE (sizeof(struct vmdata_gpa_direct) + \
-			sizeof(struct vstor_packet) + \
-			(sizeof(u64) * (MAX_TRANSFER_LENGTH / PAGE_SIZE)))
+#define VMSTOR_CURRENT_MAJOR 4
+#define VMSTOR_CURRENT_MINOR 2
 
 
 /* Packet structure describing virtual storage requests. */
@@ -115,35 +82,31 @@ enum vstor_packet_operation {
  * this remains the same across the write regardless of 32/64 bit
  * note: it's patterned off the SCSI_PASS_THROUGH structure
  */
-#define CDB16GENERIC_LENGTH 0x10
-
-#ifndef SENSE_BUFFER_SIZE
-#define SENSE_BUFFER_SIZE 0x12
-#endif
-
-#define MAX_DATA_BUF_LEN_WITH_PADDING 0x14
+#define STORVSC_MAX_CMD_LEN 0x10
+#define STORVSC_SENSE_BUFFER_SIZE 0x12
+#define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14
 
 struct vmscsi_request {
-	unsigned short length;
-	unsigned char srb_status;
-	unsigned char scsi_status;
+	u16 length;
+	u8 srb_status;
+	u8 scsi_status;
 
-	unsigned char port_number;
-	unsigned char path_id;
-	unsigned char target_id;
-	unsigned char lun;
+	u8 port_number;
+	u8 path_id;
+	u8 target_id;
+	u8 lun;
 
-	unsigned char cdb_length;
-	unsigned char sense_info_length;
-	unsigned char data_in;
-	unsigned char reserved;
+	u8 cdb_length;
+	u8 sense_info_length;
+	u8 data_in;
+	u8 reserved;
 
-	unsigned int data_transfer_length;
+	u32 data_transfer_length;
 
 	union {
-		unsigned char cdb[CDB16GENERIC_LENGTH];
-		unsigned char sense_data[SENSE_BUFFER_SIZE];
-		unsigned char reserved_array[MAX_DATA_BUF_LEN_WITH_PADDING];
+		u8 cdb[STORVSC_MAX_CMD_LEN];
+		u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
+		u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
 	};
 } __attribute((packed));
 
@@ -153,32 +116,36 @@ struct vmscsi_request {
  * properties of the channel.
  */
 struct vmstorage_channel_properties {
-	unsigned short protocol_version;
-	unsigned char path_id;
-	unsigned char target_id;
+	u16 protocol_version;
+	u8 path_id;
+	u8 target_id;
 
 	/* Note: port number is only really known on the client side */
-	unsigned int port_number;
-	unsigned int flags;
-	unsigned int max_transfer_bytes;
+	u32 port_number;
+	u32 flags;
+	u32 max_transfer_bytes;
 
-	/* This id is unique for each channel and will correspond with */
-	/* vendor specific data in the inquirydata */
-	unsigned long long unique_id;
+	/*
+	 * This id is unique for each channel and will correspond with
+	 * vendor specific data in the inquiry data.
+	 */
+
+	u64 unique_id;
 } __packed;
 
 /* This structure is sent during the storage protocol negotiations. */
 struct vmstorage_protocol_version {
 	/* Major (MSW) and minor (LSW) version numbers. */
-	unsigned short major_minor;
+	u16 major_minor;
 
 	/*
 	 * Revision number is auto-incremented whenever this file is changed
 	 * (See FILL_VMSTOR_REVISION macro above). Mismatch does not
 	 * definitely indicate incompatibility--but it does indicate mismatched
 	 * builds.
+	 * This is only used on the windows side. Just set it to 0.
 	 */
-	unsigned short revision;
+	u16 revision;
 } __packed;
 
 /* Channel Property Flags */
@@ -190,10 +157,10 @@ struct vstor_packet {
 	enum vstor_packet_operation operation;
 
 	/* Flags - see below for values */
-	unsigned int flags;
+	u32 flags;
 
 	/* Status of the request returned from the server side. */
-	unsigned int status;
+	u32 status;
 
 	/* Data payload area */
 	union {
@@ -211,18 +178,47 @@ struct vstor_packet {
 	};
 } __packed;
 
-/* Packet flags */
 /*
+ * Packet Flags:
+ *
  * This flag indicates that the server should send back a completion for this
  * packet.
  */
+
 #define REQUEST_COMPLETION_FLAG 0x1
 
-/* This is the set of flags that the vsc can set in any packets it sends */
-#define VSC_LEGAL_FLAGS (REQUEST_COMPLETION_FLAG)
+/* Matches Windows-end */
+enum storvsc_request_type {
+	WRITE_TYPE = 0,
+	READ_TYPE,
+	UNKNOWN_TYPE,
+};
 
+/*
+ * SRB status codes and masks; a subset of the codes used here.
+ */
 
-/* Defines */
+#define SRB_STATUS_AUTOSENSE_VALID 0x80
+#define SRB_STATUS_INVALID_LUN 0x20
+#define SRB_STATUS_SUCCESS 0x01
+#define SRB_STATUS_ERROR 0x04
+
+/*
+ * This is the end of Protocol specific defines.
+ */
+
+
+/*
+ * We setup a mempool to allocate request structures for this driver
+ * on a per-lun basis. The following define specifies the number of
+ * elements in the pool.
+ */
+
+#define STORVSC_MIN_BUF_NR 64
+static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);
+
+module_param(storvsc_ringbuffer_size, int, S_IRUGO);
+MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
 
 #define STORVSC_MAX_IO_REQUESTS 128
 
@@ -235,27 +231,23 @@ struct vstor_packet {
 #define STORVSC_MAX_LUNS_PER_TARGET 64
 #define STORVSC_MAX_TARGETS 1
 #define STORVSC_MAX_CHANNELS 1
-#define STORVSC_MAX_CMD_LEN 16
 
-/* Matches Windows-end */
-enum storvsc_request_type {
-	WRITE_TYPE,
-	READ_TYPE,
-	UNKNOWN_TYPE,
-};
 
 
-struct hv_storvsc_request {
+struct storvsc_cmd_request {
+	struct list_head entry;
+	struct scsi_cmnd *cmd;
+
+	unsigned int bounce_sgl_count;
+	struct scatterlist *bounce_sgl;
+
 	struct hv_device *device;
 
 	/* Synchronize the request/response if needed */
 	struct completion wait_event;
 
 	unsigned char *sense_buffer;
-	void *context;
-	void (*on_io_completion)(struct hv_storvsc_request *request);
 	struct hv_multipage_buffer data_buffer;
-
 	struct vstor_packet vstor_packet;
 };
 
@@ -281,8 +273,8 @@ struct storvsc_device {
 	unsigned char target_id;
 
 	/* Used for vsc/vsp channel reset process */
-	struct hv_storvsc_request init_request;
-	struct hv_storvsc_request reset_request;
+	struct storvsc_cmd_request init_request;
+	struct storvsc_cmd_request reset_request;
 };
 
 struct stor_mem_pools {
@@ -297,16 +289,6 @@ struct hv_host_device {
 	unsigned char target;
 };
 
-struct storvsc_cmd_request {
-	struct list_head entry;
-	struct scsi_cmnd *cmd;
-
-	unsigned int bounce_sgl_count;
-	struct scatterlist *bounce_sgl;
-
-	struct hv_storvsc_request request;
-};
-
 struct storvsc_scan_work {
 	struct work_struct work;
 	struct Scsi_Host *host;
@@ -352,6 +334,34 @@ done:
 	kfree(wrk);
 }
 
+/*
+ * Major/minor macros. Minor version is in LSB, meaning that earlier flat
+ * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
+ */
+
+static inline u16 storvsc_get_version(u8 major, u8 minor)
+{
+	u16 version;
+
+	version = ((major << 8) | minor);
+	return version;
+}
+
+/*
+ * We can get incoming messages from the host that are not in response to
+ * messages that we have sent out. An example of this would be messages
+ * received by the guest to notify dynamic addition/removal of LUNs. To
+ * deal with potential race conditions where the driver may be in the
+ * midst of being unloaded when we might receive an unsolicited message
+ * from the host, we have implemented a mechanism to guarantee sequential
+ * consistency:
+ *
+ * 1) Once the device is marked as being destroyed, we will fail all
+ *    outgoing messages.
+ * 2) We permit incoming messages when the device is being destroyed,
+ *    only to properly account for messages already sent out.
+ */
+
 static inline struct storvsc_device *get_out_stor_device(
 					struct hv_device *device)
 {
@@ -398,10 +408,231 @@ get_in_err:
 
 }
 
+static void destroy_bounce_buffer(struct scatterlist *sgl,
+				unsigned int sg_count)
+{
+	int i;
+	struct page *page_buf;
+
+	for (i = 0; i < sg_count; i++) {
+		page_buf = sg_page((&sgl[i]));
+		if (page_buf != NULL)
+			__free_page(page_buf);
+	}
+
+	kfree(sgl);
+}
+
+static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
+{
+	int i;
+
+	/* No need to check */
+	if (sg_count < 2)
+		return -1;
+
+	/* We have at least 2 sg entries */
+	for (i = 0; i < sg_count; i++) {
+		if (i == 0) {
+			/* make sure 1st one does not have hole */
+			if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
+				return i;
+		} else if (i == sg_count - 1) {
+			/* make sure last one does not have hole */
+			if (sgl[i].offset != 0)
+				return i;
+		} else {
+			/* make sure no hole in the middle */
+			if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
+				return i;
+		}
+	}
+	return -1;
+}
+
+static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
+						unsigned int sg_count,
+						unsigned int len,
+						int write)
+{
+	int i;
+	int num_pages;
+	struct scatterlist *bounce_sgl;
+	struct page *page_buf;
+	unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
+
+	num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
+
+	bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
+	if (!bounce_sgl)
+		return NULL;
+
+	for (i = 0; i < num_pages; i++) {
+		page_buf = alloc_page(GFP_ATOMIC);
+		if (!page_buf)
+			goto cleanup;
+		sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
+	}
+
+	return bounce_sgl;
+
+cleanup:
+	destroy_bounce_buffer(bounce_sgl, num_pages);
+	return NULL;
+}
+
+/* Assume the original sgl has enough room */
+static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
+					    struct scatterlist *bounce_sgl,
+					    unsigned int orig_sgl_count,
+					    unsigned int bounce_sgl_count)
+{
+	int i;
+	int j = 0;
+	unsigned long src, dest;
+	unsigned int srclen, destlen, copylen;
+	unsigned int total_copied = 0;
+	unsigned long bounce_addr = 0;
+	unsigned long dest_addr = 0;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	for (i = 0; i < orig_sgl_count; i++) {
+		dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
+					KM_IRQ0) + orig_sgl[i].offset;
+		dest = dest_addr;
+		destlen = orig_sgl[i].length;
+
+		if (bounce_addr == 0)
+			bounce_addr =
+			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
+							KM_IRQ0);
+
+		while (destlen) {
+			src = bounce_addr + bounce_sgl[j].offset;
+			srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
+
+			copylen = min(srclen, destlen);
+			memcpy((void *)dest, (void *)src, copylen);
+
+			total_copied += copylen;
+			bounce_sgl[j].offset += copylen;
+			destlen -= copylen;
+			dest += copylen;
+
+			if (bounce_sgl[j].offset == bounce_sgl[j].length) {
+				/* full */
+				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+				j++;
+
+				/*
+				 * It is possible that the number of elements
+				 * in the bounce buffer may not be equal to
+				 * the number of elements in the original
+				 * scatter list. Handle this correctly.
+				 */
+
+				if (j == bounce_sgl_count) {
+					/*
+					 * We are done; cleanup and return.
+					 */
+					kunmap_atomic((void *)(dest_addr -
+							orig_sgl[i].offset),
+							KM_IRQ0);
+					local_irq_restore(flags);
+					return total_copied;
+				}
+
+				/* if we need to use another bounce buffer */
+				if (destlen || i != orig_sgl_count - 1)
+					bounce_addr =
+					(unsigned long)kmap_atomic(
+					sg_page((&bounce_sgl[j])), KM_IRQ0);
+			} else if (destlen == 0 && i == orig_sgl_count - 1) {
+				/* unmap the last bounce that is < PAGE_SIZE */
+				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+			}
+		}
+
+		kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
+			      KM_IRQ0);
+	}
+
+	local_irq_restore(flags);
+
+	return total_copied;
+}
+
+/* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */
+static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
+					  struct scatterlist *bounce_sgl,
+					  unsigned int orig_sgl_count)
+{
+	int i;
+	int j = 0;
+	unsigned long src, dest;
+	unsigned int srclen, destlen, copylen;
+	unsigned int total_copied = 0;
+	unsigned long bounce_addr = 0;
+	unsigned long src_addr = 0;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	for (i = 0; i < orig_sgl_count; i++) {
+		src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
+				KM_IRQ0) + orig_sgl[i].offset;
+		src = src_addr;
+		srclen = orig_sgl[i].length;
+
+		if (bounce_addr == 0)
+			bounce_addr =
+			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
+						KM_IRQ0);
+
+		while (srclen) {
+			/* assume bounce offset always == 0 */
+			dest = bounce_addr + bounce_sgl[j].length;
+			destlen = PAGE_SIZE - bounce_sgl[j].length;
+
+			copylen = min(srclen, destlen);
+			memcpy((void *)dest, (void *)src, copylen);
+
+			total_copied += copylen;
+			bounce_sgl[j].length += copylen;
+			srclen -= copylen;
+			src += copylen;
+
+			if (bounce_sgl[j].length == PAGE_SIZE) {
+				/* full..move to next entry */
+				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+				j++;
+
+				/* if we need to use another bounce buffer */
+				if (srclen || i != orig_sgl_count - 1)
+					bounce_addr =
+					(unsigned long)kmap_atomic(
+					sg_page((&bounce_sgl[j])), KM_IRQ0);
+
+			} else if (srclen == 0 && i == orig_sgl_count - 1) {
+				/* unmap the last bounce that is < PAGE_SIZE */
+				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+			}
+		}
+
+		kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
+	}
+
+	local_irq_restore(flags);
+
+	return total_copied;
+}
+
 static int storvsc_channel_init(struct hv_device *device)
 {
 	struct storvsc_device *stor_device;
-	struct hv_storvsc_request *request;
+	struct storvsc_cmd_request *request;
 	struct vstor_packet *vstor_packet;
 	int ret, t;
 
@@ -416,7 +647,7 @@ static int storvsc_channel_init(struct hv_device *device)
 	 * Now, initiate the vsc/vsp initialization protocol on the open
 	 * channel
 	 */
-	memset(request, 0, sizeof(struct hv_storvsc_request));
+	memset(request, 0, sizeof(struct storvsc_cmd_request));
 	init_completion(&request->wait_event);
 	vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
@@ -445,8 +676,13 @@ static int storvsc_channel_init(struct hv_device *device)
 	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 
-	vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
-	FILL_VMSTOR_REVISION(vstor_packet->version.revision);
+	vstor_packet->version.major_minor =
+		storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR);
+
+	/*
+	 * The revision number is only used in Windows; set it to 0.
+	 */
+	vstor_packet->version.revision = 0;
 
 	ret = vmbus_sendpacket(device->channel, vstor_packet,
 			       sizeof(struct vstor_packet),
@@ -524,9 +760,84 @@ cleanup:
 	return ret;
 }
 
+
+static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
+{
+	struct scsi_cmnd *scmnd = cmd_request->cmd;
+	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
+	void (*scsi_done_fn)(struct scsi_cmnd *);
+	struct scsi_sense_hdr sense_hdr;
+	struct vmscsi_request *vm_srb;
+	struct storvsc_scan_work *wrk;
+	struct stor_mem_pools *memp = scmnd->device->hostdata;
+
+	vm_srb = &cmd_request->vstor_packet.vm_srb;
+	if (cmd_request->bounce_sgl_count) {
+		if (vm_srb->data_in == READ_TYPE)
+			copy_from_bounce_buffer(scsi_sglist(scmnd),
+					cmd_request->bounce_sgl,
+					scsi_sg_count(scmnd),
+					cmd_request->bounce_sgl_count);
+		destroy_bounce_buffer(cmd_request->bounce_sgl,
+					cmd_request->bounce_sgl_count);
+	}
+
+	/*
+	 * If there is an error; offline the device since all
+	 * error recovery strategies would have already been
+	 * deployed on the host side.
+	 */
+	if (vm_srb->srb_status == SRB_STATUS_ERROR)
+		scmnd->result = DID_TARGET_FAILURE << 16;
+	else
+		scmnd->result = vm_srb->scsi_status;
+
+	/*
+	 * If the LUN is invalid; remove the device.
+	 */
+	if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
+		struct storvsc_device *stor_dev;
+		struct hv_device *dev = host_dev->dev;
+		struct Scsi_Host *host;
+
+		stor_dev = get_in_stor_device(dev);
+		host = stor_dev->host;
+
+		wrk = kmalloc(sizeof(struct storvsc_scan_work),
+				GFP_ATOMIC);
+		if (!wrk) {
+			scmnd->result = DID_TARGET_FAILURE << 16;
+		} else {
+			wrk->host = host;
+			wrk->lun = vm_srb->lun;
+			INIT_WORK(&wrk->work, storvsc_remove_lun);
+			schedule_work(&wrk->work);
+		}
+	}
+
+	if (scmnd->result) {
+		if (scsi_normalize_sense(scmnd->sense_buffer,
+				SCSI_SENSE_BUFFERSIZE, &sense_hdr))
+			scsi_print_sense_hdr("storvsc", &sense_hdr);
+	}
+
+	scsi_set_resid(scmnd,
+		cmd_request->data_buffer.len -
+		vm_srb->data_transfer_length);
+
+	scsi_done_fn = scmnd->scsi_done;
+
+	scmnd->host_scribble = NULL;
+	scmnd->scsi_done = NULL;
+
+	scsi_done_fn(scmnd);
+
+	mempool_free(cmd_request, memp->request_mempool);
+}
+
 static void storvsc_on_io_completion(struct hv_device *device,
 				struct vstor_packet *vstor_packet,
-				struct hv_storvsc_request *request)
+				struct storvsc_cmd_request *request)
 {
 	struct storvsc_device *stor_device;
 	struct vstor_packet *stor_pkt;
@@ -546,9 +857,9 @@ static void storvsc_on_io_completion(struct hv_device *device,
 	 */
 
 	if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
 	   (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
 		vstor_packet->vm_srb.scsi_status = 0;
-		vstor_packet->vm_srb.srb_status = 0x1;
+		vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
 	}
 
 
@@ -559,7 +870,7 @@ static void storvsc_on_io_completion(struct hv_device *device,
 		vstor_packet->vm_srb.sense_info_length;
 
 	if (vstor_packet->vm_srb.scsi_status != 0 ||
-		vstor_packet->vm_srb.srb_status != 1){
+		vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS){
 		dev_warn(&device->device,
 			 "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
 			 stor_pkt->vm_srb.cdb[0],
@@ -569,7 +880,8 @@ static void storvsc_on_io_completion(struct hv_device *device,
 
 	if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
 		/* CHECK_CONDITION */
-		if (vstor_packet->vm_srb.srb_status & 0x80) {
+		if (vstor_packet->vm_srb.srb_status &
+			SRB_STATUS_AUTOSENSE_VALID) {
 			/* autosense data available */
 			dev_warn(&device->device,
 				 "stor pkt %p autosense data valid - len %d\n",
@@ -586,7 +898,7 @@ static void storvsc_on_io_completion(struct hv_device *device,
 	stor_pkt->vm_srb.data_transfer_length =
 	vstor_packet->vm_srb.data_transfer_length;
 
-	request->on_io_completion(request);
+	storvsc_command_completion(request);
 
 	if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
 		stor_device->drain_notify)
@@ -597,7 +909,7 @@ static void storvsc_on_io_completion(struct hv_device *device,
 
 static void storvsc_on_receive(struct hv_device *device,
 			struct vstor_packet *vstor_packet,
-			struct hv_storvsc_request *request)
+			struct storvsc_cmd_request *request)
 {
 	struct storvsc_scan_work *work;
 	struct storvsc_device *stor_device;
@@ -631,7 +943,7 @@ static void storvsc_on_channel_callback(void *context)
 	u32 bytes_recvd;
 	u64 request_id;
 	unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
-	struct hv_storvsc_request *request;
+	struct storvsc_cmd_request *request;
 	int ret;
 
 
@@ -645,7 +957,7 @@ static void storvsc_on_channel_callback(void *context)
 					&bytes_recvd, &request_id);
 		if (ret == 0 && bytes_recvd > 0) {
 
-			request = (struct hv_storvsc_request *)
+			request = (struct storvsc_cmd_request *)
 				(unsigned long)request_id;
 
 			if ((request == &stor_device->init_request) ||
@@ -674,7 +986,6 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
 
 	memset(&props, 0, sizeof(struct vmstorage_channel_properties));
 
-	/* Open the channel */
 	ret = vmbus_open(device->channel,
 			 ring_size,
 			 ring_size,
@@ -728,7 +1039,7 @@ static int storvsc_dev_remove(struct hv_device *device)
 }
 
 static int storvsc_do_io(struct hv_device *device,
-			struct hv_storvsc_request *request)
+			struct storvsc_cmd_request *request)
 {
 	struct storvsc_device *stor_device;
 	struct vstor_packet *vstor_packet;
@@ -749,7 +1060,7 @@ static int storvsc_do_io(struct hv_device *device,
 	vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
 
 
-	vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;
+	vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;
 
 
 	vstor_packet->vm_srb.data_transfer_length =
@@ -779,18 +1090,6 @@ static int storvsc_do_io(struct hv_device *device,
 	return ret;
 }
 
-static void storvsc_get_ide_info(struct hv_device *dev, int *target, int *path)
-{
-	*target =
-		dev->dev_instance.b[5] << 8 | dev->dev_instance.b[4];
-
-	*path =
-		dev->dev_instance.b[3] << 24 |
-		dev->dev_instance.b[2] << 16 |
-		dev->dev_instance.b[1] << 8 | dev->dev_instance.b[0];
-}
-
-
 static int storvsc_device_alloc(struct scsi_device *sdevice)
 {
 	struct stor_mem_pools *memp;
@@ -849,245 +1148,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
849 return 0; 1148 return 0;
850} 1149}
851 1150
852static void destroy_bounce_buffer(struct scatterlist *sgl,
853 unsigned int sg_count)
854{
855 int i;
856 struct page *page_buf;
857
858 for (i = 0; i < sg_count; i++) {
859 page_buf = sg_page((&sgl[i]));
860 if (page_buf != NULL)
861 __free_page(page_buf);
862 }
863
864 kfree(sgl);
865}
866
867static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
868{
869 int i;
870
871 /* No need to check */
872 if (sg_count < 2)
873 return -1;
874
875 /* We have at least 2 sg entries */
876 for (i = 0; i < sg_count; i++) {
877 if (i == 0) {
878 /* make sure 1st one does not have hole */
879 if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
880 return i;
881 } else if (i == sg_count - 1) {
882 /* make sure last one does not have hole */
883 if (sgl[i].offset != 0)
884 return i;
885 } else {
886 /* make sure no hole in the middle */
887 if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
888 return i;
889 }
890 }
891 return -1;
892}
893
894static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
895 unsigned int sg_count,
896 unsigned int len,
897 int write)
898{
899 int i;
900 int num_pages;
901 struct scatterlist *bounce_sgl;
902 struct page *page_buf;
903 unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
904
905 num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
906
907 bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
908 if (!bounce_sgl)
909 return NULL;
910
911 for (i = 0; i < num_pages; i++) {
912 page_buf = alloc_page(GFP_ATOMIC);
913 if (!page_buf)
914 goto cleanup;
915 sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
916 }
917
918 return bounce_sgl;
919
920cleanup:
921 destroy_bounce_buffer(bounce_sgl, num_pages);
922 return NULL;
923}
924
925
926/* Assume the original sgl has enough room */
927static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
928 struct scatterlist *bounce_sgl,
929 unsigned int orig_sgl_count,
930 unsigned int bounce_sgl_count)
931{
932 int i;
933 int j = 0;
934 unsigned long src, dest;
935 unsigned int srclen, destlen, copylen;
936 unsigned int total_copied = 0;
937 unsigned long bounce_addr = 0;
938 unsigned long dest_addr = 0;
939 unsigned long flags;
940
941 local_irq_save(flags);
942
943 for (i = 0; i < orig_sgl_count; i++) {
944 dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
945 KM_IRQ0) + orig_sgl[i].offset;
946 dest = dest_addr;
947 destlen = orig_sgl[i].length;
948
949 if (bounce_addr == 0)
950 bounce_addr =
951 (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
952 KM_IRQ0);
953
954 while (destlen) {
955 src = bounce_addr + bounce_sgl[j].offset;
956 srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
957
958 copylen = min(srclen, destlen);
959 memcpy((void *)dest, (void *)src, copylen);
960
961 total_copied += copylen;
962 bounce_sgl[j].offset += copylen;
963 destlen -= copylen;
964 dest += copylen;
965
966 if (bounce_sgl[j].offset == bounce_sgl[j].length) {
967 /* full */
968 kunmap_atomic((void *)bounce_addr, KM_IRQ0);
969 j++;
970
971 /*
972 * It is possible that the number of elements
973 * in the bounce buffer may not be equal to
974 * the number of elements in the original
975 * scatter list. Handle this correctly.
976 */
977
978 if (j == bounce_sgl_count) {
979 /*
980 * We are done; cleanup and return.
981 */
982 kunmap_atomic((void *)(dest_addr -
983 orig_sgl[i].offset),
984 KM_IRQ0);
985 local_irq_restore(flags);
986 return total_copied;
987 }
988
989 /* if we need to use another bounce buffer */
990 if (destlen || i != orig_sgl_count - 1)
991 bounce_addr =
992 (unsigned long)kmap_atomic(
993 sg_page((&bounce_sgl[j])), KM_IRQ0);
994 } else if (destlen == 0 && i == orig_sgl_count - 1) {
995 /* unmap the last bounce that is < PAGE_SIZE */
996 kunmap_atomic((void *)bounce_addr, KM_IRQ0);
997 }
998 }
999
1000 kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
1001 KM_IRQ0);
1002 }
1003
1004 local_irq_restore(flags);
1005
1006 return total_copied;
1007}
1008
1009
1010/* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */
1011static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
1012 struct scatterlist *bounce_sgl,
1013 unsigned int orig_sgl_count)
1014{
1015 int i;
1016 int j = 0;
1017 unsigned long src, dest;
1018 unsigned int srclen, destlen, copylen;
1019 unsigned int total_copied = 0;
1020 unsigned long bounce_addr = 0;
1021 unsigned long src_addr = 0;
1022 unsigned long flags;
1023
1024 local_irq_save(flags);
1025
1026 for (i = 0; i < orig_sgl_count; i++) {
1027 src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
1028 KM_IRQ0) + orig_sgl[i].offset;
1029 src = src_addr;
1030 srclen = orig_sgl[i].length;
1031
1032 if (bounce_addr == 0)
1033 bounce_addr =
1034 (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
1035 KM_IRQ0);
1036
1037 while (srclen) {
1038 /* assume bounce offset always == 0 */
1039 dest = bounce_addr + bounce_sgl[j].length;
1040 destlen = PAGE_SIZE - bounce_sgl[j].length;
1041
1042 copylen = min(srclen, destlen);
1043 memcpy((void *)dest, (void *)src, copylen);
1044
1045 total_copied += copylen;
1046 bounce_sgl[j].length += copylen;
1047 srclen -= copylen;
1048 src += copylen;
1049
1050 if (bounce_sgl[j].length == PAGE_SIZE) {
1051 /* full..move to next entry */
1052 kunmap_atomic((void *)bounce_addr, KM_IRQ0);
1053 j++;
1054
1055 /* if we need to use another bounce buffer */
1056 if (srclen || i != orig_sgl_count - 1)
1057 bounce_addr =
1058 (unsigned long)kmap_atomic(
1059 sg_page((&bounce_sgl[j])), KM_IRQ0);
1060
1061 } else if (srclen == 0 && i == orig_sgl_count - 1) {
1062 /* unmap the last bounce that is < PAGE_SIZE */
1063 kunmap_atomic((void *)bounce_addr, KM_IRQ0);
1064 }
1065 }
1066
1067 kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
1068 }
1069
1070 local_irq_restore(flags);
1071
1072 return total_copied;
1073}
1074
1075
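Editor's note: the three bounce-buffer helpers above work as a unit. create_bounce_buffer() allocates one fresh page per PAGE_SIZE chunk of the transfer, copy_to_bounce_buffer() packs a write's scatterlist into those pages by growing each bounce entry's length, and copy_from_bounce_buffer() unpacks a completed read back into the original scatterlist by consuming each bounce entry's offset. A minimal sketch of the write-side round trip follows; the helper names, WRITE_TYPE, and the num_pages formula come from the code above, while bounce_round_trip() itself is purely illustrative and not part of this patch.

/* Hypothetical illustration only -- not part of the patch. */
static int bounce_round_trip(struct scatterlist *sgl, unsigned int sg_count,
			     unsigned int len)
{
	unsigned int num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
	struct scatterlist *bounce;
	unsigned int copied;

	/* One page per PAGE_SIZE chunk; entry lengths start at 0 for writes. */
	bounce = create_bounce_buffer(sgl, sg_count, len, WRITE_TYPE);
	if (!bounce)
		return -ENOMEM;

	/* Pack the caller's (possibly misaligned) pages into bounce pages. */
	copied = copy_to_bounce_buffer(sgl, bounce, sg_count);

	/*
	 * ...the bounce pages would now be handed to the host; for a read,
	 * the inverse step is copy_from_bounce_buffer() on completion.
	 */
	destroy_bounce_buffer(bounce, num_pages);
	return (copied == len) ? 0 : -EIO;
}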
1076static int storvsc_remove(struct hv_device *dev)
1077{
1078 struct storvsc_device *stor_device = hv_get_drvdata(dev);
1079 struct Scsi_Host *host = stor_device->host;
1080
1081 scsi_remove_host(host);
1082
1083 scsi_host_put(host);
1084
1085 storvsc_dev_remove(dev);
1086
1087 return 0;
1088}
1089
1090
 static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
 			sector_t capacity, int *info)
 {
@@ -1111,10 +1171,13 @@ static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
 	return 0;
 }
 
-static int storvsc_host_reset(struct hv_device *device)
+static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
 {
+	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
+	struct hv_device *device = host_dev->dev;
+
 	struct storvsc_device *stor_device;
-	struct hv_storvsc_request *request;
+	struct storvsc_cmd_request *request;
 	struct vstor_packet *vstor_packet;
 	int ret, t;
 
@@ -1153,105 +1216,16 @@ static int storvsc_host_reset(struct hv_device *device)
 	return SUCCESS;
 }
 
-
-/*
- * storvsc_host_reset_handler - Reset the scsi HBA
- */
-static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
-{
-	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
-	struct hv_device *dev = host_dev->dev;
-
-	return storvsc_host_reset(dev);
-}
-
-
-/*
- * storvsc_command_completion - Command completion processing
- */
-static void storvsc_command_completion(struct hv_storvsc_request *request)
-{
-	struct storvsc_cmd_request *cmd_request =
-		(struct storvsc_cmd_request *)request->context;
-	struct scsi_cmnd *scmnd = cmd_request->cmd;
-	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
-	void (*scsi_done_fn)(struct scsi_cmnd *);
-	struct scsi_sense_hdr sense_hdr;
-	struct vmscsi_request *vm_srb;
-	struct storvsc_scan_work *wrk;
-	struct stor_mem_pools *memp = scmnd->device->hostdata;
-
-	vm_srb = &request->vstor_packet.vm_srb;
-	if (cmd_request->bounce_sgl_count) {
-		if (vm_srb->data_in == READ_TYPE)
-			copy_from_bounce_buffer(scsi_sglist(scmnd),
-					cmd_request->bounce_sgl,
-					scsi_sg_count(scmnd),
-					cmd_request->bounce_sgl_count);
-		destroy_bounce_buffer(cmd_request->bounce_sgl,
-					cmd_request->bounce_sgl_count);
-	}
-
-	/*
-	 * If there is an error; offline the device since all
-	 * error recovery strategies would have already been
-	 * deployed on the host side.
-	 */
-	if (vm_srb->srb_status == 0x4)
-		scmnd->result = DID_TARGET_FAILURE << 16;
-	else
-		scmnd->result = vm_srb->scsi_status;
-
-	/*
-	 * If the LUN is invalid; remove the device.
-	 */
-	if (vm_srb->srb_status == 0x20) {
-		struct storvsc_device *stor_dev;
-		struct hv_device *dev = host_dev->dev;
-		struct Scsi_Host *host;
-
-		stor_dev = get_in_stor_device(dev);
-		host = stor_dev->host;
-
-		wrk = kmalloc(sizeof(struct storvsc_scan_work),
-				GFP_ATOMIC);
-		if (!wrk) {
-			scmnd->result = DID_TARGET_FAILURE << 16;
-		} else {
-			wrk->host = host;
-			wrk->lun = vm_srb->lun;
-			INIT_WORK(&wrk->work, storvsc_remove_lun);
-			schedule_work(&wrk->work);
-		}
-	}
-
-	if (scmnd->result) {
-		if (scsi_normalize_sense(scmnd->sense_buffer,
-				SCSI_SENSE_BUFFERSIZE, &sense_hdr))
-			scsi_print_sense_hdr("storvsc", &sense_hdr);
-	}
-
-	scsi_set_resid(scmnd,
-		request->data_buffer.len -
-		vm_srb->data_transfer_length);
-
-	scsi_done_fn = scmnd->scsi_done;
-
-	scmnd->host_scribble = NULL;
-	scmnd->scsi_done = NULL;
-
-	scsi_done_fn(scmnd);
-
-	mempool_free(cmd_request, memp->request_mempool);
-}
-
-static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
+static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
 {
 	bool allowed = true;
 	u8 scsi_op = scmnd->cmnd[0];
 
 	switch (scsi_op) {
-	/* smartd sends this command, which will offline the device */
+	/*
+	 * smartd sends this command and the host does not handle
+	 * this. So, don't send it.
	 */
 	case SET_WINDOW:
 		scmnd->result = ILLEGAL_REQUEST << 16;
 		allowed = false;
@@ -1262,15 +1236,11 @@ static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
 	return allowed;
 }
 
-/*
- * storvsc_queuecommand - Initiate command processing
- */
 static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 {
 	int ret;
 	struct hv_host_device *host_dev = shost_priv(host);
 	struct hv_device *dev = host_dev->dev;
-	struct hv_storvsc_request *request;
 	struct storvsc_cmd_request *cmd_request;
 	unsigned int request_size = 0;
 	int i;
@@ -1279,38 +1249,31 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	struct vmscsi_request *vm_srb;
 	struct stor_mem_pools *memp = scmnd->device->hostdata;
 
-	if (storvsc_check_scsi_cmd(scmnd) == false) {
+	if (!storvsc_scsi_cmd_ok(scmnd)) {
 		scmnd->scsi_done(scmnd);
 		return 0;
 	}
 
-	/* If retrying, no need to prep the cmd */
-	if (scmnd->host_scribble) {
-
-		cmd_request =
-			(struct storvsc_cmd_request *)scmnd->host_scribble;
-
-		goto retry_request;
-	}
-
 	request_size = sizeof(struct storvsc_cmd_request);
 
 	cmd_request = mempool_alloc(memp->request_mempool,
 				       GFP_ATOMIC);
+
+	/*
+	 * We might be invoked in an interrupt context; hence
+	 * mempool_alloc() can fail.
+	 */
 	if (!cmd_request)
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 
 	memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));
 
 	/* Setup the cmd request */
-	cmd_request->bounce_sgl_count = 0;
-	cmd_request->bounce_sgl = NULL;
 	cmd_request->cmd = scmnd;
 
 	scmnd->host_scribble = (unsigned char *)cmd_request;
 
-	request = &cmd_request->request;
-	vm_srb = &request->vstor_packet.vm_srb;
+	vm_srb = &cmd_request->vstor_packet.vm_srb;
 
 
 	/* Build the SRB */
@@ -1326,8 +1289,6 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 		break;
 	}
 
-	request->on_io_completion = storvsc_command_completion;
-	request->context = cmd_request;/* scmnd; */
 
 	vm_srb->port_number = host_dev->port;
 	vm_srb->path_id = scmnd->device->channel;
@@ -1338,10 +1299,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 
 	memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
 
-	request->sense_buffer = scmnd->sense_buffer;
+	cmd_request->sense_buffer = scmnd->sense_buffer;
 
 
-	request->data_buffer.len = scsi_bufflen(scmnd);
+	cmd_request->data_buffer.len = scsi_bufflen(scmnd);
 	if (scsi_sg_count(scmnd)) {
 		sgl = (struct scatterlist *)scsi_sglist(scmnd);
 		sg_count = scsi_sg_count(scmnd);
@@ -1353,11 +1314,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 					scsi_bufflen(scmnd),
 					vm_srb->data_in);
 			if (!cmd_request->bounce_sgl) {
-				scmnd->host_scribble = NULL;
-				mempool_free(cmd_request,
-						memp->request_mempool);
-
-				return SCSI_MLQUEUE_HOST_BUSY;
+				ret = SCSI_MLQUEUE_HOST_BUSY;
+				goto queue_error;
 			}
 
 			cmd_request->bounce_sgl_count =
@@ -1373,41 +1331,42 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 			sg_count = cmd_request->bounce_sgl_count;
 		}
 
-		request->data_buffer.offset = sgl[0].offset;
+		cmd_request->data_buffer.offset = sgl[0].offset;
 
 		for (i = 0; i < sg_count; i++)
-			request->data_buffer.pfn_array[i] =
+			cmd_request->data_buffer.pfn_array[i] =
 				page_to_pfn(sg_page((&sgl[i])));
 
 	} else if (scsi_sglist(scmnd)) {
-		request->data_buffer.offset =
+		cmd_request->data_buffer.offset =
 			virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
-		request->data_buffer.pfn_array[0] =
+		cmd_request->data_buffer.pfn_array[0] =
 			virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
 	}
 
-retry_request:
 	/* Invokes the vsc to start an IO */
-	ret = storvsc_do_io(dev, &cmd_request->request);
+	ret = storvsc_do_io(dev, cmd_request);
 
 	if (ret == -EAGAIN) {
 		/* no more space */
 
-		if (cmd_request->bounce_sgl_count)
+		if (cmd_request->bounce_sgl_count) {
 			destroy_bounce_buffer(cmd_request->bounce_sgl,
 					cmd_request->bounce_sgl_count);
 
-		mempool_free(cmd_request, memp->request_mempool);
-
-		scmnd->host_scribble = NULL;
-
-		ret = SCSI_MLQUEUE_DEVICE_BUSY;
+			ret = SCSI_MLQUEUE_DEVICE_BUSY;
+			goto queue_error;
+		}
 	}
 
+	return 0;
+
+queue_error:
+	mempool_free(cmd_request, memp->request_mempool);
+	scmnd->host_scribble = NULL;
 	return ret;
 }
 
-/* Scsi driver */
 static struct scsi_host_template scsi_driver = {
 	.module = THIS_MODULE,
 	.name = "storvsc_host_t",
@@ -1448,11 +1407,6 @@ static const struct hv_vmbus_device_id id_table[] = {
 
 MODULE_DEVICE_TABLE(vmbus, id_table);
 
-
-/*
- * storvsc_probe - Add a new device for this driver
- */
-
 static int storvsc_probe(struct hv_device *device,
 			const struct hv_vmbus_device_id *dev_id)
 {
@@ -1460,7 +1414,6 @@ static int storvsc_probe(struct hv_device *device,
 	struct Scsi_Host *host;
 	struct hv_host_device *host_dev;
 	bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
-	int path = 0;
 	int target = 0;
 	struct storvsc_device *stor_device;
 
@@ -1493,9 +1446,6 @@ static int storvsc_probe(struct hv_device *device,
 	if (ret)
 		goto err_out1;
 
-	if (dev_is_ide)
-		storvsc_get_ide_info(device, &target, &path);
-
 	host_dev->path = stor_device->path_id;
 	host_dev->target = stor_device->target_id;
 
@@ -1515,12 +1465,14 @@ static int storvsc_probe(struct hv_device *device,
 
 	if (!dev_is_ide) {
 		scsi_scan_host(host);
-		return 0;
-	}
-	ret = scsi_add_device(host, 0, target, 0);
-	if (ret) {
-		scsi_remove_host(host);
-		goto err_out2;
+	} else {
+		target = (device->dev_instance.b[5] << 8 |
+			 device->dev_instance.b[4]);
+		ret = scsi_add_device(host, 0, target, 0);
+		if (ret) {
+			scsi_remove_host(host);
+			goto err_out2;
+		}
 	}
 	return 0;
 
@@ -1542,7 +1494,17 @@ err_out0:
 	return ret;
 }
 
-/* The one and only one */
+static int storvsc_remove(struct hv_device *dev)
+{
+	struct storvsc_device *stor_device = hv_get_drvdata(dev);
+	struct Scsi_Host *host = stor_device->host;
+
+	scsi_remove_host(host);
+	storvsc_dev_remove(dev);
+	scsi_host_put(host);
+
+	return 0;
+}
 
 static struct hv_driver storvsc_drv = {
 	.name = KBUILD_MODNAME,
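Editor's note on the storvsc_queuecommand() rework above: the two formerly duplicated failure paths (bounce-buffer allocation failure and the -EAGAIN case) now funnel into a single queue_error label, so mempool_free() and the host_scribble reset happen at exactly one cleanup site. A hypothetical sketch of that single-exit pattern; my_request and submit_request() are stand-ins, not names from the patch.

/* Hypothetical sketch of the single-exit unwind pattern. */
static int queue_one_request(mempool_t *pool)
{
	struct my_request *req;		/* hypothetical request type */
	int ret;

	req = mempool_alloc(pool, GFP_ATOMIC);
	if (!req)
		return SCSI_MLQUEUE_DEVICE_BUSY;	/* nothing to unwind yet */

	ret = submit_request(req);	/* hypothetical submission path */
	if (ret) {
		ret = SCSI_MLQUEUE_DEVICE_BUSY;
		goto queue_error;
	}
	return 0;

queue_error:
	mempool_free(req, pool);	/* one cleanup site for every failure */
	return ret;
}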
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 9e6347249783..f1abfb179b47 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -76,8 +76,6 @@ source "drivers/staging/vt6655/Kconfig"
 
 source "drivers/staging/vt6656/Kconfig"
 
-source "drivers/staging/hv/Kconfig"
-
 source "drivers/staging/vme/Kconfig"
 
 source "drivers/staging/sep/Kconfig"
@@ -88,6 +86,8 @@ source "drivers/staging/zram/Kconfig"
 
 source "drivers/staging/zcache/Kconfig"
 
+source "drivers/staging/zsmalloc/Kconfig"
+
 source "drivers/staging/wlags49_h2/Kconfig"
 
 source "drivers/staging/wlags49_h25/Kconfig"
@@ -128,4 +128,10 @@ source "drivers/staging/omapdrm/Kconfig"
 
 source "drivers/staging/android/Kconfig"
 
+source "drivers/staging/telephony/Kconfig"
+
+source "drivers/staging/ramster/Kconfig"
+
+source "drivers/staging/ozwpan/Kconfig"
+
 endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 943e14830753..ffe7d44374e6 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -29,13 +29,12 @@ obj-$(CONFIG_USB_SERIAL_QUATECH_USB2) += quatech_usb2/
 obj-$(CONFIG_OCTEON_ETHERNET)	+= octeon/
 obj-$(CONFIG_VT6655)		+= vt6655/
 obj-$(CONFIG_VT6656)		+= vt6656/
-obj-$(CONFIG_HYPERV)		+= hv/
 obj-$(CONFIG_VME_BUS)		+= vme/
 obj-$(CONFIG_DX_SEP)		+= sep/
 obj-$(CONFIG_IIO)		+= iio/
 obj-$(CONFIG_ZRAM)		+= zram/
-obj-$(CONFIG_XVMALLOC)		+= zram/
 obj-$(CONFIG_ZCACHE)		+= zcache/
+obj-$(CONFIG_ZSMALLOC)		+= zsmalloc/
 obj-$(CONFIG_WLAGS49_H2)	+= wlags49_h2/
 obj-$(CONFIG_WLAGS49_H25)	+= wlags49_h25/
 obj-$(CONFIG_FB_SM7XX)		+= sm7xx/
@@ -55,3 +54,6 @@ obj-$(CONFIG_INTEL_MEI)	+= mei/
 obj-$(CONFIG_MFD_NVEC)		+= nvec/
 obj-$(CONFIG_DRM_OMAP)		+= omapdrm/
 obj-$(CONFIG_ANDROID)		+= android/
+obj-$(CONFIG_PHONE)		+= telephony/
+obj-$(CONFIG_RAMSTER)		+= ramster/
+obj-$(CONFIG_USB_WPAN_HCD)	+= ozwpan/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index fef3580ce8de..08a3b1133d29 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -25,65 +25,17 @@ config ANDROID_LOGGER
25 tristate "Android log driver" 25 tristate "Android log driver"
26 default n 26 default n
27 27
28config ANDROID_RAM_CONSOLE 28config ANDROID_PERSISTENT_RAM
29 bool "Android RAM buffer console" 29 bool
30 depends on !S390 && !UML
31 default n
32
33config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
34 bool "Enable verbose console messages on Android RAM console"
35 default y
36 depends on ANDROID_RAM_CONSOLE
37
38menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
39 bool "Android RAM Console Enable error correction"
40 default n
41 depends on ANDROID_RAM_CONSOLE
42 depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
43 select REED_SOLOMON 30 select REED_SOLOMON
44 select REED_SOLOMON_ENC8 31 select REED_SOLOMON_ENC8
45 select REED_SOLOMON_DEC8 32 select REED_SOLOMON_DEC8
46 33
47if ANDROID_RAM_CONSOLE_ERROR_CORRECTION 34config ANDROID_RAM_CONSOLE
48 35 bool "Android RAM buffer console"
49config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE 36 depends on !S390 && !UML
50 int "Android RAM Console Data data size" 37 select ANDROID_PERSISTENT_RAM
51 default 128
52 help
53 Must be a power of 2.
54
55config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
56 int "Android RAM Console ECC size"
57 default 16
58
59config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
60 int "Android RAM Console Symbol size"
61 default 8
62
63config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
64 hex "Android RAM Console Polynomial"
65 default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
66 default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
67 default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
68 default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
69 default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)
70
71endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION
72
73config ANDROID_RAM_CONSOLE_EARLY_INIT
74 bool "Start Android RAM console early"
75 default n 38 default n
76 depends on ANDROID_RAM_CONSOLE
77
78config ANDROID_RAM_CONSOLE_EARLY_ADDR
79 hex "Android RAM console virtual address"
80 default 0
81 depends on ANDROID_RAM_CONSOLE_EARLY_INIT
82
83config ANDROID_RAM_CONSOLE_EARLY_SIZE
84 hex "Android RAM console buffer size"
85 default 0
86 depends on ANDROID_RAM_CONSOLE_EARLY_INIT
87 39
88config ANDROID_TIMED_OUTPUT 40config ANDROID_TIMED_OUTPUT
89 bool "Timed output class driver" 41 bool "Timed output class driver"
@@ -102,6 +54,32 @@ config ANDROID_LOW_MEMORY_KILLER
102 54
103source "drivers/staging/android/switch/Kconfig" 55source "drivers/staging/android/switch/Kconfig"
104 56
57config ANDROID_INTF_ALARM
58 bool "Android alarm driver"
59 depends on RTC_CLASS
60 default n
61 help
62 Provides non-wakeup and rtc backed wakeup alarms based on rtc or
63 elapsed realtime, and a non-wakeup alarm on the monotonic clock.
64 Also provides an interface to set the wall time which must be used
65 for elapsed realtime to work.
66
67config ANDROID_INTF_ALARM_DEV
68 bool "Android alarm device"
69 depends on ANDROID_INTF_ALARM
70 default y
71 help
72 Exports the alarm interface to user-space.
73
74config ANDROID_ALARM_OLDDRV_COMPAT
75 bool "Android Alarm compatability with old drivers"
76 depends on ANDROID_INTF_ALARM
77 default n
78 help
79 Provides preprocessor alias to aid compatability with
80 older out-of-tree drivers that use the Android Alarm
81 in-kernel API. This will be removed eventually.
82
105endif # if ANDROID 83endif # if ANDROID
106 84
107endmenu 85endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 5fcc24ffdd58..9b6c9ed91f69 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,8 +1,11 @@
 obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o
 obj-$(CONFIG_ASHMEM)			+= ashmem.o
 obj-$(CONFIG_ANDROID_LOGGER)		+= logger.o
+obj-$(CONFIG_ANDROID_PERSISTENT_RAM)	+= persistent_ram.o
 obj-$(CONFIG_ANDROID_RAM_CONSOLE)	+= ram_console.o
 obj-$(CONFIG_ANDROID_TIMED_OUTPUT)	+= timed_output.o
 obj-$(CONFIG_ANDROID_TIMED_GPIO)	+= timed_gpio.o
 obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)	+= lowmemorykiller.o
 obj-$(CONFIG_ANDROID_SWITCH)		+= switch/
+obj-$(CONFIG_ANDROID_INTF_ALARM)	+= alarm.o
+obj-$(CONFIG_ANDROID_INTF_ALARM_DEV)	+= alarm-dev.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index e59c5be4be2b..b15fb0d6b152 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -3,7 +3,7 @@ TODO:
 - sparse fixes
 - rename files to be not so "generic"
 - make sure things build as modules properly
-- add proper arch dependancies as needed
+- add proper arch dependencies as needed
 - audit userspace interfaces to make sure they are sane
 
 Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
new file mode 100644
index 000000000000..03efb34cbe2e
--- /dev/null
+++ b/drivers/staging/android/alarm-dev.c
@@ -0,0 +1,297 @@
1/* drivers/rtc/alarm-dev.c
2 *
3 * Copyright (C) 2007-2009 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/time.h>
17#include <linux/module.h>
18#include <linux/device.h>
19#include <linux/miscdevice.h>
20#include <linux/fs.h>
21#include <linux/platform_device.h>
22#include <linux/sched.h>
23#include <linux/spinlock.h>
24#include <linux/uaccess.h>
25#include "android_alarm.h"
26
27/* XXX - Hack out wakelocks, while they are out of tree */
28struct wake_lock {
29 int i;
30};
31#define wake_lock(x)
32#define wake_lock_timeout(x, y)
33#define wake_unlock(x)
34#define WAKE_LOCK_SUSPEND 0
35#define wake_lock_init(x, y, z) ((x)->i = 1)
36#define wake_lock_destroy(x)
37
38#define ANDROID_ALARM_PRINT_INFO (1U << 0)
39#define ANDROID_ALARM_PRINT_IO (1U << 1)
40#define ANDROID_ALARM_PRINT_INT (1U << 2)
41
42
43static int debug_mask = ANDROID_ALARM_PRINT_INFO;
44module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
45
46#define pr_alarm(debug_level_mask, args...) \
47 do { \
48 if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
49 pr_info(args); \
50 } \
51 } while (0)
52
53#define ANDROID_ALARM_WAKEUP_MASK ( \
54 ANDROID_ALARM_RTC_WAKEUP_MASK | \
55 ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
56
57/* support old userspace code */
58#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
59#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
60
61static int alarm_opened;
62static DEFINE_SPINLOCK(alarm_slock);
63static struct wake_lock alarm_wake_lock;
64static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
65static uint32_t alarm_pending;
66static uint32_t alarm_enabled;
67static uint32_t wait_pending;
68
69static struct android_alarm alarms[ANDROID_ALARM_TYPE_COUNT];
70
71static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
72{
73 int rv = 0;
74 unsigned long flags;
75 struct timespec new_alarm_time;
76 struct timespec new_rtc_time;
77 struct timespec tmp_time;
78 enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
79 uint32_t alarm_type_mask = 1U << alarm_type;
80
81 if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
82 return -EINVAL;
83
84 if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
85 if ((file->f_flags & O_ACCMODE) == O_RDONLY)
86 return -EPERM;
87 if (file->private_data == NULL &&
88 cmd != ANDROID_ALARM_SET_RTC) {
89 spin_lock_irqsave(&alarm_slock, flags);
90 if (alarm_opened) {
91 spin_unlock_irqrestore(&alarm_slock, flags);
92 return -EBUSY;
93 }
94 alarm_opened = 1;
95 file->private_data = (void *)1;
96 spin_unlock_irqrestore(&alarm_slock, flags);
97 }
98 }
99
100 switch (ANDROID_ALARM_BASE_CMD(cmd)) {
101 case ANDROID_ALARM_CLEAR(0):
102 spin_lock_irqsave(&alarm_slock, flags);
103 pr_alarm(IO, "alarm %d clear\n", alarm_type);
104 android_alarm_try_to_cancel(&alarms[alarm_type]);
105 if (alarm_pending) {
106 alarm_pending &= ~alarm_type_mask;
107 if (!alarm_pending && !wait_pending)
108 wake_unlock(&alarm_wake_lock);
109 }
110 alarm_enabled &= ~alarm_type_mask;
111 spin_unlock_irqrestore(&alarm_slock, flags);
112 break;
113
114 case ANDROID_ALARM_SET_OLD:
115 case ANDROID_ALARM_SET_AND_WAIT_OLD:
116 if (get_user(new_alarm_time.tv_sec, (int __user *)arg)) {
117 rv = -EFAULT;
118 goto err1;
119 }
120 new_alarm_time.tv_nsec = 0;
121 goto from_old_alarm_set;
122
123 case ANDROID_ALARM_SET_AND_WAIT(0):
124 case ANDROID_ALARM_SET(0):
125 if (copy_from_user(&new_alarm_time, (void __user *)arg,
126 sizeof(new_alarm_time))) {
127 rv = -EFAULT;
128 goto err1;
129 }
130from_old_alarm_set:
131 spin_lock_irqsave(&alarm_slock, flags);
132 pr_alarm(IO, "alarm %d set %ld.%09ld\n", alarm_type,
133 new_alarm_time.tv_sec, new_alarm_time.tv_nsec);
134 alarm_enabled |= alarm_type_mask;
135 android_alarm_start_range(&alarms[alarm_type],
136 timespec_to_ktime(new_alarm_time),
137 timespec_to_ktime(new_alarm_time));
138 spin_unlock_irqrestore(&alarm_slock, flags);
139 if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0)
140 && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD)
141 break;
142 /* fall through */
143 case ANDROID_ALARM_WAIT:
144 spin_lock_irqsave(&alarm_slock, flags);
145 pr_alarm(IO, "alarm wait\n");
146 if (!alarm_pending && wait_pending) {
147 wake_unlock(&alarm_wake_lock);
148 wait_pending = 0;
149 }
150 spin_unlock_irqrestore(&alarm_slock, flags);
151 rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
152 if (rv)
153 goto err1;
154 spin_lock_irqsave(&alarm_slock, flags);
155 rv = alarm_pending;
156 wait_pending = 1;
157 alarm_pending = 0;
158 spin_unlock_irqrestore(&alarm_slock, flags);
159 break;
160 case ANDROID_ALARM_SET_RTC:
161 if (copy_from_user(&new_rtc_time, (void __user *)arg,
162 sizeof(new_rtc_time))) {
163 rv = -EFAULT;
164 goto err1;
165 }
166 rv = android_alarm_set_rtc(new_rtc_time);
167 spin_lock_irqsave(&alarm_slock, flags);
168 alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
169 wake_up(&alarm_wait_queue);
170 spin_unlock_irqrestore(&alarm_slock, flags);
171 if (rv < 0)
172 goto err1;
173 break;
174 case ANDROID_ALARM_GET_TIME(0):
175 switch (alarm_type) {
176 case ANDROID_ALARM_RTC_WAKEUP:
177 case ANDROID_ALARM_RTC:
178 getnstimeofday(&tmp_time);
179 break;
180 case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
181 case ANDROID_ALARM_ELAPSED_REALTIME:
182 tmp_time =
183 ktime_to_timespec(alarm_get_elapsed_realtime());
184 break;
185 case ANDROID_ALARM_TYPE_COUNT:
186 case ANDROID_ALARM_SYSTEMTIME:
187 ktime_get_ts(&tmp_time);
188 break;
189 }
190 if (copy_to_user((void __user *)arg, &tmp_time,
191 sizeof(tmp_time))) {
192 rv = -EFAULT;
193 goto err1;
194 }
195 break;
196
197 default:
198 rv = -EINVAL;
199 goto err1;
200 }
201err1:
202 return rv;
203}
204
205static int alarm_open(struct inode *inode, struct file *file)
206{
207 file->private_data = NULL;
208 return 0;
209}
210
211static int alarm_release(struct inode *inode, struct file *file)
212{
213 int i;
214 unsigned long flags;
215
216 spin_lock_irqsave(&alarm_slock, flags);
217 if (file->private_data != 0) {
218 for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
219 uint32_t alarm_type_mask = 1U << i;
220 if (alarm_enabled & alarm_type_mask) {
221 pr_alarm(INFO, "alarm_release: clear alarm, "
222 "pending %d\n",
223 !!(alarm_pending & alarm_type_mask));
224 alarm_enabled &= ~alarm_type_mask;
225 }
226 spin_unlock_irqrestore(&alarm_slock, flags);
227 android_alarm_cancel(&alarms[i]);
228 spin_lock_irqsave(&alarm_slock, flags);
229 }
230 if (alarm_pending | wait_pending) {
231 if (alarm_pending)
232 pr_alarm(INFO, "alarm_release: clear "
233 "pending alarms %x\n", alarm_pending);
234 wake_unlock(&alarm_wake_lock);
235 wait_pending = 0;
236 alarm_pending = 0;
237 }
238 alarm_opened = 0;
239 }
240 spin_unlock_irqrestore(&alarm_slock, flags);
241 return 0;
242}
243
244static void alarm_triggered(struct android_alarm *alarm)
245{
246 unsigned long flags;
247 uint32_t alarm_type_mask = 1U << alarm->type;
248
249 pr_alarm(INT, "alarm_triggered type %d\n", alarm->type);
250 spin_lock_irqsave(&alarm_slock, flags);
251 if (alarm_enabled & alarm_type_mask) {
252 wake_lock_timeout(&alarm_wake_lock, 5 * HZ);
253 alarm_enabled &= ~alarm_type_mask;
254 alarm_pending |= alarm_type_mask;
255 wake_up(&alarm_wait_queue);
256 }
257 spin_unlock_irqrestore(&alarm_slock, flags);
258}
259
260static const struct file_operations alarm_fops = {
261 .owner = THIS_MODULE,
262 .unlocked_ioctl = alarm_ioctl,
263 .open = alarm_open,
264 .release = alarm_release,
265};
266
267static struct miscdevice alarm_device = {
268 .minor = MISC_DYNAMIC_MINOR,
269 .name = "alarm",
270 .fops = &alarm_fops,
271};
272
273static int __init alarm_dev_init(void)
274{
275 int err;
276 int i;
277
278 err = misc_register(&alarm_device);
279 if (err)
280 return err;
281
282 for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++)
283 android_alarm_init(&alarms[i], i, alarm_triggered);
284 wake_lock_init(&alarm_wake_lock, WAKE_LOCK_SUSPEND, "alarm");
285
286 return 0;
287}
288
289static void __exit alarm_dev_exit(void)
290{
291 misc_deregister(&alarm_device);
292 wake_lock_destroy(&alarm_wake_lock);
293}
294
295module_init(alarm_dev_init);
296module_exit(alarm_dev_exit);
297
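Editor's note: the miscdevice above registers under the name "alarm", so user space reaches this driver through /dev/alarm with the ANDROID_ALARM_* ioctls defined in android_alarm.h (added later in this diff). A hypothetical user-space sketch, not part of the patch, arming an absolute RTC wakeup alarm one minute out:

/* Hypothetical user-space sketch; error handling trimmed for brevity. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <time.h>
#include "android_alarm.h"

int arm_wakeup_alarm(void)
{
	struct timespec ts;
	int fd = open("/dev/alarm", O_RDWR);

	if (fd < 0)
		return -1;
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 60;	/* ANDROID_ALARM_SET takes an absolute expiry */
	if (ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_RTC_WAKEUP), &ts) < 0)
		return -1;
	/*
	 * ioctl(fd, ANDROID_ALARM_WAIT) would then block until a set alarm
	 * fires and return the mask of pending alarm types.
	 */
	return fd;
}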
diff --git a/drivers/staging/android/alarm.c b/drivers/staging/android/alarm.c
new file mode 100644
index 000000000000..c68950b9e08f
--- /dev/null
+++ b/drivers/staging/android/alarm.c
@@ -0,0 +1,601 @@
1/* drivers/rtc/alarm.c
2 *
3 * Copyright (C) 2007-2009 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/time.h>
17#include <linux/module.h>
18#include <linux/device.h>
19#include <linux/miscdevice.h>
20#include <linux/platform_device.h>
21#include <linux/rtc.h>
22#include <linux/sched.h>
23#include <linux/spinlock.h>
24#include "android_alarm.h"
25
26/* XXX - Hack out wakelocks, while they are out of tree */
27struct wake_lock {
28 int i;
29};
30#define wake_lock(x)
31#define wake_lock_timeout(x, y)
32#define wake_unlock(x)
33#define WAKE_LOCK_SUSPEND 0
34#define wake_lock_init(x, y, z) ((x)->i = 1)
35#define wake_lock_destroy(x)
36
37#define ANDROID_ALARM_PRINT_ERROR (1U << 0)
38#define ANDROID_ALARM_PRINT_INIT_STATUS (1U << 1)
39#define ANDROID_ALARM_PRINT_TSET (1U << 2)
40#define ANDROID_ALARM_PRINT_CALL (1U << 3)
41#define ANDROID_ALARM_PRINT_SUSPEND (1U << 4)
42#define ANDROID_ALARM_PRINT_INT (1U << 5)
43#define ANDROID_ALARM_PRINT_FLOW (1U << 6)
44
45static int debug_mask = ANDROID_ALARM_PRINT_ERROR | \
46 ANDROID_ALARM_PRINT_INIT_STATUS;
47module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
48
49#define pr_alarm(debug_level_mask, args...) \
50 do { \
51 if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
52 pr_info(args); \
53 } \
54 } while (0)
55
56#define ANDROID_ALARM_WAKEUP_MASK ( \
57 ANDROID_ALARM_RTC_WAKEUP_MASK | \
58 ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
59
60/* support old userspace code */
61#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
62#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
63
64struct alarm_queue {
65 struct rb_root alarms;
66 struct rb_node *first;
67 struct hrtimer timer;
68 ktime_t delta;
69 bool stopped;
70 ktime_t stopped_time;
71};
72
73static struct rtc_device *alarm_rtc_dev;
74static DEFINE_SPINLOCK(alarm_slock);
75static DEFINE_MUTEX(alarm_setrtc_mutex);
76static struct wake_lock alarm_rtc_wake_lock;
77static struct platform_device *alarm_platform_dev;
78struct alarm_queue alarms[ANDROID_ALARM_TYPE_COUNT];
79static bool suspended;
80
81static void update_timer_locked(struct alarm_queue *base, bool head_removed)
82{
83 struct android_alarm *alarm;
84 bool is_wakeup = base == &alarms[ANDROID_ALARM_RTC_WAKEUP] ||
85 base == &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
86
87 if (base->stopped) {
88 pr_alarm(FLOW, "changed alarm while setting the wall time\n");
89 return;
90 }
91
92 if (is_wakeup && !suspended && head_removed)
93 wake_unlock(&alarm_rtc_wake_lock);
94
95 if (!base->first)
96 return;
97
98 alarm = container_of(base->first, struct android_alarm, node);
99
100 pr_alarm(FLOW, "selected alarm, type %d, func %pF at %lld\n",
101 alarm->type, alarm->function, ktime_to_ns(alarm->expires));
102
103 if (is_wakeup && suspended) {
104 pr_alarm(FLOW, "changed alarm while suspended\n");
105 wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
106 return;
107 }
108
109 hrtimer_try_to_cancel(&base->timer);
110 base->timer.node.expires = ktime_add(base->delta, alarm->expires);
111 base->timer._softexpires = ktime_add(base->delta, alarm->softexpires);
112 hrtimer_start_expires(&base->timer, HRTIMER_MODE_ABS);
113}
114
115static void alarm_enqueue_locked(struct android_alarm *alarm)
116{
117 struct alarm_queue *base = &alarms[alarm->type];
118 struct rb_node **link = &base->alarms.rb_node;
119 struct rb_node *parent = NULL;
120 struct android_alarm *entry;
121 int leftmost = 1;
122 bool was_first = false;
123
124 pr_alarm(FLOW, "added alarm, type %d, func %pF at %lld\n",
125 alarm->type, alarm->function, ktime_to_ns(alarm->expires));
126
127 if (base->first == &alarm->node) {
128 base->first = rb_next(&alarm->node);
129 was_first = true;
130 }
131 if (!RB_EMPTY_NODE(&alarm->node)) {
132 rb_erase(&alarm->node, &base->alarms);
133 RB_CLEAR_NODE(&alarm->node);
134 }
135
136 while (*link) {
137 parent = *link;
138 entry = rb_entry(parent, struct android_alarm, node);
139 /*
140 * We don't care about collisions. Nodes with
141 * the same expiry time stay together.
142 */
143 if (alarm->expires.tv64 < entry->expires.tv64) {
144 link = &(*link)->rb_left;
145 } else {
146 link = &(*link)->rb_right;
147 leftmost = 0;
148 }
149 }
150 if (leftmost)
151 base->first = &alarm->node;
152 if (leftmost || was_first)
153 update_timer_locked(base, was_first);
154
155 rb_link_node(&alarm->node, parent, link);
156 rb_insert_color(&alarm->node, &base->alarms);
157}
158
159/**
160 * android_alarm_init - initialize an alarm
161 * @alarm: the alarm to be initialized
162 * @type: the alarm type to be used
163 * @function: alarm callback function
164 */
165void android_alarm_init(struct android_alarm *alarm,
166 enum android_alarm_type type, void (*function)(struct android_alarm *))
167{
168 RB_CLEAR_NODE(&alarm->node);
169 alarm->type = type;
170 alarm->function = function;
171
172 pr_alarm(FLOW, "created alarm, type %d, func %pF\n", type, function);
173}
174
175
176/**
177 * android_alarm_start_range - (re)start an alarm
178 * @alarm: the alarm to be added
179 * @start: earliest expiry time
180 * @end: expiry time
181 */
182void android_alarm_start_range(struct android_alarm *alarm, ktime_t start,
183 ktime_t end)
184{
185 unsigned long flags;
186
187 spin_lock_irqsave(&alarm_slock, flags);
188 alarm->softexpires = start;
189 alarm->expires = end;
190 alarm_enqueue_locked(alarm);
191 spin_unlock_irqrestore(&alarm_slock, flags);
192}
193
194/**
195 * android_alarm_try_to_cancel - try to deactivate an alarm
196 * @alarm: alarm to stop
197 *
198 * Returns:
199 * 0 when the alarm was not active
200 * 1 when the alarm was active
201 * -1 when the alarm may currently be executing the callback function and
202 * cannot be stopped (it may also be inactive)
203 */
204int android_alarm_try_to_cancel(struct android_alarm *alarm)
205{
206 struct alarm_queue *base = &alarms[alarm->type];
207 unsigned long flags;
208 bool first = false;
209 int ret = 0;
210
211 spin_lock_irqsave(&alarm_slock, flags);
212 if (!RB_EMPTY_NODE(&alarm->node)) {
213 pr_alarm(FLOW, "canceled alarm, type %d, func %pF at %lld\n",
214 alarm->type, alarm->function,
215 ktime_to_ns(alarm->expires));
216 ret = 1;
217 if (base->first == &alarm->node) {
218 base->first = rb_next(&alarm->node);
219 first = true;
220 }
221 rb_erase(&alarm->node, &base->alarms);
222 RB_CLEAR_NODE(&alarm->node);
223 if (first)
224 update_timer_locked(base, true);
225 } else
226 pr_alarm(FLOW, "tried to cancel alarm, type %d, func %pF\n",
227 alarm->type, alarm->function);
228 spin_unlock_irqrestore(&alarm_slock, flags);
229 if (!ret && hrtimer_callback_running(&base->timer))
230 ret = -1;
231 return ret;
232}
233
234/**
235 * android_alarm_cancel - cancel an alarm and wait for the handler to finish.
236 * @alarm: the alarm to be cancelled
237 *
238 * Returns:
239 * 0 when the alarm was not active
240 * 1 when the alarm was active
241 */
242int android_alarm_cancel(struct android_alarm *alarm)
243{
244 for (;;) {
245 int ret = android_alarm_try_to_cancel(alarm);
246 if (ret >= 0)
247 return ret;
248 cpu_relax();
249 }
250}
251
252/**
253 * alarm_set_rtc - set the kernel and rtc walltime
254 * @new_time: timespec value containing the new time
255 */
256int android_alarm_set_rtc(struct timespec new_time)
257{
258 int i;
259 int ret;
260 unsigned long flags;
261 struct rtc_time rtc_new_rtc_time;
262 struct timespec tmp_time;
263
264 rtc_time_to_tm(new_time.tv_sec, &rtc_new_rtc_time);
265
266 pr_alarm(TSET, "set rtc %ld %ld - rtc %02d:%02d:%02d %02d/%02d/%04d\n",
267 new_time.tv_sec, new_time.tv_nsec,
268 rtc_new_rtc_time.tm_hour, rtc_new_rtc_time.tm_min,
269 rtc_new_rtc_time.tm_sec, rtc_new_rtc_time.tm_mon + 1,
270 rtc_new_rtc_time.tm_mday,
271 rtc_new_rtc_time.tm_year + 1900);
272
273 mutex_lock(&alarm_setrtc_mutex);
274 spin_lock_irqsave(&alarm_slock, flags);
275 wake_lock(&alarm_rtc_wake_lock);
276 getnstimeofday(&tmp_time);
277 for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
278 hrtimer_try_to_cancel(&alarms[i].timer);
279 alarms[i].stopped = true;
280 alarms[i].stopped_time = timespec_to_ktime(tmp_time);
281 }
282 alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
283 alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
284 ktime_sub(alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta,
285 timespec_to_ktime(timespec_sub(tmp_time, new_time)));
286 spin_unlock_irqrestore(&alarm_slock, flags);
287 ret = do_settimeofday(&new_time);
288 spin_lock_irqsave(&alarm_slock, flags);
289 for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
290 alarms[i].stopped = false;
291 update_timer_locked(&alarms[i], false);
292 }
293 spin_unlock_irqrestore(&alarm_slock, flags);
294 if (ret < 0) {
295 pr_alarm(ERROR, "alarm_set_rtc: Failed to set time\n");
296 goto err;
297 }
298 if (!alarm_rtc_dev) {
299 pr_alarm(ERROR,
300 "alarm_set_rtc: no RTC, time will be lost on reboot\n");
301 goto err;
302 }
303 ret = rtc_set_time(alarm_rtc_dev, &rtc_new_rtc_time);
304 if (ret < 0)
305 pr_alarm(ERROR, "alarm_set_rtc: "
306 "Failed to set RTC, time will be lost on reboot\n");
307err:
308 wake_unlock(&alarm_rtc_wake_lock);
309 mutex_unlock(&alarm_setrtc_mutex);
310 return ret;
311}
312
313/**
314 * alarm_get_elapsed_realtime - get the elapsed real time in ktime_t format
315 *
316 * returns the time in ktime_t format
317 */
318ktime_t alarm_get_elapsed_realtime(void)
319{
320 ktime_t now;
321 unsigned long flags;
322 struct alarm_queue *base = &alarms[ANDROID_ALARM_ELAPSED_REALTIME];
323
324 spin_lock_irqsave(&alarm_slock, flags);
325 now = base->stopped ? base->stopped_time : ktime_get_real();
326 now = ktime_sub(now, base->delta);
327 spin_unlock_irqrestore(&alarm_slock, flags);
328 return now;
329}
330
331static enum hrtimer_restart alarm_timer_triggered(struct hrtimer *timer)
332{
333 struct alarm_queue *base;
334 struct android_alarm *alarm;
335 unsigned long flags;
336 ktime_t now;
337
338 spin_lock_irqsave(&alarm_slock, flags);
339
340 base = container_of(timer, struct alarm_queue, timer);
341 now = base->stopped ? base->stopped_time : hrtimer_cb_get_time(timer);
342 now = ktime_sub(now, base->delta);
343
344 pr_alarm(INT, "alarm_timer_triggered type %ld at %lld\n",
345 base - alarms, ktime_to_ns(now));
346
347 while (base->first) {
348 alarm = container_of(base->first, struct android_alarm, node);
349 if (alarm->softexpires.tv64 > now.tv64) {
350 pr_alarm(FLOW, "don't call alarm, %pF, %lld (s %lld)\n",
351 alarm->function, ktime_to_ns(alarm->expires),
352 ktime_to_ns(alarm->softexpires));
353 break;
354 }
355 base->first = rb_next(&alarm->node);
356 rb_erase(&alarm->node, &base->alarms);
357 RB_CLEAR_NODE(&alarm->node);
358 pr_alarm(CALL, "call alarm, type %d, func %pF, %lld (s %lld)\n",
359 alarm->type, alarm->function,
360 ktime_to_ns(alarm->expires),
361 ktime_to_ns(alarm->softexpires));
362 spin_unlock_irqrestore(&alarm_slock, flags);
363 alarm->function(alarm);
364 spin_lock_irqsave(&alarm_slock, flags);
365 }
366 if (!base->first)
367 pr_alarm(FLOW, "no more alarms of type %ld\n", base - alarms);
368 update_timer_locked(base, true);
369 spin_unlock_irqrestore(&alarm_slock, flags);
370 return HRTIMER_NORESTART;
371}
372
373static void alarm_triggered_func(void *p)
374{
375 struct rtc_device *rtc = alarm_rtc_dev;
376 if (!(rtc->irq_data & RTC_AF))
377 return;
378 pr_alarm(INT, "rtc alarm triggered\n");
379 wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
380}
381
382static int alarm_suspend(struct platform_device *pdev, pm_message_t state)
383{
384 int err = 0;
385 unsigned long flags;
386 struct rtc_wkalrm rtc_alarm;
387 struct rtc_time rtc_current_rtc_time;
388 unsigned long rtc_current_time;
389 unsigned long rtc_alarm_time;
390 struct timespec rtc_delta;
391 struct timespec wall_time;
392 struct alarm_queue *wakeup_queue = NULL;
393 struct alarm_queue *tmp_queue = NULL;
394
395 pr_alarm(SUSPEND, "alarm_suspend(%p, %d)\n", pdev, state.event);
396
397 spin_lock_irqsave(&alarm_slock, flags);
398 suspended = true;
399 spin_unlock_irqrestore(&alarm_slock, flags);
400
401 hrtimer_cancel(&alarms[ANDROID_ALARM_RTC_WAKEUP].timer);
402 hrtimer_cancel(&alarms[
403 ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].timer);
404
405 tmp_queue = &alarms[ANDROID_ALARM_RTC_WAKEUP];
406 if (tmp_queue->first)
407 wakeup_queue = tmp_queue;
408 tmp_queue = &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
409 if (tmp_queue->first && (!wakeup_queue ||
410 hrtimer_get_expires(&tmp_queue->timer).tv64 <
411 hrtimer_get_expires(&wakeup_queue->timer).tv64))
412 wakeup_queue = tmp_queue;
413 if (wakeup_queue) {
414 rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
415 getnstimeofday(&wall_time);
416 rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
417 set_normalized_timespec(&rtc_delta,
418 wall_time.tv_sec - rtc_current_time,
419 wall_time.tv_nsec);
420
421 rtc_alarm_time = timespec_sub(ktime_to_timespec(
422 hrtimer_get_expires(&wakeup_queue->timer)),
423 rtc_delta).tv_sec;
424
425 rtc_time_to_tm(rtc_alarm_time, &rtc_alarm.time);
426 rtc_alarm.enabled = 1;
427 rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
428 rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
429 rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
430 pr_alarm(SUSPEND,
431 "rtc alarm set at %ld, now %ld, rtc delta %ld.%09ld\n",
432 rtc_alarm_time, rtc_current_time,
433 rtc_delta.tv_sec, rtc_delta.tv_nsec);
434 if (rtc_current_time + 1 >= rtc_alarm_time) {
435 pr_alarm(SUSPEND, "alarm about to go off\n");
436 memset(&rtc_alarm, 0, sizeof(rtc_alarm));
437 rtc_alarm.enabled = 0;
438 rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
439
440 spin_lock_irqsave(&alarm_slock, flags);
441 suspended = false;
442 wake_lock_timeout(&alarm_rtc_wake_lock, 2 * HZ);
443 update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP],
444 false);
445 update_timer_locked(&alarms[
446 ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP], false);
447 err = -EBUSY;
448 spin_unlock_irqrestore(&alarm_slock, flags);
449 }
450 }
451 return err;
452}
453
454static int alarm_resume(struct platform_device *pdev)
455{
456 struct rtc_wkalrm alarm;
457 unsigned long flags;
458
459 pr_alarm(SUSPEND, "alarm_resume(%p)\n", pdev);
460
461 memset(&alarm, 0, sizeof(alarm));
462 alarm.enabled = 0;
463 rtc_set_alarm(alarm_rtc_dev, &alarm);
464
465 spin_lock_irqsave(&alarm_slock, flags);
466 suspended = false;
467 update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP], false);
468 update_timer_locked(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP],
469 false);
470 spin_unlock_irqrestore(&alarm_slock, flags);
471
472 return 0;
473}
474
475static struct rtc_task alarm_rtc_task = {
476 .func = alarm_triggered_func
477};
478
479static int rtc_alarm_add_device(struct device *dev,
480 struct class_interface *class_intf)
481{
482 int err;
483 struct rtc_device *rtc = to_rtc_device(dev);
484
485 mutex_lock(&alarm_setrtc_mutex);
486
487 if (alarm_rtc_dev) {
488 err = -EBUSY;
489 goto err1;
490 }
491
492 alarm_platform_dev =
493 platform_device_register_simple("alarm", -1, NULL, 0);
494 if (IS_ERR(alarm_platform_dev)) {
495 err = PTR_ERR(alarm_platform_dev);
496 goto err2;
497 }
498 err = rtc_irq_register(rtc, &alarm_rtc_task);
499 if (err)
500 goto err3;
501 alarm_rtc_dev = rtc;
502 pr_alarm(INIT_STATUS, "using rtc device, %s, for alarms", rtc->name);
503 mutex_unlock(&alarm_setrtc_mutex);
504
505 return 0;
506
507err3:
508 platform_device_unregister(alarm_platform_dev);
509err2:
510err1:
511 mutex_unlock(&alarm_setrtc_mutex);
512 return err;
513}
514
515static void rtc_alarm_remove_device(struct device *dev,
516 struct class_interface *class_intf)
517{
518 if (dev == &alarm_rtc_dev->dev) {
519 pr_alarm(INIT_STATUS, "lost rtc device for alarms");
520 rtc_irq_unregister(alarm_rtc_dev, &alarm_rtc_task);
521 platform_device_unregister(alarm_platform_dev);
522 alarm_rtc_dev = NULL;
523 }
524}
525
526static struct class_interface rtc_alarm_interface = {
527 .add_dev = &rtc_alarm_add_device,
528 .remove_dev = &rtc_alarm_remove_device,
529};
530
531static struct platform_driver alarm_driver = {
532 .suspend = alarm_suspend,
533 .resume = alarm_resume,
534 .driver = {
535 .name = "alarm"
536 }
537};
538
539static int __init alarm_late_init(void)
540{
541 unsigned long flags;
542 struct timespec tmp_time, system_time;
543
544 /* this needs to run after the rtc is read at boot */
545 spin_lock_irqsave(&alarm_slock, flags);
546 /* We read the current rtc and system time so we can later calculate
547 * elapsed realtime to be (boot_systemtime + rtc - boot_rtc) ==
548 * (rtc - (boot_rtc - boot_systemtime))
549 */
550 getnstimeofday(&tmp_time);
551 ktime_get_ts(&system_time);
552 alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
553 alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
554 timespec_to_ktime(timespec_sub(tmp_time, system_time));
555
556 spin_unlock_irqrestore(&alarm_slock, flags);
557 return 0;
558}
559
560static int __init alarm_driver_init(void)
561{
562 int err;
563 int i;
564
565 for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
566 hrtimer_init(&alarms[i].timer,
567 CLOCK_REALTIME, HRTIMER_MODE_ABS);
568 alarms[i].timer.function = alarm_timer_triggered;
569 }
570 hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].timer,
571 CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
572 alarms[ANDROID_ALARM_SYSTEMTIME].timer.function = alarm_timer_triggered;
573 err = platform_driver_register(&alarm_driver);
574 if (err < 0)
575 goto err1;
576 wake_lock_init(&alarm_rtc_wake_lock, WAKE_LOCK_SUSPEND, "alarm_rtc");
577 rtc_alarm_interface.class = rtc_class;
578 err = class_interface_register(&rtc_alarm_interface);
579 if (err < 0)
580 goto err2;
581
582 return 0;
583
584err2:
585 wake_lock_destroy(&alarm_rtc_wake_lock);
586 platform_driver_unregister(&alarm_driver);
587err1:
588 return err;
589}
590
591static void __exit alarm_exit(void)
592{
593 class_interface_unregister(&rtc_alarm_interface);
594 wake_lock_destroy(&alarm_rtc_wake_lock);
595 platform_driver_unregister(&alarm_driver);
596}
597
598late_initcall(alarm_late_init);
599module_init(alarm_driver_init);
600module_exit(alarm_exit);
601
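Editor's note: alarm.c and the android_alarm.h header that follows form a small in-kernel API (android_alarm_init(), android_alarm_start_range(), the cancel helpers, and alarm_get_elapsed_realtime()). A minimal hypothetical sketch of a kernel-side caller arming a one-shot elapsed-realtime alarm, using only functions shown above; demo_* names are illustrative, not from the patch.

/* Hypothetical in-kernel user of the android_alarm API above. */
#include "android_alarm.h"

static struct android_alarm demo_alarm;

static void demo_alarm_fired(struct android_alarm *alarm)
{
	pr_info("demo alarm fired\n");	/* called back from hrtimer context */
}

static void demo_arm_one_shot(void)
{
	/* Expire one second from now on the suspend-spanning elapsed clock. */
	ktime_t expiry = ktime_add_ns(alarm_get_elapsed_realtime(),
				      NSEC_PER_SEC);

	android_alarm_init(&demo_alarm, ANDROID_ALARM_ELAPSED_REALTIME,
			   demo_alarm_fired);
	/* start == end: no slack window for the timer core to coalesce. */
	android_alarm_start_range(&demo_alarm, expiry, expiry);
}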
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
new file mode 100644
index 000000000000..6eecbde2ef6f
--- /dev/null
+++ b/drivers/staging/android/android_alarm.h
@@ -0,0 +1,121 @@
1/* include/linux/android_alarm.h
2 *
3 * Copyright (C) 2006-2007 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#ifndef _LINUX_ANDROID_ALARM_H
17#define _LINUX_ANDROID_ALARM_H
18
19#include <linux/ioctl.h>
20#include <linux/time.h>
21
22enum android_alarm_type {
23 /* return code bit numbers or set alarm arg */
24 ANDROID_ALARM_RTC_WAKEUP,
25 ANDROID_ALARM_RTC,
26 ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
27 ANDROID_ALARM_ELAPSED_REALTIME,
28 ANDROID_ALARM_SYSTEMTIME,
29
30 ANDROID_ALARM_TYPE_COUNT,
31
32 /* return code bit numbers */
33 /* ANDROID_ALARM_TIME_CHANGE = 16 */
34};
35
36#ifdef __KERNEL__
37
38#include <linux/ktime.h>
39#include <linux/rbtree.h>
40
41/*
42 * The alarm interface is similar to the hrtimer interface but adds support
43 * for wakeup from suspend. It also adds an elapsed realtime clock that can
44 * be used for periodic timers that need to keep running while the system is
45 * suspended and not be disrupted when the wall time is set.
46 */
47
48/**
49 * struct alarm - the basic alarm structure
50 * @node: red black tree node for time ordered insertion
51 * @type: alarm type. rtc/elapsed-realtime/systemtime, wakeup/non-wakeup.
52 * @softexpires: the absolute earliest expiry time of the alarm.
53 * @expires: the absolute expiry time.
54 * @function: alarm expiry callback function
55 *
56 * The alarm structure must be initialized by alarm_init()
57 *
58 */
59
60struct android_alarm {
61 struct rb_node node;
62 enum android_alarm_type type;
63 ktime_t softexpires;
64 ktime_t expires;
65 void (*function)(struct android_alarm *);
66};
67
68void android_alarm_init(struct android_alarm *alarm,
69 enum android_alarm_type type, void (*function)(struct android_alarm *));
70void android_alarm_start_range(struct android_alarm *alarm, ktime_t start,
71 ktime_t end);
72int android_alarm_try_to_cancel(struct android_alarm *alarm);
73int android_alarm_cancel(struct android_alarm *alarm);
74ktime_t alarm_get_elapsed_realtime(void);
75
76/* set rtc while preserving elapsed realtime */
77int android_alarm_set_rtc(const struct timespec ts);
78
79#ifdef CONFIG_ANDROID_ALARM_OLDDRV_COMPAT
80/*
81 * Some older drivers depend on the old API,
82 * so provide compatibility macros for now.
83 */
84#define alarm android_alarm
85#define alarm_init(x, y, z) android_alarm_init(x, y, z)
86#define alarm_start_range(x, y, z) android_alarm_start_range(x, y, z)
87#define alarm_try_to_cancel(x) android_alarm_try_to_cancel(x)
88#define alarm_cancel(x) android_alarm_cancel(x)
89#define alarm_set_rtc(x) android_alarm_set_rtc(x)
90#endif
91
92
93#endif
94
95enum android_alarm_return_flags {
96 ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
97 ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
98 ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
99 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
100 ANDROID_ALARM_ELAPSED_REALTIME_MASK =
101 1U << ANDROID_ALARM_ELAPSED_REALTIME,
102 ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
103 ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
104};
105
106/* Disable alarm */
107#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
108
109/* Ack last alarm and wait for next */
110#define ANDROID_ALARM_WAIT _IO('a', 1)
111
112#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
113/* Set alarm */
114#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
115#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
116#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
117#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
118#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
119#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
120
121#endif
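Editor's note: the encoding above packs the alarm type into bits 4-7 of the ioctl number (the `(type) << 4` in ALARM_IOW), which is what lets alarm_ioctl() split one command space across the five clocks. A worked example under these exact definitions:

/*
 * Worked example of the command encoding above:
 *   ANDROID_ALARM_SET(ANDROID_ALARM_RTC_WAKEUP) has _IOC_NR == 2 | (0 << 4) == 0x02
 *   ANDROID_ALARM_SET(ANDROID_ALARM_SYSTEMTIME) has _IOC_NR == 2 | (4 << 4) == 0x42
 * Decoding the latter:
 *   ANDROID_ALARM_IOCTL_TO_TYPE(cmd) == 0x42 >> 4 == 4 == ANDROID_ALARM_SYSTEMTIME
 *   ANDROID_ALARM_BASE_CMD(cmd) masks bits 4-7 back out of the number, yielding
 *   ANDROID_ALARM_SET(0), which is what the switch in alarm_ioctl() matches on.
 */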
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 99052bfd3a2d..9f1f27e7c86e 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -315,7 +315,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 	get_file(asma->file);
 
 	/*
-	 * XXX - Reworked to use shmem_zero_setup() instead of 
+	 * XXX - Reworked to use shmem_zero_setup() instead of
 	 * shmem_set_file while we're in staging. -jstultz
 	 */
 	if (vma->vm_flags & VM_SHARED) {
@@ -680,7 +680,7 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	return ret;
 }
 
-static struct file_operations ashmem_fops = {
+static const struct file_operations ashmem_fops = {
 	.owner = THIS_MODULE,
 	.open = ashmem_open,
 	.release = ashmem_release,
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index f0b7e6605ab5..59e095362c81 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -103,7 +103,7 @@ static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
103 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; 103 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
104module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); 104module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
105 105
106static int binder_debug_no_lock; 106static bool binder_debug_no_lock;
107module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); 107module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
108 108
109static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); 109static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
@@ -258,7 +258,7 @@ struct binder_ref {
258}; 258};
259 259
260struct binder_buffer { 260struct binder_buffer {
261 struct list_head entry; /* free and allocated entries by addesss */ 261 struct list_head entry; /* free and allocated entries by address */
262 struct rb_node rb_node; /* free entry by size or allocated entry */ 262 struct rb_node rb_node; /* free entry by size or allocated entry */
263 /* by address */ 263 /* by address */
264 unsigned free:1; 264 unsigned free:1;
@@ -288,6 +288,7 @@ struct binder_proc {
288 struct rb_root refs_by_node; 288 struct rb_root refs_by_node;
289 int pid; 289 int pid;
290 struct vm_area_struct *vma; 290 struct vm_area_struct *vma;
291 struct mm_struct *vma_vm_mm;
291 struct task_struct *tsk; 292 struct task_struct *tsk;
292 struct files_struct *files; 293 struct files_struct *files;
293 struct hlist_node deferred_work_node; 294 struct hlist_node deferred_work_node;
@@ -633,7 +634,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
633 if (mm) { 634 if (mm) {
634 down_write(&mm->mmap_sem); 635 down_write(&mm->mmap_sem);
635 vma = proc->vma; 636 vma = proc->vma;
636 if (vma && mm != vma->vm_mm) { 637 if (vma && mm != proc->vma_vm_mm) {
637 pr_err("binder: %d: vma mm and task mm mismatch\n", 638 pr_err("binder: %d: vma mm and task mm mismatch\n",
638 proc->pid); 639 proc->pid);
639 vma = NULL; 640 vma = NULL;
@@ -2776,6 +2777,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
2776 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2777 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2777 (unsigned long)pgprot_val(vma->vm_page_prot)); 2778 (unsigned long)pgprot_val(vma->vm_page_prot));
2778 proc->vma = NULL; 2779 proc->vma = NULL;
2780 proc->vma_vm_mm = NULL;
2779 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); 2781 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2780} 2782}
2781 2783
@@ -2858,6 +2860,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2858 barrier(); 2860 barrier();
2859 proc->files = get_files_struct(proc->tsk); 2861 proc->files = get_files_struct(proc->tsk);
2860 proc->vma = vma; 2862 proc->vma = vma;
2863 proc->vma_vm_mm = vma->vm_mm;
2861 2864
2862 /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", 2865 /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
2863 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ 2866 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
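
The new vma_vm_mm field addresses a subtle lifetime problem: by the time binder_update_page_range() runs, proc->vma can refer to a mapping that userspace has already torn down, so chasing vma->vm_mm risks reading freed memory. The fix caches the mm pointer while the VMA is known to be live and compares against the cache thereafter. Stripped down from the hunks above:

/* binder_mmap(): the vma is valid for the duration of this call */
proc->vma = vma;
proc->vma_vm_mm = vma->vm_mm;

/* binder_update_page_range(), possibly much later */
down_write(&mm->mmap_sem);
vma = proc->vma;
if (vma && mm != proc->vma_vm_mm) {	/* consult the cached pointer instead
					 * of dereferencing a stale vma */
	pr_err("binder: %d: vma mm and task mm mismatch\n", proc->pid);
	vma = NULL;
}

binder_vma_close() clears both fields together, keeping the cached mm coherent with the vma pointer it shadows.
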
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index ffc2d043dd8e..ea69b6a77dac 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -60,7 +60,11 @@ struct logger_reader {
60}; 60};
61 61
62/* logger_offset - returns index 'n' into the log via (optimized) modulus */ 62/* logger_offset - returns index 'n' into the log via (optimized) modulus */
63#define logger_offset(n) ((n) & (log->size - 1)) 63size_t logger_offset(struct logger_log *log, size_t n)
64{
65 return n & (log->size-1);
66}
67
64 68
65/* 69/*
66 * file_get_log - Given a file structure, return the associated log 70 * file_get_log - Given a file structure, return the associated log
@@ -89,20 +93,24 @@ static inline struct logger_log *file_get_log(struct file *file)
89 * get_entry_len - Grabs the length of the payload of the next entry starting 93 * get_entry_len - Grabs the length of the payload of the next entry starting
90 * from 'off'. 94 * from 'off'.
91 * 95 *
96 * An entry length is 2 bytes (16 bits) in host endian order.
97 * In the log, the length does not include the size of the log entry structure.
98 * This function returns the size including the log entry structure.
99 *
92 * Caller needs to hold log->mutex. 100 * Caller needs to hold log->mutex.
93 */ 101 */
94static __u32 get_entry_len(struct logger_log *log, size_t off) 102static __u32 get_entry_len(struct logger_log *log, size_t off)
95{ 103{
96 __u16 val; 104 __u16 val;
97 105
98 switch (log->size - off) { 106 /* copy 2 bytes from buffer, in memcpy order, */
99 case 1: 107 /* handling possible wrap at end of buffer */
100 memcpy(&val, log->buffer + off, 1); 108
101 memcpy(((char *) &val) + 1, log->buffer, 1); 109 ((__u8 *)&val)[0] = log->buffer[off];
102 break; 110 if (likely(off+1 < log->size))
103 default: 111 ((__u8 *)&val)[1] = log->buffer[off+1];
104 memcpy(&val, log->buffer + off, 2); 112 else
105 } 113 ((__u8 *)&val)[1] = log->buffer[0];
106 114
107 return sizeof(struct logger_entry) + val; 115 return sizeof(struct logger_entry) + val;
108} 116}
@@ -137,7 +145,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
137 if (copy_to_user(buf + len, log->buffer, count - len)) 145 if (copy_to_user(buf + len, log->buffer, count - len))
138 return -EFAULT; 146 return -EFAULT;
139 147
140 reader->r_off = logger_offset(reader->r_off + count); 148 reader->r_off = logger_offset(log, reader->r_off + count);
141 149
142 return count; 150 return count;
143} 151}
@@ -164,9 +172,10 @@ static ssize_t logger_read(struct file *file, char __user *buf,
164 172
165start: 173start:
166 while (1) { 174 while (1) {
175 mutex_lock(&log->mutex);
176
167 prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE); 177 prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
168 178
169 mutex_lock(&log->mutex);
170 ret = (log->w_off == reader->r_off); 179 ret = (log->w_off == reader->r_off);
171 mutex_unlock(&log->mutex); 180 mutex_unlock(&log->mutex);
172 if (!ret) 181 if (!ret)
@@ -225,7 +234,7 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
225 234
226 do { 235 do {
227 size_t nr = get_entry_len(log, off); 236 size_t nr = get_entry_len(log, off);
228 off = logger_offset(off + nr); 237 off = logger_offset(log, off + nr);
229 count += nr; 238 count += nr;
230 } while (count < len); 239 } while (count < len);
231 240
@@ -233,16 +242,28 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
233} 242}
234 243
235/* 244/*
236 * clock_interval - is a < c < b in mod-space? Put another way, does the line 245 * is_between - is a < c < b, accounting for wrapping of a, b, and c
237 * from a to b cross c? 246 * positions in the buffer
247 *
248 * That is, if a<b, check for c between a and b
249 * and if a>b, check for c outside (not between) a and b
250 *
251 * |------- a xxxxxxxx b --------|
252 * c^
253 *
254 * |xxxxx b --------- a xxxxxxxxx|
255 * c^
256 * or c^
238 */ 257 */
239static inline int clock_interval(size_t a, size_t b, size_t c) 258static inline int is_between(size_t a, size_t b, size_t c)
240{ 259{
241 if (b < a) { 260 if (a < b) {
242 if (a < c || b >= c) 261 /* is c between a and b? */
262 if (a < c && c <= b)
243 return 1; 263 return 1;
244 } else { 264 } else {
245 if (a < c && b >= c) 265 /* is c outside of b through a? */
266 if (c <= b || a < c)
246 return 1; 267 return 1;
247 } 268 }
248 269
@@ -260,14 +281,14 @@ static inline int clock_interval(size_t a, size_t b, size_t c)
260static void fix_up_readers(struct logger_log *log, size_t len) 281static void fix_up_readers(struct logger_log *log, size_t len)
261{ 282{
262 size_t old = log->w_off; 283 size_t old = log->w_off;
263 size_t new = logger_offset(old + len); 284 size_t new = logger_offset(log, old + len);
264 struct logger_reader *reader; 285 struct logger_reader *reader;
265 286
266 if (clock_interval(old, new, log->head)) 287 if (is_between(old, new, log->head))
267 log->head = get_next_entry(log, log->head, len); 288 log->head = get_next_entry(log, log->head, len);
268 289
269 list_for_each_entry(reader, &log->readers, list) 290 list_for_each_entry(reader, &log->readers, list)
270 if (clock_interval(old, new, reader->r_off)) 291 if (is_between(old, new, reader->r_off))
271 reader->r_off = get_next_entry(log, reader->r_off, len); 292 reader->r_off = get_next_entry(log, reader->r_off, len);
272} 293}
273 294
@@ -286,7 +307,7 @@ static void do_write_log(struct logger_log *log, const void *buf, size_t count)
286 if (count != len) 307 if (count != len)
287 memcpy(log->buffer, buf + len, count - len); 308 memcpy(log->buffer, buf + len, count - len);
288 309
289 log->w_off = logger_offset(log->w_off + count); 310 log->w_off = logger_offset(log, log->w_off + count);
290 311
291} 312}
292 313
@@ -309,9 +330,15 @@ static ssize_t do_write_log_from_user(struct logger_log *log,
309 330
310 if (count != len) 331 if (count != len)
311 if (copy_from_user(log->buffer, buf + len, count - len)) 332 if (copy_from_user(log->buffer, buf + len, count - len))
333 /*
334 * Note that by not updating w_off, this abandons the
335 * portion of the new entry that *was* successfully
336 * copied, just above. This is intentional to avoid
337 * message corruption from missing fragments.
338 */
312 return -EFAULT; 339 return -EFAULT;
313 340
314 log->w_off = logger_offset(log->w_off + count); 341 log->w_off = logger_offset(log, log->w_off + count);
315 342
316 return count; 343 return count;
317} 344}
@@ -432,7 +459,12 @@ static int logger_release(struct inode *ignored, struct file *file)
432{ 459{
433 if (file->f_mode & FMODE_READ) { 460 if (file->f_mode & FMODE_READ) {
434 struct logger_reader *reader = file->private_data; 461 struct logger_reader *reader = file->private_data;
462 struct logger_log *log = reader->log;
463
464 mutex_lock(&log->mutex);
435 list_del(&reader->list); 465 list_del(&reader->list);
466 mutex_unlock(&log->mutex);
467
436 kfree(reader); 468 kfree(reader);
437 } 469 }
438 470
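
Two of the logger changes above are easiest to verify outside the kernel: logger_offset() relies on the buffer size being a power of two so that masking implements the modulus, and is_between() must treat the interval as circular. A standalone userspace check of the same logic — illustrative only, with a made-up buffer size:

#include <assert.h>
#include <stddef.h>

#define LOG_SIZE 256	/* log sizes are powers of two, so mask == modulus */

static size_t logger_offset(size_t n)
{
	return n & (LOG_SIZE - 1);
}

/* is c strictly after a and no later than b, on a circular buffer? */
static int is_between(size_t a, size_t b, size_t c)
{
	if (a < b) {
		if (a < c && c <= b)
			return 1;
	} else {
		if (c <= b || a < c)
			return 1;
	}
	return 0;
}

int main(void)
{
	/* unwrapped: a write advancing 10 -> 50 overtakes a reader at 30 */
	assert(is_between(10, 50, 30));
	assert(!is_between(10, 50, 60));

	/* wrapped: a 32-byte write at 240 lands at offset 16 */
	assert(logger_offset(240 + 32) == 16);
	assert(is_between(240, 16, 250));	/* before the wrap point */
	assert(is_between(240, 16, 8));		/* after the wrap point */
	assert(!is_between(240, 16, 100));	/* untouched middle region */
	return 0;
}

The reordering in logger_read() matters too: taking log->mutex before prepare_to_wait() avoids blocking in mutex_lock() after the task has already been marked TASK_INTERRUPTIBLE, which would clobber the task state set up for the wait.
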
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index efc7dc1f4831..052b43e4e505 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -1,16 +1,17 @@
1/* drivers/misc/lowmemorykiller.c 1/* drivers/misc/lowmemorykiller.c
2 * 2 *
3 * The lowmemorykiller driver lets user-space specify a set of memory thresholds 3 * The lowmemorykiller driver lets user-space specify a set of memory thresholds
4 * where processes with a range of oom_adj values will get killed. Specify the 4 * where processes with a range of oom_score_adj values will get killed. Specify
5 * minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the 5 * the minimum oom_score_adj values in
6 * number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both 6 * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in
7 * files take a comma separated list of numbers in ascending order. 7 * /sys/module/lowmemorykiller/parameters/minfree. Both files take a comma
8 * separated list of numbers in ascending order.
8 * 9 *
9 * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and 10 * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
10 * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill 11 * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
11 * processes with a oom_adj value of 8 or higher when the free memory drops 12 * processes with a oom_score_adj value of 8 or higher when the free memory
12 * below 4096 pages and kill processes with a oom_adj value of 0 or higher 13 * drops below 4096 pages and kill processes with a oom_score_adj value of 0 or
13 * when the free memory drops below 1024 pages. 14 * higher when the free memory drops below 1024 pages.
14 * 15 *
15 * The driver considers memory used for caches to be free, but if a large 16 * The driver considers memory used for caches to be free, but if a large
16 * percentage of the cached memory is locked this can be very inaccurate 17 * percentage of the cached memory is locked this can be very inaccurate
@@ -34,6 +35,7 @@
34#include <linux/mm.h> 35#include <linux/mm.h>
35#include <linux/oom.h> 36#include <linux/oom.h>
36#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/rcupdate.h>
37#include <linux/profile.h> 39#include <linux/profile.h>
38#include <linux/notifier.h> 40#include <linux/notifier.h>
39 41
@@ -45,7 +47,7 @@ static int lowmem_adj[6] = {
45 12, 47 12,
46}; 48};
47static int lowmem_adj_size = 4; 49static int lowmem_adj_size = 4;
48static size_t lowmem_minfree[6] = { 50static int lowmem_minfree[6] = {
49 3 * 512, /* 6MB */ 51 3 * 512, /* 6MB */
50 2 * 1024, /* 8MB */ 52 2 * 1024, /* 8MB */
51 4 * 1024, /* 16MB */ 53 4 * 1024, /* 16MB */
@@ -73,23 +75,23 @@ static int
73task_notify_func(struct notifier_block *self, unsigned long val, void *data) 75task_notify_func(struct notifier_block *self, unsigned long val, void *data)
74{ 76{
75 struct task_struct *task = data; 77 struct task_struct *task = data;
76 if (task == lowmem_deathpending) { 78
79 if (task == lowmem_deathpending)
77 lowmem_deathpending = NULL; 80 lowmem_deathpending = NULL;
78 task_handoff_unregister(&task_nb); 81
79 }
80 return NOTIFY_OK; 82 return NOTIFY_OK;
81} 83}
82 84
83static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) 85static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
84{ 86{
85 struct task_struct *p; 87 struct task_struct *tsk;
86 struct task_struct *selected = NULL; 88 struct task_struct *selected = NULL;
87 int rem = 0; 89 int rem = 0;
88 int tasksize; 90 int tasksize;
89 int i; 91 int i;
90 int min_adj = OOM_ADJUST_MAX + 1; 92 int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
91 int selected_tasksize = 0; 93 int selected_tasksize = 0;
92 int selected_oom_adj; 94 int selected_oom_score_adj;
93 int array_size = ARRAY_SIZE(lowmem_adj); 95 int array_size = ARRAY_SIZE(lowmem_adj);
94 int other_free = global_page_state(NR_FREE_PAGES); 96 int other_free = global_page_state(NR_FREE_PAGES);
95 int other_file = global_page_state(NR_FILE_PAGES) - 97 int other_file = global_page_state(NR_FILE_PAGES) -
@@ -115,80 +117,77 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
115 for (i = 0; i < array_size; i++) { 117 for (i = 0; i < array_size; i++) {
116 if (other_free < lowmem_minfree[i] && 118 if (other_free < lowmem_minfree[i] &&
117 other_file < lowmem_minfree[i]) { 119 other_file < lowmem_minfree[i]) {
118 min_adj = lowmem_adj[i]; 120 min_score_adj = lowmem_adj[i];
119 break; 121 break;
120 } 122 }
121 } 123 }
122 if (sc->nr_to_scan > 0) 124 if (sc->nr_to_scan > 0)
123 lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n", 125 lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
124 sc->nr_to_scan, sc->gfp_mask, other_free, 126 sc->nr_to_scan, sc->gfp_mask, other_free,
125 other_file, min_adj); 127 other_file, min_score_adj);
126 rem = global_page_state(NR_ACTIVE_ANON) + 128 rem = global_page_state(NR_ACTIVE_ANON) +
127 global_page_state(NR_ACTIVE_FILE) + 129 global_page_state(NR_ACTIVE_FILE) +
128 global_page_state(NR_INACTIVE_ANON) + 130 global_page_state(NR_INACTIVE_ANON) +
129 global_page_state(NR_INACTIVE_FILE); 131 global_page_state(NR_INACTIVE_FILE);
130 if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) { 132 if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
131 lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n", 133 lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
132 sc->nr_to_scan, sc->gfp_mask, rem); 134 sc->nr_to_scan, sc->gfp_mask, rem);
133 return rem; 135 return rem;
134 } 136 }
135 selected_oom_adj = min_adj; 137 selected_oom_score_adj = min_score_adj;
136 138
137 read_lock(&tasklist_lock); 139 rcu_read_lock();
138 for_each_process(p) { 140 for_each_process(tsk) {
139 struct mm_struct *mm; 141 struct task_struct *p;
140 struct signal_struct *sig; 142 int oom_score_adj;
141 int oom_adj; 143
142 144 if (tsk->flags & PF_KTHREAD)
143 task_lock(p);
144 mm = p->mm;
145 sig = p->signal;
146 if (!mm || !sig) {
147 task_unlock(p);
148 continue; 145 continue;
149 } 146
150 oom_adj = sig->oom_adj; 147 p = find_lock_task_mm(tsk);
151 if (oom_adj < min_adj) { 148 if (!p)
149 continue;
150
151 oom_score_adj = p->signal->oom_score_adj;
152 if (oom_score_adj < min_score_adj) {
152 task_unlock(p); 153 task_unlock(p);
153 continue; 154 continue;
154 } 155 }
155 tasksize = get_mm_rss(mm); 156 tasksize = get_mm_rss(p->mm);
156 task_unlock(p); 157 task_unlock(p);
157 if (tasksize <= 0) 158 if (tasksize <= 0)
158 continue; 159 continue;
159 if (selected) { 160 if (selected) {
160 if (oom_adj < selected_oom_adj) 161 if (oom_score_adj < selected_oom_score_adj)
161 continue; 162 continue;
162 if (oom_adj == selected_oom_adj && 163 if (oom_score_adj == selected_oom_score_adj &&
163 tasksize <= selected_tasksize) 164 tasksize <= selected_tasksize)
164 continue; 165 continue;
165 } 166 }
166 selected = p; 167 selected = p;
167 selected_tasksize = tasksize; 168 selected_tasksize = tasksize;
168 selected_oom_adj = oom_adj; 169 selected_oom_score_adj = oom_score_adj;
169 lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n", 170 lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
170 p->pid, p->comm, oom_adj, tasksize); 171 p->pid, p->comm, oom_score_adj, tasksize);
171 } 172 }
172 if (selected) { 173 if (selected) {
173 lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n", 174 lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
174 selected->pid, selected->comm, 175 selected->pid, selected->comm,
175 selected_oom_adj, selected_tasksize); 176 selected_oom_score_adj, selected_tasksize);
176 /* 177 /*
177 * If CONFIG_PROFILING is off, then task_handoff_register() 178 * If CONFIG_PROFILING is off, then we don't want to stall
178 * is a nop. In that case we don't want to stall the killer 179 * the killer by setting lowmem_deathpending.
179 * by setting lowmem_deathpending.
180 */ 180 */
181#ifdef CONFIG_PROFILING 181#ifdef CONFIG_PROFILING
182 lowmem_deathpending = selected; 182 lowmem_deathpending = selected;
183 lowmem_deathpending_timeout = jiffies + HZ; 183 lowmem_deathpending_timeout = jiffies + HZ;
184 task_handoff_register(&task_nb);
185#endif 184#endif
186 force_sig(SIGKILL, selected); 185 send_sig(SIGKILL, selected, 0);
187 rem -= selected_tasksize; 186 rem -= selected_tasksize;
188 } 187 }
189 lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n", 188 lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
190 sc->nr_to_scan, sc->gfp_mask, rem); 189 sc->nr_to_scan, sc->gfp_mask, rem);
191 read_unlock(&tasklist_lock); 190 rcu_read_unlock();
192 return rem; 191 return rem;
193} 192}
194 193
@@ -199,6 +198,7 @@ static struct shrinker lowmem_shrinker = {
199 198
200static int __init lowmem_init(void) 199static int __init lowmem_init(void)
201{ 200{
201 task_handoff_register(&task_nb);
202 register_shrinker(&lowmem_shrinker); 202 register_shrinker(&lowmem_shrinker);
203 return 0; 203 return 0;
204} 204}
@@ -206,6 +206,7 @@ static int __init lowmem_init(void)
206static void __exit lowmem_exit(void) 206static void __exit lowmem_exit(void)
207{ 207{
208 unregister_shrinker(&lowmem_shrinker); 208 unregister_shrinker(&lowmem_shrinker);
209 task_handoff_unregister(&task_nb);
209} 210}
210 211
211module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR); 212module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
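
The locking rework is the interesting part of this lowmemorykiller update: the task list is now walked under rcu_read_lock() rather than tasklist_lock, kernel threads are skipped outright, and find_lock_task_mm() pins a task_lock()ed thread that still owns an mm before any of its fields are read. Condensed from the hunk above, the selection loop follows this shape:

rcu_read_lock();
for_each_process(tsk) {
	struct task_struct *p;

	if (tsk->flags & PF_KTHREAD)	/* kernel threads: no mm to reap */
		continue;

	p = find_lock_task_mm(tsk);	/* task_lock()ed thread with mm, or NULL */
	if (!p)
		continue;

	/* p->signal->oom_score_adj and get_mm_rss(p->mm) are safe here */
	task_unlock(p);
}
rcu_read_unlock();

The companion switch from force_sig() to send_sig() fits the same theme: force_sig() is meant for synchronous signals and rewrites the victim's signal-handling state, which is not safe to do to another task from this context, while send_sig() simply queues SIGKILL.
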
diff --git a/drivers/staging/android/persistent_ram.c b/drivers/staging/android/persistent_ram.c
new file mode 100644
index 000000000000..e08f2574e30a
--- /dev/null
+++ b/drivers/staging/android/persistent_ram.c
@@ -0,0 +1,470 @@
1/*
2 * Copyright (C) 2012 Google, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include <linux/device.h>
16#include <linux/err.h>
17#include <linux/errno.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/io.h>
21#include <linux/list.h>
22#include <linux/memblock.h>
23#include <linux/rslib.h>
24#include <linux/slab.h>
25#include <linux/vmalloc.h>
26#include "persistent_ram.h"
27
28struct persistent_ram_buffer {
29 uint32_t sig;
30 atomic_t start;
31 atomic_t size;
32 uint8_t data[0];
33};
34
35#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */
36
37static __initdata LIST_HEAD(persistent_ram_list);
38
39static inline size_t buffer_size(struct persistent_ram_zone *prz)
40{
41 return atomic_read(&prz->buffer->size);
42}
43
44static inline size_t buffer_start(struct persistent_ram_zone *prz)
45{
46 return atomic_read(&prz->buffer->start);
47}
48
49/* increase and wrap the start pointer, returning the old value */
50static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
51{
52 int old;
53 int new;
54
55 do {
56 old = atomic_read(&prz->buffer->start);
57 new = old + a;
58 while (unlikely(new > prz->buffer_size))
59 new -= prz->buffer_size;
60 } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
61
62 return old;
63}
64
65/* increase the size counter until it hits the max size */
66static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
67{
68 size_t old;
69 size_t new;
70
71 if (atomic_read(&prz->buffer->size) == prz->buffer_size)
72 return;
73
74 do {
75 old = atomic_read(&prz->buffer->size);
76 new = old + a;
77 if (new > prz->buffer_size)
78 new = prz->buffer_size;
79 } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
80}
81
82/* increase the size counter, returning an error if it hits the max size */
83static inline ssize_t buffer_size_add_clamp(struct persistent_ram_zone *prz,
84 size_t a)
85{
86 size_t old;
87 size_t new;
88
89 do {
90 old = atomic_read(&prz->buffer->size);
91 new = old + a;
92 if (new > prz->buffer_size)
93 return -ENOMEM;
94 } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
95
96 return 0;
97}
98
99static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
100 uint8_t *data, size_t len, uint8_t *ecc)
101{
102 int i;
103 uint16_t par[prz->ecc_size];
104
105 /* Initialize the parity buffer */
106 memset(par, 0, sizeof(par));
107 encode_rs8(prz->rs_decoder, data, len, par, 0);
108 for (i = 0; i < prz->ecc_size; i++)
109 ecc[i] = par[i];
110}
111
112static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
113 void *data, size_t len, uint8_t *ecc)
114{
115 int i;
116 uint16_t par[prz->ecc_size];
117
118 for (i = 0; i < prz->ecc_size; i++)
119 par[i] = ecc[i];
120 return decode_rs8(prz->rs_decoder, data, par, len,
121 NULL, 0, NULL, 0, NULL);
122}
123
124static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
125 unsigned int start, unsigned int count)
126{
127 struct persistent_ram_buffer *buffer = prz->buffer;
128 uint8_t *buffer_end = buffer->data + prz->buffer_size;
129 uint8_t *block;
130 uint8_t *par;
131 int ecc_block_size = prz->ecc_block_size;
132 int ecc_size = prz->ecc_size;
133 int size = prz->ecc_block_size;
134
135 if (!prz->ecc)
136 return;
137
138 block = buffer->data + (start & ~(ecc_block_size - 1));
139 par = prz->par_buffer + (start / ecc_block_size) * prz->ecc_size;
140
141 do {
142 if (block + ecc_block_size > buffer_end)
143 size = buffer_end - block;
144 persistent_ram_encode_rs8(prz, block, size, par);
145 block += ecc_block_size;
146 par += ecc_size;
147 } while (block < buffer->data + start + count);
148}
149
150static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
151{
152 struct persistent_ram_buffer *buffer = prz->buffer;
153
154 if (!prz->ecc)
155 return;
156
157 persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
158 prz->par_header);
159}
160
161static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
162{
163 struct persistent_ram_buffer *buffer = prz->buffer;
164 uint8_t *block;
165 uint8_t *par;
166
167 if (!prz->ecc)
168 return;
169
170 block = buffer->data;
171 par = prz->par_buffer;
172 while (block < buffer->data + buffer_size(prz)) {
173 int numerr;
174 int size = prz->ecc_block_size;
175 if (block + size > buffer->data + prz->buffer_size)
176 size = buffer->data + prz->buffer_size - block;
177 numerr = persistent_ram_decode_rs8(prz, block, size, par);
178 if (numerr > 0) {
179 pr_devel("persistent_ram: error in block %p, %d\n",
180 block, numerr);
181 prz->corrected_bytes += numerr;
182 } else if (numerr < 0) {
183 pr_devel("persistent_ram: uncorrectable error in block %p\n",
184 block);
185 prz->bad_blocks++;
186 }
187 block += prz->ecc_block_size;
188 par += prz->ecc_size;
189 }
190}
191
192static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
193 size_t buffer_size)
194{
195 int numerr;
196 struct persistent_ram_buffer *buffer = prz->buffer;
197 int ecc_blocks;
198
199 if (!prz->ecc)
200 return 0;
201
202 prz->ecc_block_size = 128;
203 prz->ecc_size = 16;
204 prz->ecc_symsize = 8;
205 prz->ecc_poly = 0x11d;
206
207 ecc_blocks = DIV_ROUND_UP(prz->buffer_size, prz->ecc_block_size);
208 prz->buffer_size -= (ecc_blocks + 1) * prz->ecc_size;
209
210 if (prz->buffer_size > buffer_size) {
211 pr_err("persistent_ram: invalid size %zu, non-ecc datasize %zu\n",
212 buffer_size, prz->buffer_size);
213 return -EINVAL;
214 }
215
216 prz->par_buffer = buffer->data + prz->buffer_size;
217 prz->par_header = prz->par_buffer + ecc_blocks * prz->ecc_size;
218
219 /*
220 * first consecutive root is 0
221 * primitive element to generate roots = 1
222 */
223 prz->rs_decoder = init_rs(prz->ecc_symsize, prz->ecc_poly, 0, 1,
224 prz->ecc_size);
225 if (prz->rs_decoder == NULL) {
226 pr_info("persistent_ram: init_rs failed\n");
227 return -EINVAL;
228 }
229
230 prz->corrected_bytes = 0;
231 prz->bad_blocks = 0;
232
233 numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
234 prz->par_header);
235 if (numerr > 0) {
236 pr_info("persistent_ram: error in header, %d\n", numerr);
237 prz->corrected_bytes += numerr;
238 } else if (numerr < 0) {
239 pr_info("persistent_ram: uncorrectable error in header\n");
240 prz->bad_blocks++;
241 }
242
243 return 0;
244}
245
246ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
247 char *str, size_t len)
248{
249 ssize_t ret;
250
251 if (prz->corrected_bytes || prz->bad_blocks)
252 ret = snprintf(str, len, ""
253 "\n%d Corrected bytes, %d unrecoverable blocks\n",
254 prz->corrected_bytes, prz->bad_blocks);
255 else
256 ret = snprintf(str, len, "\nNo errors detected\n");
257
258 return ret;
259}
260
261static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
262 const void *s, unsigned int start, unsigned int count)
263{
264 struct persistent_ram_buffer *buffer = prz->buffer;
265 memcpy(buffer->data + start, s, count);
266 persistent_ram_update_ecc(prz, start, count);
267}
268
269static void __init
270persistent_ram_save_old(struct persistent_ram_zone *prz)
271{
272 struct persistent_ram_buffer *buffer = prz->buffer;
273 size_t size = buffer_size(prz);
274 size_t start = buffer_start(prz);
275 char *dest;
276
277 persistent_ram_ecc_old(prz);
278
279 dest = kmalloc(size, GFP_KERNEL);
280 if (dest == NULL) {
281 pr_err("persistent_ram: failed to allocate buffer\n");
282 return;
283 }
284
285 prz->old_log = dest;
286 prz->old_log_size = size;
287 memcpy(prz->old_log, &buffer->data[start], size - start);
288 memcpy(prz->old_log + size - start, &buffer->data[0], start);
289}
290
291int notrace persistent_ram_write(struct persistent_ram_zone *prz,
292 const void *s, unsigned int count)
293{
294 int rem;
295 int c = count;
296 size_t start;
297
298 if (unlikely(c > prz->buffer_size)) {
299 s += c - prz->buffer_size;
300 c = prz->buffer_size;
301 }
302
303 buffer_size_add_clamp(prz, c);
304
305 start = buffer_start_add(prz, c);
306
307 rem = prz->buffer_size - start;
308 if (unlikely(rem < c)) {
309 persistent_ram_update(prz, s, start, rem);
310 s += rem;
311 c -= rem;
312 start = 0;
313 }
314 persistent_ram_update(prz, s, start, c);
315
316 persistent_ram_update_header_ecc(prz);
317
318 return count;
319}
320
321size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
322{
323 return prz->old_log_size;
324}
325
326void *persistent_ram_old(struct persistent_ram_zone *prz)
327{
328 return prz->old_log;
329}
330
331void persistent_ram_free_old(struct persistent_ram_zone *prz)
332{
333 kfree(prz->old_log);
334 prz->old_log = NULL;
335 prz->old_log_size = 0;
336}
337
338static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
339 struct persistent_ram_zone *prz)
340{
341 struct page **pages;
342 phys_addr_t page_start;
343 unsigned int page_count;
344 pgprot_t prot;
345 unsigned int i;
346
347 page_start = start - offset_in_page(start);
348 page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
349
350 prot = pgprot_noncached(PAGE_KERNEL);
351
352 pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL);
353 if (!pages) {
354 pr_err("%s: Failed to allocate array for %u pages\n", __func__,
355 page_count);
356 return -ENOMEM;
357 }
358
359 for (i = 0; i < page_count; i++) {
360 phys_addr_t addr = page_start + i * PAGE_SIZE;
361 pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
362 }
363 prz->vaddr = vmap(pages, page_count, VM_MAP, prot);
364 kfree(pages);
365 if (!prz->vaddr) {
366 pr_err("%s: Failed to map %u pages\n", __func__, page_count);
367 return -ENOMEM;
368 }
369
370 prz->buffer = prz->vaddr + offset_in_page(start);
371 prz->buffer_size = size - sizeof(struct persistent_ram_buffer);
372
373 return 0;
374}
375
376static int __init persistent_ram_buffer_init(const char *name,
377 struct persistent_ram_zone *prz)
378{
379 int i;
380 struct persistent_ram *ram;
381 struct persistent_ram_descriptor *desc;
382 phys_addr_t start;
383
384 list_for_each_entry(ram, &persistent_ram_list, node) {
385 start = ram->start;
386 for (i = 0; i < ram->num_descs; i++) {
387 desc = &ram->descs[i];
388 if (!strcmp(desc->name, name))
389 return persistent_ram_buffer_map(start,
390 desc->size, prz);
391 start += desc->size;
392 }
393 }
394
395 return -EINVAL;
396}
397
398static __init
399struct persistent_ram_zone *__persistent_ram_init(struct device *dev, bool ecc)
400{
401 struct persistent_ram_zone *prz;
402 int ret;
403
404 prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
405 if (!prz) {
406 pr_err("persistent_ram: failed to allocate persistent ram zone\n");
407 return ERR_PTR(-ENOMEM);
408 }
409
410 INIT_LIST_HEAD(&prz->node);
411
412 ret = persistent_ram_buffer_init(dev_name(dev), prz);
413 if (ret) {
414 pr_err("persistent_ram: failed to initialize buffer\n");
415 return ERR_PTR(ret);
416 }
417
418 prz->ecc = ecc;
419 ret = persistent_ram_init_ecc(prz, prz->buffer_size);
420 if (ret)
421 return ERR_PTR(ret);
422
423 if (prz->buffer->sig == PERSISTENT_RAM_SIG) {
424 if (buffer_size(prz) > prz->buffer_size ||
425 buffer_start(prz) > buffer_size(prz))
426 pr_info("persistent_ram: found existing invalid buffer,"
427 " size %ld, start %ld\n",
428 buffer_size(prz), buffer_start(prz));
429 else {
430 pr_info("persistent_ram: found existing buffer,"
431 " size %ld, start %ld\n",
432 buffer_size(prz), buffer_start(prz));
433 persistent_ram_save_old(prz);
434 }
435 } else {
436 pr_info("persistent_ram: no valid data in buffer"
437 " (sig = 0x%08x)\n", prz->buffer->sig);
438 }
439
440 prz->buffer->sig = PERSISTENT_RAM_SIG;
441 atomic_set(&prz->buffer->start, 0);
442 atomic_set(&prz->buffer->size, 0);
443
444 return prz;
445}
446
447struct persistent_ram_zone * __init
448persistent_ram_init_ringbuffer(struct device *dev, bool ecc)
449{
450 return __persistent_ram_init(dev, ecc);
451}
452
453int __init persistent_ram_early_init(struct persistent_ram *ram)
454{
455 int ret;
456
457 ret = memblock_reserve(ram->start, ram->size);
458 if (ret) {
459 pr_err("Failed to reserve persistent memory from %08lx-%08lx\n",
460 (long)ram->start, (long)(ram->start + ram->size - 1));
461 return ret;
462 }
463
464 list_add_tail(&ram->node, &persistent_ram_list);
465
466 pr_info("Initialized persistent memory from %08lx-%08lx\n",
467 (long)ram->start, (long)(ram->start + ram->size - 1));
468
469 return 0;
470}
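
The atomic helpers near the top of this file are the heart of the design: writers may be called from the console path, where taking a spinlock can be unsafe, so the buffer's start and size fields advance via compare-and-swap retry loops instead. A userspace model of buffer_start_add() — a sketch using C11 atomics, with a deliberately non-power-of-two size to exercise the subtractive wrap:

#include <assert.h>
#include <stdatomic.h>

#define BUF_SIZE 100

static atomic_int start;

/* advance the start pointer by a, wrapping at BUF_SIZE; return the old value */
static int buffer_start_add(int a)
{
	int old, new;

	do {
		old = atomic_load(&start);
		new = old + a;
		while (new > BUF_SIZE)	/* same wrap test as the kernel code */
			new -= BUF_SIZE;
	} while (!atomic_compare_exchange_weak(&start, &old, new));

	return old;
}

int main(void)
{
	atomic_store(&start, 90);
	assert(buffer_start_add(30) == 90);	/* returns the pre-advance position */
	assert(atomic_load(&start) == 20);	/* 90 + 30 wrapped past 100 */
	return 0;
}
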
diff --git a/drivers/staging/android/persistent_ram.h b/drivers/staging/android/persistent_ram.h
new file mode 100644
index 000000000000..f41e2086c645
--- /dev/null
+++ b/drivers/staging/android/persistent_ram.h
@@ -0,0 +1,78 @@
1/*
2 * Copyright (C) 2011 Google, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#ifndef __LINUX_PERSISTENT_RAM_H__
16#define __LINUX_PERSISTENT_RAM_H__
17
18#include <linux/device.h>
19#include <linux/kernel.h>
20#include <linux/list.h>
21#include <linux/types.h>
22
23struct persistent_ram_buffer;
24
25struct persistent_ram_descriptor {
26 const char *name;
27 phys_addr_t size;
28};
29
30struct persistent_ram {
31 phys_addr_t start;
32 phys_addr_t size;
33
34 int num_descs;
35 struct persistent_ram_descriptor *descs;
36
37 struct list_head node;
38};
39
40struct persistent_ram_zone {
41 struct list_head node;
42 void *vaddr;
43 struct persistent_ram_buffer *buffer;
44 size_t buffer_size;
45
46 /* ECC correction */
47 bool ecc;
48 char *par_buffer;
49 char *par_header;
50 struct rs_control *rs_decoder;
51 int corrected_bytes;
52 int bad_blocks;
53 int ecc_block_size;
54 int ecc_size;
55 int ecc_symsize;
56 int ecc_poly;
57
58 char *old_log;
59 size_t old_log_size;
60 size_t old_log_footer_size;
61 bool early;
62};
63
64int persistent_ram_early_init(struct persistent_ram *ram);
65
66struct persistent_ram_zone *persistent_ram_init_ringbuffer(struct device *dev,
67 bool ecc);
68
69int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
70 unsigned int count);
71
72size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
73void *persistent_ram_old(struct persistent_ram_zone *prz);
74void persistent_ram_free_old(struct persistent_ram_zone *prz);
75ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
76 char *str, size_t len);
77
78#endif
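
Nothing in this header creates zones by itself; a board file is expected to reserve the region before the page allocator claims it and describe how it is carved into named zones. A hypothetical wiring — the address, size, and function names here are invented for illustration:

#include <linux/init.h>
#include <linux/kernel.h>
#include "persistent_ram.h"

static struct persistent_ram_descriptor board_prz_descs[] = {
	{ .name = "ram_console", .size = 128 * 1024 },	/* matched by dev_name() */
};

static struct persistent_ram board_prz = {
	.start	   = 0x8ffe0000,	/* hypothetical carve-out address */
	.size	   = 128 * 1024,
	.num_descs = ARRAY_SIZE(board_prz_descs),
	.descs	   = board_prz_descs,
};

/* called from the machine's early reserve hook, before memory is handed off */
void __init board_reserve(void)
{
	persistent_ram_early_init(&board_prz);
}

Note that zone lookup in persistent_ram_buffer_init() compares desc->name against dev_name(dev), so the platform device registered for the console must produce exactly that device name (e.g. a platform device registered with id -1).
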
diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c
index 6d4d67924f22..ce140ffc54ea 100644
--- a/drivers/staging/android/ram_console.c
+++ b/drivers/staging/android/ram_console.c
@@ -21,129 +21,24 @@
21#include <linux/string.h> 21#include <linux/string.h>
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include "persistent_ram.h"
24#include "ram_console.h" 25#include "ram_console.h"
25 26
26#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION 27static struct persistent_ram_zone *ram_console_zone;
27#include <linux/rslib.h> 28static const char *bootinfo;
28#endif 29static size_t bootinfo_size;
29
30struct ram_console_buffer {
31 uint32_t sig;
32 uint32_t start;
33 uint32_t size;
34 uint8_t data[0];
35};
36
37#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */
38
39#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
40static char __initdata
41 ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE];
42#endif
43static char *ram_console_old_log;
44static size_t ram_console_old_log_size;
45
46static struct ram_console_buffer *ram_console_buffer;
47static size_t ram_console_buffer_size;
48#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
49static char *ram_console_par_buffer;
50static struct rs_control *ram_console_rs_decoder;
51static int ram_console_corrected_bytes;
52static int ram_console_bad_blocks;
53#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
54#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
55#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
56#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
57#endif
58
59#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
60static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc)
61{
62 int i;
63 uint16_t par[ECC_SIZE];
64 /* Initialize the parity buffer */
65 memset(par, 0, sizeof(par));
66 encode_rs8(ram_console_rs_decoder, data, len, par, 0);
67 for (i = 0; i < ECC_SIZE; i++)
68 ecc[i] = par[i];
69}
70
71static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc)
72{
73 int i;
74 uint16_t par[ECC_SIZE];
75 for (i = 0; i < ECC_SIZE; i++)
76 par[i] = ecc[i];
77 return decode_rs8(ram_console_rs_decoder, data, par, len,
78 NULL, 0, NULL, 0, NULL);
79}
80#endif
81
82static void ram_console_update(const char *s, unsigned int count)
83{
84 struct ram_console_buffer *buffer = ram_console_buffer;
85#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
86 uint8_t *buffer_end = buffer->data + ram_console_buffer_size;
87 uint8_t *block;
88 uint8_t *par;
89 int size = ECC_BLOCK_SIZE;
90#endif
91 memcpy(buffer->data + buffer->start, s, count);
92#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
93 block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1));
94 par = ram_console_par_buffer +
95 (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE;
96 do {
97 if (block + ECC_BLOCK_SIZE > buffer_end)
98 size = buffer_end - block;
99 ram_console_encode_rs8(block, size, par);
100 block += ECC_BLOCK_SIZE;
101 par += ECC_SIZE;
102 } while (block < buffer->data + buffer->start + count);
103#endif
104}
105
106static void ram_console_update_header(void)
107{
108#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
109 struct ram_console_buffer *buffer = ram_console_buffer;
110 uint8_t *par;
111 par = ram_console_par_buffer +
112 DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
113 ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par);
114#endif
115}
116 30
117static void 31static void
118ram_console_write(struct console *console, const char *s, unsigned int count) 32ram_console_write(struct console *console, const char *s, unsigned int count)
119{ 33{
120 int rem; 34 struct persistent_ram_zone *prz = console->data;
121 struct ram_console_buffer *buffer = ram_console_buffer; 35 persistent_ram_write(prz, s, count);
122
123 if (count > ram_console_buffer_size) {
124 s += count - ram_console_buffer_size;
125 count = ram_console_buffer_size;
126 }
127 rem = ram_console_buffer_size - buffer->start;
128 if (rem < count) {
129 ram_console_update(s, rem);
130 s += rem;
131 count -= rem;
132 buffer->start = 0;
133 buffer->size = ram_console_buffer_size;
134 }
135 ram_console_update(s, count);
136
137 buffer->start += count;
138 if (buffer->size < ram_console_buffer_size)
139 buffer->size += count;
140 ram_console_update_header();
141} 36}
142 37
143static struct console ram_console = { 38static struct console ram_console = {
144 .name = "ram", 39 .name = "ram",
145 .write = ram_console_write, 40 .write = ram_console_write,
146 .flags = CON_PRINTBUFFER | CON_ENABLED, 41 .flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
147 .index = -1, 42 .index = -1,
148}; 43};
149 44
@@ -155,220 +50,31 @@ void ram_console_enable_console(int enabled)
155 ram_console.flags &= ~CON_ENABLED; 50 ram_console.flags &= ~CON_ENABLED;
156} 51}
157 52
158static void __init 53static int __init ram_console_probe(struct platform_device *pdev)
159ram_console_save_old(struct ram_console_buffer *buffer, const char *bootinfo,
160 char *dest)
161{
162 size_t old_log_size = buffer->size;
163 size_t bootinfo_size = 0;
164 size_t total_size = old_log_size;
165 char *ptr;
166 const char *bootinfo_label = "Boot info:\n";
167
168#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
169 uint8_t *block;
170 uint8_t *par;
171 char strbuf[80];
172 int strbuf_len = 0;
173
174 block = buffer->data;
175 par = ram_console_par_buffer;
176 while (block < buffer->data + buffer->size) {
177 int numerr;
178 int size = ECC_BLOCK_SIZE;
179 if (block + size > buffer->data + ram_console_buffer_size)
180 size = buffer->data + ram_console_buffer_size - block;
181 numerr = ram_console_decode_rs8(block, size, par);
182 if (numerr > 0) {
183#if 0
184 printk(KERN_INFO "ram_console: error in block %p, %d\n",
185 block, numerr);
186#endif
187 ram_console_corrected_bytes += numerr;
188 } else if (numerr < 0) {
189#if 0
190 printk(KERN_INFO "ram_console: uncorrectable error in "
191 "block %p\n", block);
192#endif
193 ram_console_bad_blocks++;
194 }
195 block += ECC_BLOCK_SIZE;
196 par += ECC_SIZE;
197 }
198 if (ram_console_corrected_bytes || ram_console_bad_blocks)
199 strbuf_len = snprintf(strbuf, sizeof(strbuf),
200 "\n%d Corrected bytes, %d unrecoverable blocks\n",
201 ram_console_corrected_bytes, ram_console_bad_blocks);
202 else
203 strbuf_len = snprintf(strbuf, sizeof(strbuf),
204 "\nNo errors detected\n");
205 if (strbuf_len >= sizeof(strbuf))
206 strbuf_len = sizeof(strbuf) - 1;
207 total_size += strbuf_len;
208#endif
209
210 if (bootinfo)
211 bootinfo_size = strlen(bootinfo) + strlen(bootinfo_label);
212 total_size += bootinfo_size;
213
214 if (dest == NULL) {
215 dest = kmalloc(total_size, GFP_KERNEL);
216 if (dest == NULL) {
217 printk(KERN_ERR
218 "ram_console: failed to allocate buffer\n");
219 return;
220 }
221 }
222
223 ram_console_old_log = dest;
224 ram_console_old_log_size = total_size;
225 memcpy(ram_console_old_log,
226 &buffer->data[buffer->start], buffer->size - buffer->start);
227 memcpy(ram_console_old_log + buffer->size - buffer->start,
228 &buffer->data[0], buffer->start);
229 ptr = ram_console_old_log + old_log_size;
230#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
231 memcpy(ptr, strbuf, strbuf_len);
232 ptr += strbuf_len;
233#endif
234 if (bootinfo) {
235 memcpy(ptr, bootinfo_label, strlen(bootinfo_label));
236 ptr += strlen(bootinfo_label);
237 memcpy(ptr, bootinfo, bootinfo_size);
238 ptr += bootinfo_size;
239 }
240}
241
242static int __init ram_console_init(struct ram_console_buffer *buffer,
243 size_t buffer_size, const char *bootinfo,
244 char *old_buf)
245{ 54{
246#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION 55 struct ram_console_platform_data *pdata = pdev->dev.platform_data;
247 int numerr; 56 struct persistent_ram_zone *prz;
248 uint8_t *par;
249#endif
250 ram_console_buffer = buffer;
251 ram_console_buffer_size =
252 buffer_size - sizeof(struct ram_console_buffer);
253
254 if (ram_console_buffer_size > buffer_size) {
255 pr_err("ram_console: buffer %p, invalid size %zu, "
256 "datasize %zu\n", buffer, buffer_size,
257 ram_console_buffer_size);
258 return 0;
259 }
260
261#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
262 ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
263 ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
264
265 if (ram_console_buffer_size > buffer_size) {
266 pr_err("ram_console: buffer %p, invalid size %zu, "
267 "non-ecc datasize %zu\n",
268 buffer, buffer_size, ram_console_buffer_size);
269 return 0;
270 }
271
272 ram_console_par_buffer = buffer->data + ram_console_buffer_size;
273
274
275 /* first consecutive root is 0
276 * primitive element to generate roots = 1
277 */
278 ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
279 if (ram_console_rs_decoder == NULL) {
280 printk(KERN_INFO "ram_console: init_rs failed\n");
281 return 0;
282 }
283
284 ram_console_corrected_bytes = 0;
285 ram_console_bad_blocks = 0;
286 57
287 par = ram_console_par_buffer + 58 prz = persistent_ram_init_ringbuffer(&pdev->dev, true);
288 DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE; 59 if (IS_ERR(prz))
60 return PTR_ERR(prz);
289 61
290 numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
291 if (numerr > 0) {
292 printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
293 ram_console_corrected_bytes += numerr;
294 } else if (numerr < 0) {
295 printk(KERN_INFO
296 "ram_console: uncorrectable error in header\n");
297 ram_console_bad_blocks++;
298 }
299#endif
300 62
301 if (buffer->sig == RAM_CONSOLE_SIG) { 63 if (pdata) {
302 if (buffer->size > ram_console_buffer_size 64 bootinfo = kstrdup(pdata->bootinfo, GFP_KERNEL);
303 || buffer->start > buffer->size) 65 if (bootinfo)
304 printk(KERN_INFO "ram_console: found existing invalid " 66 bootinfo_size = strlen(bootinfo);
305 "buffer, size %d, start %d\n",
306 buffer->size, buffer->start);
307 else {
308 printk(KERN_INFO "ram_console: found existing buffer, "
309 "size %d, start %d\n",
310 buffer->size, buffer->start);
311 ram_console_save_old(buffer, bootinfo, old_buf);
312 }
313 } else {
314 printk(KERN_INFO "ram_console: no valid data in buffer "
315 "(sig = 0x%08x)\n", buffer->sig);
316 } 67 }
317 68
318 buffer->sig = RAM_CONSOLE_SIG; 69 ram_console_zone = prz;
319 buffer->start = 0; 70 ram_console.data = prz;
320 buffer->size = 0;
321 71
322 register_console(&ram_console); 72 register_console(&ram_console);
323#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
324 console_verbose();
325#endif
326 return 0;
327}
328 73
329#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT 74 return 0;
330static int __init ram_console_early_init(void)
331{
332 return ram_console_init((struct ram_console_buffer *)
333 CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR,
334 CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE,
335 NULL,
336 ram_console_old_log_init_buffer);
337}
338#else
339static int ram_console_driver_probe(struct platform_device *pdev)
340{
341 struct resource *res = pdev->resource;
342 size_t start;
343 size_t buffer_size;
344 void *buffer;
345 const char *bootinfo = NULL;
346 struct ram_console_platform_data *pdata = pdev->dev.platform_data;
347
348 if (res == NULL || pdev->num_resources != 1 ||
349 !(res->flags & IORESOURCE_MEM)) {
350 printk(KERN_ERR "ram_console: invalid resource, %p %d flags "
351 "%lx\n", res, pdev->num_resources, res ? res->flags : 0);
352 return -ENXIO;
353 }
354 buffer_size = res->end - res->start + 1;
355 start = res->start;
356 printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n",
357 start, buffer_size);
358 buffer = ioremap(res->start, buffer_size);
359 if (buffer == NULL) {
360 printk(KERN_ERR "ram_console: failed to map memory\n");
361 return -ENOMEM;
362 }
363
364 if (pdata)
365 bootinfo = pdata->bootinfo;
366
367 return ram_console_init(buffer, buffer_size, bootinfo, NULL/* allocate */);
368} 75}
369 76
370static struct platform_driver ram_console_driver = { 77static struct platform_driver ram_console_driver = {
371 .probe = ram_console_driver_probe,
372 .driver = { 78 .driver = {
373 .name = "ram_console", 79 .name = "ram_console",
374 }, 80 },
@@ -376,10 +82,11 @@ static struct platform_driver ram_console_driver = {
376 82
377static int __init ram_console_module_init(void) 83static int __init ram_console_module_init(void)
378{ 84{
379 int err; 85 return platform_driver_probe(&ram_console_driver, ram_console_probe);
380 err = platform_driver_register(&ram_console_driver);
381 return err;
382} 86}
87
88#ifndef CONFIG_PRINTK
89#define dmesg_restrict 0
383#endif 90#endif
384 91
385static ssize_t ram_console_read_old(struct file *file, char __user *buf, 92static ssize_t ram_console_read_old(struct file *file, char __user *buf,
@@ -387,14 +94,52 @@ static ssize_t ram_console_read_old(struct file *file, char __user *buf,
387{ 94{
388 loff_t pos = *offset; 95 loff_t pos = *offset;
389 ssize_t count; 96 ssize_t count;
97 struct persistent_ram_zone *prz = ram_console_zone;
98 size_t old_log_size = persistent_ram_old_size(prz);
99 const char *old_log = persistent_ram_old(prz);
100 char *str;
101 int ret;
102
103 if (dmesg_restrict && !capable(CAP_SYSLOG))
104 return -EPERM;
105
106 /* Main last_kmsg log */
107 if (pos < old_log_size) {
108 count = min(len, (size_t)(old_log_size - pos));
109 if (copy_to_user(buf, old_log + pos, count))
110 return -EFAULT;
111 goto out;
112 }
390 113
391 if (pos >= ram_console_old_log_size) 114 /* ECC correction notice */
392 return 0; 115 pos -= old_log_size;
116 count = persistent_ram_ecc_string(prz, NULL, 0);
117 if (pos < count) {
118 str = kmalloc(count, GFP_KERNEL);
119 if (!str)
120 return -ENOMEM;
121 persistent_ram_ecc_string(prz, str, count + 1);
122 count = min(len, (size_t)(count - pos));
123 ret = copy_to_user(buf, str + pos, count);
124 kfree(str);
125 if (ret)
126 return -EFAULT;
127 goto out;
128 }
129
130 /* Boot info passed through pdata */
131 pos -= count;
132 if (pos < bootinfo_size) {
133 count = min(len, (size_t)(bootinfo_size - pos));
134 if (copy_to_user(buf, bootinfo + pos, count))
135 return -EFAULT;
136 goto out;
137 }
393 138
394 count = min(len, (size_t)(ram_console_old_log_size - pos)); 139 /* EOF */
395 if (copy_to_user(buf, ram_console_old_log + pos, count)) 140 return 0;
396 return -EFAULT;
397 141
142out:
398 *offset += count; 143 *offset += count;
399 return count; 144 return count;
400} 145}
@@ -407,37 +152,28 @@ static const struct file_operations ram_console_file_ops = {
407static int __init ram_console_late_init(void) 152static int __init ram_console_late_init(void)
408{ 153{
409 struct proc_dir_entry *entry; 154 struct proc_dir_entry *entry;
155 struct persistent_ram_zone *prz = ram_console_zone;
410 156
411 if (ram_console_old_log == NULL) 157 if (!prz)
412 return 0; 158 return 0;
413#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT 159
414 ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL); 160 if (persistent_ram_old_size(prz) == 0)
415 if (ram_console_old_log == NULL) {
416 printk(KERN_ERR
417 "ram_console: failed to allocate buffer for old log\n");
418 ram_console_old_log_size = 0;
419 return 0; 161 return 0;
420 } 162
421 memcpy(ram_console_old_log,
422 ram_console_old_log_init_buffer, ram_console_old_log_size);
423#endif
424 entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL); 163 entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
425 if (!entry) { 164 if (!entry) {
426 printk(KERN_ERR "ram_console: failed to create proc entry\n"); 165 printk(KERN_ERR "ram_console: failed to create proc entry\n");
427 kfree(ram_console_old_log); 166 persistent_ram_free_old(prz);
428 ram_console_old_log = NULL;
429 return 0; 167 return 0;
430 } 168 }
431 169
432 entry->proc_fops = &ram_console_file_ops; 170 entry->proc_fops = &ram_console_file_ops;
433 entry->size = ram_console_old_log_size; 171 entry->size = persistent_ram_old_size(prz) +
172 persistent_ram_ecc_string(prz, NULL, 0) +
173 bootinfo_size;
174
434 return 0; 175 return 0;
435} 176}
436 177
437#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
438console_initcall(ram_console_early_init);
439#else
440postcore_initcall(ram_console_module_init);
441#endif
442late_initcall(ram_console_late_init); 178late_initcall(ram_console_late_init);
443 179postcore_initcall(ram_console_module_init);
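
The rewritten ram_console_read_old() above exposes three regions through one proc file — the saved log, the ECC status string, and the platform bootinfo — by testing pos against each region in turn and then subtracting that region's size. The arithmetic is easy to get wrong, so here is the same scheme as a standalone check, with invented region sizes:

#include <assert.h>

/* map a flat file offset onto three concatenated regions, last_kmsg style;
 * returns the region index and the offset within it, or -1 at EOF */
static int locate(long pos, long log, long ecc, long boot, long *off)
{
	if (pos < log) {
		*off = pos;
		return 0;		/* main saved log */
	}
	pos -= log;
	if (pos < ecc) {
		*off = pos;
		return 1;		/* ECC correction notice */
	}
	pos -= ecc;
	if (pos < boot) {
		*off = pos;
		return 2;		/* boot info from pdata */
	}
	return -1;			/* EOF */
}

int main(void)
{
	long off;

	assert(locate(10, 100, 20, 30, &off) == 0 && off == 10);
	assert(locate(110, 100, 20, 30, &off) == 1 && off == 10);
	assert(locate(125, 100, 20, 30, &off) == 2 && off == 5);
	assert(locate(200, 100, 20, 30, &off) == -1);
	return 0;
}
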
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index a64481c3e86d..bc723eff11af 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -29,9 +29,9 @@ struct timed_gpio_data {
29 struct timed_output_dev dev; 29 struct timed_output_dev dev;
30 struct hrtimer timer; 30 struct hrtimer timer;
31 spinlock_t lock; 31 spinlock_t lock;
32 unsigned gpio; 32 unsigned gpio;
33 int max_timeout; 33 int max_timeout;
34 u8 active_low; 34 u8 active_low;
35}; 35};
36 36
37static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer) 37static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
index a0e15f8be3f7..d29e169d7ebe 100644
--- a/drivers/staging/android/timed_gpio.h
+++ b/drivers/staging/android/timed_gpio.h
@@ -20,13 +20,13 @@
20 20
21struct timed_gpio { 21struct timed_gpio {
22 const char *name; 22 const char *name;
23 unsigned gpio; 23 unsigned gpio;
24 int max_timeout; 24 int max_timeout;
25 u8 active_low; 25 u8 active_low;
26}; 26};
27 27
28struct timed_gpio_platform_data { 28struct timed_gpio_platform_data {
29 int num_gpios; 29 int num_gpios;
30 struct timed_gpio *gpios; 30 struct timed_gpio *gpios;
31}; 31};
32 32
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index 1df9586f2730..83549d9cfefc 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -159,7 +159,6 @@ static void setup_packet_header(struct asus_oled_packet *packet, char flags,
159 159
160static void enable_oled(struct asus_oled_dev *odev, uint8_t enabl) 160static void enable_oled(struct asus_oled_dev *odev, uint8_t enabl)
161{ 161{
162 int a;
163 int retval; 162 int retval;
164 int act_len; 163 int act_len;
165 struct asus_oled_packet *packet; 164 struct asus_oled_packet *packet;
@@ -178,17 +177,15 @@ static void enable_oled(struct asus_oled_dev *odev, uint8_t enabl)
178 else 177 else
179 packet->bitmap[0] = 0xae; 178 packet->bitmap[0] = 0xae;
180 179
181 for (a = 0; a < 1; a++) { 180 retval = usb_bulk_msg(odev->udev,
182 retval = usb_bulk_msg(odev->udev, 181 usb_sndbulkpipe(odev->udev, 2),
183 usb_sndbulkpipe(odev->udev, 2), 182 packet,
184 packet, 183 sizeof(struct asus_oled_header) + 1,
185 sizeof(struct asus_oled_header) + 1, 184 &act_len,
186 &act_len, 185 -1);
187 -1);
188 186
189 if (retval) 187 if (retval)
190 dev_dbg(&odev->udev->dev, "retval = %d\n", retval); 188 dev_dbg(&odev->udev->dev, "retval = %d\n", retval);
191 }
192 189
193 odev->enabled = enabl; 190 odev->enabled = enabl;
194 191
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 179707b5e7c7..cf3059216958 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -728,14 +728,10 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
728 if (IoBuffer.InputLength > MAX_CNTL_PKT_SIZE) 728 if (IoBuffer.InputLength > MAX_CNTL_PKT_SIZE)
729 return -EINVAL; 729 return -EINVAL;
730 730
731 pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL); 731 pvBuffer = memdup_user(IoBuffer.InputBuffer,
732 if (!pvBuffer) 732 IoBuffer.InputLength);
733 return -ENOMEM; 733 if (IS_ERR(pvBuffer))
734 734 return PTR_ERR(pvBuffer);
735 if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
736 kfree(pvBuffer);
737 return -EFAULT;
738 }
739 735
740 down(&Adapter->LowPowerModeSync); 736 down(&Adapter->LowPowerModeSync);
741 Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue, 737 Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue,
@@ -1140,15 +1136,10 @@ cntrlEnd:
1140 if (IoBuffer.InputLength < sizeof(ULONG) * 2) 1136 if (IoBuffer.InputLength < sizeof(ULONG) * 2)
1141 return -EINVAL; 1137 return -EINVAL;
1142 1138
1143 pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL); 1139 pvBuffer = memdup_user(IoBuffer.InputBuffer,
1144 if (!pvBuffer) 1140 IoBuffer.InputLength);
1145 return -ENOMEM; 1141 if (IS_ERR(pvBuffer))
1146 1142 return PTR_ERR(pvBuffer);
1147 /* Get WrmBuffer structure */
1148 if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
1149 kfree(pvBuffer);
1150 return -EFAULT;
1151 }
1152 1143
1153 pBulkBuffer = (PBULKWRM_BUFFER)pvBuffer; 1144 pBulkBuffer = (PBULKWRM_BUFFER)pvBuffer;
1154 1145
@@ -1302,20 +1293,18 @@ cntrlEnd:
 		/*
 		 * Deny the access if the offset crosses the cal area limit.
 		 */
+		if (stNVMReadWrite.uiNumBytes > Adapter->uiNVMDSDSize)
+			return STATUS_FAILURE;
 
-		if ((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) > Adapter->uiNVMDSDSize) {
+		if (stNVMReadWrite.uiOffset > Adapter->uiNVMDSDSize - stNVMReadWrite.uiNumBytes) {
 			/* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Can't allow access beyond NVM Size: 0x%x 0x%x\n", stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes); */
 			return STATUS_FAILURE;
 		}
 
-		pReadData = kzalloc(stNVMReadWrite.uiNumBytes, GFP_KERNEL);
-		if (!pReadData)
-			return -ENOMEM;
-
-		if (copy_from_user(pReadData, stNVMReadWrite.pBuffer, stNVMReadWrite.uiNumBytes)) {
-			kfree(pReadData);
-			return -EFAULT;
-		}
+		pReadData = memdup_user(stNVMReadWrite.pBuffer,
+					stNVMReadWrite.uiNumBytes);
+		if (IS_ERR(pReadData))
+			return PTR_ERR(pReadData);
 
 		do_gettimeofday(&tv0);
 		if (IOCTL_BCM_NVM_READ == cmd) {
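All three Bcmchar.c hunks apply the same transformation: a kmalloc()/copy_from_user() pair collapses into memdup_user(), which allocates and copies in one call and reports failure through the ERR_PTR mechanism (-ENOMEM or -EFAULT), so the explicit kfree() on the error path disappears. The NVM hunk also reorders the bounds check so the addition uiOffset + uiNumBytes can no longer wrap: uiNumBytes is validated against the DSD size first, then uiOffset is compared against size - uiNumBytes, which cannot overflow. A minimal sketch of the idiom, with hypothetical names (handle_ioctl_buf, ubuf, limit), not the driver's exact code:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int handle_ioctl_buf(const void __user *ubuf, size_t len, size_t limit)
	{
		void *kbuf;

		if (len > limit)	/* validate the length before using it */
			return -EINVAL;

		kbuf = memdup_user(ubuf, len);	/* kmalloc() + copy_from_user() in one step */
		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);	/* -ENOMEM or -EFAULT */

		/* ... operate on the kernel copy ... */

		kfree(kbuf);
		return 0;
	}
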
diff --git a/drivers/staging/bcm/CmHost.c b/drivers/staging/bcm/CmHost.c
index c0ee95a71343..7e38af5e1765 100644
--- a/drivers/staging/bcm/CmHost.c
+++ b/drivers/staging/bcm/CmHost.c
@@ -1,431 +1,359 @@
 /************************************************************
-* CMHOST.C
-* This file contains the routines for handling Connection
-* Management.
-************************************************************/
+ * CMHOST.C
+ * This file contains the routines for handling Connection
+ * Management.
+ ************************************************************/
 
-//#define CONN_MSG
+/* #define CONN_MSG */
 #include "headers.h"
 
-typedef enum _E_CLASSIFIER_ACTION
-{
+enum E_CLASSIFIER_ACTION {
 	eInvalidClassifierAction,
 	eAddClassifier,
 	eReplaceClassifier,
 	eDeleteClassifier
-}E_CLASSIFIER_ACTION;
+};
 
-static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid);
+static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter, B_UINT16 tid);
 
 /************************************************************
-* Function - SearchSfid
-*
-* Description - This routinue would search QOS queues having
-* specified SFID as input parameter.
-*
-* Parameters - Adapter: Pointer to the Adapter structure
-* uiSfid : Given SFID for matching
-*
-* Returns - Queue index for this SFID(If matched)
- Else Invalid Queue Index(If Not matched)
-************************************************************/
-INT SearchSfid(PMINI_ADAPTER Adapter,UINT uiSfid)
+ * Function - SearchSfid
+ *
+ * Description - This routinue would search QOS queues having
+ * specified SFID as input parameter.
+ *
+ * Parameters - Adapter: Pointer to the Adapter structure
+ * uiSfid : Given SFID for matching
+ *
+ * Returns - Queue index for this SFID(If matched)
+ * Else Invalid Queue Index(If Not matched)
+ ************************************************************/
+int SearchSfid(PMINI_ADAPTER Adapter, UINT uiSfid)
 {
-	INT iIndex=0;
-	for(iIndex=(NO_OF_QUEUES-1); iIndex>=0; iIndex--)
-		if(Adapter->PackInfo[iIndex].ulSFID==uiSfid)
-			return iIndex;
+	int i;
+
+	for (i = (NO_OF_QUEUES-1); i >= 0; i--)
+		if (Adapter->PackInfo[i].ulSFID == uiSfid)
+			return i;
+
 	return NO_OF_QUEUES+1;
 }
 
 /***************************************************************
-* Function - SearchFreeSfid
-*
-* Description - This routinue would search Free available SFID.
-*
-* Parameter - Adapter: Pointer to the Adapter structure
-*
-* Returns - Queue index for the free SFID
-* Else returns Invalid Index.
-****************************************************************/
-static INT SearchFreeSfid(PMINI_ADAPTER Adapter)
+ * Function -SearchFreeSfid
+ *
+ * Description - This routinue would search Free available SFID.
+ *
+ * Parameter - Adapter: Pointer to the Adapter structure
+ *
+ * Returns - Queue index for the free SFID
+ * Else returns Invalid Index.
+ ****************************************************************/
+static int SearchFreeSfid(PMINI_ADAPTER Adapter)
 {
-	UINT uiIndex=0;
+	int i;
+
+	for (i = 0; i < (NO_OF_QUEUES-1); i++)
+		if (Adapter->PackInfo[i].ulSFID == 0)
+			return i;
 
-	for(uiIndex=0; uiIndex < (NO_OF_QUEUES-1); uiIndex++)
-		if(Adapter->PackInfo[uiIndex].ulSFID==0)
-			return uiIndex;
 	return NO_OF_QUEUES+1;
 }
 
 /*
-Function: SearchClsid
-Description: This routinue would search Classifier having specified ClassifierID as input parameter
-Input parameters: PMINI_ADAPTER Adapter - Adapter Context
-	unsigned int uiSfid - The SF in which the classifier is to searched
-	B_UINT16 uiClassifierID - The classifier ID to be searched
-Return: int :Classifier table index of matching entry
-*/
-
-static int SearchClsid(PMINI_ADAPTER Adapter,ULONG ulSFID,B_UINT16 uiClassifierID)
+ * Function: SearchClsid
+ * Description: This routinue would search Classifier having specified ClassifierID as input parameter
+ * Input parameters: PMINI_ADAPTER Adapter - Adapter Context
+ * unsigned int uiSfid - The SF in which the classifier is to searched
+ * B_UINT16 uiClassifierID - The classifier ID to be searched
+ * Return: int :Classifier table index of matching entry
+ */
+static int SearchClsid(PMINI_ADAPTER Adapter, ULONG ulSFID, B_UINT16 uiClassifierID)
 {
-	unsigned int uiClassifierIndex = 0;
-	for(uiClassifierIndex=0;uiClassifierIndex<MAX_CLASSIFIERS;uiClassifierIndex++)
-	{
-		if((Adapter->astClassifierTable[uiClassifierIndex].bUsed) &&
-			(Adapter->astClassifierTable[uiClassifierIndex].uiClassifierRuleIndex == uiClassifierID)&&
-			(Adapter->astClassifierTable[uiClassifierIndex].ulSFID == ulSFID))
-			return uiClassifierIndex;
+	int i;
+
+	for (i = 0; i < MAX_CLASSIFIERS; i++) {
+		if ((Adapter->astClassifierTable[i].bUsed) &&
+			(Adapter->astClassifierTable[i].uiClassifierRuleIndex == uiClassifierID) &&
+			(Adapter->astClassifierTable[i].ulSFID == ulSFID))
+			return i;
 	}
+
 	return MAX_CLASSIFIERS+1;
 }
 
-/**
-@ingroup ctrl_pkt_functions
-This routinue would search Free available Classifier entry in classifier table.
-@return free Classifier Entry index in classifier table for specified SF
-*/
-static int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/
-	)
+/*
+ * @ingroup ctrl_pkt_functions
+ * This routinue would search Free available Classifier entry in classifier table.
+ * @return free Classifier Entry index in classifier table for specified SF
+ */
+static int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/)
 {
-	unsigned int uiClassifierIndex = 0;
-	for(uiClassifierIndex=0;uiClassifierIndex<MAX_CLASSIFIERS;uiClassifierIndex++)
-	{
-		if(!Adapter->astClassifierTable[uiClassifierIndex].bUsed)
-			return uiClassifierIndex;
+	int i;
+
+	for (i = 0; i < MAX_CLASSIFIERS; i++) {
+		if (!Adapter->astClassifierTable[i].bUsed)
+			return i;
 	}
+
 	return MAX_CLASSIFIERS+1;
 }
 
 static VOID deleteSFBySfid(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
 {
-	//deleting all the packet held in the SF
-	flush_queue(Adapter,uiSearchRuleIndex);
+	/* deleting all the packet held in the SF */
+	flush_queue(Adapter, uiSearchRuleIndex);
 
-	//Deleting the all classifiers for this SF
-	DeleteAllClassifiersForSF(Adapter,uiSearchRuleIndex);
+	/* Deleting the all classifiers for this SF */
+	DeleteAllClassifiersForSF(Adapter, uiSearchRuleIndex);
 
-	//Resetting only MIBS related entries in the SF
+	/* Resetting only MIBS related entries in the SF */
 	memset((PVOID)&Adapter->PackInfo[uiSearchRuleIndex], 0, sizeof(S_MIBS_SERVICEFLOW_TABLE));
 }
 
 static inline VOID
-CopyIpAddrToClassifier(S_CLASSIFIER_RULE *pstClassifierEntry ,
-			B_UINT8 u8IpAddressLen , B_UINT8 *pu8IpAddressMaskSrc ,
-			BOOLEAN bIpVersion6 , E_IPADDR_CONTEXT eIpAddrContext)
+CopyIpAddrToClassifier(S_CLASSIFIER_RULE *pstClassifierEntry,
+		B_UINT8 u8IpAddressLen, B_UINT8 *pu8IpAddressMaskSrc,
+		BOOLEAN bIpVersion6, E_IPADDR_CONTEXT eIpAddrContext)
 {
-	UINT ucLoopIndex=0;
+	int i = 0;
 	UINT nSizeOfIPAddressInBytes = IP_LENGTH_OF_ADDRESS;
 	UCHAR *ptrClassifierIpAddress = NULL;
 	UCHAR *ptrClassifierIpMask = NULL;
 	PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
 
-	if(bIpVersion6)
-	{
+	if (bIpVersion6)
 		nSizeOfIPAddressInBytes = IPV6_ADDRESS_SIZEINBYTES;
-	}
-	//Destination Ip Address
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Ip Address Range Length:0x%X ",
-		u8IpAddressLen);
-	if((bIpVersion6?(IPV6_ADDRESS_SIZEINBYTES * MAX_IP_RANGE_LENGTH * 2):
-		(TOTAL_MASKED_ADDRESS_IN_BYTES)) >= u8IpAddressLen)
-	{
+
+	/* Destination Ip Address */
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Address Range Length:0x%X ", u8IpAddressLen);
+	if ((bIpVersion6 ? (IPV6_ADDRESS_SIZEINBYTES * MAX_IP_RANGE_LENGTH * 2) :
+			(TOTAL_MASKED_ADDRESS_IN_BYTES)) >= u8IpAddressLen) {
 		/*
-		//checking both the mask and address togethor in Classification.
-		//So length will be : TotalLengthInBytes/nSizeOfIPAddressInBytes * 2
-		//(nSizeOfIPAddressInBytes for address and nSizeOfIPAddressInBytes for mask)
+		 * checking both the mask and address togethor in Classification.
+		 * So length will be : TotalLengthInBytes/nSizeOfIPAddressInBytes * 2
+		 * (nSizeOfIPAddressInBytes for address and nSizeOfIPAddressInBytes for mask)
 		 */
-		if(eIpAddrContext == eDestIpAddress)
-		{
-			pstClassifierEntry->ucIPDestinationAddressLength =
-				u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
-			if(bIpVersion6)
-			{
-				ptrClassifierIpAddress =
-					pstClassifierEntry->stDestIpAddress.ucIpv6Address;
-				ptrClassifierIpMask =
-					pstClassifierEntry->stDestIpAddress.ucIpv6Mask;
-			}
-			else
-			{
-				ptrClassifierIpAddress =
-					pstClassifierEntry->stDestIpAddress.ucIpv4Address;
-				ptrClassifierIpMask =
-					pstClassifierEntry->stDestIpAddress.ucIpv4Mask;
-			}
-		}
-		else if(eIpAddrContext == eSrcIpAddress)
-		{
-			pstClassifierEntry->ucIPSourceAddressLength =
-				u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
-			if(bIpVersion6)
-			{
-				ptrClassifierIpAddress =
-					pstClassifierEntry->stSrcIpAddress.ucIpv6Address;
-				ptrClassifierIpMask =
-					pstClassifierEntry->stSrcIpAddress.ucIpv6Mask;
-			}
-			else
-			{
-				ptrClassifierIpAddress =
-					pstClassifierEntry->stSrcIpAddress.ucIpv4Address;
-				ptrClassifierIpMask =
-					pstClassifierEntry->stSrcIpAddress.ucIpv4Mask;
+		if (eIpAddrContext == eDestIpAddress) {
+			pstClassifierEntry->ucIPDestinationAddressLength = u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
+			if (bIpVersion6) {
+				ptrClassifierIpAddress = pstClassifierEntry->stDestIpAddress.ucIpv6Address;
+				ptrClassifierIpMask = pstClassifierEntry->stDestIpAddress.ucIpv6Mask;
+			} else {
+				ptrClassifierIpAddress = pstClassifierEntry->stDestIpAddress.ucIpv4Address;
+				ptrClassifierIpMask = pstClassifierEntry->stDestIpAddress.ucIpv4Mask;
+			}
+		} else if (eIpAddrContext == eSrcIpAddress) {
+			pstClassifierEntry->ucIPSourceAddressLength = u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
+			if (bIpVersion6) {
+				ptrClassifierIpAddress = pstClassifierEntry->stSrcIpAddress.ucIpv6Address;
+				ptrClassifierIpMask = pstClassifierEntry->stSrcIpAddress.ucIpv6Mask;
+			} else {
+				ptrClassifierIpAddress = pstClassifierEntry->stSrcIpAddress.ucIpv4Address;
+				ptrClassifierIpMask = pstClassifierEntry->stSrcIpAddress.ucIpv4Mask;
 			}
 		}
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Address Length:0x%X \n",
-				pstClassifierEntry->ucIPDestinationAddressLength);
-		while((u8IpAddressLen>= nSizeOfIPAddressInBytes) &&
-			(ucLoopIndex < MAX_IP_RANGE_LENGTH))
-		{
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Address Length:0x%X\n", pstClassifierEntry->ucIPDestinationAddressLength);
+		while ((u8IpAddressLen >= nSizeOfIPAddressInBytes) && (i < MAX_IP_RANGE_LENGTH)) {
 			memcpy(ptrClassifierIpAddress +
-				(ucLoopIndex * nSizeOfIPAddressInBytes),
-				(pu8IpAddressMaskSrc+(ucLoopIndex*nSizeOfIPAddressInBytes*2)),
+				(i * nSizeOfIPAddressInBytes),
+				(pu8IpAddressMaskSrc+(i*nSizeOfIPAddressInBytes*2)),
 				nSizeOfIPAddressInBytes);
-			if(!bIpVersion6)
-			{
-				if(eIpAddrContext == eSrcIpAddress)
-				{
-					pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[ucLoopIndex]=
-						ntohl(pstClassifierEntry->stSrcIpAddress.
-						ulIpv4Addr[ucLoopIndex]);
-					BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Src Ip Address:0x%luX ",pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[ucLoopIndex]);
-				}
-				else if(eIpAddrContext == eDestIpAddress)
-				{
-					pstClassifierEntry->stDestIpAddress.ulIpv4Addr[ucLoopIndex]= ntohl(pstClassifierEntry->stDestIpAddress.
-						ulIpv4Addr[ucLoopIndex]);
-					BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Dest Ip Address:0x%luX ",pstClassifierEntry->stDestIpAddress.ulIpv4Addr[ucLoopIndex]);
+
+			if (!bIpVersion6) {
+				if (eIpAddrContext == eSrcIpAddress) {
+					pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i]);
+					BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Src Ip Address:0x%luX ",
+							pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i]);
+				} else if (eIpAddrContext == eDestIpAddress) {
+					pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i]);
+					BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dest Ip Address:0x%luX ",
+							pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i]);
 				}
 			}
-			u8IpAddressLen-=nSizeOfIPAddressInBytes;
-			if(u8IpAddressLen >= nSizeOfIPAddressInBytes)
-			{
+			u8IpAddressLen -= nSizeOfIPAddressInBytes;
+			if (u8IpAddressLen >= nSizeOfIPAddressInBytes) {
 				memcpy(ptrClassifierIpMask +
-					(ucLoopIndex * nSizeOfIPAddressInBytes),
+					(i * nSizeOfIPAddressInBytes),
 					(pu8IpAddressMaskSrc+nSizeOfIPAddressInBytes +
-					(ucLoopIndex*nSizeOfIPAddressInBytes*2)),
+					(i*nSizeOfIPAddressInBytes*2)),
 					nSizeOfIPAddressInBytes);
-				if(!bIpVersion6)
-				{
-					if(eIpAddrContext == eSrcIpAddress)
-					{
-						pstClassifierEntry->stSrcIpAddress.
-							ulIpv4Mask[ucLoopIndex]=
-							ntohl(pstClassifierEntry->stSrcIpAddress.
-							ulIpv4Mask[ucLoopIndex]);
-						BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Src Ip Mask Address:0x%luX ",pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[ucLoopIndex]);
-					}
-					else if(eIpAddrContext == eDestIpAddress)
-					{
-						pstClassifierEntry->stDestIpAddress.
-							ulIpv4Mask[ucLoopIndex] =
-							ntohl(pstClassifierEntry->stDestIpAddress.
-							ulIpv4Mask[ucLoopIndex]);
-						BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Dest Ip Mask Address:0x%luX ",pstClassifierEntry->stDestIpAddress.ulIpv4Mask[ucLoopIndex]);
+
+				if (!bIpVersion6) {
+					if (eIpAddrContext == eSrcIpAddress) {
+						pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i] =
+							ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i]);
+						BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Src Ip Mask Address:0x%luX ",
+								pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i]);
+					} else if (eIpAddrContext == eDestIpAddress) {
+						pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i] =
+							ntohl(pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i]);
+						BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dest Ip Mask Address:0x%luX ",
+								pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i]);
 					}
 				}
-				u8IpAddressLen-=nSizeOfIPAddressInBytes;
-			}
-			if(0==u8IpAddressLen)
-			{
-				pstClassifierEntry->bDestIpValid=TRUE;
+				u8IpAddressLen -= nSizeOfIPAddressInBytes;
 			}
-			ucLoopIndex++;
+			if (u8IpAddressLen == 0)
+				pstClassifierEntry->bDestIpValid = TRUE;
+
+			i++;
 		}
-		if(bIpVersion6)
-		{
-			//Restore EndianNess of Struct
-			for(ucLoopIndex =0 ; ucLoopIndex < MAX_IP_RANGE_LENGTH * 4 ;
-				ucLoopIndex++)
-			{
-				if(eIpAddrContext == eSrcIpAddress)
-				{
-					pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[ucLoopIndex]=
-						ntohl(pstClassifierEntry->stSrcIpAddress.
-						ulIpv6Addr[ucLoopIndex]);
-					pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[ucLoopIndex]= ntohl(pstClassifierEntry->stSrcIpAddress.
-						ulIpv6Mask[ucLoopIndex]);
-				}
-				else if(eIpAddrContext == eDestIpAddress)
-				{
-					pstClassifierEntry->stDestIpAddress.ulIpv6Addr[ucLoopIndex]= ntohl(pstClassifierEntry->stDestIpAddress.
-						ulIpv6Addr[ucLoopIndex]);
-					pstClassifierEntry->stDestIpAddress.ulIpv6Mask[ucLoopIndex]= ntohl(pstClassifierEntry->stDestIpAddress.
-						ulIpv6Mask[ucLoopIndex]);
+		if (bIpVersion6) {
+			/* Restore EndianNess of Struct */
+			for (i = 0; i < MAX_IP_RANGE_LENGTH * 4; i++) {
+				if (eIpAddrContext == eSrcIpAddress) {
+					pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[i]);
+					pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[i]);
+				} else if (eIpAddrContext == eDestIpAddress) {
+					pstClassifierEntry->stDestIpAddress.ulIpv6Addr[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv6Addr[i]);
+					pstClassifierEntry->stDestIpAddress.ulIpv6Mask[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv6Mask[i]);
 				}
 			}
 		}
 	}
 }
 
-
-void ClearTargetDSXBuffer(PMINI_ADAPTER Adapter,B_UINT16 TID,BOOLEAN bFreeAll)
+void ClearTargetDSXBuffer(PMINI_ADAPTER Adapter, B_UINT16 TID, BOOLEAN bFreeAll)
 {
-	ULONG ulIndex;
-	for(ulIndex=0; ulIndex < Adapter->ulTotalTargetBuffersAvailable; ulIndex++)
-	{
-		if(Adapter->astTargetDsxBuffer[ulIndex].valid)
+	int i;
+
+	for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
+		if (Adapter->astTargetDsxBuffer[i].valid)
 			continue;
-		if ((bFreeAll) || (Adapter->astTargetDsxBuffer[ulIndex].tid == TID)){
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "ClearTargetDSXBuffer: found tid %d buffer cleared %lx\n",
-				TID, Adapter->astTargetDsxBuffer[ulIndex].ulTargetDsxBuffer);
-			Adapter->astTargetDsxBuffer[ulIndex].valid=1;
-			Adapter->astTargetDsxBuffer[ulIndex].tid=0;
+
+		if ((bFreeAll) || (Adapter->astTargetDsxBuffer[i].tid == TID)) {
+			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "ClearTargetDSXBuffer: found tid %d buffer cleared %lx\n",
+					TID, Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer);
+			Adapter->astTargetDsxBuffer[i].valid = 1;
+			Adapter->astTargetDsxBuffer[i].tid = 0;
 			Adapter->ulFreeTargetBufferCnt++;
 		}
 	}
 }
 
-/**
-@ingroup ctrl_pkt_functions
-copy classifier rule into the specified SF index
-*/
-static inline VOID CopyClassifierRuleToSF(PMINI_ADAPTER Adapter,stConvergenceSLTypes *psfCSType,UINT uiSearchRuleIndex,UINT nClassifierIndex)
+/*
+ * @ingroup ctrl_pkt_functions
+ * copy classifier rule into the specified SF index
+ */
+static inline VOID CopyClassifierRuleToSF(PMINI_ADAPTER Adapter, stConvergenceSLTypes *psfCSType, UINT uiSearchRuleIndex, UINT nClassifierIndex)
 {
 	S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
-	//VOID *pvPhsContext = NULL;
-	UINT ucLoopIndex=0;
-	//UCHAR ucProtocolLength=0;
-	//ULONG ulPhsStatus;
-
+	/* VOID *pvPhsContext = NULL; */
+	int i;
+	/* UCHAR ucProtocolLength=0; */
+	/* ULONG ulPhsStatus; */
 
-	if(Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value == 0 ||
+	if (Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value == 0 ||
 		nClassifierIndex > (MAX_CLASSIFIERS-1))
 		return;
 
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Storing Classifier Rule Index : %X",
+			ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex));
 
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Storing Classifier Rule Index : %X",ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex));
-
-	if(nClassifierIndex > MAX_CLASSIFIERS-1)
+	if (nClassifierIndex > MAX_CLASSIFIERS-1)
 		return;
 
 	pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
-	if(pstClassifierEntry)
-	{
-		//Store if Ipv6
-		pstClassifierEntry->bIpv6Protocol =
-		(Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6)?TRUE:FALSE;
-
-		//Destinaiton Port
-		pstClassifierEntry->ucDestPortRangeLength=psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength/4;
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Destination Port Range Length:0x%X ",pstClassifierEntry->ucDestPortRangeLength);
-		if( MAX_PORT_RANGE >= psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength)
-		{
-			for(ucLoopIndex=0;ucLoopIndex<(pstClassifierEntry->ucDestPortRangeLength);ucLoopIndex++)
-			{
-				pstClassifierEntry->usDestPortRangeLo[ucLoopIndex] =
-					*((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+ucLoopIndex));
-				pstClassifierEntry->usDestPortRangeHi[ucLoopIndex] =
-					*((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+2+ucLoopIndex));
-				pstClassifierEntry->usDestPortRangeLo[ucLoopIndex]=ntohs(pstClassifierEntry->usDestPortRangeLo[ucLoopIndex]);
-				BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Destination Port Range Lo:0x%X ",pstClassifierEntry->usDestPortRangeLo[ucLoopIndex]);
-				pstClassifierEntry->usDestPortRangeHi[ucLoopIndex]=ntohs(pstClassifierEntry->usDestPortRangeHi[ucLoopIndex]);
+	if (pstClassifierEntry) {
+		/* Store if Ipv6 */
+		pstClassifierEntry->bIpv6Protocol = (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : FALSE;
+
+		/* Destinaiton Port */
+		pstClassifierEntry->ucDestPortRangeLength = psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength / 4;
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Destination Port Range Length:0x%X ", pstClassifierEntry->ucDestPortRangeLength);
+
+		if (psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength <= MAX_PORT_RANGE) {
+			for (i = 0; i < (pstClassifierEntry->ucDestPortRangeLength); i++) {
+				pstClassifierEntry->usDestPortRangeLo[i] = *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+i));
+				pstClassifierEntry->usDestPortRangeHi[i] =
+					*((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+2+i));
+				pstClassifierEntry->usDestPortRangeLo[i] = ntohs(pstClassifierEntry->usDestPortRangeLo[i]);
+				BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Destination Port Range Lo:0x%X ",
+						pstClassifierEntry->usDestPortRangeLo[i]);
+				pstClassifierEntry->usDestPortRangeHi[i] = ntohs(pstClassifierEntry->usDestPortRangeHi[i]);
 			}
-		}
-		else
-		{
-			pstClassifierEntry->ucDestPortRangeLength=0;
+		} else {
+			pstClassifierEntry->ucDestPortRangeLength = 0;
 		}
-		//Source Port
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Source Port Range Length:0x%X ",psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
-		if(MAX_PORT_RANGE >=
-			psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength)
-		{
-			pstClassifierEntry->ucSrcPortRangeLength =
-				psfCSType->cCPacketClassificationRule.
-				u8ProtocolSourcePortRangeLength/4;
-			for(ucLoopIndex = 0; ucLoopIndex <
-				(pstClassifierEntry->ucSrcPortRangeLength); ucLoopIndex++)
-			{
-				pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex] =
-					*((PUSHORT)(psfCSType->cCPacketClassificationRule.
-					u8ProtocolSourcePortRange+ucLoopIndex));
-				pstClassifierEntry->usSrcPortRangeHi[ucLoopIndex] =
-					*((PUSHORT)(psfCSType->cCPacketClassificationRule.
-					u8ProtocolSourcePortRange+2+ucLoopIndex));
-				pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex] =
-					ntohs(pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex]);
-				BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Source Port Range Lo:0x%X ",pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex]);
-				pstClassifierEntry->usSrcPortRangeHi[ucLoopIndex]=ntohs(pstClassifierEntry->usSrcPortRangeHi[ucLoopIndex]);
+
+		/* Source Port */
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Source Port Range Length:0x%X ",
+				psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
+		if (psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength <= MAX_PORT_RANGE) {
+			pstClassifierEntry->ucSrcPortRangeLength = psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength/4;
+			for (i = 0; i < (pstClassifierEntry->ucSrcPortRangeLength); i++) {
+				pstClassifierEntry->usSrcPortRangeLo[i] =
+					*((PUSHORT)(psfCSType->cCPacketClassificationRule.
+							u8ProtocolSourcePortRange+i));
+				pstClassifierEntry->usSrcPortRangeHi[i] =
+					*((PUSHORT)(psfCSType->cCPacketClassificationRule.
+							u8ProtocolSourcePortRange+2+i));
+				pstClassifierEntry->usSrcPortRangeLo[i] =
+					ntohs(pstClassifierEntry->usSrcPortRangeLo[i]);
+				BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Source Port Range Lo:0x%X ",
+						pstClassifierEntry->usSrcPortRangeLo[i]);
+				pstClassifierEntry->usSrcPortRangeHi[i] = ntohs(pstClassifierEntry->usSrcPortRangeHi[i]);
 			}
 		}
-		//Destination Ip Address and Mask
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Ip Destination Parameters : ");
-
+		/* Destination Ip Address and Mask */
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Destination Parameters : ");
 		CopyIpAddrToClassifier(pstClassifierEntry,
 			psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength,
 			psfCSType->cCPacketClassificationRule.u8IPDestinationAddress,
-			(Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6)?
-			TRUE:FALSE, eDestIpAddress);
+			(Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ?
+			TRUE : FALSE, eDestIpAddress);
 
-		//Source Ip Address and Mask
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Ip Source Parameters : ");
+		/* Source Ip Address and Mask */
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Source Parameters : ");
 
 		CopyIpAddrToClassifier(pstClassifierEntry,
 			psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength,
 			psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress,
-			(Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6)?TRUE:FALSE,
+			(Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : FALSE,
 			eSrcIpAddress);
 
-		//TOS
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"TOS Length:0x%X ",psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
-		if(3 == psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength)
-		{
-			pstClassifierEntry->ucIPTypeOfServiceLength =
-				psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength;
-			pstClassifierEntry->ucTosLow =
-				psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0];
-			pstClassifierEntry->ucTosHigh =
-				psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1];
-			pstClassifierEntry->ucTosMask =
-				psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2];
+		/* TOS */
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "TOS Length:0x%X ", psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
+		if (psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength == 3) {
+			pstClassifierEntry->ucIPTypeOfServiceLength = psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength;
+			pstClassifierEntry->ucTosLow = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0];
+			pstClassifierEntry->ucTosHigh = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1];
+			pstClassifierEntry->ucTosMask = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2];
 			pstClassifierEntry->bTOSValid = TRUE;
 		}
-		if(psfCSType->cCPacketClassificationRule.u8Protocol == 0)
-		{
-			//we didn't get protocol field filled in by the BS
-			pstClassifierEntry->ucProtocolLength=0;
-		}
-		else
-		{
-			pstClassifierEntry->ucProtocolLength=1;// 1 valid protocol
+		if (psfCSType->cCPacketClassificationRule.u8Protocol == 0) {
+			/* we didn't get protocol field filled in by the BS */
+			pstClassifierEntry->ucProtocolLength = 0;
+		} else {
+			pstClassifierEntry->ucProtocolLength = 1; /* 1 valid protocol */
 		}
 
-		pstClassifierEntry->ucProtocol[0] =
-			psfCSType->cCPacketClassificationRule.u8Protocol;
-
-		pstClassifierEntry->u8ClassifierRulePriority =
-			psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority;
-
-		//store the classifier rule ID and set this classifier entry as valid
-		pstClassifierEntry->ucDirection =
-			Adapter->PackInfo[uiSearchRuleIndex].ucDirection;
-		pstClassifierEntry->uiClassifierRuleIndex = ntohs(psfCSType->
-			cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-		pstClassifierEntry->usVCID_Value =
-			Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
-		pstClassifierEntry->ulSFID =
-			Adapter->PackInfo[uiSearchRuleIndex].ulSFID;
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Index %d Dir: %d, Index: %d, Vcid: %d\n",
-			uiSearchRuleIndex, pstClassifierEntry->ucDirection,
-			pstClassifierEntry->uiClassifierRuleIndex,
-			pstClassifierEntry->usVCID_Value);
-
-		if(psfCSType->cCPacketClassificationRule.u8AssociatedPHSI)
-		{
+		pstClassifierEntry->ucProtocol[0] = psfCSType->cCPacketClassificationRule.u8Protocol;
+		pstClassifierEntry->u8ClassifierRulePriority = psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority;
+
+		/* store the classifier rule ID and set this classifier entry as valid */
+		pstClassifierEntry->ucDirection = Adapter->PackInfo[uiSearchRuleIndex].ucDirection;
+		pstClassifierEntry->uiClassifierRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
+		pstClassifierEntry->usVCID_Value = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
+		pstClassifierEntry->ulSFID = Adapter->PackInfo[uiSearchRuleIndex].ulSFID;
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Index %d Dir: %d, Index: %d, Vcid: %d\n",
+				uiSearchRuleIndex, pstClassifierEntry->ucDirection,
+				pstClassifierEntry->uiClassifierRuleIndex,
+				pstClassifierEntry->usVCID_Value);
+
+		if (psfCSType->cCPacketClassificationRule.u8AssociatedPHSI)
 			pstClassifierEntry->u8AssociatedPHSI = psfCSType->cCPacketClassificationRule.u8AssociatedPHSI;
-		}
 
-		//Copy ETH CS Parameters
+		/* Copy ETH CS Parameters */
 		pstClassifierEntry->ucEthCSSrcMACLen = (psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
-		memcpy(pstClassifierEntry->au8EThCSSrcMAC,psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress,MAC_ADDRESS_SIZE);
-		memcpy(pstClassifierEntry->au8EThCSSrcMACMask,psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress+MAC_ADDRESS_SIZE,MAC_ADDRESS_SIZE);
+		memcpy(pstClassifierEntry->au8EThCSSrcMAC, psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress, MAC_ADDRESS_SIZE);
+		memcpy(pstClassifierEntry->au8EThCSSrcMACMask, psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
 		pstClassifierEntry->ucEthCSDestMACLen = (psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-		memcpy(pstClassifierEntry->au8EThCSDestMAC,psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress,MAC_ADDRESS_SIZE);
-		memcpy(pstClassifierEntry->au8EThCSDestMACMask,psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress+MAC_ADDRESS_SIZE,MAC_ADDRESS_SIZE);
+		memcpy(pstClassifierEntry->au8EThCSDestMAC, psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress, MAC_ADDRESS_SIZE);
+		memcpy(pstClassifierEntry->au8EThCSDestMACMask, psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
 		pstClassifierEntry->ucEtherTypeLen = (psfCSType->cCPacketClassificationRule.u8EthertypeLength);
-		memcpy(pstClassifierEntry->au8EthCSEtherType,psfCSType->cCPacketClassificationRule.u8Ethertype,NUM_ETHERTYPE_BYTES);
+		memcpy(pstClassifierEntry->au8EthCSEtherType, psfCSType->cCPacketClassificationRule.u8Ethertype, NUM_ETHERTYPE_BYTES);
 		memcpy(pstClassifierEntry->usUserPriority, &psfCSType->cCPacketClassificationRule.u16UserPriority, 2);
 		pstClassifierEntry->usVLANID = ntohs(psfCSType->cCPacketClassificationRule.u16VLANID);
 		pstClassifierEntry->usValidityBitMap = ntohs(psfCSType->cCPacketClassificationRule.u16ValidityBitMap);
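The first CmHost.c hunk is almost entirely mechanical CodingStyle work: C89 /* */ comments replace // ones, spaces appear after commas and keywords, verbose loop counters become plain int i, and the Windows-style typedef'd enum becomes a plain tagged enum. A before/after sketch of that last conversion, lifted from the hunk itself:

	/* before: a typedef'd enum, discouraged by kernel CodingStyle */
	typedef enum _E_CLASSIFIER_ACTION {
		eInvalidClassifierAction,
		eAddClassifier,
		eReplaceClassifier,
		eDeleteClassifier
	} E_CLASSIFIER_ACTION;

	/* after: a plain enum; users now write out the tag, as CopyToAdapter() does:
	 *	enum E_CLASSIFIER_ACTION eClassifierAction = eInvalidClassifierAction;
	 */
	enum E_CLASSIFIER_ACTION {
		eInvalidClassifierAction,
		eAddClassifier,
		eReplaceClassifier,
		eDeleteClassifier
	};
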
@@ -434,244 +362,199 @@ static inline VOID CopyClassifierRuleToSF(PMINI_ADAPTER Adapter,stConvergenceSLT
 	}
 }
 
-
-/**
-@ingroup ctrl_pkt_functions
-*/
-static inline VOID DeleteClassifierRuleFromSF(PMINI_ADAPTER Adapter,UINT uiSearchRuleIndex,UINT nClassifierIndex)
+/*
+ * @ingroup ctrl_pkt_functions
+ */
+static inline VOID DeleteClassifierRuleFromSF(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex, UINT nClassifierIndex)
 {
 	S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
 	B_UINT16 u16PacketClassificationRuleIndex;
 	USHORT usVCID;
-	//VOID *pvPhsContext = NULL;
-	//ULONG ulPhsStatus;
+	/* VOID *pvPhsContext = NULL; */
+	/*ULONG ulPhsStatus; */
 
 	usVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
 
-	if(nClassifierIndex > MAX_CLASSIFIERS-1)
+	if (nClassifierIndex > MAX_CLASSIFIERS-1)
 		return;
 
-	if(usVCID == 0)
+	if (usVCID == 0)
 		return;
 
 	u16PacketClassificationRuleIndex = Adapter->astClassifierTable[nClassifierIndex].uiClassifierRuleIndex;
-
-
 	pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
-	if(pstClassifierEntry)
-	{
+	if (pstClassifierEntry) {
 		pstClassifierEntry->bUsed = FALSE;
 		pstClassifierEntry->uiClassifierRuleIndex = 0;
-		memset(pstClassifierEntry,0,sizeof(S_CLASSIFIER_RULE));
+		memset(pstClassifierEntry, 0, sizeof(S_CLASSIFIER_RULE));
 
-		//Delete the PHS Rule for this classifier
-		PhsDeleteClassifierRule(
-			&Adapter->stBCMPhsContext,
-			usVCID,
-			u16PacketClassificationRuleIndex);
+		/* Delete the PHS Rule for this classifier */
+		PhsDeleteClassifierRule(&Adapter->stBCMPhsContext, usVCID, u16PacketClassificationRuleIndex);
 	}
 }
 
-/**
-@ingroup ctrl_pkt_functions
-*/
-VOID DeleteAllClassifiersForSF(PMINI_ADAPTER Adapter,UINT uiSearchRuleIndex)
+/*
+ * @ingroup ctrl_pkt_functions
+ */
+VOID DeleteAllClassifiersForSF(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
 {
 	S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
-	UINT nClassifierIndex;
-	//B_UINT16 u16PacketClassificationRuleIndex;
+	int i;
+	/* B_UINT16 u16PacketClassificationRuleIndex; */
 	USHORT ulVCID;
-	//VOID *pvPhsContext = NULL;
-	//ULONG ulPhsStatus;
+	/* VOID *pvPhsContext = NULL; */
+	/* ULONG ulPhsStatus; */
 
 	ulVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
 
-	if(ulVCID == 0)
+	if (ulVCID == 0)
 		return;
 
+	for (i = 0; i < MAX_CLASSIFIERS; i++) {
+		if (Adapter->astClassifierTable[i].usVCID_Value == ulVCID) {
+			pstClassifierEntry = &Adapter->astClassifierTable[i];
 
-	for(nClassifierIndex =0 ; nClassifierIndex < MAX_CLASSIFIERS ; nClassifierIndex++)
-	{
-		if(Adapter->astClassifierTable[nClassifierIndex].usVCID_Value == ulVCID)
-		{
-			pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
-			if(pstClassifierEntry->bUsed)
-			{
-				DeleteClassifierRuleFromSF(Adapter,uiSearchRuleIndex,nClassifierIndex);
-			}
+			if (pstClassifierEntry->bUsed)
+				DeleteClassifierRuleFromSF(Adapter, uiSearchRuleIndex, i);
 		}
 	}
 
-	//Delete All Phs Rules Associated with this SF
-	PhsDeleteSFRules(
-		&Adapter->stBCMPhsContext,
-		ulVCID);
-
+	/* Delete All Phs Rules Associated with this SF */
+	PhsDeleteSFRules(&Adapter->stBCMPhsContext, ulVCID);
 }
 
-
-/**
-This routinue copies the Connection Management
-related data into the Adapter structure.
-@ingroup ctrl_pkt_functions
-*/
-
-static VOID CopyToAdapter( register PMINI_ADAPTER Adapter, /**<Pointer to the Adapter structure*/
-		register pstServiceFlowParamSI psfLocalSet, /**<Pointer to the ServiceFlowParamSI structure*/
-		register UINT uiSearchRuleIndex, /**<Index of Queue, to which this data belongs*/
-		register UCHAR ucDsxType,
-		stLocalSFAddIndicationAlt *pstAddIndication)
-{
-	//UCHAR ucProtocolLength=0;
-	ULONG ulSFID;
-	UINT nClassifierIndex = 0;
-	E_CLASSIFIER_ACTION eClassifierAction = eInvalidClassifierAction;
-	B_UINT16 u16PacketClassificationRuleIndex=0;
-	UINT nIndex=0;
+/*
+ * This routinue copies the Connection Management
+ * related data into the Adapter structure.
+ * @ingroup ctrl_pkt_functions
+ */
+static VOID CopyToAdapter(register PMINI_ADAPTER Adapter, /* <Pointer to the Adapter structure */
+			register pstServiceFlowParamSI psfLocalSet, /* <Pointer to the ServiceFlowParamSI structure */
+			register UINT uiSearchRuleIndex, /* <Index of Queue, to which this data belongs */
+			register UCHAR ucDsxType,
+			stLocalSFAddIndicationAlt *pstAddIndication) {
+
+	/* UCHAR ucProtocolLength = 0; */
+	ULONG ulSFID;
+	UINT nClassifierIndex = 0;
+	enum E_CLASSIFIER_ACTION eClassifierAction = eInvalidClassifierAction;
+	B_UINT16 u16PacketClassificationRuleIndex = 0;
+	int i;
 	stConvergenceSLTypes *psfCSType = NULL;
 	S_PHS_RULE sPhsRule;
 	USHORT uVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
 	UINT UGIValue = 0;
 
-
-	Adapter->PackInfo[uiSearchRuleIndex].bValid=TRUE;
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Rule Index = %d\n", uiSearchRuleIndex);
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"%s: SFID= %x ",__FUNCTION__, ntohl(psfLocalSet->u32SFID));
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Updating Queue %d",uiSearchRuleIndex);
+	Adapter->PackInfo[uiSearchRuleIndex].bValid = TRUE;
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Rule Index = %d\n", uiSearchRuleIndex);
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s: SFID= %x ", __func__, ntohl(psfLocalSet->u32SFID));
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Updating Queue %d", uiSearchRuleIndex);
 
 	ulSFID = ntohl(psfLocalSet->u32SFID);
-	//Store IP Version used
-	//Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF
+	/* Store IP Version used */
+	/* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
 
 	Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = 0;
 	Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = 0;
 
-	/*Enable IP/ETh CS Support As Required*/
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"CopyToAdapter : u8CSSpecification : %X\n",psfLocalSet->u8CSSpecification);
-	switch(psfLocalSet->u8CSSpecification)
+	/* Enable IP/ETh CS Support As Required */
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "CopyToAdapter : u8CSSpecification : %X\n", psfLocalSet->u8CSSpecification);
+	switch (psfLocalSet->u8CSSpecification) {
+	case eCSPacketIPV4:
 	{
-	case eCSPacketIPV4:
-	{
-		Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
-		break;
-	}
-	case eCSPacketIPV6:
-	{
-		Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
-		break;
-	}
-
-	case eCS802_3PacketEthernet:
-	case eCS802_1QPacketVLAN:
-	{
-		Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
-		break;
-	}
-
-	case eCSPacketIPV4Over802_1QVLAN:
-	case eCSPacketIPV4Over802_3Ethernet:
-	{
-		Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
-		Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
-		break;
-	}
-
-	case eCSPacketIPV6Over802_1QVLAN:
-	case eCSPacketIPV6Over802_3Ethernet:
-	{
-		Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
-		Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
-		break;
-	}
-
-	default:
-	{
-		BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error in value of CS Classification.. setting default to IP CS\n");
-		Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
-		break;
-	}
+		Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
+		break;
+	}
+	case eCSPacketIPV6:
+	{
+		Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
+		break;
+	}
+	case eCS802_3PacketEthernet:
+	case eCS802_1QPacketVLAN:
+	{
+		Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
+		break;
+	}
+	case eCSPacketIPV4Over802_1QVLAN:
+	case eCSPacketIPV4Over802_3Ethernet:
+	{
+		Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
+		Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
+		break;
+	}
+	case eCSPacketIPV6Over802_1QVLAN:
+	case eCSPacketIPV6Over802_3Ethernet:
+	{
+		Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
+		Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
+		break;
+	}
+	default:
+	{
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error in value of CS Classification.. setting default to IP CS\n");
+		Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
+		break;
+	}
 	}
 
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"CopyToAdapter : Queue No : %X ETH CS Support : %X , IP CS Support : %X \n",
-		uiSearchRuleIndex,
-		Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport,
-		Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport);
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "CopyToAdapter : Queue No : %X ETH CS Support : %X , IP CS Support : %X\n",
+			uiSearchRuleIndex,
+			Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport,
+			Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport);
 
-	//Store IP Version used
-	//Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF
-	if(Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport == IPV6_CS)
-	{
+	/* Store IP Version used */
+	/* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
+	if (Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport == IPV6_CS)
 		Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion = IPV6;
-	}
 	else
-	{
 		Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion = IPV4;
-	}
 
 	/* To ensure that the ETH CS code doesn't gets executed if the BS doesn't supports ETH CS */
-	if(!Adapter->bETHCSEnabled)
+	if (!Adapter->bETHCSEnabled)
 		Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = 0;
 
-	if(psfLocalSet->u8ServiceClassNameLength > 0 &&
-		psfLocalSet->u8ServiceClassNameLength < 32)
-	{
-		memcpy(Adapter->PackInfo[uiSearchRuleIndex].ucServiceClassName,
-			psfLocalSet->u8ServiceClassName,
-			psfLocalSet->u8ServiceClassNameLength);
-	}
-	Adapter->PackInfo[uiSearchRuleIndex].u8QueueType =
-		psfLocalSet->u8ServiceFlowSchedulingType;
+	if (psfLocalSet->u8ServiceClassNameLength > 0 && psfLocalSet->u8ServiceClassNameLength < 32)
+		memcpy(Adapter->PackInfo[uiSearchRuleIndex].ucServiceClassName, psfLocalSet->u8ServiceClassName, psfLocalSet->u8ServiceClassNameLength);
 
-	if(Adapter->PackInfo[uiSearchRuleIndex].u8QueueType==BE &&
-		Adapter->PackInfo[uiSearchRuleIndex].ucDirection)
-	{
-		Adapter->usBestEffortQueueIndex=uiSearchRuleIndex;
-	}
+	Adapter->PackInfo[uiSearchRuleIndex].u8QueueType = psfLocalSet->u8ServiceFlowSchedulingType;
+
+	if (Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == BE && Adapter->PackInfo[uiSearchRuleIndex].ucDirection)
+		Adapter->usBestEffortQueueIndex = uiSearchRuleIndex;
 
 	Adapter->PackInfo[uiSearchRuleIndex].ulSFID = ntohl(psfLocalSet->u32SFID);
 
 	Adapter->PackInfo[uiSearchRuleIndex].u8TrafficPriority = psfLocalSet->u8TrafficPriority;
 
-	//copy all the classifier in the Service Flow param structure
-	for(nIndex=0; nIndex<psfLocalSet->u8TotalClassifiers; nIndex++)
-	{
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Classifier index =%d",nIndex);
-		psfCSType = &psfLocalSet->cConvergenceSLTypes[nIndex];
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Classifier index =%d",nIndex);
+	/* copy all the classifier in the Service Flow param structure */
+	for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Classifier index =%d", i);
+		psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Classifier index =%d", i);
 
-		if(psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
-		{
-			Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority=TRUE;
-		}
+		if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
+			Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority = TRUE;
 
-		if(psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
-		{
-			Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority=TRUE;
-		}
+		if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
+			Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority = TRUE;
 
-
-		if(ucDsxType== DSA_ACK)
-		{
+		if (ucDsxType == DSA_ACK) {
 			eClassifierAction = eAddClassifier;
-		}
-		else if(ucDsxType == DSC_ACK)
-		{
-			switch(psfCSType->u8ClassfierDSCAction)
-			{
-			case 0://DSC Add Classifier
+		} else if (ucDsxType == DSC_ACK) {
+			switch (psfCSType->u8ClassfierDSCAction) {
+			case 0: /* DSC Add Classifier */
 			{
 				eClassifierAction = eAddClassifier;
 			}
 			break;
-			case 1://DSC Replace Classifier
+			case 1: /* DSC Replace Classifier */
 			{
 				eClassifierAction = eReplaceClassifier;
 			}
 			break;
-			case 2://DSC Delete Classifier
+			case 2: /* DSC Delete Classifier */
 			{
 				eClassifierAction = eDeleteClassifier;
-
 			}
 			break;
 			default:
@@ -683,163 +566,133 @@ static VOID CopyToAdapter( register PMINI_ADAPTER Adapter, /**<Pointer to the A
683 566
684 u16PacketClassificationRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex); 567 u16PacketClassificationRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
685 568
686 switch(eClassifierAction) 569 switch (eClassifierAction) {
687 {
688 case eAddClassifier: 570 case eAddClassifier:
689 { 571 {
690 //Get a Free Classifier Index From Classifier table for this SF to add the Classifier 572 /* Get a Free Classifier Index From Classifier table for this SF to add the Classifier */
691 //Contained in this message 573 /* Contained in this message */
692 nClassifierIndex = SearchClsid(Adapter,ulSFID,u16PacketClassificationRuleIndex); 574 nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
693 575
694 if(nClassifierIndex > MAX_CLASSIFIERS) 576 if (nClassifierIndex > MAX_CLASSIFIERS) {
695 {
696 nClassifierIndex = SearchFreeClsid(Adapter); 577 nClassifierIndex = SearchFreeClsid(Adapter);
697 if(nClassifierIndex > MAX_CLASSIFIERS) 578 if (nClassifierIndex > MAX_CLASSIFIERS) {
698 { 579 /* Failed To get a free Entry */
699 //Failed To get a free Entry 580 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Failed To get a free Classifier Entry");
700 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error Failed To get a free Classifier Entry");
701 break; 581 break;
702 } 582 }
703 //Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. 583 /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
704 CopyClassifierRuleToSF(Adapter,psfCSType,uiSearchRuleIndex,nClassifierIndex); 584 CopyClassifierRuleToSF(Adapter, psfCSType, uiSearchRuleIndex, nClassifierIndex);
705 } 585 } else {
706 586 /* This Classifier Already Exists and it is invalid to Add Classifier with existing PCRI */
707 else 587 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
708 { 588 "CopyToAdapter: Error The Specified Classifier Already Exists and attempted To Add Classifier with Same PCRI : 0x%x\n",
709 //This Classifier Already Exists and it is invalid to Add Classifier with existing PCRI 589 u16PacketClassificationRuleIndex);
710 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"CopyToAdapter : Error The Specified Classifier Already Exists \
711 and attempted To Add Classifier with Same PCRI : 0x%x\n", u16PacketClassificationRuleIndex);
712 } 590 }
713 } 591 }
714 break; 592 break;
715
716 case eReplaceClassifier: 593 case eReplaceClassifier:
717 { 594 {
718 //Get the Classifier Index From Classifier table for this SF and replace existing Classifier 595 /* Get the Classifier Index From Classifier table for this SF and replace existing Classifier */
719 //with the new classifier Contained in this message 596 /* with the new classifier Contained in this message */
720 nClassifierIndex = SearchClsid(Adapter,ulSFID,u16PacketClassificationRuleIndex); 597 nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
721 if(nClassifierIndex > MAX_CLASSIFIERS) 598 if (nClassifierIndex > MAX_CLASSIFIERS) {
722 { 599 /* Failed To search the classifier */
723 //Failed To search the classifier 600 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Search for Classifier To be replaced failed");
724 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error Search for Classifier To be replaced failed");
725 break; 601 break;
726 } 602 }
727 //Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. 603 /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
728 CopyClassifierRuleToSF(Adapter,psfCSType,uiSearchRuleIndex,nClassifierIndex); 604 CopyClassifierRuleToSF(Adapter, psfCSType, uiSearchRuleIndex, nClassifierIndex);
729 } 605 }
730 break; 606 break;
731
732 case eDeleteClassifier: 607 case eDeleteClassifier:
733 { 608 {
734 //Get the Classifier Index From Classifier table for this SF and replace existing Classifier 609 /* Get the Classifier Index From Classifier table for this SF and replace existing Classifier */
735 //with the new classifier Contained in this message 610 /* with the new classifier Contained in this message */
736 nClassifierIndex = SearchClsid(Adapter,ulSFID,u16PacketClassificationRuleIndex); 611 nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
737 if(nClassifierIndex > MAX_CLASSIFIERS) 612 if (nClassifierIndex > MAX_CLASSIFIERS) {
738 { 613 /* Failed To search the classifier */
739 //Failed To search the classifier 614 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Search for Classifier To be deleted failed");
740 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error Search for Classifier To be deleted failed");
741 break; 615 break;
742 } 616 }
743 617
744 //Delete This classifier 618 /* Delete This classifier */
745 DeleteClassifierRuleFromSF(Adapter,uiSearchRuleIndex,nClassifierIndex); 619 DeleteClassifierRuleFromSF(Adapter, uiSearchRuleIndex, nClassifierIndex);
746 } 620 }
747 break; 621 break;
748
749 default: 622 default:
750 { 623 {
751 //Invalid Action for classifier 624 /* Invalid Action for classifier */
752 break; 625 break;
753 } 626 }
754 } 627 }
755 } 628 }
756 629
	/* Repeat parsing Classification Entries to process PHS Rules */
	for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
		psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "psfCSType->u8PhsDSCAction : 0x%x\n", psfCSType->u8PhsDSCAction);

		switch (psfCSType->u8PhsDSCAction) {
		case eDeleteAllPHSRules:
		{
			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Deleting All PHS Rules For VCID: 0x%X\n", uVCID);

			/* Delete All the PHS rules for this Service flow */
			PhsDeleteSFRules(&Adapter->stBCMPhsContext, uVCID);
			break;
		}
		case eDeletePHSRule:
		{
			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "PHS DSC Action = Delete PHS Rule\n");

			if (psfCSType->cPhsRule.u8PHSI)
				PhsDeletePHSRule(&Adapter->stBCMPhsContext, uVCID, psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);

			break;
		}
		default:
		{
			if (ucDsxType == DSC_ACK) {
				/* BCM_DEBUG_PRINT(CONN_MSG,("Invalid PHS DSC Action For DSC\n",psfCSType->cPhsRule.u8PHSI)); */
				break; /* For the DSC ACK case the PHS DSC Action must be in the valid set */
			}
		}
		/* Proceed To Add PHS rule for DSA_ACK case even if PHS DSC action is unspecified */
		/* No break here. Intentional fall-through! */

		case eAddPHSRule:
		case eSetPHSRule:
		{
			if (psfCSType->cPhsRule.u8PHSI) {
				/* Apply This PHS Rule to all classifiers whose Associated PHSI Match */
				unsigned int uiClassifierIndex = 0;
				if (pstAddIndication->u8Direction == UPLINK_DIR) {
					for (uiClassifierIndex = 0; uiClassifierIndex < MAX_CLASSIFIERS; uiClassifierIndex++) {
						if ((Adapter->astClassifierTable[uiClassifierIndex].bUsed) &&
								(Adapter->astClassifierTable[uiClassifierIndex].ulSFID == Adapter->PackInfo[uiSearchRuleIndex].ulSFID) &&
								(Adapter->astClassifierTable[uiClassifierIndex].u8AssociatedPHSI == psfCSType->cPhsRule.u8PHSI)) {
							BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
								"Adding PHS Rule For Classifier: 0x%x cPhsRule.u8PHSI: 0x%x\n",
								Adapter->astClassifierTable[uiClassifierIndex].uiClassifierRuleIndex,
								psfCSType->cPhsRule.u8PHSI);
							/* Update The PHS Rule for this classifier as Associated PHSI id defined */

							/* Copy the PHS Rule */
							sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
							sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
							sPhsRule.u8PHSMLength = psfCSType->cPhsRule.u8PHSMLength;
							sPhsRule.u8PHSS = psfCSType->cPhsRule.u8PHSS;
							sPhsRule.u8PHSV = psfCSType->cPhsRule.u8PHSV;
							memcpy(sPhsRule.u8PHSF, psfCSType->cPhsRule.u8PHSF, MAX_PHS_LENGTHS);
							memcpy(sPhsRule.u8PHSM, psfCSType->cPhsRule.u8PHSM, MAX_PHS_LENGTHS);
							sPhsRule.u8RefCnt = 0;
							sPhsRule.bUnclassifiedPHSRule = FALSE;
							sPhsRule.PHSModifiedBytes = 0;
							sPhsRule.PHSModifiedNumPackets = 0;
							sPhsRule.PHSErrorNumPackets = 0;

							/* bPHSRuleAssociated = TRUE; */
							/* Store The PHS Rule for this classifier */

							PhsUpdateClassifierRule(
								&Adapter->stBCMPhsContext,
@@ -848,184 +701,157 @@ static VOID CopyToAdapter( register PMINI_ADAPTER Adapter, /**<Pointer to the A
								&sPhsRule,
								Adapter->astClassifierTable[uiClassifierIndex].u8AssociatedPHSI);

							/* Update PHS Rule For the Classifier */
							if (sPhsRule.u8PHSI) {
								Adapter->astClassifierTable[uiClassifierIndex].u32PHSRuleID = sPhsRule.u8PHSI;
								memcpy(&Adapter->astClassifierTable[uiClassifierIndex].sPhsRule, &sPhsRule, sizeof(S_PHS_RULE));
							}
						}
					}
				} else {
					/* Error PHS Rule specified in signaling could not be applied to any classifier */

					/* Copy the PHS Rule */
					sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
					sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
					sPhsRule.u8PHSMLength = psfCSType->cPhsRule.u8PHSMLength;
					sPhsRule.u8PHSS = psfCSType->cPhsRule.u8PHSS;
					sPhsRule.u8PHSV = psfCSType->cPhsRule.u8PHSV;
					memcpy(sPhsRule.u8PHSF, psfCSType->cPhsRule.u8PHSF, MAX_PHS_LENGTHS);
					memcpy(sPhsRule.u8PHSM, psfCSType->cPhsRule.u8PHSM, MAX_PHS_LENGTHS);
					sPhsRule.u8RefCnt = 0;
					sPhsRule.bUnclassifiedPHSRule = TRUE;
					sPhsRule.PHSModifiedBytes = 0;
					sPhsRule.PHSModifiedNumPackets = 0;
					sPhsRule.PHSErrorNumPackets = 0;
					/* Store The PHS Rule for this classifier */

					/*
					 * Passing the argument u8PHSI instead of clsid. Because for DL with no classifier rule,
					 * clsid will be zero, hence we can't have multiple PHS rules for the same SF.
					 * To support multiple PHS rules, pass u8PHSI.
					 */
					PhsUpdateClassifierRule(
						&Adapter->stBCMPhsContext,
						uVCID,
						sPhsRule.u8PHSI,
						&sPhsRule,
						sPhsRule.u8PHSI);
				}
			}
		}
		break;
		}
	}

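	/*
	 * Editor's illustration (values assumed, not driver code) of the
	 * u8PHSI-as-key choice in the downlink branch above: with two PHS
	 * rules on one SF and no classifier rule, clsid is 0 for both, so
	 * keying on clsid would collide:
	 *
	 *	rule A (u8PHSI = 0x01) -> key 0
	 *	rule B (u8PHSI = 0x02) -> key 0   (rule A overwritten)
	 *
	 * Keying on u8PHSI instead keeps the rules distinct (keys 0x01, 0x02),
	 * so one SF can carry multiple PHS rules.
	 */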
	if (psfLocalSet->u32MaxSustainedTrafficRate == 0) {
		/* No rate limit. Set Max Sustained Traffic Rate to Maximum */
		Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
	} else if (ntohl(psfLocalSet->u32MaxSustainedTrafficRate) > WIMAX_MAX_ALLOWED_RATE) {
		/* Too large Allowed Rate specified. Limiting to WiMAX allowed rate */
		Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
	} else {
		Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = ntohl(psfLocalSet->u32MaxSustainedTrafficRate);
	}
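	/*
	 * Worked example of the clamping above (rate values assumed for
	 * illustration), with R = WIMAX_MAX_ALLOWED_RATE:
	 *
	 *	u32MaxSustainedTrafficRate == 0  -> uiMaxAllowedRate = R   (0 means "no limit")
	 *	ntohl(rate) == 2*R               -> uiMaxAllowedRate = R   (clamped to the cap)
	 *	ntohl(rate) == R/2               -> uiMaxAllowedRate = R/2 (taken as-is)
	 */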

	Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency = ntohl(psfLocalSet->u32MaximumLatency);
	if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency == 0) /* 0 should be treated as infinite */
		Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency = MAX_LATENCY_ALLOWED;

	if ((Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == ERTPS ||
			Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == UGS))
		UGIValue = ntohs(psfLocalSet->u16UnsolicitedGrantInterval);

	if (UGIValue == 0)
		UGIValue = DEFAULT_UG_INTERVAL;

	/*
	 * For UGI based connections...
	 * DEFAULT_UGI_FACTOR*UGIInterval worth of data is the max token count at host...
	 * The extra amount of tokens is to ensure that a large amount of jitter won't cause loss in throughput...
	 * In case of a non-UGI based connection, 200 frames worth of data is the max token count at host...
	 */
	Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize =
		(DEFAULT_UGI_FACTOR*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;

	if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize < WIMAX_MAX_MTU*8) {
		UINT UGIFactor = 0;
		/* Special handling to ensure the biggest size of packet can go out from host to FW as follows:
		 * 1. Any packet from Host to FW can go out in a different packet size.
		 * 2. So in case the Bucket count is smaller than MTU, the packets of size (Size > TokenCount) will get dropped.
		 * 3. We can allow packets of MaxSize from Host->FW that can go out from FW in multiple SDUs by fragmentation at the WiMAX layer
		 */
		UGIFactor = (Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency/UGIValue + 1);

		if (UGIFactor > DEFAULT_UGI_FACTOR)
			Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize =
				(UGIFactor*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;

		if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize > WIMAX_MAX_MTU*8)
			Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize = WIMAX_MAX_MTU*8;
	}
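	/*
	 * Worked example of the bucket sizing above (all numbers assumed for
	 * illustration): with DEFAULT_UGI_FACTOR = 4, uiMaxAllowedRate = 100000
	 * and UGIValue = 20 (ms),
	 *
	 *	uiMaxBucketSize = (4 * 100000 * 20) / 1000 = 8000
	 *
	 * Assuming WIMAX_MAX_MTU = 1400, the MTU floor is 1400 * 8 = 11200, so
	 * 8000 triggers the special handling: with uiMaxLatency = 200,
	 * UGIFactor = 200/20 + 1 = 11 > 4, the bucket is recomputed as
	 * (11 * 100000 * 20) / 1000 = 22000 and then capped back to 11200.
	 */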

	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "LAT: %d, UGI: %d\n", Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency, UGIValue);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "uiMaxAllowedRate: 0x%x, u32MaxSustainedTrafficRate: 0x%x ,uiMaxBucketSize: 0x%x",
		Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate,
		ntohl(psfLocalSet->u32MaxSustainedTrafficRate),
		Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize);

	/* copy the extended SF Parameters to Support MIBS */
	CopyMIBSExtendedSFParameters(Adapter, psfLocalSet, uiSearchRuleIndex);

	/* store header suppression enabled flag per SF */
	Adapter->PackInfo[uiSearchRuleIndex].bHeaderSuppressionEnabled =
		!(psfLocalSet->u8RequesttransmissionPolicy &
			MASK_DISABLE_HEADER_SUPPRESSION);

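	/*
	 * Editor's illustration (mask value assumed): if
	 * MASK_DISABLE_HEADER_SUPPRESSION were 0x01, then
	 *
	 *	u8RequesttransmissionPolicy = 0x05 -> bit set   -> flag = 0 (suppression disabled)
	 *	u8RequesttransmissionPolicy = 0x04 -> bit clear -> flag = 1 (suppression enabled)
	 *
	 * i.e. the stored flag is the negation of a single policy bit.
	 */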
	kfree(Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication);
	Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication = pstAddIndication;

	/* Re-sort the SF list in PackInfo according to Traffic Priority */
	SortPackInfo(Adapter);

	/* Re-sort the Classifier Rules table and re-arrange
	 * according to Classifier Rule Priority
	 */
	SortClassifiers(Adapter);
	DumpPhsRules(&Adapter->stBCMPhsContext);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s <=====", __func__);
}

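/*
 * Editor's sketch (not part of the driver): the QoS normalization done at
 * the end of CopyToAdapter() above, collected into one self-contained,
 * compilable illustration.  All constant values below are assumed for
 * illustration; the real ones live in the driver headers.  A 64-bit
 * intermediate is used so the products cannot wrap, which the driver's
 * native-width arithmetic does not guarantee.
 */
#define SKETCH_MAX_ALLOWED_RATE	(20 * 1024 * 1024)	/* assumed rate cap */
#define SKETCH_MAX_LATENCY	0xFFFFFFFFu		/* assumed "infinite" */
#define SKETCH_DEFAULT_UGI	250			/* assumed default, ms */
#define SKETCH_UGI_FACTOR	4			/* assumed factor */
#define SKETCH_MAX_MTU		1400			/* assumed MTU, bytes */

struct sketch_qos {
	unsigned int max_allowed_rate;
	unsigned int max_latency;
	unsigned int max_bucket_size;
};

static void sketch_normalize_qos(struct sketch_qos *q, unsigned int rate,
				 unsigned int latency_ms, unsigned int ugi_ms)
{
	unsigned long long bucket;

	/* 0 means "no limit"; anything above the cap is clamped. */
	if (rate == 0 || rate > SKETCH_MAX_ALLOWED_RATE)
		q->max_allowed_rate = SKETCH_MAX_ALLOWED_RATE;
	else
		q->max_allowed_rate = rate;

	/* 0 latency is treated as infinite. */
	q->max_latency = latency_ms ? latency_ms : SKETCH_MAX_LATENCY;

	if (ugi_ms == 0)
		ugi_ms = SKETCH_DEFAULT_UGI;

	/* A few UGI intervals worth of tokens, to ride out jitter. */
	bucket = (unsigned long long)SKETCH_UGI_FACTOR *
		 q->max_allowed_rate * ugi_ms / 1000;

	/* Raise small buckets so an MTU-sized packet can still pass. */
	if (bucket < SKETCH_MAX_MTU * 8) {
		unsigned int factor = q->max_latency / ugi_ms + 1;

		if (factor > SKETCH_UGI_FACTOR)
			bucket = (unsigned long long)factor *
				 q->max_allowed_rate * ugi_ms / 1000;
		if (bucket > SKETCH_MAX_MTU * 8)
			bucket = SKETCH_MAX_MTU * 8;
	}
	q->max_bucket_size = (bucket > 0xFFFFFFFFull) ? 0xFFFFFFFFu
						      : (unsigned int)bucket;
}
/* Usage sketch: sketch_normalize_qos(&q, ntohl(rate_be), ntohl(lat_be), ntohs(ugi_be)); */
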
/***********************************************************************
 * Function - DumpCmControlPacket
 *
 * Description - This routine dumps the contents of the AddIndication
 *  structure in the Connection Management Control Packet
 *
 * Parameter - pvBuffer: Pointer to the buffer containing the
 *  AddIndication data.
 *
 * Returns - None
 *************************************************************************/
static VOID DumpCmControlPacket(PVOID pvBuffer)
{
	int uiLoopIndex;
	int nIndex;
	stLocalSFAddIndicationAlt *pstAddIndication;
	UINT nCurClassifierCnt;
	PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);

	pstAddIndication = (stLocalSFAddIndicationAlt *)pvBuffer;
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "======>");
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Type: 0x%X", pstAddIndication->u8Type);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Direction: 0x%X", pstAddIndication->u8Direction);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TID: 0x%X", ntohs(pstAddIndication->u16TID));
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", ntohs(pstAddIndication->u16CID));
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VCID: 0x%X", ntohs(pstAddIndication->u16VCID));
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " AuthorizedSet--->");
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", htonl(pstAddIndication->sfAuthorizedSet.u32SFID));
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", htons(pstAddIndication->sfAuthorizedSet.u16CID));
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
		pstAddIndication->sfAuthorizedSet.u8ServiceClassNameLength);

	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x%X ,0x%X , 0x%X, 0x%X, 0x%X, 0x%X",
		pstAddIndication->sfAuthorizedSet.u8ServiceClassName[0],
		pstAddIndication->sfAuthorizedSet.u8ServiceClassName[1],
		pstAddIndication->sfAuthorizedSet.u8ServiceClassName[2],
@@ -1033,207 +859,170 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
		pstAddIndication->sfAuthorizedSet.u8ServiceClassName[4],
		pstAddIndication->sfAuthorizedSet.u8ServiceClassName[5]);

	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%X", pstAddIndication->sfAuthorizedSet.u8MBSService);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%X", pstAddIndication->sfAuthorizedSet.u8QosParamSet);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%X, %p",
		pstAddIndication->sfAuthorizedSet.u8TrafficPriority, &pstAddIndication->sfAuthorizedSet.u8TrafficPriority);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxSustainedTrafficRate: 0x%X 0x%p",
		pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate,
		&pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaxTrafficBurst);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
		pstAddIndication->sfAuthorizedSet.u32MinReservedTrafficRate);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%X",
		pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParamLength);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%X",
		pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParam[0]);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%X",
		pstAddIndication->sfAuthorizedSet.u8ServiceFlowSchedulingType);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAuthorizedSet.u32ToleratedJitter);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaximumLatency);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%X",
		pstAddIndication->sfAuthorizedSet.u8FixedLengthVSVariableLengthSDUIndicator);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfAuthorizedSet.u8SDUSize);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%X", pstAddIndication->sfAuthorizedSet.u16TargetSAID);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQEnable);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQWindowSize);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryTxTimeOut);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryRxTimeOut);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockLifeTime);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQSyncLossTimeOut);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQDeliverInOrder);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRxPurgeTimeOut);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockSize);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%X", pstAddIndication->sfAuthorizedSet.u8CSSpecification);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%X",
		pstAddIndication->sfAuthorizedSet.u8TypeOfDataDeliveryService);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16SDUInterArrivalTime);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAuthorizedSet.u16TimeBase);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAuthorizedSet.u8PagingPreference);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UnsolicitedPollingInterval: 0x%X",
		pstAddIndication->sfAuthorizedSet.u16UnsolicitedPollingInterval);

	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "sfAuthorizedSet.u8HARQChannelMapping %x %x %x ",
		*(unsigned int *)pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping,
		*(unsigned int *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[4],
		*(USHORT *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[8]);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%X",
		pstAddIndication->sfAuthorizedSet.u8TrafficIndicationPreference);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAuthorizedSet.u8TotalClassifiers);

	nCurClassifierCnt = pstAddIndication->sfAuthorizedSet.u8TotalClassifiers;
	if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
		nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;

	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.bValid %d", pstAddIndication->sfAuthorizedSet.bValid);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.u16MacOverhead %x", pstAddIndication->sfAuthorizedSet.u16MacOverhead);
	if (!pstAddIndication->sfAuthorizedSet.bValid)
		pstAddIndication->sfAuthorizedSet.bValid = 1;
	for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
		stConvergenceSLTypes *psfCSType = NULL;
		psfCSType = &pstAddIndication->sfAuthorizedSet.cConvergenceSLTypes[nIndex];

		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "psfCSType = %p", psfCSType);
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "CCPacketClassificationRuleSI====>");
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%X ",
			psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%X ",
			psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ",
			psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
			psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
			psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);

		for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ",
				psfCSType->cCPacketClassificationRule.u8Protocol);

		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%X ",
			psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);

		for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
				psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);

		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%X ",
			psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);

		for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
				psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);

		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength:0x%X ",
			psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
			psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
			psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
			psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
			psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);

		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
			psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
			psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
			psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
			psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
			psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);

		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
			psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);

		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
			psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
			psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
			psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
			psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
			psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
			psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);

		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
			psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);

		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
			psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
			psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
			psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
			psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
			psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
			psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);

		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ",
			psfCSType->cCPacketClassificationRule.u8EthertypeLength);
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3]: 0x%02X ,0x%02X ,0x%02X ",
			psfCSType->cCPacketClassificationRule.u8Ethertype[0],
			psfCSType->cCPacketClassificationRule.u8Ethertype[1],
			psfCSType->cCPacketClassificationRule.u8Ethertype[2]);

		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
			psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);

		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%X ",
			psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%X ",
			psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
#ifdef VERSION_D5
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
			psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x %02X %02X %02X %02X %02X %02X ",
			psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
			psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
			psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
			psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
			psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
			psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
#endif
	}

	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%02X", pstAddIndication->sfAuthorizedSet.bValid);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "AdmittedSet--->");
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfAdmittedSet.u32SFID);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfAdmittedSet.u16CID);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
		pstAddIndication->sfAdmittedSet.u8ServiceClassNameLength);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x %02X %02X %02X %02X %02X %02X",
		pstAddIndication->sfAdmittedSet.u8ServiceClassName[0],
		pstAddIndication->sfAdmittedSet.u8ServiceClassName[1],
		pstAddIndication->sfAdmittedSet.u8ServiceClassName[2],
@@ -1241,429 +1030,338 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
		pstAddIndication->sfAdmittedSet.u8ServiceClassName[4],
		pstAddIndication->sfAdmittedSet.u8ServiceClassName[5]);

	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfAdmittedSet.u8MBSService);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfAdmittedSet.u8QosParamSet);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfAdmittedSet.u8TrafficPriority);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAdmittedSet.u32MaxTrafficBurst);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
		pstAddIndication->sfAdmittedSet.u32MinReservedTrafficRate);

	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
		pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParamLength);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
		pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParam[0]);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
		pstAddIndication->sfAdmittedSet.u8ServiceFlowSchedulingType);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAdmittedSet.u32ToleratedJitter);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAdmittedSet.u32MaximumLatency);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
		pstAddIndication->sfAdmittedSet.u8FixedLengthVSVariableLengthSDUIndicator);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%02X", pstAddIndication->sfAdmittedSet.u8SDUSize);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%02X", pstAddIndication->sfAdmittedSet.u16TargetSAID);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQEnable);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQWindowSize);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryTxTimeOut);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryRxTimeOut);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockLifeTime);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQSyncLossTimeOut);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQDeliverInOrder);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRxPurgeTimeOut);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockSize);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%02X", pstAddIndication->sfAdmittedSet.u8CSSpecification);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%02X",
		pstAddIndication->sfAdmittedSet.u8TypeOfDataDeliveryService);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAdmittedSet.u16SDUInterArrivalTime);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAdmittedSet.u16TimeBase);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAdmittedSet.u8PagingPreference);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%02X",
		pstAddIndication->sfAdmittedSet.u8TrafficIndicationPreference);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAdmittedSet.u8TotalClassifiers);
1281 pstAddIndication->sfAdmittedSet.u16ARQRetryRxTimeOut);
1282 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime : 0x%X",
1283 pstAddIndication->sfAdmittedSet.u16ARQBlockLifeTime);
1284 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut : 0x%X",
1285 pstAddIndication->sfAdmittedSet.u16ARQSyncLossTimeOut);
1286
1287 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder : 0x%02X",
1288 pstAddIndication->sfAdmittedSet.u8ARQDeliverInOrder);
1289 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut : 0x%X",
1290 pstAddIndication->sfAdmittedSet.u16ARQRxPurgeTimeOut);
1291 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize : 0x%X",
1292 pstAddIndication->sfAdmittedSet.u16ARQBlockSize);
1293 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification : 0x%02X",
1294 pstAddIndication->sfAdmittedSet.u8CSSpecification);
1295 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService : 0x%02X",
1296 pstAddIndication->sfAdmittedSet.u8TypeOfDataDeliveryService);
1297 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime : 0x%X",
1298 pstAddIndication->sfAdmittedSet.u16SDUInterArrivalTime);
1299 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase : 0x%X",
1300 pstAddIndication->sfAdmittedSet.u16TimeBase);
1301 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference : 0x%X",
1302 pstAddIndication->sfAdmittedSet.u8PagingPreference);
1303
1304
1305 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference : 0x%02X",
1306 pstAddIndication->sfAdmittedSet.u8TrafficIndicationPreference);
1307
1308 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received : 0x%X",pstAddIndication->sfAdmittedSet.u8TotalClassifiers);
1309 1070
1310 nCurClassifierCnt = pstAddIndication->sfAdmittedSet.u8TotalClassifiers; 1071 nCurClassifierCnt = pstAddIndication->sfAdmittedSet.u8TotalClassifiers;
1311 1072 if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
1312 if(nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
1313 {
1314 nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF; 1073 nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
1315 }
1316
1317
1318 for(nIndex = 0 ; nIndex < nCurClassifierCnt ; nIndex++)
1319 {
1320 1074
1075 for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
1321 stConvergenceSLTypes *psfCSType = NULL; 1076 stConvergenceSLTypes *psfCSType = NULL;
1322 psfCSType = &pstAddIndication->sfAdmittedSet.cConvergenceSLTypes[nIndex];
1323 1077
1324 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>"); 1078 psfCSType = &pstAddIndication->sfAdmittedSet.cConvergenceSLTypes[nIndex];
1325 1079 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
1326 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority :0x%02X ", 1080 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%02X ",
1327 psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority); 1081 psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
1328 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength :0x%02X", 1082 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%02X",
1329 psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength); 1083 psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
1330 1084 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%02X %02X %02X",
1331 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3] :0x%02X %02X %02X", 1085 psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
1332 psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0], 1086 psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
1333 psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1], 1087 psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
1334 psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]); 1088 for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
1335 for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++) 1089 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ", psfCSType->cCPacketClassificationRule.u8Protocol);
1336 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ", 1090
1337 psfCSType->cCPacketClassificationRule.u8Protocol); 1091 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%02X ",
1338 1092 psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
1339 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength :0x%02X ", 1093
1340 psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength); 1094 for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
1341 1095 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
1342 for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++) 1096 psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
1343 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32] : 0x%02X ", 1097
1344 psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]); 1098 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%02X ",
1345 1099 psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
1346 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength : 0x%02X ", 1100
1347 psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength); 1101 for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
1348 1102 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
1349 for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++) 1103 psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
1350 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32] : 0x%02X ", 1104
1351 psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]); 1105 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength: 0x%02X ",
1352 1106 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
1353 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength : 0x%02X ", 1107
1354 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength); 1108 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x %02X %02X %02X %02X ",
1355 1109 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
1356 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4] : 0x %02X %02X %02X %02X ", 1110 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
1357 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0], 1111 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
1358 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1], 1112 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
1359 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2], 1113
1360 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]); 1114 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
1361 1115 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
1362 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength : 0x%02X ", 1116
1363 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength); 1117 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x %02X %02X %02X %02X ",
1364 1118 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
1365 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4] : 0x %02X %02X %02X %02X ", 1119 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
1366 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0], 1120 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
1367 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1], 1121 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
1368 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2], 1122
1369 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]); 1123 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
1370 1124 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
1371 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength : 0x%02X ", 1125
1372 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength); 1126 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
1373 1127 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
1374 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6] : 0x %02X %02X %02X %02X %02X %02X", 1128 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
1375 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0], 1129 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
1376 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1], 1130 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
1377 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2], 1131 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
1378 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3], 1132 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
1379 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4], 1133
1380 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]); 1134 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
1381 1135 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
1382 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength : 0x%02X ", 1136
1383 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength); 1137 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
1384 1138 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
1385 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6] : 0x %02X %02X %02X %02X %02X %02X", 1139 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
1386 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0], 1140 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
1387 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1], 1141 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
1388 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2], 1142 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
1389 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3], 1143 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
1390 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4], 1144
1391 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]); 1145 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ", psfCSType->cCPacketClassificationRule.u8EthertypeLength);
1392 1146 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3]: 0x%02X %02X %02X",
1393 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength : 0x%02X ", 1147 psfCSType->cCPacketClassificationRule.u8Ethertype[0],
1394 psfCSType->cCPacketClassificationRule.u8EthertypeLength); 1148 psfCSType->cCPacketClassificationRule.u8Ethertype[1],
1395 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3] : 0x%02X %02X %02X", 1149 psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
1396 psfCSType->cCPacketClassificationRule.u8Ethertype[0], 1150
1397 psfCSType->cCPacketClassificationRule.u8Ethertype[1], 1151 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
1398 psfCSType->cCPacketClassificationRule.u8Ethertype[2]); 1152 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
1399 1153 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
1400 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority : 0x%X ", 1154 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
1401 psfCSType->cCPacketClassificationRule.u16UserPriority); 1155 psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
1402 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID : 0x%X ", 1156 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%02X",
1403 psfCSType->cCPacketClassificationRule.u16VLANID); 1157 psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
1404 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI : 0x%02X ", 1158 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%02X ",
1405 psfCSType->cCPacketClassificationRule.u8AssociatedPHSI); 1159 psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
1406 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex : 0x%X ",
1407 psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
1408
1409 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength : 0x%02X",
1410 psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
1411 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1] : 0x%02X ",
1412 psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
1413#ifdef VERSION_D5 1160#ifdef VERSION_D5
1414 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength : 0x%X ", 1161 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
1415 psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength); 1162 psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
1416 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6] : 0x %02X %02X %02X %02X %02X %02X ", 1163 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x %02X %02X %02X %02X %02X %02X ",
1417 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0], 1164 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
1418 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1], 1165 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
1419 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2], 1166 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
1420 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3], 1167 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
1421 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4], 1168 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
1422 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]); 1169 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
1423#endif 1170#endif
1424 } 1171 }
1425 1172
1426 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid : 0x%X",pstAddIndication->sfAdmittedSet.bValid); 1173 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%X", pstAddIndication->sfAdmittedSet.bValid);
1427 1174 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " ActiveSet--->");
1428 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " ActiveSet--->"); 1175 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfActiveSet.u32SFID);
1429 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID : 0x%X",pstAddIndication->sfActiveSet.u32SFID); 1176 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfActiveSet.u16CID);
1430 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID : 0x%X",pstAddIndication->sfActiveSet.u16CID); 1177 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X", pstAddIndication->sfActiveSet.u8ServiceClassNameLength);
1431 1178 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x %02X %02X %02X %02X %02X %02X",
1432 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength : 0x%X", 1179 pstAddIndication->sfActiveSet.u8ServiceClassName[0],
1433 pstAddIndication->sfActiveSet.u8ServiceClassNameLength); 1180 pstAddIndication->sfActiveSet.u8ServiceClassName[1],
1434 1181 pstAddIndication->sfActiveSet.u8ServiceClassName[2],
1435 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName : 0x %02X %02X %02X %02X %02X %02X", 1182 pstAddIndication->sfActiveSet.u8ServiceClassName[3],
1436 pstAddIndication->sfActiveSet.u8ServiceClassName[0], 1183 pstAddIndication->sfActiveSet.u8ServiceClassName[4],
1437 pstAddIndication->sfActiveSet.u8ServiceClassName[1], 1184 pstAddIndication->sfActiveSet.u8ServiceClassName[5]);
1438 pstAddIndication->sfActiveSet.u8ServiceClassName[2], 1185
1439 pstAddIndication->sfActiveSet.u8ServiceClassName[3], 1186 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfActiveSet.u8MBSService);
1440 pstAddIndication->sfActiveSet.u8ServiceClassName[4], 1187 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfActiveSet.u8QosParamSet);
1441 pstAddIndication->sfActiveSet.u8ServiceClassName[5]); 1188 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfActiveSet.u8TrafficPriority);
1442 1189 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfActiveSet.u32MaxTrafficBurst);
1443 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService : 0x%02X", 1190 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
1444 pstAddIndication->sfActiveSet.u8MBSService); 1191 pstAddIndication->sfActiveSet.u32MinReservedTrafficRate);
1445 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet : 0x%02X", 1192 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
1446 pstAddIndication->sfActiveSet.u8QosParamSet); 1193 pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength);
1447 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority : 0x%02X", 1194 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
1448 pstAddIndication->sfActiveSet.u8TrafficPriority); 1195 pstAddIndication->sfActiveSet.u8VendorSpecificQoSParam[0]);
1449 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst : 0x%X", 1196 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
1450 pstAddIndication->sfActiveSet.u32MaxTrafficBurst); 1197 pstAddIndication->sfActiveSet.u8ServiceFlowSchedulingType);
1451 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X", 1198 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfActiveSet.u32ToleratedJitter);
1452 pstAddIndication->sfActiveSet.u32MinReservedTrafficRate); 1199 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfActiveSet.u32MaximumLatency);
1453 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength : 0x%02X", 1200 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
1454 pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength); 1201 pstAddIndication->sfActiveSet.u8FixedLengthVSVariableLengthSDUIndicator);
1455 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam : 0x%02X", 1202 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfActiveSet.u8SDUSize);
1456 pstAddIndication->sfActiveSet.u8VendorSpecificQoSParam[0]); 1203 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TargetSAID: 0x%X", pstAddIndication->sfActiveSet.u16TargetSAID);
1457 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType : 0x%02X", 1204 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQEnable: 0x%X", pstAddIndication->sfActiveSet.u8ARQEnable);
1458 pstAddIndication->sfActiveSet.u8ServiceFlowSchedulingType); 1205 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQWindowSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQWindowSize);
1459 1206 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryTxTimeOut);
1460 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter : 0x%X", 1207 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryRxTimeOut);
1461 pstAddIndication->sfActiveSet.u32ToleratedJitter); 1208 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockLifeTime);
1462 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency : 0x%X", 1209 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQSyncLossTimeOut);
1463 pstAddIndication->sfActiveSet.u32MaximumLatency); 1210 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfActiveSet.u8ARQDeliverInOrder);
1464 1211 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRxPurgeTimeOut);
1465 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X", 1212 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockSize);
1466 pstAddIndication->sfActiveSet.u8FixedLengthVSVariableLengthSDUIndicator); 1213 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8CSSpecification: 0x%X", pstAddIndication->sfActiveSet.u8CSSpecification);
1467 1214 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TypeOfDataDeliveryService: 0x%X",
1468 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize : 0x%X", 1215 pstAddIndication->sfActiveSet.u8TypeOfDataDeliveryService);
1469 pstAddIndication->sfActiveSet.u8SDUSize); 1216 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfActiveSet.u16SDUInterArrivalTime);
1470 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TargetSAID : 0x%X", 1217 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TimeBase: 0x%X", pstAddIndication->sfActiveSet.u16TimeBase);
1471 pstAddIndication->sfActiveSet.u16TargetSAID); 1218 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8PagingPreference: 0x%X", pstAddIndication->sfActiveSet.u8PagingPreference);
1472 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQEnable : 0x%X", 1219 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TrafficIndicationPreference: 0x%X",
1473 pstAddIndication->sfActiveSet.u8ARQEnable); 1220 pstAddIndication->sfActiveSet.u8TrafficIndicationPreference);
1474 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQWindowSize : 0x%X", 1221 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfActiveSet.u8TotalClassifiers);
1475 pstAddIndication->sfActiveSet.u16ARQWindowSize);
1476 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryTxTimeOut : 0x%X",
1477 pstAddIndication->sfActiveSet.u16ARQRetryTxTimeOut);
1478 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryRxTimeOut : 0x%X",
1479 pstAddIndication->sfActiveSet.u16ARQRetryRxTimeOut);
1480 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockLifeTime : 0x%X",
1481 pstAddIndication->sfActiveSet.u16ARQBlockLifeTime);
1482 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQSyncLossTimeOut : 0x%X",
1483 pstAddIndication->sfActiveSet.u16ARQSyncLossTimeOut);
1484 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQDeliverInOrder : 0x%X",
1485 pstAddIndication->sfActiveSet.u8ARQDeliverInOrder);
1486 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRxPurgeTimeOut : 0x%X",
1487 pstAddIndication->sfActiveSet.u16ARQRxPurgeTimeOut);
1488 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockSize : 0x%X",
1489 pstAddIndication->sfActiveSet.u16ARQBlockSize);
1490 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8CSSpecification : 0x%X",
1491 pstAddIndication->sfActiveSet.u8CSSpecification);
1492 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TypeOfDataDeliveryService : 0x%X",
1493 pstAddIndication->sfActiveSet.u8TypeOfDataDeliveryService);
1494 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16SDUInterArrivalTime : 0x%X",
1495 pstAddIndication->sfActiveSet.u16SDUInterArrivalTime);
1496 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TimeBase : 0x%X",
1497 pstAddIndication->sfActiveSet.u16TimeBase);
1498 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8PagingPreference : 0x%X",
1499 pstAddIndication->sfActiveSet.u8PagingPreference);
1500
1501
1502 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TrafficIndicationPreference : 0x%X",
1503 pstAddIndication->sfActiveSet.u8TrafficIndicationPreference);
1504
1505 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received : 0x%X",pstAddIndication->sfActiveSet.u8TotalClassifiers);
1506 1222
1507 nCurClassifierCnt = pstAddIndication->sfActiveSet.u8TotalClassifiers; 1223 nCurClassifierCnt = pstAddIndication->sfActiveSet.u8TotalClassifiers;
1508 1224 if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
1509 if(nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
1510 {
1511 nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF; 1225 nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
1512 }
1513
1514 for(nIndex = 0 ; nIndex < nCurClassifierCnt ; nIndex++)
1515 {
1516 1226
1227 for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
1517 stConvergenceSLTypes *psfCSType = NULL; 1228 stConvergenceSLTypes *psfCSType = NULL;
1518 psfCSType = &pstAddIndication->sfActiveSet.cConvergenceSLTypes[nIndex];
1519
1520 1229
1521 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>"); 1230 psfCSType = &pstAddIndication->sfActiveSet.cConvergenceSLTypes[nIndex];
1522 1231 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
1523 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ClassifierRulePriority :0x%X ", 1232 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ClassifierRulePriority: 0x%X ",
1524 psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority); 1233 psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
1525 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfServiceLength :0x%X ", 1234 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfServiceLength: 0x%X ",
1526 psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength); 1235 psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
1527 1236 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ",
1528 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfService[3] :0x%X ,0x%X ,0x%X ", 1237 psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
1529 psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0], 1238 psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
1530 psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1], 1239 psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
1531 psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]); 1240
1532 for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++) 1241 for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
1533 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Protocol : 0x%X ", 1242 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Protocol: 0x%X ", psfCSType->cCPacketClassificationRule.u8Protocol);
1534 psfCSType->cCPacketClassificationRule.u8Protocol); 1243
1535 1244 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%X ",
1536 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength :0x%X ", 1245 psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
1537 psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength); 1246
1538 1247 for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
1539 for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++) 1248 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%X ",
1540 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]:0x%X ", 1249 psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
1541 psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]); 1250
1542 1251 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%02X ",
1543 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength : 0x%02X ", 1252 psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
1544 psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength); 1253
1545 1254 for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
1546 for(uiLoopIndex=0;uiLoopIndex<32;uiLoopIndex++) 1255 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPDestinationAddress[32]:0x%X ",
1547 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPDestinationAddress[32]:0x%X ", 1256 psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
1548 psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]); 1257
1549 1258 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRangeLength: 0x%X ",
1550 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRangeLength:0x%X ", 1259 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
1551 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength); 1260
1552 1261 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
1553 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRange[4]:0x%X ,0x%X ,0x%X ,0x%X ", 1262 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
1554 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0], 1263 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
1555 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1], 1264 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
1556 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2], 1265 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
1557 psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]); 1266
1558 1267 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRangeLength: 0x%X ",
1559 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRangeLength:0x%X ", 1268 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
1560 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength); 1269 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
1561 1270 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
1562 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRange[4]:0x%X ,0x%X ,0x%X ,0x%X ", 1271 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
1563 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0], 1272 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
1564 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1], 1273 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
1565 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2], 1274
1566 psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]); 1275 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddressLength: 0x%X ",
1567 1276 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
1568 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddressLength:0x%X ", 1277 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
1569 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength); 1278 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
1570 1279 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
1571 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddress[6]:0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X", 1280 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
1572 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0], 1281 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
1573 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1], 1282 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
1574 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2], 1283 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
1575 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3], 1284
1576 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4], 1285 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetSourceMACAddressLength: 0x%X ",
1577 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]); 1286 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
1578 1287 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
1579 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetSourceMACAddressLength:0x%X ", 1288 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
1580 psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength); 1289 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
1581 1290 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
1582 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]:0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X", 1291 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
1583 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0], 1292 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
1584 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1], 1293 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
1585 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2], 1294
1586 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3], 1295 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthertypeLength: 0x%X ",
1587 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4], 1296 psfCSType->cCPacketClassificationRule.u8EthertypeLength);
1588 psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]); 1297 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Ethertype[3]: 0x%X ,0x%X ,0x%X ",
1589 1298 psfCSType->cCPacketClassificationRule.u8Ethertype[0],
1590 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthertypeLength :0x%X ", 1299 psfCSType->cCPacketClassificationRule.u8Ethertype[1],
1591 psfCSType->cCPacketClassificationRule.u8EthertypeLength); 1300 psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
1592 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Ethertype[3] :0x%X ,0x%X ,0x%X ", 1301 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16UserPriority: 0x%X ",
1593 psfCSType->cCPacketClassificationRule.u8Ethertype[0], 1302 psfCSType->cCPacketClassificationRule.u16UserPriority);
1594 psfCSType->cCPacketClassificationRule.u8Ethertype[1], 1303 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
1595 psfCSType->cCPacketClassificationRule.u8Ethertype[2]); 1304 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8AssociatedPHSI: 0x%X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
1596 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16UserPriority :0x%X ", 1305 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16PacketClassificationRuleIndex:0x%X ",
1597 psfCSType->cCPacketClassificationRule.u16UserPriority); 1306 psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
1598 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16VLANID :0x%X ", 1307
1599 psfCSType->cCPacketClassificationRule.u16VLANID); 1308 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParamLength:0x%X ",
1600 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8AssociatedPHSI :0x%X ", 1309 psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
1601 psfCSType->cCPacketClassificationRule.u8AssociatedPHSI); 1310 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParam[1]:0x%X ",
1602 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16PacketClassificationRuleIndex:0x%X ", 1311 psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
1603 psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
1604
1605 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParamLength:0x%X ",
1606 psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
1607 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParam[1]:0x%X ",
1608 psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
1609#ifdef VERSION_D5 1312#ifdef VERSION_D5
1610 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLableLength :0x%X ", 1313 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLableLength: 0x%X ",
1611 psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength); 1314 psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
1612 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLable[6] :0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X ", 1315 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLable[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X ",
1613 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0], 1316 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
1614 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1], 1317 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
1615 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2], 1318 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
1616 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3], 1319 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
1617 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4], 1320 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
1618 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]); 1321 psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
1619#endif 1322#endif
1620 } 1323 }
1621 1324
1622 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " bValid : 0x%X",pstAddIndication->sfActiveSet.bValid); 1325 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " bValid: 0x%X", pstAddIndication->sfActiveSet.bValid);
1623
1624} 1326}
1625 1327
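[Reviewer note] Everything in the hunk above is mechanical cleanup: each two-line BCM_DEBUG_PRINT call is joined onto a single line and the "name: value" spacing in the format strings is normalised, so the logged output does not change. What the commit leaves alone is the repetition itself. Since every call prints one field of the same parameter-set structure, a stringizing helper could shrink this dump function considerably. A minimal sketch against the structures used above; DUMP_SF_FIELD is a hypothetical name, not something that exists in the driver:

	/* Hypothetical helper: print "FieldName: <value>" for one member of an
	 * sfAdmittedSet/sfActiveSet-style parameter set. */
	#define DUMP_SF_FIELD(Adapter, pstSet, Field, Fmt) \
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, \
				#Field ": " Fmt, (pstSet)->Field)

	/* Two of the hand-written calls above would then collapse to: */
	DUMP_SF_FIELD(Adapter, &pstAddIndication->sfAdmittedSet, u16ARQWindowSize, "0x%X");
	DUMP_SF_FIELD(Adapter, &pstAddIndication->sfAdmittedSet, u8CSSpecification, "0x%02X");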
-static inline ULONG RestoreSFParam(PMINI_ADAPTER Adapter, ULONG ulAddrSFParamSet,PUCHAR pucDestBuffer)
+static inline ULONG RestoreSFParam(PMINI_ADAPTER Adapter, ULONG ulAddrSFParamSet, PUCHAR pucDestBuffer)
 {
 	UINT nBytesToRead = sizeof(stServiceFlowParamSI);
 
-	if(ulAddrSFParamSet == 0 || NULL == pucDestBuffer)
-	{
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Got Param address as 0!!");
+	if (ulAddrSFParamSet == 0 || NULL == pucDestBuffer) {
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Got Param address as 0!!");
 		return 0;
 	}
 	ulAddrSFParamSet = ntohl(ulAddrSFParamSet);
 
-	//Read out the SF Param Set At the indicated Location
-	if(rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0)
+	/* Read out the SF Param Set At the indicated Location */
+	if (rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0)
 		return STATUS_FAILURE;
 
 	return 1;
 }
 
-
-static ULONG StoreSFParam(PMINI_ADAPTER Adapter,PUCHAR pucSrcBuffer,ULONG ulAddrSFParamSet)
+static ULONG StoreSFParam(PMINI_ADAPTER Adapter, PUCHAR pucSrcBuffer, ULONG ulAddrSFParamSet)
 {
 	UINT nBytesToWrite = sizeof(stServiceFlowParamSI);
 	int ret = 0;
 
-	if(ulAddrSFParamSet == 0 || NULL == pucSrcBuffer)
-	{
+	if (ulAddrSFParamSet == 0 || NULL == pucSrcBuffer)
 		return 0;
-	}
 
 	ret = wrm(Adapter, ulAddrSFParamSet, (u8 *)pucSrcBuffer, nBytesToWrite);
 	if (ret < 0) {
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s:%d WRM failed",__FUNCTION__, __LINE__);
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s:%d WRM failed", __func__, __LINE__);
 		return ret;
 	}
 	return 1;
 }
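[Reviewer note] RestoreSFParam and StoreSFParam still mix three return conventions after this cleanup: 0 for a bad address or buffer, STATUS_FAILURE or the negative wrm() code for an I/O error, and 1 for success, which is why the callers below must test `!= 1`. A follow-up could move them to the usual kernel 0/-errno style. A sketch only, assuming wrm() returns 0 on success and a negative error code on failure (this is not what the driver does today):

	static int StoreSFParam(PMINI_ADAPTER Adapter, PUCHAR pucSrcBuffer, ULONG ulAddrSFParamSet)
	{
		UINT nBytesToWrite = sizeof(stServiceFlowParamSI);
		int ret;

		if (ulAddrSFParamSet == 0 || pucSrcBuffer == NULL)
			return -EINVAL;	/* bad arguments get a real error code, not 0 */

		ret = wrm(Adapter, ulAddrSFParamSet, (u8 *)pucSrcBuffer, nBytesToWrite);
		if (ret < 0)
			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
					"%s:%d WRM failed", __func__, __LINE__);
		return ret;	/* 0 on success, negative errno on failure */
	}

Callers would then check `if (ret)` instead of comparing against 1.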
 
-ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *puBufferLength)
+ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter, PVOID pvBuffer, UINT *puBufferLength)
 {
 	stLocalSFAddIndicationAlt *pstAddIndicationAlt = NULL;
-	stLocalSFAddIndication * pstAddIndication = NULL;
+	stLocalSFAddIndication *pstAddIndication = NULL;
 	stLocalSFDeleteRequest *pstDeletionRequest;
 	UINT uiSearchRuleIndex;
 	ULONG ulSFID;
@@ -1671,52 +1369,51 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
 	pstAddIndicationAlt = (stLocalSFAddIndicationAlt *)(pvBuffer);
 
 	/*
 	 * In case of DSD Req By MS, we should immediately delete this SF so that
 	 * we can stop the further classifying the pkt for this SF.
 	 */
-	if(pstAddIndicationAlt->u8Type == DSD_REQ)
-	{
+	if (pstAddIndicationAlt->u8Type == DSD_REQ) {
 		pstDeletionRequest = (stLocalSFDeleteRequest *)pvBuffer;
 
 		ulSFID = ntohl(pstDeletionRequest->u32SFID);
-		uiSearchRuleIndex=SearchSfid(Adapter,ulSFID);
+		uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
 
-		if(uiSearchRuleIndex < NO_OF_QUEUES)
-		{
-			deleteSFBySfid(Adapter,uiSearchRuleIndex);
+		if (uiSearchRuleIndex < NO_OF_QUEUES) {
+			deleteSFBySfid(Adapter, uiSearchRuleIndex);
 			Adapter->u32TotalDSD++;
 		}
 		return 1;
 	}
 
-
-	if( (pstAddIndicationAlt->u8Type == DSD_RSP) ||
-		(pstAddIndicationAlt->u8Type == DSD_ACK))
-	{
-		//No Special handling send the message as it is
+	if ((pstAddIndicationAlt->u8Type == DSD_RSP) ||
+		(pstAddIndicationAlt->u8Type == DSD_ACK)) {
+		/* No Special handling send the message as it is */
 		return 1;
 	}
-	// For DSA_REQ, only up to "psfAuthorizedSet" parameter should be accessed by driver!
+	/* For DSA_REQ, only up to "psfAuthorizedSet" parameter should be accessed by driver! */
 
-	pstAddIndication=kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
-	if(NULL==pstAddIndication)
+	pstAddIndication = kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
+	if (pstAddIndication == NULL)
 		return 0;
 
 	/* AUTHORIZED SET */
 	pstAddIndication->psfAuthorizedSet = (stServiceFlowParamSI *)
 			GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
-	if(!pstAddIndication->psfAuthorizedSet)
+	if (!pstAddIndication->psfAuthorizedSet) {
+		kfree(pstAddIndication);
 		return 0;
+	}
 
-	if(StoreSFParam(Adapter,(PUCHAR)&pstAddIndicationAlt->sfAuthorizedSet,
-				(ULONG)pstAddIndication->psfAuthorizedSet)!= 1)
+	if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAuthorizedSet,
+				(ULONG)pstAddIndication->psfAuthorizedSet) != 1) {
+		kfree(pstAddIndication);
 		return 0;
+	}
 
 	/* this can't possibly be right */
 	pstAddIndication->psfAuthorizedSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfAuthorizedSet);
 
-	if(pstAddIndicationAlt->u8Type == DSA_REQ)
-	{
+	if (pstAddIndicationAlt->u8Type == DSA_REQ) {
 		stLocalSFAddRequest AddRequest;
 
 		AddRequest.u8Type = pstAddIndicationAlt->u8Type;
@@ -1724,18 +1421,18 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
 		AddRequest.u16TID = pstAddIndicationAlt->u16TID;
 		AddRequest.u16CID = pstAddIndicationAlt->u16CID;
 		AddRequest.u16VCID = pstAddIndicationAlt->u16VCID;
-		AddRequest.psfParameterSet =pstAddIndication->psfAuthorizedSet ;
+		AddRequest.psfParameterSet = pstAddIndication->psfAuthorizedSet;
 		(*puBufferLength) = sizeof(stLocalSFAddRequest);
-		memcpy(pvBuffer,&AddRequest,sizeof(stLocalSFAddRequest));
+		memcpy(pvBuffer, &AddRequest, sizeof(stLocalSFAddRequest));
+		kfree(pstAddIndication);
 		return 1;
 	}
 
-	// Since it's not DSA_REQ, we can access all field in pstAddIndicationAlt
-
-	//We need to extract the structure from the buffer and pack it differently
+	/* Since it's not DSA_REQ, we can access all field in pstAddIndicationAlt */
+	/* We need to extract the structure from the buffer and pack it differently */
 
 	pstAddIndication->u8Type = pstAddIndicationAlt->u8Type;
-	pstAddIndication->eConnectionDir= pstAddIndicationAlt->u8Direction ;
+	pstAddIndication->eConnectionDir = pstAddIndicationAlt->u8Direction;
 	pstAddIndication->u16TID = pstAddIndicationAlt->u16TID;
 	pstAddIndication->u16CID = pstAddIndicationAlt->u16CID;
 	pstAddIndication->u16VCID = pstAddIndicationAlt->u16VCID;
@@ -1744,21 +1441,28 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
 	/* ADMITTED SET */
 	pstAddIndication->psfAdmittedSet = (stServiceFlowParamSI *)
 			GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
-	if(!pstAddIndication->psfAdmittedSet)
+	if (!pstAddIndication->psfAdmittedSet) {
+		kfree(pstAddIndication);
 		return 0;
-	if(StoreSFParam(Adapter,(PUCHAR)&pstAddIndicationAlt->sfAdmittedSet,(ULONG)pstAddIndication->psfAdmittedSet) != 1)
+	}
+	if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAdmittedSet, (ULONG)pstAddIndication->psfAdmittedSet) != 1) {
+		kfree(pstAddIndication);
 		return 0;
+	}
 
 	pstAddIndication->psfAdmittedSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfAdmittedSet);
 
-
 	/* ACTIVE SET */
 	pstAddIndication->psfActiveSet = (stServiceFlowParamSI *)
 			GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
-	if(!pstAddIndication->psfActiveSet)
+	if (!pstAddIndication->psfActiveSet) {
+		kfree(pstAddIndication);
 		return 0;
-	if(StoreSFParam(Adapter,(PUCHAR)&pstAddIndicationAlt->sfActiveSet,(ULONG)pstAddIndication->psfActiveSet) != 1)
+	}
+	if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfActiveSet, (ULONG)pstAddIndication->psfActiveSet) != 1) {
+		kfree(pstAddIndication);
 		return 0;
+	}
 
 	pstAddIndication->psfActiveSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfActiveSet);
 
@@ -1768,47 +1472,41 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
 	return 1;
 }
 
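[Reviewer note] The kfree(pstAddIndication) calls added in the hunks above plug a real leak: before this change, every early `return 0` taken after the kmalloc() abandoned the allocation. The usual kernel alternative to repeating the cleanup at each exit point is a single unwind label. Roughly, as a sketch of the pattern rather than the committed code:

	pstAddIndication = kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
	if (pstAddIndication == NULL)
		return 0;

	pstAddIndication->psfAuthorizedSet = (stServiceFlowParamSI *)
			GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
	if (!pstAddIndication->psfAuthorizedSet)
		goto err_free;	/* one cleanup path instead of repeated kfree() calls */
	if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAuthorizedSet,
				(ULONG)pstAddIndication->psfAuthorizedSet) != 1)
		goto err_free;
	/* ... the admitted and active sets would take the same two checks ... */
	return 1;

err_free:
	kfree(pstAddIndication);
	return 0;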
1771
1772static inline stLocalSFAddIndicationAlt 1475static inline stLocalSFAddIndicationAlt
1773*RestoreCmControlResponseMessage(register PMINI_ADAPTER Adapter,register PVOID pvBuffer) 1476*RestoreCmControlResponseMessage(register PMINI_ADAPTER Adapter, register PVOID pvBuffer)
1774{ 1477{
1775 ULONG ulStatus=0; 1478 ULONG ulStatus = 0;
1776 stLocalSFAddIndication *pstAddIndication = NULL; 1479 stLocalSFAddIndication *pstAddIndication = NULL;
1777 stLocalSFAddIndicationAlt *pstAddIndicationDest = NULL; 1480 stLocalSFAddIndicationAlt *pstAddIndicationDest = NULL;
1778 pstAddIndication = (stLocalSFAddIndication *)(pvBuffer);
1779 1481
1780 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "=====>" ); 1482 pstAddIndication = (stLocalSFAddIndication *)(pvBuffer);
1483 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "=====>");
1781 if ((pstAddIndication->u8Type == DSD_REQ) || 1484 if ((pstAddIndication->u8Type == DSD_REQ) ||
1782 (pstAddIndication->u8Type == DSD_RSP) || 1485 (pstAddIndication->u8Type == DSD_RSP) ||
1783 (pstAddIndication->u8Type == DSD_ACK)) 1486 (pstAddIndication->u8Type == DSD_ACK))
1784 {
1785 return (stLocalSFAddIndicationAlt *)pvBuffer; 1487 return (stLocalSFAddIndicationAlt *)pvBuffer;
1786 }
1787 1488
1788 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Inside RestoreCmControlResponseMessage "); 1489 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Inside RestoreCmControlResponseMessage ");
1789 /* 1490 /*
1790 //Need to Allocate memory to contain the SUPER Large structures 1491 * Need to Allocate memory to contain the SUPER Large structures
1791 //Our driver can't create these structures on Stack :( 1492 * Our driver can't create these structures on Stack :(
1792 */ 1493 */
1793 pstAddIndicationDest=kmalloc(sizeof(stLocalSFAddIndicationAlt), GFP_KERNEL); 1494 pstAddIndicationDest = kmalloc(sizeof(stLocalSFAddIndicationAlt), GFP_KERNEL);
1794 1495
1795 if(pstAddIndicationDest) 1496 if (pstAddIndicationDest) {
1796 { 1497 memset(pstAddIndicationDest, 0, sizeof(stLocalSFAddIndicationAlt));
1797 memset(pstAddIndicationDest,0,sizeof(stLocalSFAddIndicationAlt)); 1498 } else {
1798 } 1499 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Failed to allocate memory for SF Add Indication Structure ");
1799 else
1800 {
1801 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Failed to allocate memory for SF Add Indication Structure ");
1802 return NULL; 1500 return NULL;
1803 } 1501 }
1804 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Type : 0x%X",pstAddIndication->u8Type); 1502 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Type : 0x%X", pstAddIndication->u8Type);
1805 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Direction : 0x%X",pstAddIndication->eConnectionDir); 1503 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Direction : 0x%X", pstAddIndication->eConnectionDir);
1806 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8TID : 0x%X",ntohs(pstAddIndication->u16TID)); 1504 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8TID : 0x%X", ntohs(pstAddIndication->u16TID));
1807 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8CID : 0x%X",ntohs(pstAddIndication->u16CID)); 1505 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8CID : 0x%X", ntohs(pstAddIndication->u16CID));
1808 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u16VCID : 0x%X",ntohs(pstAddIndication->u16VCID)); 1506 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u16VCID : 0x%X", ntohs(pstAddIndication->u16VCID));
1809 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-autorized set loc : %p",pstAddIndication->psfAuthorizedSet); 1507 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-authorized set loc : %p", pstAddIndication->psfAuthorizedSet);
1810 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-admitted set loc : %p",pstAddIndication->psfAdmittedSet); 1508 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-admitted set loc : %p", pstAddIndication->psfAdmittedSet);
1811 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-Active set loc : %p",pstAddIndication->psfActiveSet); 1509 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-Active set loc : %p", pstAddIndication->psfActiveSet);
1812 1510
1813 pstAddIndicationDest->u8Type = pstAddIndication->u8Type; 1511 pstAddIndicationDest->u8Type = pstAddIndication->u8Type;
1814 pstAddIndicationDest->u8Direction = pstAddIndication->eConnectionDir; 1512 pstAddIndicationDest->u8Direction = pstAddIndication->eConnectionDir;
@@ -1817,42 +1515,39 @@ static inline stLocalSFAddIndicationAlt
1817 pstAddIndicationDest->u16VCID = pstAddIndication->u16VCID; 1515 pstAddIndicationDest->u16VCID = pstAddIndication->u16VCID;
1818 pstAddIndicationDest->u8CC = pstAddIndication->u8CC; 1516 pstAddIndicationDest->u8CC = pstAddIndication->u8CC;
1819 1517
1820 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Active Set "); 1518 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Active Set ");
1821 ulStatus=RestoreSFParam(Adapter,(ULONG)pstAddIndication->psfActiveSet, (PUCHAR)&pstAddIndicationDest->sfActiveSet); 1519 ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfActiveSet, (PUCHAR)&pstAddIndicationDest->sfActiveSet);
1822 if(ulStatus != 1) 1520 if (ulStatus != 1)
1823 {
1824 goto failed_restore_sf_param; 1521 goto failed_restore_sf_param;
1825 } 1522
1826 if(pstAddIndicationDest->sfActiveSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF) 1523 if (pstAddIndicationDest->sfActiveSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
1827 pstAddIndicationDest->sfActiveSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF; 1524 pstAddIndicationDest->sfActiveSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
1828 1525
1829 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Admitted Set "); 1526 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Admitted Set ");
1830 ulStatus=RestoreSFParam(Adapter,(ULONG)pstAddIndication->psfAdmittedSet,(PUCHAR)&pstAddIndicationDest->sfAdmittedSet); 1527 ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfAdmittedSet, (PUCHAR)&pstAddIndicationDest->sfAdmittedSet);
1831 if(ulStatus != 1) 1528 if (ulStatus != 1)
1832 {
1833 goto failed_restore_sf_param; 1529 goto failed_restore_sf_param;
1834 } 1530
1835 if(pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF) 1531 if (pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
1836 pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF; 1532 pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
1837 1533
1838 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Authorized Set "); 1534 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Authorized Set ");
1839 ulStatus=RestoreSFParam(Adapter,(ULONG)pstAddIndication->psfAuthorizedSet,(PUCHAR)&pstAddIndicationDest->sfAuthorizedSet); 1535 ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfAuthorizedSet, (PUCHAR)&pstAddIndicationDest->sfAuthorizedSet);
1840 if(ulStatus != 1) 1536 if (ulStatus != 1)
1841 {
1842 goto failed_restore_sf_param; 1537 goto failed_restore_sf_param;
1843 } 1538
1844 if(pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF) 1539 if (pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
1845 pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF; 1540 pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
1846 1541
1847 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dumping the whole raw packet"); 1542 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dumping the whole raw packet");
1848 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================"); 1543 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
1849 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " pstAddIndicationDest->sfActiveSet size %zx %p", sizeof(*pstAddIndicationDest), pstAddIndicationDest); 1544 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " pstAddIndicationDest->sfActiveSet size %zx %p", sizeof(*pstAddIndicationDest), pstAddIndicationDest);
1850 //BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, (unsigned char *)pstAddIndicationDest, sizeof(*pstAddIndicationDest)); 1545 /* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, (unsigned char *)pstAddIndicationDest, sizeof(*pstAddIndicationDest)); */
1851 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================"); 1546 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
1852 return pstAddIndicationDest; 1547 return pstAddIndicationDest;
1853failed_restore_sf_param: 1548failed_restore_sf_param:
1854 kfree(pstAddIndicationDest); 1549 kfree(pstAddIndicationDest);
1855 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "<=====" ); 1550 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "<=====");
1856 return NULL; 1551 return NULL;
1857} 1552}
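
RestoreCmControlResponseMessage() funnels all three RestoreSFParam() failures through the single failed_restore_sf_param label, so the kmalloc'ed destination is freed exactly once on any exit. A compilable sketch of that single-cleanup-label shape (restore_set() is a hypothetical stand-in that always succeeds here; the shape is the point):

#include <stdlib.h>

static int restore_set(int which) { return which < 3; }

static void *restore_message(void)
{
	void *dest = malloc(256);
	int i;

	if (!dest)
		return NULL;
	for (i = 0; i < 3; i++) {
		if (restore_set(i) != 1)
			goto failed;	/* one label, one cleanup */
	}
	return dest;
failed:
	free(dest);
	return NULL;
}
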
1858 1553
@@ -1860,7 +1555,7 @@ ULONG SetUpTargetDsxBuffers(PMINI_ADAPTER Adapter)
1860{ 1555{
1861 ULONG ulTargetDsxBuffersBase = 0; 1556 ULONG ulTargetDsxBuffersBase = 0;
1862 ULONG ulCntTargetBuffers; 1557 ULONG ulCntTargetBuffers;
1863 ULONG ulIndex=0; 1558 ULONG i;
1864 int Status; 1559 int Status;
1865 1560
1866 if (!Adapter) { 1561 if (!Adapter) {
@@ -1868,411 +1563,354 @@ ULONG SetUpTargetDsxBuffers(PMINI_ADAPTER Adapter)
1868 return 0; 1563 return 0;
1869 } 1564 }
1870 1565
1871 if(Adapter->astTargetDsxBuffer[0].ulTargetDsxBuffer) 1566 if (Adapter->astTargetDsxBuffer[0].ulTargetDsxBuffer)
1872 return 1; 1567 return 1;
1873 1568
1874 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Size of Each DSX Buffer(Also size of ServiceFlowParamSI): %zx ",sizeof(stServiceFlowParamSI)); 1569 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Size of Each DSX Buffer(Also size of ServiceFlowParamSI): %zx ", sizeof(stServiceFlowParamSI));
1875 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Reading DSX buffer From Target location %x ",DSX_MESSAGE_EXCHANGE_BUFFER); 1570 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Reading DSX buffer From Target location %x ", DSX_MESSAGE_EXCHANGE_BUFFER);
1876 1571
1877 Status = rdmalt(Adapter, DSX_MESSAGE_EXCHANGE_BUFFER, 1572 Status = rdmalt(Adapter, DSX_MESSAGE_EXCHANGE_BUFFER, (PUINT)&ulTargetDsxBuffersBase, sizeof(UINT));
1878 (PUINT)&ulTargetDsxBuffersBase, sizeof(UINT)); 1573 if (Status < 0) {
1879 if(Status < 0) 1574 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "RDM failed!!");
1880 {
1881 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "RDM failed!!");
1882 return 0; 1575 return 0;
1883 } 1576 }
1884 1577
1885 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Base Address Of DSX Target Buffer : 0x%lx",ulTargetDsxBuffersBase); 1578 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Base Address Of DSX Target Buffer : 0x%lx", ulTargetDsxBuffersBase);
1886 1579 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Tgt Buffer is Now %lx :", ulTargetDsxBuffersBase);
1887 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Tgt Buffer is Now %lx :",ulTargetDsxBuffersBase); 1580 ulCntTargetBuffers = DSX_MESSAGE_EXCHANGE_BUFFER_SIZE / sizeof(stServiceFlowParamSI);
1888
1889 ulCntTargetBuffers = DSX_MESSAGE_EXCHANGE_BUFFER_SIZE/sizeof(stServiceFlowParamSI);
1890 1581
1891 Adapter->ulTotalTargetBuffersAvailable = 1582 Adapter->ulTotalTargetBuffersAvailable =
1892 ulCntTargetBuffers > MAX_TARGET_DSX_BUFFERS ? 1583 ulCntTargetBuffers > MAX_TARGET_DSX_BUFFERS ?
1893 MAX_TARGET_DSX_BUFFERS : ulCntTargetBuffers; 1584 MAX_TARGET_DSX_BUFFERS : ulCntTargetBuffers;
1894 1585
1895 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Total Target DSX Buffer setup %lx ",Adapter->ulTotalTargetBuffersAvailable); 1586 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Total Target DSX Buffer setup %lx ", Adapter->ulTotalTargetBuffersAvailable);
1896 1587
1897 for(ulIndex=0; ulIndex < Adapter->ulTotalTargetBuffersAvailable ; ulIndex++) 1588 for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
1898 { 1589 Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer = ulTargetDsxBuffersBase;
1899 Adapter->astTargetDsxBuffer[ulIndex].ulTargetDsxBuffer = ulTargetDsxBuffersBase; 1590 Adapter->astTargetDsxBuffer[i].valid = 1;
1900 Adapter->astTargetDsxBuffer[ulIndex].valid=1; 1591 Adapter->astTargetDsxBuffer[i].tid = 0;
1901 Adapter->astTargetDsxBuffer[ulIndex].tid=0; 1592 ulTargetDsxBuffersBase += sizeof(stServiceFlowParamSI);
1902 ulTargetDsxBuffersBase+=sizeof(stServiceFlowParamSI); 1593 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Target DSX Buffer %lx setup at 0x%lx",
1903 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Target DSX Buffer %lx setup at 0x%lx", 1594 i, Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer);
1904 ulIndex, Adapter->astTargetDsxBuffer[ulIndex].ulTargetDsxBuffer);
1905 } 1595 }
1906 Adapter->ulCurrentTargetBuffer = 0; 1596 Adapter->ulCurrentTargetBuffer = 0;
1907 Adapter->ulFreeTargetBufferCnt = Adapter->ulTotalTargetBuffersAvailable; 1597 Adapter->ulFreeTargetBufferCnt = Adapter->ulTotalTargetBuffersAvailable;
1908 return 1; 1598 return 1;
1909} 1599}
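
SetUpTargetDsxBuffers() divides the device's DSX exchange window into sizeof(stServiceFlowParamSI)-sized slots and clamps the count to MAX_TARGET_DSX_BUFFERS. A sketch of the carving logic under hypothetical sizes:

/* Carving a fixed device window into equal slots; sizes are hypothetical. */
#define WINDOW_SIZE	4096u
#define SLOT_SIZE	512u
#define MAX_SLOTS	6u

struct dsx_slot { unsigned long addr; int valid; };

static unsigned int carve(unsigned long base, struct dsx_slot *slots)
{
	unsigned int n = WINDOW_SIZE / SLOT_SIZE;	/* 8 candidates */
	unsigned int i;

	if (n > MAX_SLOTS)
		n = MAX_SLOTS;		/* clamp to what the table can track */
	for (i = 0; i < n; i++) {
		slots[i].addr = base;
		slots[i].valid = 1;
		base += SLOT_SIZE;	/* next slot begins where this one ends */
	}
	return n;
}
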
1910 1600
1911static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid) 1601static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter, B_UINT16 tid)
1912{ 1602{
1913 ULONG ulTargetDSXBufferAddress; 1603 ULONG ulTargetDSXBufferAddress;
1914 ULONG ulTargetDsxBufferIndexToUse,ulMaxTry; 1604 ULONG ulTargetDsxBufferIndexToUse, ulMaxTry;
1915 1605
1916 if((Adapter->ulTotalTargetBuffersAvailable == 0)|| 1606 if ((Adapter->ulTotalTargetBuffersAvailable == 0) || (Adapter->ulFreeTargetBufferCnt == 0)) {
1917 (Adapter->ulFreeTargetBufferCnt == 0)) 1607 ClearTargetDSXBuffer(Adapter, tid, FALSE);
1918 {
1919 ClearTargetDSXBuffer(Adapter,tid,FALSE);
1920 return 0; 1608 return 0;
1921 } 1609 }
1922 1610
1923 ulTargetDsxBufferIndexToUse = Adapter->ulCurrentTargetBuffer; 1611 ulTargetDsxBufferIndexToUse = Adapter->ulCurrentTargetBuffer;
1924 ulMaxTry = Adapter->ulTotalTargetBuffersAvailable; 1612 ulMaxTry = Adapter->ulTotalTargetBuffersAvailable;
1925 while((ulMaxTry)&&(Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid != 1)) 1613 while ((ulMaxTry) && (Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid != 1)) {
1926 { 1614 ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse+1) % Adapter->ulTotalTargetBuffersAvailable;
1927 ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse+1)% 1615 ulMaxTry--;
1928 Adapter->ulTotalTargetBuffersAvailable;
1929 ulMaxTry--;
1930 } 1616 }
1931 1617
1932 if(ulMaxTry==0) 1618 if (ulMaxTry == 0) {
1933 { 1619 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "\n GetNextTargetBufferLocation : Error No Free Target DSX Buffers FreeCnt : %lx ", Adapter->ulFreeTargetBufferCnt);
1934 BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "\n GetNextTargetBufferLocation : Error No Free Target DSX Buffers FreeCnt : %lx ",Adapter->ulFreeTargetBufferCnt); 1620 ClearTargetDSXBuffer(Adapter, tid, FALSE);
1935 ClearTargetDSXBuffer(Adapter,tid,FALSE);
1936 return 0; 1621 return 0;
1937 } 1622 }
1938 1623
1939 1624 ulTargetDSXBufferAddress = Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].ulTargetDsxBuffer;
1940 ulTargetDSXBufferAddress = 1625 Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid = 0;
1941 Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].ulTargetDsxBuffer; 1626 Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].tid = tid;
1942 Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid=0;
1943 Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].tid=tid;
1944 Adapter->ulFreeTargetBufferCnt--; 1627 Adapter->ulFreeTargetBufferCnt--;
1945 1628 ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse+1)%Adapter->ulTotalTargetBuffersAvailable;
1946
1947 ulTargetDsxBufferIndexToUse =
1948 (ulTargetDsxBufferIndexToUse+1)%Adapter->ulTotalTargetBuffersAvailable;
1949 Adapter->ulCurrentTargetBuffer = ulTargetDsxBufferIndexToUse; 1629 Adapter->ulCurrentTargetBuffer = ulTargetDsxBufferIndexToUse;
1950 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "GetNextTargetBufferLocation :Returning address %lx tid %d\n", 1630 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "GetNextTargetBufferLocation :Returning address %lx tid %d\n", ulTargetDSXBufferAddress, tid);
1951 ulTargetDSXBufferAddress,tid); 1631
1952 return ulTargetDSXBufferAddress; 1632 return ulTargetDSXBufferAddress;
1953} 1633}
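
GetNextTargetBufferLocation() resumes its scan at ulCurrentTargetBuffer and bounds the walk with ulMaxTry, so the loop terminates even when every slot is claimed. A reduced sketch of that bounded round-robin search:

#define NSLOTS 8

struct slot { int valid; };

static int next_free_slot(struct slot *s, unsigned int *cursor)
{
	unsigned int idx = *cursor, tries = NSLOTS;

	while (tries && !s[idx].valid) {
		idx = (idx + 1) % NSLOTS;	/* wrap around the table */
		tries--;
	}
	if (!tries)
		return -1;			/* nothing free */
	s[idx].valid = 0;			/* claim the slot */
	*cursor = (idx + 1) % NSLOTS;		/* resume after the claim */
	return (int)idx;
}
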
1954 1634
1955 1635int AllocAdapterDsxBuffer(PMINI_ADAPTER Adapter)
1956INT AllocAdapterDsxBuffer(PMINI_ADAPTER Adapter)
1957{ 1636{
1958 /* 1637 /*
1959 //Need to Allocate memory to contain the SUPER Large structures 1638 * Need to Allocate memory to contain the SUPER Large structures
1960 //Our driver can't create these structures on Stack 1639 * Our driver can't create these structures on Stack
1961 */ 1640 */
1962 Adapter->caDsxReqResp=kmalloc(sizeof(stLocalSFAddIndicationAlt)+LEADER_SIZE, GFP_KERNEL); 1641 Adapter->caDsxReqResp = kmalloc(sizeof(stLocalSFAddIndicationAlt)+LEADER_SIZE, GFP_KERNEL);
1963 if(!Adapter->caDsxReqResp) 1642 if (!Adapter->caDsxReqResp)
1964 return -ENOMEM; 1643 return -ENOMEM;
1644
1965 return 0; 1645 return 0;
1966} 1646}
1967 1647
1968INT FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter) 1648int FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter)
1969{ 1649{
1970 kfree(Adapter->caDsxReqResp); 1650 kfree(Adapter->caDsxReqResp);
1971 return 0; 1651 return 0;
1972
1973} 1652}
1974/** 1653
1975@ingroup ctrl_pkt_functions 1654/*
1976This routinue would process the Control responses 1655 * @ingroup ctrl_pkt_functions
1977for the Connection Management. 1656 * This routine processes the Control responses
1978@return - Queue index for the free SFID else returns Invalid Index. 1657 * for the Connection Management.
1979*/ 1658 * @return - Queue index for the free SFID else returns Invalid Index.
1980BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adapter structure*/ 1659 */
1981 PVOID pvBuffer /**Starting Address of the Buffer, that contains the AddIndication Data*/ 1660BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /* Pointer to the Adapter structure */
1982 ) 1661 PVOID pvBuffer /* Starting Address of the Buffer, that contains the AddIndication Data */)
1983{ 1662{
1984 stServiceFlowParamSI *psfLocalSet=NULL; 1663 stServiceFlowParamSI *psfLocalSet = NULL;
1985 stLocalSFAddIndicationAlt *pstAddIndication = NULL; 1664 stLocalSFAddIndicationAlt *pstAddIndication = NULL;
1986 stLocalSFChangeIndicationAlt *pstChangeIndication = NULL; 1665 stLocalSFChangeIndicationAlt *pstChangeIndication = NULL;
1987 PLEADER pLeader=NULL; 1666 PLEADER pLeader = NULL;
1667
1988 /* 1668 /*
1989 //Otherwise the message contains a target address from where we need to 1669 * Otherwise the message contains a target address from where we need to
1990 //read out the rest of the service flow param structure 1670 * read out the rest of the service flow param structure
1991 */ 1671 */
1992 if((pstAddIndication = RestoreCmControlResponseMessage(Adapter,pvBuffer)) 1672 pstAddIndication = RestoreCmControlResponseMessage(Adapter, pvBuffer);
1993 == NULL) 1673 if (pstAddIndication == NULL) {
1994 { 1674 ClearTargetDSXBuffer(Adapter, ((stLocalSFAddIndication *)pvBuffer)->u16TID, FALSE);
1995 ClearTargetDSXBuffer(Adapter,((stLocalSFAddIndication *)pvBuffer)->u16TID, FALSE); 1675 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Error in restoring Service Flow param structure from DSx message");
1996 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "Error in restoring Service Flow param structure from DSx message");
1997 return FALSE; 1676 return FALSE;
1998 } 1677 }
1999 1678
2000 DumpCmControlPacket(pstAddIndication); 1679 DumpCmControlPacket(pstAddIndication);
2001 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "====>"); 1680 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "====>");
2002 pLeader = (PLEADER)Adapter->caDsxReqResp; 1681 pLeader = (PLEADER)Adapter->caDsxReqResp;
2003 1682
2004 pLeader->Status =CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ; 1683 pLeader->Status = CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ;
2005 pLeader->Vcid = 0; 1684 pLeader->Vcid = 0;
2006 1685
2007 ClearTargetDSXBuffer(Adapter,pstAddIndication->u16TID,FALSE); 1686 ClearTargetDSXBuffer(Adapter, pstAddIndication->u16TID, FALSE);
2008 BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n",pstAddIndication->u16TID); 1687 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n", pstAddIndication->u16TID);
2009 switch(pstAddIndication->u8Type) 1688 switch (pstAddIndication->u8Type) {
1689 case DSA_REQ:
2010 { 1690 {
2011 case DSA_REQ: 1691 pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
2012 { 1692 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n");
2013 pLeader->PLength = sizeof(stLocalSFAddIndicationAlt); 1693 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength);
2014 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n"); 1694 *((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
2015 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength ); 1695 = *pstAddIndication;
2016 *((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE])) 1696 ((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_RSP;
2017 = *pstAddIndication; 1697
2018 ((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_RSP; 1698 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID));
2019 1699 CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
2020 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID)); 1700 kfree(pstAddIndication);
2021 CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp); 1701 }
2022 kfree(pstAddIndication); 1702 break;
2023 } 1703 case DSA_RSP:
2024 break; 1704 {
2025 case DSA_RSP: 1705 pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
2026 { 1706 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d",
2027 pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
2028 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d",
2029 pLeader->PLength); 1707 pLeader->PLength);
2030 *((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE])) 1708 *((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
2031 = *pstAddIndication; 1709 = *pstAddIndication;
2032 ((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK; 1710 ((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK;
2033 1711
2034 }//no break here..we should go down. 1712 } /* no break here; intentional fall-through to DSA_ACK */
2035 case DSA_ACK: 1713 case DSA_ACK:
2036 { 1714 {
2037 UINT uiSearchRuleIndex=0; 1715 UINT uiSearchRuleIndex = 0;
2038 1716
2039 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X", 1717 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
2040 ntohs(pstAddIndication->u16VCID)); 1718 ntohs(pstAddIndication->u16VCID));
2041 uiSearchRuleIndex=SearchFreeSfid(Adapter); 1719 uiSearchRuleIndex = SearchFreeSfid(Adapter);
2042 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"uiSearchRuleIndex:0x%X ", 1720 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "uiSearchRuleIndex:0x%X ",
2043 uiSearchRuleIndex); 1721 uiSearchRuleIndex);
2044 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Direction:0x%X ", 1722 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Direction:0x%X ",
2045 pstAddIndication->u8Direction); 1723 pstAddIndication->u8Direction);
2046 if((uiSearchRuleIndex< NO_OF_QUEUES) ) 1724 if ((uiSearchRuleIndex < NO_OF_QUEUES)) {
2047 { 1725 Adapter->PackInfo[uiSearchRuleIndex].ucDirection =
2048 Adapter->PackInfo[uiSearchRuleIndex].ucDirection = 1726 pstAddIndication->u8Direction;
2049 pstAddIndication->u8Direction; 1727 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ",
2050 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ",
2051 pstAddIndication->sfActiveSet.bValid); 1728 pstAddIndication->sfActiveSet.bValid);
2052 if(pstAddIndication->sfActiveSet.bValid==TRUE) 1729 if (pstAddIndication->sfActiveSet.bValid == TRUE)
2053 { 1730 Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
2054 Adapter->PackInfo[uiSearchRuleIndex].bActiveSet=TRUE; 1731
2055 } 1732 if (pstAddIndication->sfAuthorizedSet.bValid == TRUE)
2056 if(pstAddIndication->sfAuthorizedSet.bValid==TRUE) 1733 Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
2057 { 1734
2058 Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet=TRUE; 1735 if (pstAddIndication->sfAdmittedSet.bValid == TRUE)
2059 } 1736 Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
2060 if(pstAddIndication->sfAdmittedSet.bValid==TRUE) 1737
2061 { 1738 if (pstAddIndication->sfActiveSet.bValid == FALSE) {
2062 Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet=TRUE; 1739 Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
2063 } 1740 Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
2064 if(FALSE == pstAddIndication->sfActiveSet.bValid) 1741 if (pstAddIndication->sfAdmittedSet.bValid)
2065 { 1742 psfLocalSet = &pstAddIndication->sfAdmittedSet;
2066 Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE; 1743 else if (pstAddIndication->sfAuthorizedSet.bValid)
2067 Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE; 1744 psfLocalSet = &pstAddIndication->sfAuthorizedSet;
2068 if(pstAddIndication->sfAdmittedSet.bValid) 1745 } else {
2069 { 1746 psfLocalSet = &pstAddIndication->sfActiveSet;
2070 psfLocalSet = &pstAddIndication->sfAdmittedSet; 1747 Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
2071 } 1748 }
2072 else if(pstAddIndication->sfAuthorizedSet.bValid)
2073 {
2074 psfLocalSet = &pstAddIndication->sfAuthorizedSet;
2075 }
2076 }
2077 else
2078 {
2079 psfLocalSet = &pstAddIndication->sfActiveSet;
2080 Adapter->PackInfo[uiSearchRuleIndex].bActive=TRUE;
2081 }
2082
2083 if(!psfLocalSet)
2084 {
2085 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
2086 Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
2087 Adapter->PackInfo[uiSearchRuleIndex].bValid=FALSE;
2088 Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value=0;
2089 kfree(pstAddIndication);
2090 }
2091 1749
2092 else if(psfLocalSet->bValid && (pstAddIndication->u8CC == 0)) 1750 if (!psfLocalSet) {
2093 { 1751 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
2094 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSA ACK"); 1752 Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
2095 Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 1753 Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
2096 ntohs(pstAddIndication->u16VCID); 1754 Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
2097 Adapter->PackInfo[uiSearchRuleIndex].usCID = 1755 kfree(pstAddIndication);
2098 ntohs(pstAddIndication->u16CID); 1756 } else if (psfLocalSet->bValid && (pstAddIndication->u8CC == 0)) {
2099 1757 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSA ACK");
2100 if(UPLINK_DIR == pstAddIndication->u8Direction) 1758 Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstAddIndication->u16VCID);
2101 atomic_set(&Adapter->PackInfo[uiSearchRuleIndex].uiPerSFTxResourceCount, DEFAULT_PERSFCOUNT); 1759 Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstAddIndication->u16CID);
2102 CopyToAdapter(Adapter,psfLocalSet,uiSearchRuleIndex, 1760
2103 DSA_ACK, pstAddIndication); 1761 if (UPLINK_DIR == pstAddIndication->u8Direction)
2104 // don't free pstAddIndication 1762 atomic_set(&Adapter->PackInfo[uiSearchRuleIndex].uiPerSFTxResourceCount, DEFAULT_PERSFCOUNT);
2105 1763
2106 /* Inside CopyToAdapter, Sorting of all the SFs take place. 1764 CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSA_ACK, pstAddIndication);
2107 Hence any access to the newly added SF through uiSearchRuleIndex is invalid. 1765 /* don't free pstAddIndication */
2108 SHOULD BE STRICTLY AVOIDED. 1766
2109 */ 1767 /* Inside CopyToAdapter, sorting of all the SFs takes place.
2110// *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID; 1768 * Hence any access to the newly added SF through uiSearchRuleIndex is invalid.
2111 memcpy((((PUCHAR)pvBuffer)+1), &psfLocalSet->u32SFID, 4); 1769 * SHOULD BE STRICTLY AVOIDED.
2112 1770 */
2113 if(pstAddIndication->sfActiveSet.bValid == TRUE) 1771 /* *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID; */
2114 { 1772 memcpy((((PUCHAR)pvBuffer)+1), &psfLocalSet->u32SFID, 4);
2115 if(UPLINK_DIR == pstAddIndication->u8Direction) 1773
2116 { 1774 if (pstAddIndication->sfActiveSet.bValid == TRUE) {
2117 if(!Adapter->LinkUpStatus) 1775 if (UPLINK_DIR == pstAddIndication->u8Direction) {
2118 { 1776 if (!Adapter->LinkUpStatus) {
2119 netif_carrier_on(Adapter->dev); 1777 netif_carrier_on(Adapter->dev);
2120 netif_start_queue(Adapter->dev); 1778 netif_start_queue(Adapter->dev);
2121 Adapter->LinkUpStatus = 1; 1779 Adapter->LinkUpStatus = 1;
2122 if (netif_msg_link(Adapter)) 1780 if (netif_msg_link(Adapter))
2123 pr_info(PFX "%s: link up\n", Adapter->dev->name); 1781 pr_info(PFX "%s: link up\n", Adapter->dev->name);
2124 atomic_set(&Adapter->TxPktAvail, 1); 1782 atomic_set(&Adapter->TxPktAvail, 1);
2125 wake_up(&Adapter->tx_packet_wait_queue); 1783 wake_up(&Adapter->tx_packet_wait_queue);
2126 Adapter->liTimeSinceLastNetEntry = get_seconds(); 1784 Adapter->liTimeSinceLastNetEntry = get_seconds();
2127 }
2128 } 1785 }
2129 } 1786 }
2130 } 1787 }
2131 1788 } else {
2132 else 1789 Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
2133 { 1790 Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
2134 Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE; 1791 Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
2135 Adapter->PackInfo[uiSearchRuleIndex].bValid=FALSE;
2136 Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value=0;
2137 kfree(pstAddIndication);
2138 }
2139 }
2140 else
2141 {
2142 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
2143 kfree(pstAddIndication); 1792 kfree(pstAddIndication);
2144 return FALSE;
2145 } 1793 }
2146 } 1794 } else {
2147 break; 1795 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
2148 case DSC_REQ:
2149 {
2150 pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
2151 pstChangeIndication = (stLocalSFChangeIndicationAlt*)pstAddIndication;
2152 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength);
2153
2154 *((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
2155 ((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP;
2156
2157 CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
2158 kfree(pstAddIndication); 1796 kfree(pstAddIndication);
1797 return FALSE;
2159 } 1798 }
2160 break; 1799 }
2161 case DSC_RSP: 1800 break;
2162 { 1801 case DSC_REQ:
2163 pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt); 1802 {
2164 pstChangeIndication = (stLocalSFChangeIndicationAlt*)pstAddIndication; 1803 pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
2165 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength); 1804 pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
2166 *((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication; 1805 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength);
2167 ((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK;
2168 }
2169 case DSC_ACK:
2170 {
2171 UINT uiSearchRuleIndex=0;
2172 1806
2173 pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication; 1807 *((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
2174 uiSearchRuleIndex=SearchSfid(Adapter,ntohl(pstChangeIndication->sfActiveSet.u32SFID)); 1808 ((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP;
2175 if(uiSearchRuleIndex > NO_OF_QUEUES-1)
2176 {
2177 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received");
2178 }
2179 if((uiSearchRuleIndex < NO_OF_QUEUES))
2180 {
2181 Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction;
2182 if(pstChangeIndication->sfActiveSet.bValid==TRUE)
2183 {
2184 Adapter->PackInfo[uiSearchRuleIndex].bActiveSet=TRUE;
2185 }
2186 if(pstChangeIndication->sfAuthorizedSet.bValid==TRUE)
2187 {
2188 Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet=TRUE;
2189 }
2190 if(pstChangeIndication->sfAdmittedSet.bValid==TRUE)
2191 {
2192 Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet=TRUE;
2193 }
2194 1809
2195 if(FALSE==pstChangeIndication->sfActiveSet.bValid) 1810 CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
2196 { 1811 kfree(pstAddIndication);
2197 Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE; 1812 }
2198 Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE; 1813 break;
2199 if(pstChangeIndication->sfAdmittedSet.bValid) 1814 case DSC_RSP:
2200 { 1815 {
2201 psfLocalSet = &pstChangeIndication->sfAdmittedSet; 1816 pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
2202 } 1817 pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
2203 else if(pstChangeIndication->sfAuthorizedSet.bValid) 1818 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength);
2204 { 1819 *((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
2205 psfLocalSet = &pstChangeIndication->sfAuthorizedSet; 1820 ((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK;
2206 } 1821 }
2207 } 1822 case DSC_ACK:
2208 1823 {
2209 else 1824 UINT uiSearchRuleIndex = 0;
2210 { 1825
2211 psfLocalSet = &pstChangeIndication->sfActiveSet; 1826 pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
2212 Adapter->PackInfo[uiSearchRuleIndex].bActive=TRUE; 1827 uiSearchRuleIndex = SearchSfid(Adapter, ntohl(pstChangeIndication->sfActiveSet.u32SFID));
2213 } 1828 if (uiSearchRuleIndex > NO_OF_QUEUES-1)
2214 if(psfLocalSet->bValid && (pstChangeIndication->u8CC == 0)) 1829 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received");
2215 { 1830
2216 Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 1831 if ((uiSearchRuleIndex < NO_OF_QUEUES)) {
2217 ntohs(pstChangeIndication->u16VCID); 1832 Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction;
2218 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "CC field is %d bvalid = %d\n", 1833 if (pstChangeIndication->sfActiveSet.bValid == TRUE)
2219 pstChangeIndication->u8CC, psfLocalSet->bValid); 1834 Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
2220 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "VCID= %d\n", ntohs(pstChangeIndication->u16VCID)); 1835
2221 Adapter->PackInfo[uiSearchRuleIndex].usCID = 1836 if (pstChangeIndication->sfAuthorizedSet.bValid == TRUE)
2222 ntohs(pstChangeIndication->u16CID); 1837 Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
2223 CopyToAdapter(Adapter,psfLocalSet,uiSearchRuleIndex, 1838
2224 DSC_ACK, pstAddIndication); 1839 if (pstChangeIndication->sfAdmittedSet.bValid == TRUE)
2225 1840 Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
2226 *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID; 1841
2227 } 1842 if (pstChangeIndication->sfActiveSet.bValid == FALSE) {
2228 else if(pstChangeIndication->u8CC == 6) 1843 Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
2229 { 1844 Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
2230 deleteSFBySfid(Adapter,uiSearchRuleIndex); 1845
2231 kfree(pstAddIndication); 1846 if (pstChangeIndication->sfAdmittedSet.bValid)
2232 } 1847 psfLocalSet = &pstChangeIndication->sfAdmittedSet;
1848 else if (pstChangeIndication->sfAuthorizedSet.bValid)
1849 psfLocalSet = &pstChangeIndication->sfAuthorizedSet;
1850 } else {
1851 psfLocalSet = &pstChangeIndication->sfActiveSet;
1852 Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
2233 } 1853 }
2234 else 1854
2235 { 1855 if (!psfLocalSet) {
2236 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID"); 1856 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
1857 Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
1858 Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
1859 Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
1860 kfree(pstAddIndication);
1861 } else if (psfLocalSet->bValid && (pstChangeIndication->u8CC == 0)) {
1862 Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstChangeIndication->u16VCID);
1863 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "CC field is %d bvalid = %d\n",
1864 pstChangeIndication->u8CC, psfLocalSet->bValid);
1865 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "VCID= %d\n", ntohs(pstChangeIndication->u16VCID));
1866 Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstChangeIndication->u16CID);
1867 CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSC_ACK, pstAddIndication);
1868
1869 *(PULONG)(((PUCHAR)pvBuffer)+1) = psfLocalSet->u32SFID;
1870 } else if (pstChangeIndication->u8CC == 6) {
1871 deleteSFBySfid(Adapter, uiSearchRuleIndex);
2237 kfree(pstAddIndication); 1872 kfree(pstAddIndication);
2238 return FALSE;
2239 } 1873 }
1874 } else {
1875 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID");
1876 kfree(pstAddIndication);
1877 return FALSE;
2240 } 1878 }
2241 break; 1879 }
2242 case DSD_REQ: 1880 break;
2243 { 1881 case DSD_REQ:
2244 UINT uiSearchRuleIndex; 1882 {
2245 ULONG ulSFID; 1883 UINT uiSearchRuleIndex;
2246 1884 ULONG ulSFID;
2247 pLeader->PLength = sizeof(stLocalSFDeleteIndication);
2248 *((stLocalSFDeleteIndication*)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((stLocalSFDeleteIndication*)pstAddIndication);
2249 1885
2250 ulSFID = ntohl(((stLocalSFDeleteIndication*)pstAddIndication)->u32SFID); 1886 pLeader->PLength = sizeof(stLocalSFDeleteIndication);
2251 uiSearchRuleIndex=SearchSfid(Adapter,ulSFID); 1887 *((stLocalSFDeleteIndication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((stLocalSFDeleteIndication *)pstAddIndication);
2252 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD - Removing connection %x",uiSearchRuleIndex);
2253 1888
2254 if(uiSearchRuleIndex < NO_OF_QUEUES) 1889 ulSFID = ntohl(((stLocalSFDeleteIndication *)pstAddIndication)->u32SFID);
2255 { 1890 uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
2256 //Delete All Classifiers Associated with this SFID 1891 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD - Removing connection %x", uiSearchRuleIndex);
2257 deleteSFBySfid(Adapter,uiSearchRuleIndex);
2258 Adapter->u32TotalDSD++;
2259 }
2260 1892
2261 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC"); 1893 if (uiSearchRuleIndex < NO_OF_QUEUES) {
2262 ((stLocalSFDeleteIndication*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP; 1894 /* Delete All Classifiers Associated with this SFID */
2263 CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp); 1895 deleteSFBySfid(Adapter, uiSearchRuleIndex);
2264 } 1896 Adapter->u32TotalDSD++;
2265 case DSD_RSP:
2266 {
2267 //Do nothing as SF has already got Deleted
2268 } 1897 }
2269 break; 1898
1899 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC");
1900 ((stLocalSFDeleteIndication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP;
1901 CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
1902 }
1903 case DSD_RSP:
1904 {
1905 /* Do nothing as SF has already got Deleted */
1906 }
1907 break;
2270 case DSD_ACK: 1908 case DSD_ACK:
2271 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n"); 1909 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
2272 break; 1910 break;
2273 default: 1911 default:
2274 kfree(pstAddIndication); 1912 kfree(pstAddIndication);
2275 return FALSE ; 1913 return FALSE;
2276 } 1914 }
2277 return TRUE; 1915 return TRUE;
2278} 1916}
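
Three case pairs in CmControlResponseMessage() rely on deliberate fall-through: DSA_RSP builds the DSA_ACK frame and drops into the DSA_ACK bookkeeping, and DSC_RSP/DSC_ACK and DSD_REQ/DSD_RSP do the same. A minimal illustration of the shape:

#include <stdio.h>

enum msg { DSA_RSP, DSA_ACK };

static void handle(enum msg type)
{
	switch (type) {
	case DSA_RSP:
		printf("queue DSA_ACK reply\n");
		/* fall through -- ACK bookkeeping runs for both types */
	case DSA_ACK:
		printf("update service-flow state\n");
		break;
	}
}
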
@@ -2280,78 +1918,67 @@ BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adap
2280int get_dsx_sf_data_to_application(PMINI_ADAPTER Adapter, UINT uiSFId, void __user *user_buffer) 1918int get_dsx_sf_data_to_application(PMINI_ADAPTER Adapter, UINT uiSFId, void __user *user_buffer)
2281{ 1919{
2282 int status = 0; 1920 int status = 0;
2283 struct _packet_info *psSfInfo=NULL; 1921 struct _packet_info *psSfInfo = NULL;
2284 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d",status); 1922
1923 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d", status);
2285 status = SearchSfid(Adapter, uiSFId); 1924 status = SearchSfid(Adapter, uiSFId);
2286 if (status >= NO_OF_QUEUES) { 1925 if (status >= NO_OF_QUEUES) {
2287 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID %d not present in queue !!!", uiSFId ); 1926 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID %d not present in queue !!!", uiSFId);
2288 return -EINVAL; 1927 return -EINVAL;
2289 } 1928 }
2290 BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d",status); 1929 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d", status);
2291 psSfInfo=&Adapter->PackInfo[status]; 1930 psSfInfo = &Adapter->PackInfo[status];
2292 if(psSfInfo->pstSFIndication && copy_to_user(user_buffer, 1931 if (psSfInfo->pstSFIndication && copy_to_user(user_buffer,
2293 psSfInfo->pstSFIndication, sizeof(stLocalSFAddIndicationAlt))) 1932 psSfInfo->pstSFIndication, sizeof(stLocalSFAddIndicationAlt))) {
2294 { 1933 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy to user failed SFID %d, present in queue !!!", uiSFId);
2295 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copy to user failed SFID %d, present in queue !!!", uiSFId );
2296 status = -EFAULT; 1934 status = -EFAULT;
2297 return status; 1935 return status;
2298 } 1936 }
2299 return STATUS_SUCCESS; 1937 return STATUS_SUCCESS;
2300} 1938}
2301 1939
2302VOID OverrideServiceFlowParams(PMINI_ADAPTER Adapter,PUINT puiBuffer) 1940VOID OverrideServiceFlowParams(PMINI_ADAPTER Adapter, PUINT puiBuffer)
2303{ 1941{
2304 B_UINT32 u32NumofSFsinMsg = ntohl(*(puiBuffer + 1)); 1942 B_UINT32 u32NumofSFsinMsg = ntohl(*(puiBuffer + 1));
2305 stIM_SFHostNotify *pHostInfo = NULL; 1943 stIM_SFHostNotify *pHostInfo = NULL;
2306 UINT uiSearchRuleIndex = 0; 1944 UINT uiSearchRuleIndex = 0;
2307 ULONG ulSFID = 0; 1945 ULONG ulSFID = 0;
2308 1946
2309 puiBuffer+=2; 1947 puiBuffer += 2;
1948 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32NumofSFsinMsg: 0x%x\n", u32NumofSFsinMsg);
2310 1949
2311 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32NumofSFsinMsg: 0x%x\n",u32NumofSFsinMsg); 1950 while (u32NumofSFsinMsg != 0 && u32NumofSFsinMsg < NO_OF_QUEUES) {
2312
2313 while(u32NumofSFsinMsg != 0 && u32NumofSFsinMsg < NO_OF_QUEUES)
2314 {
2315 u32NumofSFsinMsg--; 1951 u32NumofSFsinMsg--;
2316 pHostInfo = (stIM_SFHostNotify *)puiBuffer; 1952 pHostInfo = (stIM_SFHostNotify *)puiBuffer;
2317 puiBuffer = (PUINT)(pHostInfo + 1); 1953 puiBuffer = (PUINT)(pHostInfo + 1);
2318 1954
2319 ulSFID = ntohl(pHostInfo->SFID); 1955 ulSFID = ntohl(pHostInfo->SFID);
2320 uiSearchRuleIndex=SearchSfid(Adapter,ulSFID); 1956 uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
2321 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"SFID: 0x%lx\n",ulSFID); 1957 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID: 0x%lx\n", ulSFID);
2322 1958
2323 if(uiSearchRuleIndex >= NO_OF_QUEUES || uiSearchRuleIndex == HiPriority) 1959 if (uiSearchRuleIndex >= NO_OF_QUEUES || uiSearchRuleIndex == HiPriority) {
2324 { 1960 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "The SFID <%lx> doesn't exist in host entry or is Invalid\n", ulSFID);
2325 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"The SFID <%lx> doesn't exist in host entry or is Invalid\n", ulSFID);
2326 continue; 1961 continue;
2327 } 1962 }
2328 1963
2329 if(pHostInfo->RetainSF == FALSE) 1964 if (pHostInfo->RetainSF == FALSE) {
2330 { 1965 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Going to Delete SF");
2331 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Going to Delete SF"); 1966 deleteSFBySfid(Adapter, uiSearchRuleIndex);
2332 deleteSFBySfid(Adapter,uiSearchRuleIndex); 1967 } else {
2333 }
2334 else
2335 {
2336
2337 Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pHostInfo->VCID); 1968 Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pHostInfo->VCID);
2338 Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pHostInfo->newCID); 1969 Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pHostInfo->newCID);
2339 Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE; 1970 Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
2340 1971
2341 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"pHostInfo->QoSParamSet: 0x%x\n",pHostInfo->QoSParamSet); 1972 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "pHostInfo->QoSParamSet: 0x%x\n", pHostInfo->QoSParamSet);
2342 1973
2343 if(pHostInfo->QoSParamSet & 0x1) 1974 if (pHostInfo->QoSParamSet & 0x1)
2344 Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet =TRUE; 1975 Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
2345 if(pHostInfo->QoSParamSet & 0x2) 1976 if (pHostInfo->QoSParamSet & 0x2)
2346 Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet =TRUE; 1977 Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
2347 if(pHostInfo->QoSParamSet & 0x4) 1978 if (pHostInfo->QoSParamSet & 0x4) {
2348 { 1979 Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
2349 Adapter->PackInfo[uiSearchRuleIndex].bActiveSet =TRUE; 1980 Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
2350 Adapter->PackInfo[uiSearchRuleIndex].bActive=TRUE;
2351 } 1981 }
2352 } 1982 }
2353 } 1983 }
2354} 1984}
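
OverrideServiceFlowParams() decodes QoSParamSet as a bitmask: bit 0 marks the authorized set, bit 1 the admitted set, and bit 2 both marks the active set and activates the flow. A sketch of the decoding, where struct flow is a hypothetical reduction of the driver's PackInfo[] entry:

#include <stdbool.h>

struct flow { bool authorized, admitted, active_set, active; };

static void apply_qos_bits(struct flow *f, unsigned int qos)
{
	if (qos & 0x1)
		f->authorized = true;
	if (qos & 0x2)
		f->admitted = true;
	if (qos & 0x4) {		/* active set implies an active flow */
		f->active_set = true;
		f->active = true;
	}
}
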
2355
2356
2357
diff --git a/drivers/staging/bcm/led_control.h b/drivers/staging/bcm/led_control.h
index 0711ac20f6fc..ed8fbc091115 100644
--- a/drivers/staging/bcm/led_control.h
+++ b/drivers/staging/bcm/led_control.h
@@ -4,11 +4,11 @@
4/*************************TYPE DEF**********************/ 4/*************************TYPE DEF**********************/
5#define NUM_OF_LEDS 4 5#define NUM_OF_LEDS 4
6 6
7#define DSD_START_OFFSET 0x0200 7#define DSD_START_OFFSET 0x0200
8#define EEPROM_VERSION_OFFSET 0x020E 8#define EEPROM_VERSION_OFFSET 0x020E
9#define EEPROM_HW_PARAM_POINTER_ADDRESS 0x0218 9#define EEPROM_HW_PARAM_POINTER_ADDRESS 0x0218
10#define EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5 0x0220 10#define EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5 0x0220
11#define GPIO_SECTION_START_OFFSET 0x03 11#define GPIO_SECTION_START_OFFSET 0x03
12 12
13#define COMPATIBILITY_SECTION_LENGTH 42 13#define COMPATIBILITY_SECTION_LENGTH 42
14#define COMPATIBILITY_SECTION_LENGTH_MAP5 84 14#define COMPATIBILITY_SECTION_LENGTH_MAP5 84
@@ -18,27 +18,27 @@
18#define EEPROM_MAP5_MINORVERSION 0 18#define EEPROM_MAP5_MINORVERSION 0
19 19
20 20
21#define MAX_NUM_OF_BLINKS 10 21#define MAX_NUM_OF_BLINKS 10
22#define NUM_OF_GPIO_PINS 16 22#define NUM_OF_GPIO_PINS 16
23 23
24#define DISABLE_GPIO_NUM 0xFF 24#define DISABLE_GPIO_NUM 0xFF
25#define EVENT_SIGNALED 1 25#define EVENT_SIGNALED 1
26 26
27#define MAX_FILE_NAME_BUFFER_SIZE 100 27#define MAX_FILE_NAME_BUFFER_SIZE 100
28 28
29#define TURN_ON_LED(GPIO, index) do{ \ 29#define TURN_ON_LED(GPIO, index) do { \
30 UINT gpio_val = GPIO; \ 30 UINT gpio_val = GPIO; \
31 (Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \ 31 (Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \
32 wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_SET_REG, &gpio_val ,sizeof(gpio_val)) : \ 32 wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)) : \
33 wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)); \ 33 wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)); \
34 }while(0); 34 } while (0);
35 35
36#define TURN_OFF_LED(GPIO, index) do { \ 36#define TURN_OFF_LED(GPIO, index) do { \
37 UINT gpio_val = GPIO; \ 37 UINT gpio_val = GPIO; \
38 (Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \ 38 (Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \
39 wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_CLR_REG,&gpio_val ,sizeof(gpio_val)) : \ 39 wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)) : \
40 wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_SET_REG,&gpio_val ,sizeof(gpio_val)); \ 40 wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)); \
41 }while(0); 41 } while (0);
42 42
43#define B_ULONG32 unsigned long 43#define B_ULONG32 unsigned long
44 44
@@ -50,7 +50,7 @@ typedef enum _LEDColors{
50 BLUE_LED = 2, 50 BLUE_LED = 2,
51 YELLOW_LED = 3, 51 YELLOW_LED = 3,
52 GREEN_LED = 4 52 GREEN_LED = 4
53} LEDColors; /*Enumerated values of different LED types*/ 53} LEDColors; /*Enumerated values of different LED types*/
54 54
55typedef enum LedEvents { 55typedef enum LedEvents {
56 SHUTDOWN_EXIT = 0x00, 56 SHUTDOWN_EXIT = 0x00,
@@ -62,43 +62,39 @@ typedef enum LedEvents {
62 LOWPOWER_MODE_ENTER = 0x20, 62 LOWPOWER_MODE_ENTER = 0x20,
63 IDLEMODE_CONTINUE = 0x40, 63 IDLEMODE_CONTINUE = 0x40,
64 IDLEMODE_EXIT = 0x80, 64 IDLEMODE_EXIT = 0x80,
65 LED_THREAD_INACTIVE = 0x100, //Makes the LED thread Inactivce. It wil be equivallent to putting the thread on hold. 65 LED_THREAD_INACTIVE = 0x100, /* Makes the LED thread inactive, equivalent to putting the thread on hold. */
66 LED_THREAD_ACTIVE = 0x200 //Makes the LED Thread Active back. 66 LED_THREAD_ACTIVE = 0x200, /* Makes the LED thread active again. */ 67 DRIVER_HALT = 0xff
67} LedEventInfo_t; /*Enumerated values of different driver states*/ 67 DRIVER_HALT = 0xff
68 68} LedEventInfo_t; /* Enumerated values of different driver states */
69#define DRIVER_HALT 0xff 69
70 70/*
71 71 * Structure which stores the information of different LED types
72/*Structure which stores the information of different LED types 72 * and corresponding LED state information of driver states
73 * and corresponding LED state information of driver states*/ 73 */
74typedef struct LedStateInfo_t 74typedef struct LedStateInfo_t {
75{
76 UCHAR LED_Type; /* specify GPIO number - use 0xFF if not used */ 75 UCHAR LED_Type; /* specify GPIO number - use 0xFF if not used */
77 UCHAR LED_On_State; /* Bits set or reset for different states */ 76 UCHAR LED_On_State; /* Bits set or reset for different states */
78 UCHAR LED_Blink_State; /* Bits set or reset for blinking LEDs for different states */ 77 UCHAR LED_Blink_State; /* Bits set or reset for blinking LEDs for different states */
79 UCHAR GPIO_Num; 78 UCHAR GPIO_Num;
80 UCHAR BitPolarity; /*To represent whether H/W is normal polarity or reverse 79 UCHAR BitPolarity; /* To represent whether H/W is normal polarity or reverse polarity */
81 polarity*/ 80} LEDStateInfo, *pLEDStateInfo;
82}LEDStateInfo, *pLEDStateInfo;
83 81
84 82
85typedef struct _LED_INFO_STRUCT 83typedef struct _LED_INFO_STRUCT {
86{
87 LEDStateInfo LEDState[NUM_OF_LEDS]; 84 LEDStateInfo LEDState[NUM_OF_LEDS];
88 BOOLEAN bIdleMode_tx_from_host; /*Variable to notify whether driver came out 85 BOOLEAN bIdleMode_tx_from_host; /* Variable to notify whether driver came out from idlemode due to Host or target*/
89 from idlemode due to Host or target*/
90 BOOLEAN bIdle_led_off; 86 BOOLEAN bIdle_led_off;
91 wait_queue_head_t notify_led_event; 87 wait_queue_head_t notify_led_event;
92 wait_queue_head_t idleModeSyncEvent; 88 wait_queue_head_t idleModeSyncEvent;
93 struct task_struct *led_cntrl_threadid; 89 struct task_struct *led_cntrl_threadid;
94 int led_thread_running; 90 int led_thread_running;
95 BOOLEAN bLedInitDone; 91 BOOLEAN bLedInitDone;
96 92
97} LED_INFO_STRUCT, *PLED_INFO_STRUCT; 93} LED_INFO_STRUCT, *PLED_INFO_STRUCT;
98//LED Thread state. 94/* LED Thread state. */
99#define BCM_LED_THREAD_DISABLED 0 //LED Thread is not running. 95#define BCM_LED_THREAD_DISABLED 0 /* LED Thread is not running. */
100#define BCM_LED_THREAD_RUNNING_ACTIVELY 1 //LED thread is running. 96#define BCM_LED_THREAD_RUNNING_ACTIVELY 1 /* LED thread is running. */
101#define BCM_LED_THREAD_RUNNING_INACTIVELY 2 //LED thread has been put on hold 97#define BCM_LED_THREAD_RUNNING_INACTIVELY 2 /* LED thread has been put on hold */
102 98
103 99
104 100
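
The TURN_ON_LED/TURN_OFF_LED cleanups above keep the do { ... } while (0); form with a trailing semicolon inside the macro; the idiom usually omits that semicolon so the macro behaves as a single statement in unbraced if/else callers. A sketch of the conventional form with hypothetical names:

#define SET_AND_COUNT(reg, mask, cnt) do {	\
	(reg) |= (mask);			\
	(cnt)++;				\
} while (0)	/* no trailing ';' -- the caller supplies it */

static unsigned int demo(int cond)
{
	unsigned int reg = 0, cnt = 0;

	if (cond)
		SET_AND_COUNT(reg, 0x3u, cnt);
	else
		reg = 0;	/* compiles only because the macro ate no ';' */
	return reg + cnt;
}
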
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 4c77e508066b..12c691d90900 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -765,8 +765,9 @@ config COMEDI_ADV_PCI_DIO
765 default N 765 default N
766 ---help--- 766 ---help---
767 Enable support for Advantech PCI DIO cards 767 Enable support for Advantech PCI DIO cards
768 PCI-1730, PCI-1733, PCI-1734, PCI-1736UP, PCI-1750, PCI-1751, 768 PCI-1730, PCI-1733, PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U,
769 PCI-1752, PCI-1753/E, PCI-1754, PCI-1756 and PCI-1762 769 PCI-1750, PCI-1751, PCI-1752, PCI-1753/E, PCI-1754, PCI-1756,
770 PCI-1760 and PCI-1762
770 771
771 To compile this driver as a module, choose M here: the module will be 772 To compile this driver as a module, choose M here: the module will be
772 called adv_pci_dio. 773 called adv_pci_dio.
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 537e58534275..7af068f4a749 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -8,16 +8,16 @@
8/* 8/*
9Driver: adv_pci_dio 9Driver: adv_pci_dio
10Description: Advantech PCI-1730, PCI-1733, PCI-1734, PCI-1735U, 10Description: Advantech PCI-1730, PCI-1733, PCI-1734, PCI-1735U,
11 PCI-1736UP, PCI-1750, PCI-1751, PCI-1752, PCI-1753/E, 11 PCI-1736UP, PCI-1739U, PCI-1750, PCI-1751, PCI-1752,
12 PCI-1754, PCI-1756, PCI-1762 12 PCI-1753/E, PCI-1754, PCI-1756, PCI-1760, PCI-1762
13Author: Michal Dobes <dobes@tesnet.cz> 13Author: Michal Dobes <dobes@tesnet.cz>
14Devices: [Advantech] PCI-1730 (adv_pci_dio), PCI-1733, 14Devices: [Advantech] PCI-1730 (adv_pci_dio), PCI-1733,
15 PCI-1734, PCI-1735U, PCI-1736UP, PCI-1750, 15 PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U, PCI-1750,
16 PCI-1751, PCI-1752, PCI-1753, 16 PCI-1751, PCI-1752, PCI-1753,
17 PCI-1753+PCI-1753E, PCI-1754, PCI-1756, 17 PCI-1753+PCI-1753E, PCI-1754, PCI-1756,
18 PCI-1760, PCI-1762 18 PCI-1760, PCI-1762
19Status: untested 19Status: untested
20Updated: Tue, 04 May 2010 13:00:00 +0000 20Updated: Mon, 09 Jan 2012 12:40:46 +0000
21 21
22This driver supports now only insn interface for DI/DO/DIO. 22This driver supports now only insn interface for DI/DO/DIO.
23 23
@@ -51,6 +51,7 @@ Configuration options:
51/* hardware types of the cards */ 51/* hardware types of the cards */
52enum hw_cards_id { 52enum hw_cards_id {
53 TYPE_PCI1730, TYPE_PCI1733, TYPE_PCI1734, TYPE_PCI1735, TYPE_PCI1736, 53 TYPE_PCI1730, TYPE_PCI1733, TYPE_PCI1734, TYPE_PCI1735, TYPE_PCI1736,
54 TYPE_PCI1739,
54 TYPE_PCI1750, 55 TYPE_PCI1750,
55 TYPE_PCI1751, 56 TYPE_PCI1751,
56 TYPE_PCI1752, 57 TYPE_PCI1752,
@@ -109,6 +110,12 @@ enum hw_io_access {
109#define PCI1736_BOARDID 4 /* R: Board I/D switch for 1736UP */ 110#define PCI1736_BOARDID 4 /* R: Board I/D switch for 1736UP */
110#define PCI1736_MAINREG 0 /* Normal register (2) doesn't work */ 111#define PCI1736_MAINREG 0 /* Normal register (2) doesn't work */
111 112
113/* Advantech PCI-1739U */
114#define PCI1739_DIO 0 /* R/W: begin of 8255 registers block */
115#define PCI1739_ICR 32 /* W: Interrupt control register */
116#define PCI1739_ISR 32 /* R: Interrupt status register */
117#define PCI1739_BOARDID 8 /* R: Board I/D switch for 1739U */
118
112/* Advantech PCI-1750 */ 119/* Advantech PCI-1750 */
113#define PCI1750_IDI 0 /* R: Isolated digital input 0-15 */ 120#define PCI1750_IDI 0 /* R: Isolated digital input 0-15 */
114#define PCI1750_IDO 0 /* W: Isolated digital output 0-15 */ 121#define PCI1750_IDO 0 /* W: Isolated digital output 0-15 */
@@ -262,6 +269,7 @@ static DEFINE_PCI_DEVICE_TABLE(pci_dio_pci_table) = {
262 { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1734) }, 269 { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1734) },
263 { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1735) }, 270 { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1735) },
264 { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1736) }, 271 { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1736) },
272 { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1739) },
265 { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1750) }, 273 { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1750) },
266 { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1751) }, 274 { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1751) },
267 { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1752) }, 275 { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1752) },
@@ -316,6 +324,14 @@ static const struct dio_boardtype boardtypes[] = {
316 {4, PCI1736_BOARDID, 1, SDF_INTERNAL}, 324 {4, PCI1736_BOARDID, 1, SDF_INTERNAL},
317 { {0, 0, 0, 0} }, 325 { {0, 0, 0, 0} },
318 IO_8b}, 326 IO_8b},
327 {"pci1739", PCI_VENDOR_ID_ADVANTECH, 0x1739, PCIDIO_MAINREG,
328 TYPE_PCI1739,
329 { {0, 0, 0, 0}, {0, 0, 0, 0} },
330 { {0, 0, 0, 0}, {0, 0, 0, 0} },
331 { {48, PCI1739_DIO, 2, 0}, {0, 0, 0, 0} },
332 {0, 0, 0, 0},
333 { {0, 0, 0, 0} },
334 IO_8b},
319 {"pci1750", PCI_VENDOR_ID_ADVANTECH, 0x1750, PCIDIO_MAINREG, 335 {"pci1750", PCI_VENDOR_ID_ADVANTECH, 0x1750, PCIDIO_MAINREG,
320 TYPE_PCI1750, 336 TYPE_PCI1750,
321 { {0, 0, 0, 0}, {16, PCI1750_IDI, 2, 0} }, 337 { {0, 0, 0, 0}, {16, PCI1750_IDI, 2, 0} },
@@ -883,6 +899,11 @@ static int pci_dio_reset(struct comedi_device *dev)
883 outb(0, dev->iobase + PCI1736_3_INT_RF); 899 outb(0, dev->iobase + PCI1736_3_INT_RF);
884 break; 900 break;
885 901
902 case TYPE_PCI1739:
903 /* disable & clear interrupts */
904 outb(0x88, dev->iobase + PCI1739_ICR);
905 break;
906
886 case TYPE_PCI1750: 907 case TYPE_PCI1750:
887 case TYPE_PCI1751: 908 case TYPE_PCI1751:
888 /* disable & clear interrupts */ 909 /* disable & clear interrupts */
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 5cce1b5f4484..b85c8366a396 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -720,12 +720,20 @@ static int dt2801_dio_insn_config(struct comedi_device *dev,
720 which = 1; 720 which = 1;
721 721
722 /* configure */ 722 /* configure */
723 if (data[0]) { 723 switch (data[0]) {
724 case INSN_CONFIG_DIO_OUTPUT:
724 s->io_bits = 0xff; 725 s->io_bits = 0xff;
725 dt2801_writecmd(dev, DT_C_SET_DIGOUT); 726 dt2801_writecmd(dev, DT_C_SET_DIGOUT);
726 } else { 727 break;
728 case INSN_CONFIG_DIO_INPUT:
727 s->io_bits = 0; 729 s->io_bits = 0;
728 dt2801_writecmd(dev, DT_C_SET_DIGIN); 730 dt2801_writecmd(dev, DT_C_SET_DIGIN);
731 break;
732 case INSN_CONFIG_DIO_QUERY:
733 data[1] = s->io_bits ? COMEDI_OUTPUT : COMEDI_INPUT;
734 return insn->n;
735 default:
736 return -EINVAL;
729 } 737 }
730 dt2801_writedata(dev, which); 738 dt2801_writedata(dev, which);
731 739
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 32d9c42e9659..e86ab5862895 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -527,7 +527,7 @@ static void dt9812_configure_gain(struct usb_dt9812 *dev,
527 * 11x -> Gain = 0.5 527 * 11x -> Gain = 0.5
528 */ 528 */
529 case DT9812_GAIN_0PT5: 529 case DT9812_GAIN_0PT5:
530 rmw->or_value = F020_MASK_ADC0CF_AMP0GN2 || 530 rmw->or_value = F020_MASK_ADC0CF_AMP0GN2 |
531 F020_MASK_ADC0CF_AMP0GN1; 531 F020_MASK_ADC0CF_AMP0GN1;
532 break; 532 break;
533 case DT9812_GAIN_1: 533 case DT9812_GAIN_1:
@@ -540,7 +540,7 @@ static void dt9812_configure_gain(struct usb_dt9812 *dev,
540 rmw->or_value = F020_MASK_ADC0CF_AMP0GN1; 540 rmw->or_value = F020_MASK_ADC0CF_AMP0GN1;
541 break; 541 break;
542 case DT9812_GAIN_8: 542 case DT9812_GAIN_8:
543 rmw->or_value = F020_MASK_ADC0CF_AMP0GN1 || 543 rmw->or_value = F020_MASK_ADC0CF_AMP0GN1 |
544 F020_MASK_ADC0CF_AMP0GN0; 544 F020_MASK_ADC0CF_AMP0GN0;
545 break; 545 break;
546 case DT9812_GAIN_16: 546 case DT9812_GAIN_16:
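
The two dt9812 hunks above fix a real bug rather than style: || is C's logical OR, so the expression collapses to 0 or 1 instead of combining the two gain-select masks, and the intended bits were never written to the register. A stand-alone illustration, with placeholder values instead of the real F020_MASK_ADC0CF_* bits:

    #include <stdio.h>

    /* Placeholders standing in for F020_MASK_ADC0CF_AMP0GN1/AMP0GN2. */
    #define GN2 0x04
    #define GN1 0x02

    int main(void)
    {
            unsigned int wrong = GN2 || GN1; /* logical OR: yields 1 */
            unsigned int right = GN2 | GN1;  /* bitwise OR: yields 0x06 */

            printf("GN2 || GN1 = 0x%02x\n", wrong);
            printf("GN2 |  GN1 = 0x%02x\n", right);
            return 0;
    }
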
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index b692fea0d2b0..b0bc6bb877ab 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -2098,23 +2098,29 @@ static int me4000_dio_insn_config(struct comedi_device *dev,
2098 2098
2099 CALL_PDEBUG("In me4000_dio_insn_config()\n"); 2099 CALL_PDEBUG("In me4000_dio_insn_config()\n");
2100 2100
2101 if (data[0] == INSN_CONFIG_DIO_QUERY) { 2101 switch (data[0]) {
2102 default:
2103 return -EINVAL;
2104 case INSN_CONFIG_DIO_QUERY:
2102 data[1] = 2105 data[1] =
2103 (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT; 2106 (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
2104 return insn->n; 2107 return insn->n;
2108 case INSN_CONFIG_DIO_INPUT:
2109 case INSN_CONFIG_DIO_OUTPUT:
2110 break;
2105 } 2111 }
2106 2112
2107 /* 2113 /*
2108 * The input or output configuration of each digital line is 2114 * The input or output configuration of each digital line is
2109 * configured by a special insn_config instruction. chanspec 2115 * configured by a special insn_config instruction. chanspec
2110 * contains the channel to be changed, and data[0] contains the 2116 * contains the channel to be changed, and data[0] contains the
2111 * value COMEDI_INPUT or COMEDI_OUTPUT. 2117 * value INSN_CONFIG_DIO_INPUT or INSN_CONFIG_DIO_OUTPUT.
2112 * On the ME-4000 it is only possible to switch port wise (8 bit) 2118 * On the ME-4000 it is only possible to switch port wise (8 bit)
2113 */ 2119 */
2114 2120
2115 tmp = me4000_inl(dev, info->dio_context.ctrl_reg); 2121 tmp = me4000_inl(dev, info->dio_context.ctrl_reg);
2116 2122
2117 if (data[0] == COMEDI_OUTPUT) { 2123 if (data[0] == INSN_CONFIG_DIO_OUTPUT) {
2118 if (chan < 8) { 2124 if (chan < 8) {
2119 s->io_bits |= 0xFF; 2125 s->io_bits |= 0xFF;
2120 tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 | 2126 tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 |
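
Both the dt2801 change earlier and the me4000 change here adopt the switch-based handling of INSN_CONFIG_DIO_* requests that the comedi core expects. A minimal sketch of that handler shape, assuming a hypothetical board whose direction switches per 8-bit port (the demo_* name is invented; the comedi types and constants are the real ones):

    static int demo_dio_insn_config(struct comedi_device *dev,
                                    struct comedi_subdevice *s,
                                    struct comedi_insn *insn,
                                    unsigned int *data)
    {
            unsigned int chan = CR_CHAN(insn->chanspec);
            unsigned int mask = 0xff << (chan & ~7); /* whole 8-bit port */

            switch (data[0]) {
            case INSN_CONFIG_DIO_OUTPUT:
                    s->io_bits |= mask;
                    break;
            case INSN_CONFIG_DIO_INPUT:
                    s->io_bits &= ~mask;
                    break;
            case INSN_CONFIG_DIO_QUERY:
                    data[1] = (s->io_bits & (1 << chan)) ?
                            COMEDI_OUTPUT : COMEDI_INPUT;
                    return insn->n;
            default:
                    return -EINVAL;
            }
            /* ... program the hardware direction register from s->io_bits ... */
            return insn->n;
    }
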
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 045a4c00f346..1df8fcbcd108 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -30,7 +30,7 @@ Status: works
30Devices: [National Instruments] PCI-DIO-32HS (ni_pcidio), PXI-6533, 30Devices: [National Instruments] PCI-DIO-32HS (ni_pcidio), PXI-6533,
31 PCI-DIO-96, PCI-DIO-96B, PXI-6508, PCI-6503, PCI-6503B, PCI-6503X, 31 PCI-DIO-96, PCI-DIO-96B, PXI-6508, PCI-6503, PCI-6503B, PCI-6503X,
32 PXI-6503, PCI-6533, PCI-6534 32 PXI-6503, PCI-6533, PCI-6534
33Updated: Sun, 21 Apr 2002 21:03:38 -0700 33Updated: Mon, 09 Jan 2012 14:27:23 +0000
34 34
35The DIO-96 appears as four 8255 subdevices. See the 8255 35The DIO-96 appears as four 8255 subdevices. See the 8255
36driver notes for details. 36driver notes for details.
@@ -42,6 +42,11 @@ supports simple digital I/O; no handshaking is supported.
42 42
43DMA mostly works for the PCI-DIO32HS, but only in timed input mode. 43DMA mostly works for the PCI-DIO32HS, but only in timed input mode.
44 44
45The PCI-DIO-32HS/PCI-6533 has a configurable external trigger. Setting
46scan_begin_arg to 0 or CR_EDGE triggers on the leading edge. Setting
47scan_begin_arg to CR_INVERT or (CR_EDGE | CR_INVERT) triggers on the
48trailing edge.
49
45This driver could be easily modified to support AT-MIO32HS and 50This driver could be easily modified to support AT-MIO32HS and
46AT-MIO96. 51AT-MIO96.
47 52
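
For illustration, a user-space comedilib sketch of the trigger selection described in the new documentation above (the device path and subdevice number are assumptions, and the chanlist setup is omitted):

    #include <stdio.h>
    #include <string.h>
    #include <comedilib.h>

    int main(void)
    {
            comedi_t *dev = comedi_open("/dev/comedi0"); /* assumed path */
            comedi_cmd cmd;

            if (!dev) {
                    fprintf(stderr, "comedi_open failed\n");
                    return 1;
            }
            memset(&cmd, 0, sizeof(cmd));
            cmd.subdev = 0;                 /* assumed DI subdevice */
            cmd.start_src = TRIG_NOW;
            cmd.scan_begin_src = TRIG_EXT;
            cmd.scan_begin_arg = CR_EDGE | CR_INVERT; /* trailing edge */
            /* ... fill in convert/stop sources and a chanlist here ... */
            if (comedi_command_test(dev, &cmd) < 0)
                    comedi_perror("comedi_command_test");
            comedi_close(dev);
            return 0;
    }
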
@@ -436,6 +441,7 @@ static int ni_pcidio_request_di_mite_channel(struct comedi_device *dev)
436 comedi_error(dev, "failed to reserve mite dma channel."); 441 comedi_error(dev, "failed to reserve mite dma channel.");
437 return -EBUSY; 442 return -EBUSY;
438 } 443 }
444 devpriv->di_mite_chan->dir = COMEDI_INPUT;
439 writeb(primary_DMAChannel_bits(devpriv->di_mite_chan->channel) | 445 writeb(primary_DMAChannel_bits(devpriv->di_mite_chan->channel) |
440 secondary_DMAChannel_bits(devpriv->di_mite_chan->channel), 446 secondary_DMAChannel_bits(devpriv->di_mite_chan->channel),
441 devpriv->mite->daq_io_addr + DMA_Line_Control_Group1); 447 devpriv->mite->daq_io_addr + DMA_Line_Control_Group1);
@@ -482,6 +488,21 @@ void ni_pcidio_event(struct comedi_device *dev, struct comedi_subdevice *s)
482 comedi_event(dev, s); 488 comedi_event(dev, s);
483} 489}
484 490
491static int ni_pcidio_poll(struct comedi_device *dev, struct comedi_subdevice *s)
492{
493 unsigned long irq_flags;
494 int count;
495
496 spin_lock_irqsave(&dev->spinlock, irq_flags);
497 spin_lock(&devpriv->mite_channel_lock);
498 if (devpriv->di_mite_chan)
499 mite_sync_input_dma(devpriv->di_mite_chan, s->async);
500 spin_unlock(&devpriv->mite_channel_lock);
501 count = s->async->buf_write_count - s->async->buf_read_count;
502 spin_unlock_irqrestore(&dev->spinlock, irq_flags);
503 return count;
504}
505
485static irqreturn_t nidio_interrupt(int irq, void *d) 506static irqreturn_t nidio_interrupt(int irq, void *d)
486{ 507{
487 struct comedi_device *dev = d; 508 struct comedi_device *dev = d;
@@ -497,7 +518,6 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
497 int status; 518 int status;
498 int work = 0; 519 int work = 0;
499 unsigned int m_status = 0; 520 unsigned int m_status = 0;
500 unsigned long irq_flags;
501 521
502 /* interrupcions parasites */ 522 /* interrupcions parasites */
503 if (dev->attached == 0) { 523 if (dev->attached == 0) {
@@ -505,6 +525,9 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
505 return IRQ_NONE; 525 return IRQ_NONE;
506 } 526 }
507 527
528 /* Lock to avoid race with comedi_poll */
529 spin_lock(&dev->spinlock);
530
508 status = readb(devpriv->mite->daq_io_addr + 531 status = readb(devpriv->mite->daq_io_addr +
509 Interrupt_And_Window_Status); 532 Interrupt_And_Window_Status);
510 flags = readb(devpriv->mite->daq_io_addr + Group_1_Flags); 533 flags = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
@@ -518,7 +541,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
518 /* printk("buf[4096]=%08x\n", 541 /* printk("buf[4096]=%08x\n",
519 *(unsigned int *)(async->prealloc_buf+4096)); */ 542 *(unsigned int *)(async->prealloc_buf+4096)); */
520 543
521 spin_lock_irqsave(&devpriv->mite_channel_lock, irq_flags); 544 spin_lock(&devpriv->mite_channel_lock);
522 if (devpriv->di_mite_chan) 545 if (devpriv->di_mite_chan)
523 m_status = mite_get_status(devpriv->di_mite_chan); 546 m_status = mite_get_status(devpriv->di_mite_chan);
524#ifdef MITE_DEBUG 547#ifdef MITE_DEBUG
@@ -543,7 +566,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
543 disable_irq(dev->irq); 566 disable_irq(dev->irq);
544 } 567 }
545 } 568 }
546 spin_unlock_irqrestore(&devpriv->mite_channel_lock, irq_flags); 569 spin_unlock(&devpriv->mite_channel_lock);
547 570
548 while (status & DataLeft) { 571 while (status & DataLeft) {
549 work++; 572 work++;
@@ -645,6 +668,8 @@ out:
645 Master_DMA_And_Interrupt_Control); 668 Master_DMA_And_Interrupt_Control);
646 } 669 }
647#endif 670#endif
671
672 spin_unlock(&dev->spinlock);
648 return IRQ_HANDLED; 673 return IRQ_HANDLED;
649} 674}
650 675
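
Taken together, the ni_pcidio hunks above establish a lock hierarchy so that the new ni_pcidio_poll() cannot race the interrupt handler over the mite channel: dev->spinlock is the outer lock and devpriv->mite_channel_lock nests inside it. A condensed sketch of the ordering as this patch leaves it (a reading of the code, not additional driver logic):

    /* Interrupt side: already runs with IRQs off, so plain spin_lock().
     * devpriv is this driver's private-data accessor macro. */
    static irqreturn_t sketch_interrupt(int irq, void *d)
    {
            struct comedi_device *dev = d;

            spin_lock(&dev->spinlock);                 /* outer */
            spin_lock(&devpriv->mite_channel_lock);    /* inner */
            /* ... read mite status, sync the DMA buffer ... */
            spin_unlock(&devpriv->mite_channel_lock);
            spin_unlock(&dev->spinlock);
            return IRQ_HANDLED;
    }

    /* Poll side runs in process context, so it must disable IRQs itself:
     * spin_lock_irqsave(&dev->spinlock, flags), then the same inner lock. */
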
@@ -825,8 +850,8 @@ static int ni_pcidio_cmdtest(struct comedi_device *dev,
825 } else { 850 } else {
826 /* TRIG_EXT */ 851 /* TRIG_EXT */
827 /* should be level/edge, hi/lo specification here */ 852 /* should be level/edge, hi/lo specification here */
828 if (cmd->scan_begin_arg != 0) { 853 if ((cmd->scan_begin_arg & ~(CR_EDGE | CR_INVERT)) != 0) {
829 cmd->scan_begin_arg = 0; 854 cmd->scan_begin_arg &= (CR_EDGE | CR_INVERT);
830 err++; 855 err++;
831 } 856 }
832 } 857 }
@@ -941,7 +966,13 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
941 writeb(0, devpriv->mite->daq_io_addr + Sequence); 966 writeb(0, devpriv->mite->daq_io_addr + Sequence);
942 writeb(0x00, devpriv->mite->daq_io_addr + ReqReg); 967 writeb(0x00, devpriv->mite->daq_io_addr + ReqReg);
943 writeb(4, devpriv->mite->daq_io_addr + BlockMode); 968 writeb(4, devpriv->mite->daq_io_addr + BlockMode);
944 writeb(0, devpriv->mite->daq_io_addr + LinePolarities); 969 if (!(cmd->scan_begin_arg & CR_INVERT)) {
970 /* Leading Edge pulse mode */
971 writeb(0, devpriv->mite->daq_io_addr + LinePolarities);
972 } else {
973 /* Trailing Edge pulse mode */
974 writeb(2, devpriv->mite->daq_io_addr + LinePolarities);
975 }
945 writeb(0x00, devpriv->mite->daq_io_addr + AckSer); 976 writeb(0x00, devpriv->mite->daq_io_addr + AckSer);
946 writel(1, devpriv->mite->daq_io_addr + StartDelay); 977 writel(1, devpriv->mite->daq_io_addr + StartDelay);
947 writeb(1, devpriv->mite->daq_io_addr + ReqDelay); 978 writeb(1, devpriv->mite->daq_io_addr + ReqDelay);
@@ -1005,17 +1036,24 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
1005static int setup_mite_dma(struct comedi_device *dev, struct comedi_subdevice *s) 1036static int setup_mite_dma(struct comedi_device *dev, struct comedi_subdevice *s)
1006{ 1037{
1007 int retval; 1038 int retval;
1039 unsigned long flags;
1008 1040
1009 retval = ni_pcidio_request_di_mite_channel(dev); 1041 retval = ni_pcidio_request_di_mite_channel(dev);
1010 if (retval) 1042 if (retval)
1011 return retval; 1043 return retval;
1012 1044
1013 devpriv->di_mite_chan->dir = COMEDI_INPUT; 1045 /* write alloc the entire buffer */
1046 comedi_buf_write_alloc(s->async, s->async->prealloc_bufsz);
1014 1047
1015 mite_prep_dma(devpriv->di_mite_chan, 32, 32); 1048 spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
1049 if (devpriv->di_mite_chan) {
1050 mite_prep_dma(devpriv->di_mite_chan, 32, 32);
1051 mite_dma_arm(devpriv->di_mite_chan);
1052 } else
1053 retval = -EIO;
1054 spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
1016 1055
1017 mite_dma_arm(devpriv->di_mite_chan); 1056 return retval;
1018 return 0;
1019} 1057}
1020 1058
1021static int ni_pcidio_inttrig(struct comedi_device *dev, 1059static int ni_pcidio_inttrig(struct comedi_device *dev,
@@ -1244,6 +1282,7 @@ static int nidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
1244 s->len_chanlist = 32; /* XXX */ 1282 s->len_chanlist = 32; /* XXX */
1245 s->buf_change = &ni_pcidio_change; 1283 s->buf_change = &ni_pcidio_change;
1246 s->async_dma_dir = DMA_BIDIRECTIONAL; 1284 s->async_dma_dir = DMA_BIDIRECTIONAL;
1285 s->poll = &ni_pcidio_poll;
1247 1286
1248 writel(0, devpriv->mite->daq_io_addr + Port_IO(0)); 1287 writel(0, devpriv->mite->daq_io_addr + Port_IO(0));
1249 writel(0, devpriv->mite->daq_io_addr + Port_Pin_Directions(0)); 1288 writel(0, devpriv->mite->daq_io_addr + Port_Pin_Directions(0));
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 0f0d995f137c..27baefa32b17 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -29,14 +29,15 @@ Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio),
29 PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014, PCI-6040E, 29 PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014, PCI-6040E,
30 PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E, 30 PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E,
31 PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E, 31 PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E,
32 PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224, PCI-6225, PXI-6225, 32 PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224,
33 PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PCI-6254, PCI-6259, PCIe-6259, 33 PCI-6225, PXI-6225, PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PXIe-6251,
34 PCI-6254, PCI-6259, PCIe-6259,
34 PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289, 35 PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289,
35 PCI-6711, PXI-6711, PCI-6713, PXI-6713, 36 PCI-6711, PXI-6711, PCI-6713, PXI-6713,
36 PXI-6071E, PCI-6070E, PXI-6070E, 37 PXI-6071E, PCI-6070E, PXI-6070E,
37 PXI-6052E, PCI-6036E, PCI-6731, PCI-6733, PXI-6733, 38 PXI-6052E, PCI-6036E, PCI-6731, PCI-6733, PXI-6733,
38 PCI-6143, PXI-6143 39 PCI-6143, PXI-6143
39Updated: Wed, 03 Dec 2008 10:51:47 +0000 40Updated: Mon, 09 Jan 2012 14:52:48 +0000
40 41
41These boards are almost identical to the AT-MIO E series, except that 42These boards are almost identical to the AT-MIO E series, except that
42they use the PCI bus instead of ISA (i.e., AT). See the notes for 43they use the PCI bus instead of ISA (i.e., AT). See the notes for
@@ -182,6 +183,7 @@ static DEFINE_PCI_DEVICE_TABLE(ni_pci_table) = {
182 {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x717f)}, 183 {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x717f)},
183 {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x71bc)}, 184 {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x71bc)},
184 {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x717d)}, 185 {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x717d)},
186 {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x72e8)},
185 {0} 187 {0}
186}; 188};
187 189
@@ -1046,6 +1048,25 @@ static const struct ni_board_struct ni_boards[] = {
1046 .has_8255 = 0, 1048 .has_8255 = 0,
1047 }, 1049 },
1048 { 1050 {
1051 .device_id = 0x72e8,
1052 .name = "pxie-6251",
1053 .n_adchan = 16,
1054 .adbits = 16,
1055 .ai_fifo_depth = 4095,
1056 .gainlkup = ai_gain_628x,
1057 .ai_speed = 800,
1058 .n_aochan = 2,
1059 .aobits = 16,
1060 .ao_fifo_depth = 8191,
1061 .ao_range_table = &range_ni_M_625x_ao,
1062 .reg_type = ni_reg_625x,
1063 .ao_unipolar = 0,
1064 .ao_speed = 357,
1065 .num_p0_dio_channels = 8,
1066 .caldac = {caldac_none},
1067 .has_8255 = 0,
1068 },
1069 {
1049 .device_id = 0x70b7, 1070 .device_id = 0x70b7,
1050 .name = "pci-6254", 1071 .name = "pci-6254",
1051 .n_adchan = 32, 1072 .n_adchan = 32,
diff --git a/drivers/staging/comedi/drivers/unioxx5.c b/drivers/staging/comedi/drivers/unioxx5.c
index 89e62aa134b0..f45824f0d86b 100644
--- a/drivers/staging/comedi/drivers/unioxx5.c
+++ b/drivers/staging/comedi/drivers/unioxx5.c
@@ -306,7 +306,7 @@ static int __unioxx5_subdev_init(struct comedi_subdevice *subdev,
306 usp = kzalloc(sizeof(*usp), GFP_KERNEL); 306 usp = kzalloc(sizeof(*usp), GFP_KERNEL);
307 307
308 if (usp == NULL) { 308 if (usp == NULL) {
309 printk(KERN_ERR "comedi%d: erorr! --> out of memory!\n", minor); 309 printk(KERN_ERR "comedi%d: error! --> out of memory!\n", minor);
310 return -1; 310 return -1;
311 } 311 }
312 312
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index ca6bcf8b0231..63c9b6dbc317 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -39,7 +39,7 @@ Status: testing
39 * 39 *
40 * 40 *
41 * Revision history: 41 * Revision history:
42 * 0.1: inital version 42 * 0.1: initial version
43 * 0.2: all basic functions implemented, digital I/O only for one port 43 * 0.2: all basic functions implemented, digital I/O only for one port
44 * 0.3: proper vendor ID and driver name 44 * 0.3: proper vendor ID and driver name
45 * 0.4: fixed D/A voltage range 45 * 0.4: fixed D/A voltage range
@@ -235,16 +235,16 @@ struct usbduxsub {
235 short int ao_cmd_running; 235 short int ao_cmd_running;
236 /* pwm is running */ 236 /* pwm is running */
237 short int pwm_cmd_running; 237 short int pwm_cmd_running;
238 /* continous aquisition */ 238 /* continuous acquisition */
239 short int ai_continous; 239 short int ai_continuous;
240 short int ao_continous; 240 short int ao_continuous;
241 /* number of samples to acquire */ 241 /* number of samples to acquire */
242 int ai_sample_count; 242 int ai_sample_count;
243 int ao_sample_count; 243 int ao_sample_count;
244 /* time between samples in units of the timer */ 244 /* time between samples in units of the timer */
245 unsigned int ai_timer; 245 unsigned int ai_timer;
246 unsigned int ao_timer; 246 unsigned int ao_timer;
247 /* counter between aquisitions */ 247 /* counter between acquisitions */
248 unsigned int ai_counter; 248 unsigned int ai_counter;
249 unsigned int ao_counter; 249 unsigned int ao_counter;
250 /* interval in frames/uframes */ 250 /* interval in frames/uframes */
@@ -455,8 +455,8 @@ static void usbduxsub_ai_IsocIrq(struct urb *urb)
455 this_usbduxsub->ai_counter = this_usbduxsub->ai_timer; 455 this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;
456 456
457 /* test, if we transmit only a fixed number of samples */ 457 /* test, if we transmit only a fixed number of samples */
458 if (!(this_usbduxsub->ai_continous)) { 458 if (!(this_usbduxsub->ai_continuous)) {
459 /* not continous, fixed number of samples */ 459 /* not continuous, fixed number of samples */
460 this_usbduxsub->ai_sample_count--; 460 this_usbduxsub->ai_sample_count--;
461 /* all samples received? */ 461 /* all samples received? */
462 if (this_usbduxsub->ai_sample_count < 0) { 462 if (this_usbduxsub->ai_sample_count < 0) {
@@ -607,8 +607,8 @@ static void usbduxsub_ao_IsocIrq(struct urb *urb)
607 /* timer zero */ 607 /* timer zero */
608 this_usbduxsub->ao_counter = this_usbduxsub->ao_timer; 608 this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
609 609
610 /* handle non continous aquisition */ 610 /* handle non continuous acquisition */
611 if (!(this_usbduxsub->ao_continous)) { 611 if (!(this_usbduxsub->ao_continuous)) {
612 /* fixed number of samples */ 612 /* fixed number of samples */
613 this_usbduxsub->ao_sample_count--; 613 this_usbduxsub->ao_sample_count--;
614 if (this_usbduxsub->ao_sample_count < 0) { 614 if (this_usbduxsub->ao_sample_count < 0) {
@@ -925,7 +925,7 @@ static int usbdux_ai_cmdtest(struct comedi_device *dev,
925 if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) 925 if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
926 err++; 926 err++;
927 927
928 /* scanning is continous */ 928 /* scanning is continuous */
929 tmp = cmd->convert_src; 929 tmp = cmd->convert_src;
930 cmd->convert_src &= TRIG_NOW; 930 cmd->convert_src &= TRIG_NOW;
931 if (!cmd->convert_src || tmp != cmd->convert_src) 931 if (!cmd->convert_src || tmp != cmd->convert_src)
@@ -1193,7 +1193,7 @@ static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
1193 up(&this_usbduxsub->sem); 1193 up(&this_usbduxsub->sem);
1194 return -EBUSY; 1194 return -EBUSY;
1195 } 1195 }
1196 /* set current channel of the running aquisition to zero */ 1196 /* set current channel of the running acquisition to zero */
1197 s->async->cur_chan = 0; 1197 s->async->cur_chan = 0;
1198 1198
1199 /* first the number of channels per time step */ 1199 /* first the number of channels per time step */
@@ -1261,10 +1261,10 @@ static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
1261 if (cmd->stop_src == TRIG_COUNT) { 1261 if (cmd->stop_src == TRIG_COUNT) {
1262 /* data arrives as one packet */ 1262 /* data arrives as one packet */
1263 this_usbduxsub->ai_sample_count = cmd->stop_arg; 1263 this_usbduxsub->ai_sample_count = cmd->stop_arg;
1264 this_usbduxsub->ai_continous = 0; 1264 this_usbduxsub->ai_continuous = 0;
1265 } else { 1265 } else {
1266 /* continous aquisition */ 1266 /* continuous acquisition */
1267 this_usbduxsub->ai_continous = 1; 1267 this_usbduxsub->ai_continuous = 1;
1268 this_usbduxsub->ai_sample_count = 0; 1268 this_usbduxsub->ai_sample_count = 0;
1269 } 1269 }
1270 1270
@@ -1586,7 +1586,7 @@ static int usbdux_ao_cmdtest(struct comedi_device *dev,
1586 /* just now we scan also in the high speed mode every frame */ 1586 /* just now we scan also in the high speed mode every frame */
1587 /* this is due to ehci driver limitations */ 1587 /* this is due to ehci driver limitations */
1588 if (0) { /* (this_usbduxsub->high_speed) */ 1588 if (0) { /* (this_usbduxsub->high_speed) */
1589 /* start immidiately a new scan */ 1589 /* start immediately a new scan */
1590 /* the sampling rate is set by the conversion rate */ 1590 /* the sampling rate is set by the conversion rate */
1591 cmd->scan_begin_src &= TRIG_FOLLOW; 1591 cmd->scan_begin_src &= TRIG_FOLLOW;
1592 } else { 1592 } else {
@@ -1596,7 +1596,7 @@ static int usbdux_ao_cmdtest(struct comedi_device *dev,
1596 if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) 1596 if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
1597 err++; 1597 err++;
1598 1598
1599 /* scanning is continous */ 1599 /* scanning is continuous */
1600 tmp = cmd->convert_src; 1600 tmp = cmd->convert_src;
1601 1601
1602 /* all conversion events happen simultaneously */ 1602 /* all conversion events happen simultaneously */
@@ -1710,7 +1710,7 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
1710 dev_dbg(&this_usbduxsub->interface->dev, 1710 dev_dbg(&this_usbduxsub->interface->dev,
1711 "comedi%d: %s\n", dev->minor, __func__); 1711 "comedi%d: %s\n", dev->minor, __func__);
1712 1712
1713 /* set current channel of the running aquisition to zero */ 1713 /* set current channel of the running acquisition to zero */
1714 s->async->cur_chan = 0; 1714 s->async->cur_chan = 0;
1715 for (i = 0; i < cmd->chanlist_len; ++i) { 1715 for (i = 0; i < cmd->chanlist_len; ++i) {
1716 chan = CR_CHAN(cmd->chanlist[i]); 1716 chan = CR_CHAN(cmd->chanlist[i]);
@@ -1759,7 +1759,7 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
1759 this_usbduxsub->ao_counter = this_usbduxsub->ao_timer; 1759 this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
1760 1760
1761 if (cmd->stop_src == TRIG_COUNT) { 1761 if (cmd->stop_src == TRIG_COUNT) {
1762 /* not continous */ 1762 /* not continuous */
1763 /* counter */ 1763 /* counter */
1764 /* high speed also scans everything at once */ 1764 /* high speed also scans everything at once */
1765 if (0) { /* (this_usbduxsub->high_speed) */ 1765 if (0) { /* (this_usbduxsub->high_speed) */
@@ -1771,10 +1771,10 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
1771 /* data arrives as one packet */ 1771 /* data arrives as one packet */
1772 this_usbduxsub->ao_sample_count = cmd->stop_arg; 1772 this_usbduxsub->ao_sample_count = cmd->stop_arg;
1773 } 1773 }
1774 this_usbduxsub->ao_continous = 0; 1774 this_usbduxsub->ao_continuous = 0;
1775 } else { 1775 } else {
1776 /* continous aquisition */ 1776 /* continuous acquisition */
1777 this_usbduxsub->ao_continous = 1; 1777 this_usbduxsub->ao_continuous = 1;
1778 this_usbduxsub->ao_sample_count = 0; 1778 this_usbduxsub->ao_sample_count = 0;
1779 } 1779 }
1780 1780
diff --git a/drivers/staging/crystalhd/bc_dts_glob_lnx.h b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
index bbe5119761fa..fd1a6e680c8a 100644
--- a/drivers/staging/crystalhd/bc_dts_glob_lnx.h
+++ b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
@@ -48,8 +48,7 @@
48 48
49#endif 49#endif
50 50
51#include "bc_dts_defs.h" 51#include "crystalhd.h"
52#include "bcm_70012_regs.h" /* Link Register defs */
53 52
54#define CRYSTALHD_API_NAME "crystalhd" 53#define CRYSTALHD_API_NAME "crystalhd"
55#define CRYSTALHD_API_DEV_NAME "/dev/crystalhd" 54#define CRYSTALHD_API_DEV_NAME "/dev/crystalhd"
diff --git a/drivers/staging/crystalhd/bc_dts_types.h b/drivers/staging/crystalhd/bc_dts_types.h
deleted file mode 100644
index 1085a91221b8..000000000000
--- a/drivers/staging/crystalhd/bc_dts_types.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/********************************************************************
2 * Copyright(c) 2006-2009 Broadcom Corporation.
3 *
4 * Name: bc_dts_types.h
5 *
6 * Description: Data types
7 *
8 * AU
9 *
10 * HISTORY:
11 *
12 ********************************************************************
13 * This header is free software: you can redistribute it and/or modify
14 * it under the terms of the GNU Lesser General Public License as published
15 * by the Free Software Foundation, either version 2.1 of the License.
16 *
17 * This header is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public License
22 * along with this header. If not, see <http://www.gnu.org/licenses/>.
23 *******************************************************************/
24
25#ifndef _BC_DTS_TYPES_H_
26#define _BC_DTS_TYPES_H_
27
28#include <stdint.h>
29
30#ifndef TRUE
31 #define TRUE 1
32#endif
33
34#ifndef FALSE
35 #define FALSE 0
36#endif
37
38#define TEXT
39
40#endif
diff --git a/drivers/staging/crystalhd/crystalhd.h b/drivers/staging/crystalhd/crystalhd.h
new file mode 100644
index 000000000000..3f4d79515026
--- /dev/null
+++ b/drivers/staging/crystalhd/crystalhd.h
@@ -0,0 +1,14 @@
1#ifndef _CRYSTALHD_H_
2#define _CRYSTALHD_H_
3
4#include <asm/system.h>
5#include "bc_dts_defs.h"
6#include "crystalhd_misc.h"
7#include "bc_dts_glob_lnx.h"
8#include "crystalhd_hw.h"
9#include "crystalhd_cmds.h"
10#include "crystalhd_lnx.h"
11#include "bcm_70012_regs.h"
12#include "crystalhd_fw_if.h"
13
14#endif
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.c b/drivers/staging/crystalhd/crystalhd_cmds.c
index 3735ed3da4c6..05fe78748dfc 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.c
+++ b/drivers/staging/crystalhd/crystalhd_cmds.c
@@ -24,8 +24,7 @@
24 * along with this driver. If not, see <http://www.gnu.org/licenses/>. 24 * along with this driver. If not, see <http://www.gnu.org/licenses/>.
25 **********************************************************************/ 25 **********************************************************************/
26 26
27#include "crystalhd_cmds.h" 27#include "crystalhd.h"
28#include "crystalhd_hw.h"
29 28
30static struct crystalhd_user *bc_cproc_get_uid(struct crystalhd_cmd *ctx) 29static struct crystalhd_user *bc_cproc_get_uid(struct crystalhd_cmd *ctx)
31{ 30{
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.h b/drivers/staging/crystalhd/crystalhd_cmds.h
index f0a2796045c2..4066ba393a17 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.h
+++ b/drivers/staging/crystalhd/crystalhd_cmds.h
@@ -33,8 +33,8 @@
33 * from _dts_glob and dts_defs etc.. which are defined for 33 * from _dts_glob and dts_defs etc.. which are defined for
34 * windows. 34 * windows.
35 */ 35 */
36#include "crystalhd_misc.h" 36
37#include "crystalhd_hw.h" 37#include "crystalhd.h"
38 38
39enum crystalhd_state { 39enum crystalhd_state {
40 BC_LINK_INVALID = 0x00, 40 BC_LINK_INVALID = 0x00,
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index 5acf39e7cdef..e617d2fcbb1f 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -22,10 +22,11 @@
22 * along with this driver. If not, see <http://www.gnu.org/licenses/>. 22 * along with this driver. If not, see <http://www.gnu.org/licenses/>.
23 **********************************************************************/ 23 **********************************************************************/
24 24
25#include "crystalhd.h"
26
25#include <linux/pci.h> 27#include <linux/pci.h>
26#include <linux/slab.h> 28#include <linux/slab.h>
27#include <linux/delay.h> 29#include <linux/delay.h>
28#include "crystalhd_hw.h"
29 30
30/* Functions internal to this file */ 31/* Functions internal to this file */
31 32
@@ -766,7 +767,7 @@ static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
766 crystalhd_hw_dump_desc(desc, last_desc_ix, 1); 767 crystalhd_hw_dump_desc(desc, last_desc_ix, 1);
767 768
768 if (count != xfr_sz) { 769 if (count != xfr_sz) {
769 BCMLOG_ERR("interal error sz curr:%x exp:%x\n", count, xfr_sz); 770 BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count, xfr_sz);
770 return BC_STS_ERROR; 771 return BC_STS_ERROR;
771 } 772 }
772 773
@@ -868,8 +869,7 @@ static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
868 869
869 BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n"); 870 BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");
870 871
871 /* FIXME: jarod: invert dma_ctrl and check bit? or are there missing parens? */ 872 if (!(dma_cntrl & DMA_START_BIT)) {
872 if (!dma_cntrl & DMA_START_BIT) {
873 BCMLOG(BCMLOG_DBG, "Already Stopped\n"); 873 BCMLOG(BCMLOG_DBG, "Already Stopped\n");
874 return BC_STS_SUCCESS; 874 return BC_STS_SUCCESS;
875 } 875 }
@@ -1628,7 +1628,6 @@ enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, ui
1628 uint32_t fw_sig_len = 36; 1628 uint32_t fw_sig_len = 36;
1629 uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg; 1629 uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;
1630 1630
1631 BCMLOG_ENTER;
1632 1631
1633 if (!adp || !buffer || !sz) { 1632 if (!adp || !buffer || !sz) {
1634 BCMLOG_ERR("Invalid Params.\n"); 1633 BCMLOG_ERR("Invalid Params.\n");
@@ -1725,8 +1724,6 @@ enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
1725 1724
1726 crystalhd_create_event(&fw_cmd_event); 1725 crystalhd_create_event(&fw_cmd_event);
1727 1726
1728 BCMLOG_ENTER;
1729
1730 if (!hw || !fw_cmd) { 1727 if (!hw || !fw_cmd) {
1731 BCMLOG_ERR("Invalid Arguments\n"); 1728 BCMLOG_ERR("Invalid Arguments\n");
1732 return BC_STS_INV_ARG; 1729 return BC_STS_INV_ARG;
diff --git a/drivers/staging/crystalhd/crystalhd_hw.h b/drivers/staging/crystalhd/crystalhd_hw.h
index 3efbf9d4ff5d..2d0e6c6005e5 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.h
+++ b/drivers/staging/crystalhd/crystalhd_hw.h
@@ -27,8 +27,7 @@
27#ifndef _CRYSTALHD_HW_H_ 27#ifndef _CRYSTALHD_HW_H_
28#define _CRYSTALHD_HW_H_ 28#define _CRYSTALHD_HW_H_
29 29
30#include "crystalhd_misc.h" 30#include "crystalhd.h"
31#include "crystalhd_fw_if.h"
32 31
33/* HW constants..*/ 32/* HW constants..*/
34#define DMA_ENGINE_CNT 2 33#define DMA_ENGINE_CNT 2
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index 7e0c199f6893..d9e3d618f7f4 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -15,10 +15,11 @@
15 along with this driver. If not, see <http://www.gnu.org/licenses/>. 15 along with this driver. If not, see <http://www.gnu.org/licenses/>.
16***************************************************************************/ 16***************************************************************************/
17 17
18#include "crystalhd.h"
19
18#include <linux/mutex.h> 20#include <linux/mutex.h>
19#include <linux/slab.h> 21#include <linux/slab.h>
20 22
21#include "crystalhd_lnx.h"
22 23
23static DEFINE_MUTEX(chd_dec_mutex); 24static DEFINE_MUTEX(chd_dec_mutex);
24static struct class *crystalhd_class; 25static struct class *crystalhd_class;
@@ -298,7 +299,6 @@ static int chd_dec_open(struct inode *in, struct file *fd)
298 enum BC_STATUS sts = BC_STS_SUCCESS; 299 enum BC_STATUS sts = BC_STS_SUCCESS;
299 struct crystalhd_user *uc = NULL; 300 struct crystalhd_user *uc = NULL;
300 301
301 BCMLOG_ENTER;
302 if (!adp) { 302 if (!adp) {
303 BCMLOG_ERR("Invalid adp\n"); 303 BCMLOG_ERR("Invalid adp\n");
304 return -EINVAL; 304 return -EINVAL;
@@ -327,7 +327,6 @@ static int chd_dec_close(struct inode *in, struct file *fd)
327 struct crystalhd_adp *adp = chd_get_adp(); 327 struct crystalhd_adp *adp = chd_get_adp();
328 struct crystalhd_user *uc; 328 struct crystalhd_user *uc;
329 329
330 BCMLOG_ENTER;
331 if (!adp) { 330 if (!adp) {
332 BCMLOG_ERR("Invalid adp\n"); 331 BCMLOG_ERR("Invalid adp\n");
333 return -EINVAL; 332 return -EINVAL;
@@ -513,8 +512,6 @@ static void __devexit chd_dec_pci_remove(struct pci_dev *pdev)
513 struct crystalhd_adp *pinfo; 512 struct crystalhd_adp *pinfo;
514 enum BC_STATUS sts = BC_STS_SUCCESS; 513 enum BC_STATUS sts = BC_STS_SUCCESS;
515 514
516 BCMLOG_ENTER;
517
518 pinfo = pci_get_drvdata(pdev); 515 pinfo = pci_get_drvdata(pdev);
519 if (!pinfo) { 516 if (!pinfo) {
520 BCMLOG_ERR("could not get adp\n"); 517 BCMLOG_ERR("could not get adp\n");
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.h b/drivers/staging/crystalhd/crystalhd_lnx.h
index a2b5a56be6dd..a81f9298b0a1 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.h
+++ b/drivers/staging/crystalhd/crystalhd_lnx.h
@@ -1,7 +1,7 @@
1/*************************************************************************** 1/***************************************************************************
2 * Copyright (c) 2005-2009, Broadcom Corporation. 2 * Copyright (c) 2005-2009, Broadcom Corporation.
3 * 3 *
4 * Name: crystalhd_lnx . c 4 * Name: crystalhd_lnx . h
5 * 5 *
6 * Description: 6 * Description:
7 * BCM70012 Linux driver 7 * BCM70012 Linux driver
@@ -48,11 +48,10 @@
48#include <asm/system.h> 48#include <asm/system.h>
49#include <linux/uaccess.h> 49#include <linux/uaccess.h>
50 50
51#include "crystalhd_cmds.h" 51#include "crystalhd.h"
52 52
53#define CRYSTAL_HD_NAME "Broadcom Crystal HD Decoder (BCM70012) Driver" 53#define CRYSTAL_HD_NAME "Broadcom Crystal HD Decoder (BCM70012) Driver"
54 54
55
56/* OS specific PCI information structure and adapter information. */ 55/* OS specific PCI information structure and adapter information. */
57struct crystalhd_adp { 56struct crystalhd_adp {
58 /* Hardware board/PCI specifics */ 57 /* Hardware board/PCI specifics */
diff --git a/drivers/staging/crystalhd/crystalhd_misc.c b/drivers/staging/crystalhd/crystalhd_misc.c
index 5fa0c6e10ce2..b3a637814a16 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.c
+++ b/drivers/staging/crystalhd/crystalhd_misc.c
@@ -24,10 +24,9 @@
24 * along with this driver. If not, see <http://www.gnu.org/licenses/>. 24 * along with this driver. If not, see <http://www.gnu.org/licenses/>.
25 **********************************************************************/ 25 **********************************************************************/
26 26
27#include <linux/slab.h> 27#include "crystalhd.h"
28 28
29#include "crystalhd_misc.h" 29#include <linux/slab.h>
30#include "crystalhd_lnx.h"
31 30
32uint32_t g_linklog_level; 31uint32_t g_linklog_level;
33 32
diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h
index 4d6172357428..84c87938a831 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.h
+++ b/drivers/staging/crystalhd/crystalhd_misc.h
@@ -28,6 +28,8 @@
28#ifndef _CRYSTALHD_MISC_H_ 28#ifndef _CRYSTALHD_MISC_H_
29#define _CRYSTALHD_MISC_H_ 29#define _CRYSTALHD_MISC_H_
30 30
31#include "crystalhd.h"
32
31#include <linux/module.h> 33#include <linux/module.h>
32#include <linux/kernel.h> 34#include <linux/kernel.h>
33#include <linux/errno.h> 35#include <linux/errno.h>
@@ -35,8 +37,6 @@
35#include <linux/ioctl.h> 37#include <linux/ioctl.h>
36#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
37#include <linux/sched.h> 39#include <linux/sched.h>
38#include <asm/system.h>
39#include "bc_dts_glob_lnx.h"
40 40
41/* Global log level variable defined in crystal_misc.c file */ 41/* Global log level variable defined in crystal_misc.c file */
42extern uint32_t g_linklog_level; 42extern uint32_t g_linklog_level;
@@ -200,29 +200,21 @@ enum _chd_log_levels {
200 BCMLOG_INFO = 0x00000001, /* Generic informational */ 200 BCMLOG_INFO = 0x00000001, /* Generic informational */
201 BCMLOG_DBG = 0x00000002, /* First level Debug info */ 201 BCMLOG_DBG = 0x00000002, /* First level Debug info */
202 BCMLOG_SSTEP = 0x00000004, /* Stepping information */ 202 BCMLOG_SSTEP = 0x00000004, /* Stepping information */
203 BCMLOG_ENTER_LEAVE = 0x00000008, /* stack tracking */
204}; 203};
205 204
206#define BCMLOG_ENTER \
207if (g_linklog_level & BCMLOG_ENTER_LEAVE) { \
208 printk(KERN_DEBUG "Entered %s\n", __func__); \
209}
210 205
211#define BCMLOG_LEAVE \ 206#define BCMLOG(trace, fmt, args...) \
212if (g_linklog_level & BCMLOG_ENTER_LEAVE) { \ 207do { \
213 printk(KERN_DEBUG "Leaving %s\n", __func__); \ 208 if (g_linklog_level & trace) \
214} 209 printk(fmt, ##args); \
210} while (0)
215 211
216#define BCMLOG(trace, fmt, args...) \
217if (g_linklog_level & trace) { \
218 printk(fmt, ##args); \
219}
220 212
221#define BCMLOG_ERR(fmt, args...) \ 213#define BCMLOG_ERR(fmt, args...) \
222do { \ 214do { \
223 if (g_linklog_level & BCMLOG_ERROR) { \ 215 if (g_linklog_level & BCMLOG_ERROR) \
224 printk(KERN_ERR "*ERR*:%s:%d: "fmt, __FILE__, __LINE__, ##args); \ 216 printk(KERN_ERR "*ERR*:%s:%d: "fmt, \
225 } \ 217 __FILE__, __LINE__, ##args); \
226} while (0); 218} while (0)
227 219
228#endif 220#endif
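
The macro rework above is the standard do { ... } while (0) idiom: it makes a multi-statement macro behave as exactly one statement, which matters whenever the macro is used before an else. A self-contained illustration with a toy logging macro (hypothetical, not the crystalhd one):

    #include <stdio.h>

    static int log_enabled;

    /* Unsafe form: expands to a bare if-block. */
    #define LOG_UNSAFE(fmt, ...) \
            if (log_enabled) { printf(fmt, ##__VA_ARGS__); }

    /* Safe form: always a single statement, as in the patch. */
    #define LOG_SAFE(fmt, ...) \
            do { \
                    if (log_enabled) \
                            printf(fmt, ##__VA_ARGS__); \
            } while (0)

    int main(void)
    {
            int err = 1;

            if (err)
                    LOG_SAFE("failed\n"); /* fine */
            else
                    printf("ok\n");

            /* With LOG_UNSAFE here, the trailing semicolon after the
             * macro's brace block would end the statement early and the
             * else above would no longer compile; without the braces the
             * else would silently bind to the macro's hidden
             * "if (log_enabled)" instead. */
            return 0;
    }
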
diff --git a/drivers/staging/et131x/README b/drivers/staging/et131x/README
index 3458aa713a33..82657233c8b6 100644
--- a/drivers/staging/et131x/README
+++ b/drivers/staging/et131x/README
@@ -11,6 +11,6 @@ TODO:
11 - Use of kmem_cache seems a bit unusual 11 - Use of kmem_cache seems a bit unusual
12 12
13Please send patches to: 13Please send patches to:
14 Greg Kroah-Hartman <gregkh@suse.de> 14 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
15 Mark Einon <mark.einon@gmail.com> 15 Mark Einon <mark.einon@gmail.com>
16 16
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index 2c4069fcd981..3f919babe79b 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -802,7 +802,7 @@ static int et131x_init_eeprom(struct et131x_adapter *adapter)
802 /* THIS IS A WORKAROUND: 802 /* THIS IS A WORKAROUND:
803 * I need to call this function twice to get my card in a 803 * I need to call this function twice to get my card in a
804 * LG M1 Express Dual running. I tried also a msleep before this 804 * LG M1 Express Dual running. I tried also a msleep before this
805 * function, because I thougth there could be some time condidions 805 * function, because I thought there could be some time conditions
806 * but it didn't work. Calling the whole function twice also works. 806 * but it didn't work. Calling the whole function twice also works.
807 */ 807 */
808 if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) { 808 if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
@@ -987,7 +987,7 @@ static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
987 writel(station1, &macregs->station_addr_1); 987 writel(station1, &macregs->station_addr_1);
988 writel(station2, &macregs->station_addr_2); 988 writel(station2, &macregs->station_addr_2);
989 989
990 /* Max ethernet packet in bytes that will passed by the mac without 990 /* Max ethernet packet in bytes that will be passed by the mac without
991 * being truncated. Allow the MAC to pass 4 more than our max packet 991 * being truncated. Allow the MAC to pass 4 more than our max packet
992 * size. This is 4 for the Ethernet CRC. 992 * size. This is 4 for the Ethernet CRC.
993 * 993 *
@@ -3109,7 +3109,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
3109 skb->protocol = eth_type_trans(skb, adapter->netdev); 3109 skb->protocol = eth_type_trans(skb, adapter->netdev);
3110 skb->ip_summed = CHECKSUM_NONE; 3110 skb->ip_summed = CHECKSUM_NONE;
3111 3111
3112 netif_rx(skb); 3112 netif_rx_ni(skb);
3113 } else { 3113 } else {
3114 rfd->len = 0; 3114 rfd->len = 0;
3115 } 3115 }
@@ -4413,7 +4413,7 @@ static void et131x_up(struct net_device *netdev)
4413 4413
4414/** 4414/**
4415 * et131x_down - Bring down the device 4415 * et131x_down - Bring down the device
4416 * @netdev: device to be broght down 4416 * @netdev: device to be brought down
4417 */ 4417 */
4418static void et131x_down(struct net_device *netdev) 4418static void et131x_down(struct net_device *netdev)
4419{ 4419{
@@ -5177,7 +5177,7 @@ static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
5177 5177
5178 /* Make sure the requested MAC is valid */ 5178 /* Make sure the requested MAC is valid */
5179 if (!is_valid_ether_addr(address->sa_data)) 5179 if (!is_valid_ether_addr(address->sa_data))
5180 return -EINVAL; 5180 return -EADDRNOTAVAIL;
5181 5181
5182 et131x_disable_txrx(netdev); 5182 et131x_disable_txrx(netdev);
5183 et131x_handle_send_interrupt(adapter); 5183 et131x_handle_send_interrupt(adapter);
diff --git a/drivers/staging/et131x/et131x.h b/drivers/staging/et131x/et131x.h
index 7eed3c8986f1..864379b4e8df 100644
--- a/drivers/staging/et131x/et131x.h
+++ b/drivers/staging/et131x/et131x.h
@@ -596,7 +596,7 @@ struct rxdma_regs { /* Location: */
596 * structure for tx test reg in txmac address map 596 * structure for tx test reg in txmac address map
597 * located at address 0x3014 597 * located at address 0x3014
598 * 31-17: unused 598 * 31-17: unused
599 * 16: reserved1 599 * 16: reserved
600 * 15: txtest_en 600 * 15: txtest_en
601 * 14-11: unused 601 * 14-11: unused
602 * 10-0: txq test pointer 602 * 10-0: txq test pointer
@@ -1485,7 +1485,7 @@ struct address_map {
1485 * 3: reserved 1485 * 3: reserved
1486 * 2: ignore_10g_fr 1486 * 2: ignore_10g_fr
1487 * 1: reserved 1487 * 1: reserved
1488 * 0: preamble_supress_en 1488 * 0: preamble_suppress_en
1489 */ 1489 */
1490 1490
1491/* MI Register 22: PHY Configuration Reg(0x16) 1491/* MI Register 22: PHY Configuration Reg(0x16)
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index d8efed657440..3bf0f40e97fd 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -450,7 +450,7 @@ exit:
450/** 450/**
451 * usb_alphatrack_poll 451 * usb_alphatrack_poll
452 */ 452 */
453static unsigned int usb_alphatrack_poll(struct file *file, poll_table * wait) 453static unsigned int usb_alphatrack_poll(struct file *file, poll_table *wait)
454{ 454{
455 struct usb_alphatrack *dev; 455 struct usb_alphatrack *dev;
456 unsigned int mask = 0; 456 unsigned int mask = 0;
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index cf47a5d191fc..29e99bbcae48 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -471,7 +471,7 @@ exit:
471/** 471/**
472 * usb_tranzport_poll 472 * usb_tranzport_poll
473 */ 473 */
474static unsigned int usb_tranzport_poll(struct file *file, poll_table * wait) 474static unsigned int usb_tranzport_poll(struct file *file, poll_table *wait)
475{ 475{
476 struct usb_tranzport *dev; 476 struct usb_tranzport *dev;
477 unsigned int mask = 0; 477 unsigned int mask = 0;
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index 7faeadad1fff..71aaad31270b 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -29,10 +29,10 @@
29#define FT1000_PROC "ft1000" 29#define FT1000_PROC "ft1000"
30#define MAX_FILE_LEN 255 30#define MAX_FILE_LEN 255
31 31
32#define PUTM_TO_PAGE(len,page,args...) \ 32#define PUTM_TO_PAGE(len, page, args...) \
33 len += snprintf(page+len, PAGE_SIZE - len, args) 33 len += snprintf(page+len, PAGE_SIZE - len, args)
34 34
35#define PUTX_TO_PAGE(len,page,message,size,var) \ 35#define PUTX_TO_PAGE(len, page, message, size, var) \
36 len += snprintf(page+len, PAGE_SIZE - len, message); \ 36 len += snprintf(page+len, PAGE_SIZE - len, message); \
37 for(i = 0; i < (size - 1); i++) \ 37 for(i = 0; i < (size - 1); i++) \
38 { \ 38 { \
@@ -40,7 +40,7 @@
40 } \ 40 } \
41 len += snprintf(page+len, PAGE_SIZE - len, "%02x\n", var[i]) 41 len += snprintf(page+len, PAGE_SIZE - len, "%02x\n", var[i])
42 42
43#define PUTD_TO_PAGE(len,page,message,size,var) \ 43#define PUTD_TO_PAGE(len, page, message, size, var) \
44 len += snprintf(page+len, PAGE_SIZE - len, message); \ 44 len += snprintf(page+len, PAGE_SIZE - len, message); \
45 for(i = 0; i < (size - 1); i++) \ 45 for(i = 0; i < (size - 1); i++) \
46 { \ 46 { \
diff --git a/drivers/staging/hv/Kconfig b/drivers/staging/hv/Kconfig
deleted file mode 100644
index 60ac479a2909..000000000000
--- a/drivers/staging/hv/Kconfig
+++ /dev/null
@@ -1,5 +0,0 @@
1config HYPERV_STORAGE
2 tristate "Microsoft Hyper-V virtual storage driver"
3 depends on HYPERV && SCSI
4 help
5 Select this option to enable the Hyper-V virtual storage driver.
diff --git a/drivers/staging/hv/Makefile b/drivers/staging/hv/Makefile
deleted file mode 100644
index af95a6b7e436..000000000000
--- a/drivers/staging/hv/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
2
3hv_storvsc-y := storvsc_drv.o
diff --git a/drivers/staging/hv/TODO b/drivers/staging/hv/TODO
deleted file mode 100644
index dea7d92dfdc1..000000000000
--- a/drivers/staging/hv/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
1TODO:
2 - audit the scsi driver
3
4Please send patches for this code to Greg Kroah-Hartman <gregkh@suse.de>,
5Haiyang Zhang <haiyangz@microsoft.com>, and K. Y. Srinivasan <kys@microsoft.com>
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
index 1abb80cb884e..8926f2448cc9 100644
--- a/drivers/staging/iio/Documentation/device.txt
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -62,7 +62,7 @@ Then fill in the following:
62 An optional associated buffer. 62 An optional associated buffer.
63- indio_dev->pollfunc: 63- indio_dev->pollfunc:
64 Poll function related elements. This controls what occurs when a trigger 64 Poll function related elements. This controls what occurs when a trigger
65 to which this device is attached sends and event. 65 to which this device is attached sends an event.
66- indio_dev->channels: 66- indio_dev->channels:
67 Specification of device channels. Most attributes etc are built 67 Specification of device channels. Most attributes etc are built
68 from this spec. 68 from this spec.
diff --git a/drivers/staging/iio/Documentation/iio_event_monitor.c b/drivers/staging/iio/Documentation/iio_event_monitor.c
new file mode 100644
index 000000000000..0d21a277305f
--- /dev/null
+++ b/drivers/staging/iio/Documentation/iio_event_monitor.c
@@ -0,0 +1,241 @@
1/* Industrialio event test code.
2 *
3 * Copyright (c) 2011-2012 Lars-Peter Clausen <lars@metafoo.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is primarily intended as an example application.
10 * Waits for events from the specified device and pretty prints each
11 * one (channel, event type and direction) on stdout after appropriate
12 * decoding of the event code.
13 *
14 * Usage:
15 * iio_event_monitor <device_name>
16 *
17 */
18
19#define _GNU_SOURCE
20
21#include <unistd.h>
22#include <stdbool.h>
23#include <stdio.h>
24#include <errno.h>
25#include <string.h>
26#include <poll.h>
27#include <fcntl.h>
28#include <sys/ioctl.h>
29#include "iio_utils.h"
30#include "../events.h"
31
32static const char * const iio_chan_type_name_spec[] = {
33 [IIO_VOLTAGE] = "voltage",
34 [IIO_CURRENT] = "current",
35 [IIO_POWER] = "power",
36 [IIO_ACCEL] = "accel",
37 [IIO_ANGL_VEL] = "anglvel",
38 [IIO_MAGN] = "magn",
39 [IIO_LIGHT] = "illuminance",
40 [IIO_INTENSITY] = "intensity",
41 [IIO_PROXIMITY] = "proximity",
42 [IIO_TEMP] = "temp",
43 [IIO_INCLI] = "incli",
44 [IIO_ROT] = "rot",
45 [IIO_ANGL] = "angl",
46 [IIO_TIMESTAMP] = "timestamp",
47 [IIO_CAPACITANCE] = "capacitance",
48};
49
50static const char * const iio_ev_type_text[] = {
51 [IIO_EV_TYPE_THRESH] = "thresh",
52 [IIO_EV_TYPE_MAG] = "mag",
53 [IIO_EV_TYPE_ROC] = "roc",
54 [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
55 [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
56};
57
58static const char * const iio_ev_dir_text[] = {
59 [IIO_EV_DIR_EITHER] = "either",
60 [IIO_EV_DIR_RISING] = "rising",
61 [IIO_EV_DIR_FALLING] = "falling"
62};
63
64static const char * const iio_modifier_names[] = {
65 [IIO_MOD_X] = "x",
66 [IIO_MOD_Y] = "y",
67 [IIO_MOD_Z] = "z",
68 [IIO_MOD_LIGHT_BOTH] = "both",
69 [IIO_MOD_LIGHT_IR] = "ir",
70};
71
72static bool event_is_known(struct iio_event_data *event)
73{
74 enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id);
75 enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id);
76 enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id);
77 enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id);
78
79 switch (type) {
80 case IIO_VOLTAGE:
81 case IIO_CURRENT:
82 case IIO_POWER:
83 case IIO_ACCEL:
84 case IIO_ANGL_VEL:
85 case IIO_MAGN:
86 case IIO_LIGHT:
87 case IIO_INTENSITY:
88 case IIO_PROXIMITY:
89 case IIO_TEMP:
90 case IIO_INCLI:
91 case IIO_ROT:
92 case IIO_ANGL:
93 case IIO_TIMESTAMP:
94 case IIO_CAPACITANCE:
95 break;
96 default:
97 return false;
98 }
99
100 switch (mod) {
101 case IIO_NO_MOD:
102 case IIO_MOD_X:
103 case IIO_MOD_Y:
104 case IIO_MOD_Z:
105 case IIO_MOD_LIGHT_BOTH:
106 case IIO_MOD_LIGHT_IR:
107 break;
108 default:
109 return false;
110 }
111
112 switch (ev_type) {
113 case IIO_EV_TYPE_THRESH:
114 case IIO_EV_TYPE_MAG:
115 case IIO_EV_TYPE_ROC:
116 case IIO_EV_TYPE_THRESH_ADAPTIVE:
117 case IIO_EV_TYPE_MAG_ADAPTIVE:
118 break;
119 default:
120 return false;
121 }
122
123 switch (dir) {
124 case IIO_EV_DIR_EITHER:
125 case IIO_EV_DIR_RISING:
126 case IIO_EV_DIR_FALLING:
127 break;
128 default:
129 return false;
130 }
131
132 return true;
133}
134
135static void print_event(struct iio_event_data *event)
136{
137 enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id);
138 enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id);
139 enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id);
140 enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id);
141 int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event->id);
142 int chan2 = IIO_EVENT_CODE_EXTRACT_CHAN2(event->id);
143 bool diff = IIO_EVENT_CODE_EXTRACT_DIFF(event->id);
144
145 if (!event_is_known(event)) {
146 printf("Unknown event: time: %lld, id: %llx\n",
147 event->timestamp, event->id);
148 return;
149 }
150
151 printf("Event: time: %lld, ", event->timestamp);
152
153 if (mod != IIO_NO_MOD) {
154 printf("type: %s(%s), ",
155 iio_chan_type_name_spec[type],
156 iio_modifier_names[mod]);
157 } else {
158 printf("type: %s, ",
159 iio_chan_type_name_spec[type]);
160 }
161
162 if (diff && chan >= 0 && chan2 >= 0)
163 printf("channel: %d-%d, ", chan, chan2);
164 else if (chan >= 0)
165 printf("channel: %d, ", chan);
166
167 printf("evtype: %s, direction: %s\n",
168 iio_ev_type_text[ev_type],
169 iio_ev_dir_text[dir]);
170}
171
172int main(int argc, char **argv)
173{
174 struct iio_event_data event;
175 const char *device_name;
176 char *chrdev_name;
177 int ret;
178 int dev_num;
179 int fd, event_fd;
180
181 if (argc <= 1) {
182 printf("Usage: %s <device_name>\n", argv[0]);
183 return -1;
184 }
185
186 device_name = argv[1];
187
188 dev_num = find_type_by_name(device_name, "iio:device");
189 if (dev_num >= 0) {
190 printf("Found IIO device with name %s with device number %d\n",
191 device_name, dev_num);
192 ret = asprintf(&chrdev_name, "/dev/iio:device%d", dev_num);
193 if (ret < 0) {
194 ret = -ENOMEM;
195 goto error_ret;
196 }
197 } else {
198 /* If we can't find an IIO device by name assume device_name is a
199 IIO chrdev */
200 chrdev_name = strdup(device_name);
201 }
202
203 fd = open(chrdev_name, 0);
204 if (fd == -1) {
205 fprintf(stdout, "Failed to open %s\n", chrdev_name);
206 ret = -errno;
207 goto error_free_chrdev_name;
208 }
209
210 ret = ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd);
211
212 close(fd);
213
214 if (ret == -1 || event_fd == -1) {
215 fprintf(stdout, "Failed to retrieve event fd\n");
216 ret = -errno;
217 goto error_free_chrdev_name;
218 }
219
220 while (true) {
221 ret = read(event_fd, &event, sizeof(event));
222 if (ret == -1) {
223 if (errno == EAGAIN) {
224 printf("nothing available\n");
225 continue;
226 } else {
227 perror("Failed to read event from device");
228 ret = -errno;
229 break;
230 }
231 }
232
233 print_event(&event);
234 }
235
236 close(event_fd);
237error_free_chrdev_name:
238 free(chrdev_name);
239error_ret:
240 return ret;
241}
diff --git a/drivers/staging/iio/Documentation/inkernel.txt b/drivers/staging/iio/Documentation/inkernel.txt
new file mode 100644
index 000000000000..a05823e955d2
--- /dev/null
+++ b/drivers/staging/iio/Documentation/inkernel.txt
@@ -0,0 +1,58 @@
1Industrial I/O Subsystem in kernel consumers.
2
3The IIO subsystem can act as a layer under other elements of the kernel
4providing a means of obtaining ADC type readings or of driving DAC type
5signals. The functionality supported will grow as use cases arise.
6
7Describing the channel mapping (iio/machine.h)
8
9Channel associations are described using:
10
11struct iio_map {
12 const char *adc_channel_label;
13 const char *consumer_dev_name;
14 const char *consumer_channel;
15};
16
17adc_channel_label identifies the channel on the IIO device by being
18matched against the datasheet_name field of the iio_chan_spec.
19
20consumer_dev_name allows identification of the consumer device.
21This is then used to find the channel mapping from the consumer device (see
22below).
23
24Finally consumer_channel is a string identifying the channel to the consumer.
25(Perhaps 'battery_voltage' or similar).
26
27An array of these structures is then passed to the IIO driver.
28
29Supporting in kernel interfaces in the driver (driver.h)
30
31The driver must provide datasheet_name values for its channels and
32must pass the iio_map structures and a pointer to its own iio_dev structure
33on to the core via a call to iio_map_array_register. On removal,
34iio_map_array_unregister reverses this process.
35
36The result of this is that the IIO core now has all the information needed
37to associate a given channel with the consumer requesting it.
38
39Acting as an IIO consumer (consumer.h)
40
41The consumer first has to obtain an iio_channel structure from the core
42by calling iio_st_channel_get(). The correct channel is identified by:
43
44* matching dev or dev_name against consumer_dev and consumer_dev_name
45* matching consumer_channel against consumer_channel in the map
46
47There are then a number of functions that can be used to get information
48about this channel, such as its current reading.
49
50e.g.
51iio_st_read_channel_raw() - get a reading
52iio_st_get_channel_type() - get the type of channel
53
54There is also provision for retrieving all of the channels associated
55with a given consumer. This is useful for generic drivers such as
56iio_hwmon where the number and naming of channels is not known by the
57consumer driver. To do this, use iio_st_channel_get_all.
58
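As a worked example of the mapping described above, a board support file for
a hypothetical battery monitor could look like this (all names are
illustrative, and the iio_map_array_register() signature and the NULL
terminator convention are assumptions taken from the text above; check
iio/machine.h and driver.h for the exact interface):

	#include "iio/machine.h"

	static struct iio_map board_adc_maps[] = {
		{
			/* matched against the iio_chan_spec datasheet_name */
			.adc_channel_label = "channel_0",
			.consumer_dev_name = "battery-monitor.0",
			.consumer_channel = "battery_voltage",
		},
		{
			.adc_channel_label = "channel_1",
			.consumer_dev_name = "battery-monitor.0",
			.consumer_channel = "battery_current",
		},
		{ },	/* assumed sentinel terminating the array */
	};

	/* in the ADC driver, with its own iio_dev in hand: */
	ret = iio_map_array_register(indio_dev, board_adc_maps);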
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 90162aa8b2df..fe1586718880 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -11,6 +11,13 @@ menuconfig IIO
 	  number of different physical interfaces (i2c, spi, etc). See
 	  drivers/staging/iio/Documentation for more information.
 if IIO
+config IIO_ST_HWMON
+	tristate "Hwmon driver that uses channels specified via iio maps"
+	depends on HWMON
+	help
+	  This is a platform driver that in combination with a suitable
+	  map allows IIO devices to provide basic hwmon functionality
+	  for those channels specified in the map.
 
 config IIO_BUFFER
 	bool "Enable buffer support within IIO"
@@ -79,7 +86,7 @@ config IIO_SIMPLE_DUMMY
 	help
 	  Driver intended mainly as documentation for how to write
 	  a driver. May also be useful for testing userspace code
-	  without hardward.
+	  without hardware.
 
 if IIO_SIMPLE_DUMMY
 
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index 1340aead18b4..5075291dda7a 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -3,7 +3,7 @@
 #
 
 obj-$(CONFIG_IIO) += industrialio.o
-industrialio-y := industrialio-core.o
+industrialio-y := industrialio-core.o industrialio-event.o inkern.o
 industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o
 industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o
 
@@ -17,6 +17,8 @@ iio_dummy-$(CONFIG_IIO_SIMPLE_DUMMY_BUFFER) += iio_simple_dummy_buffer.o
 
 obj-$(CONFIG_IIO_DUMMY_EVGEN) += iio_dummy_evgen.o
 
+obj-$(CONFIG_IIO_ST_HWMON) += iio_hwmon.o
+
 obj-y += accel/
 obj-y += adc/
 obj-y += addac/
diff --git a/drivers/staging/iio/accel/adis16201_ring.c b/drivers/staging/iio/accel/adis16201_ring.c
index 26c610faee3f..97f9e6b159d9 100644
--- a/drivers/staging/iio/accel/adis16201_ring.c
+++ b/drivers/staging/iio/accel/adis16201_ring.c
@@ -115,9 +115,7 @@ int adis16201_configure_ring(struct iio_dev *indio_dev)
 		return ret;
 	}
 	indio_dev->buffer = ring;
-	/* Effectively select the ring buffer implementation */
 	ring->scan_timestamp = true;
-	ring->access = &ring_sw_access_funcs;
 	indio_dev->setup_ops = &adis16201_ring_setup_ops;
 
 	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
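This two-line deletion recurs throughout the adis16xxx, lis3l02dq, ad7xxx and
max1363 buffer diffs below: drivers stop wiring up the software-ring access
functions by hand. The working assumption (to be checked against the ring_sw
and kfifo_buf changes elsewhere in this series) is that the buffer allocator
now installs buffer->access itself, leaving a configure path of roughly:

	ring = iio_sw_rb_allocate(indio_dev);	/* also sets ring->access */
	if (!ring)
		return -ENOMEM;
	indio_dev->buffer = ring;
	ring->scan_timestamp = true;
	indio_dev->setup_ops = &adis16201_ring_setup_ops;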
diff --git a/drivers/staging/iio/accel/adis16203_ring.c b/drivers/staging/iio/accel/adis16203_ring.c
index 064640d15e41..6a8963db4f60 100644
--- a/drivers/staging/iio/accel/adis16203_ring.c
+++ b/drivers/staging/iio/accel/adis16203_ring.c
@@ -117,9 +117,7 @@ int adis16203_configure_ring(struct iio_dev *indio_dev)
 		return ret;
 	}
 	indio_dev->buffer = ring;
-	/* Effectively select the ring buffer implementation */
 	ring->scan_timestamp = true;
-	ring->access = &ring_sw_access_funcs;
 	indio_dev->setup_ops = &adis16203_ring_setup_ops;
 
 	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
diff --git a/drivers/staging/iio/accel/adis16204_ring.c b/drivers/staging/iio/accel/adis16204_ring.c
index 4081179dfa5c..5c8ab7338864 100644
--- a/drivers/staging/iio/accel/adis16204_ring.c
+++ b/drivers/staging/iio/accel/adis16204_ring.c
@@ -112,8 +112,6 @@ int adis16204_configure_ring(struct iio_dev *indio_dev)
 		return ret;
 	}
 	indio_dev->buffer = ring;
-	/* Effectively select the ring buffer implementation */
-	ring->access = &ring_sw_access_funcs;
 	ring->scan_timestamp = true;
 	indio_dev->setup_ops = &adis16204_ring_setup_ops;
 
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index 2a6fd334f5f1..57254b6b38b7 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -113,8 +113,6 @@ int adis16209_configure_ring(struct iio_dev *indio_dev)
 		return ret;
 	}
 	indio_dev->buffer = ring;
-	/* Effectively select the ring buffer implementation */
-	ring->access = &ring_sw_access_funcs;
 	ring->scan_timestamp = true;
 	indio_dev->setup_ops = &adis16209_ring_setup_ops;
 
diff --git a/drivers/staging/iio/accel/adis16240_ring.c b/drivers/staging/iio/accel/adis16240_ring.c
index e23622d96f9f..43ba84e993ad 100644
--- a/drivers/staging/iio/accel/adis16240_ring.c
+++ b/drivers/staging/iio/accel/adis16240_ring.c
@@ -110,8 +110,6 @@ int adis16240_configure_ring(struct iio_dev *indio_dev)
 		return ret;
 	}
 	indio_dev->buffer = ring;
-	/* Effectively select the ring buffer implementation */
-	ring->access = &ring_sw_access_funcs;
 	ring->scan_timestamp = true;
 	indio_dev->setup_ops = &adis16240_ring_setup_ops;
 
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index 2db383fc2743..ae5f225b4bb2 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -187,12 +187,10 @@ void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev);
 #ifdef CONFIG_LIS3L02DQ_BUF_RING_SW
 #define lis3l02dq_free_buf iio_sw_rb_free
 #define lis3l02dq_alloc_buf iio_sw_rb_allocate
-#define lis3l02dq_access_funcs ring_sw_access_funcs
 #endif
 #ifdef CONFIG_LIS3L02DQ_BUF_KFIFO
 #define lis3l02dq_free_buf iio_kfifo_free
 #define lis3l02dq_alloc_buf iio_kfifo_allocate
-#define lis3l02dq_access_funcs kfifo_access_funcs
 #endif
 irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private);
 #define lis3l02dq_th lis3l02dq_data_rdy_trig_poll
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 98c5c92d3450..0fc3973f32ae 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -239,7 +239,7 @@ static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
 	__lis3l02dq_write_data_ready_config(&indio_dev->dev, state);
 	if (state == false) {
 		/*
-		 * A possible quirk with teh handler is currently worked around
+		 * A possible quirk with the handler is currently worked around
 		 * by ensuring outstanding read events are cleared.
 		 */
 		ret = lis3l02dq_read_all(indio_dev, NULL);
@@ -406,8 +406,6 @@ int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
 		return -ENOMEM;
 
 	indio_dev->buffer = buffer;
-	/* Effectively select the buffer implementation */
-	indio_dev->buffer->access = &lis3l02dq_access_funcs;
 
 	buffer->scan_timestamp = true;
 	indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
index ad38dd955cd4..131daac90012 100644
--- a/drivers/staging/iio/accel/sca3000.h
+++ b/drivers/staging/iio/accel/sca3000.h
@@ -136,7 +136,7 @@
 #define SCA3000_INT_MASK_ACTIVE_HIGH		0x01
 #define SCA3000_INT_MASK_ACTIVE_LOW		0x00
 
-/* Values of mulipexed registers (write to ctrl_data after select) */
+/* Values of multiplexed registers (write to ctrl_data after select) */
 #define SCA3000_REG_ADDR_CTRL_DATA		0x22
 
 /* Measurement modes available on some sca3000 series chips. Code assumes others
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index d9decea4fa62..592eabd85f36 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -193,4 +193,13 @@ config MAX1363_RING_BUFFER
 	  Say yes here to include ring buffer support in the MAX1363
 	  ADC driver.
 
+config LPC32XX_ADC
+	tristate "NXP LPC32XX ADC"
+	depends on ARCH_LPC32XX && !TOUCHSCREEN_LPC32XX
+	help
+	  Say yes here to build support for the integrated ADC inside the
+	  LPC32XX SoC. Note that this feature uses the same hardware as the
+	  touchscreen driver, so you can only select one of the two drivers
+	  (lpc32xx_adc or lpc32xx_ts). Provides direct access via sysfs.
+
 endmenu
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index ceee7f3c3061..f83ab9551d8e 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -37,3 +37,4 @@ obj-$(CONFIG_AD7192) += ad7192.o
 obj-$(CONFIG_ADT7310) += adt7310.o
 obj-$(CONFIG_ADT7410) += adt7410.o
 obj-$(CONFIG_AD7280) += ad7280a.o
+obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 45f4504ed927..9fd6d63d2999 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -561,8 +561,6 @@ static int ad7192_register_ring_funcs_and_init(struct iio_dev *indio_dev)
 		ret = -ENOMEM;
 		goto error_ret;
 	}
-	/* Effectively select the ring buffer implementation */
-	indio_dev->buffer->access = &ring_sw_access_funcs;
 	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
 						 &ad7192_trigger_handler,
 						 IRQF_ONESHOT,
@@ -824,25 +822,20 @@ static struct attribute *ad7192_attributes[] = {
 	NULL
 };
 
-static umode_t ad7192_attr_is_visible(struct kobject *kobj,
-				      struct attribute *attr, int n)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct iio_dev *indio_dev = dev_get_drvdata(dev);
-	struct ad7192_state *st = iio_priv(indio_dev);
-
-	umode_t mode = attr->mode;
-
-	if ((st->devid != ID_AD7195) &&
-	    (attr == &iio_dev_attr_ac_excitation_en.dev_attr.attr))
-		mode = 0;
-
-	return mode;
-}
-
 static const struct attribute_group ad7192_attribute_group = {
 	.attrs = ad7192_attributes,
-	.is_visible = ad7192_attr_is_visible,
+};
+
+static struct attribute *ad7195_attributes[] = {
+	&iio_dev_attr_sampling_frequency.dev_attr.attr,
+	&iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
+	&iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
+	&iio_dev_attr_bridge_switch_en.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group ad7195_attribute_group = {
+	.attrs = ad7195_attributes,
 };
 
 static int ad7192_read_raw(struct iio_dev *indio_dev,
@@ -972,6 +965,15 @@ static const struct iio_info ad7192_info = {
 	.driver_module = THIS_MODULE,
 };
 
+static const struct iio_info ad7195_info = {
+	.read_raw = &ad7192_read_raw,
+	.write_raw = &ad7192_write_raw,
+	.write_raw_get_fmt = &ad7192_write_raw_get_fmt,
+	.attrs = &ad7195_attribute_group,
+	.validate_trigger = ad7192_validate_trigger,
+	.driver_module = THIS_MODULE,
+};
+
 #define AD7192_CHAN_DIFF(_chan, _chan2, _name, _address, _si) \
 	{ .type = IIO_VOLTAGE, \
 	  .differential = 1, \
@@ -1064,7 +1066,10 @@ static int __devinit ad7192_probe(struct spi_device *spi)
 	indio_dev->channels = ad7192_channels;
 	indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
 	indio_dev->available_scan_masks = st->available_scan_masks;
-	indio_dev->info = &ad7192_info;
+	if (st->devid == ID_AD7195)
+		indio_dev->info = &ad7195_info;
+	else
+		indio_dev->info = &ad7192_info;
 
 	for (i = 0; i < indio_dev->num_channels; i++)
 		st->available_scan_masks[i] = (1 << i) | (1 <<
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index 0a13616e3db9..81d6b6128cb0 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -321,7 +321,7 @@ static int ad7291_read_event_value(struct iio_dev *indio_dev,
 
 	switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
 	case IIO_VOLTAGE:
-		reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_NUM(event_code)]
+		reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)]
 			[!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
 			   IIO_EV_DIR_RISING)];
 
@@ -359,7 +359,7 @@ static int ad7291_write_event_value(struct iio_dev *indio_dev,
 	case IIO_VOLTAGE:
 		if (val > AD7291_VALUE_MASK || val < 0)
 			return -EINVAL;
-		reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_NUM(event_code)]
+		reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)]
 			[!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
 			   IIO_EV_DIR_RISING)];
 		return ad7291_i2c_write(chip, reg, val);
@@ -386,7 +386,7 @@ static int ad7291_read_event_config(struct iio_dev *indio_dev,
 	switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
 	case IIO_VOLTAGE:
 		if (chip->c_mask &
-		    (1 << (15 - IIO_EVENT_CODE_EXTRACT_NUM(event_code))))
+		    (1 << (15 - IIO_EVENT_CODE_EXTRACT_CHAN(event_code))))
 			return 1;
 		else
 			return 0;
@@ -418,12 +418,12 @@ static int ad7291_write_event_config(struct iio_dev *indio_dev,
 	switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
 	case IIO_VOLTAGE:
 		if ((!state) && (chip->c_mask & (1 << (15 -
-				IIO_EVENT_CODE_EXTRACT_NUM(event_code)))))
-			chip->c_mask &= ~(1 << (15 - IIO_EVENT_CODE_EXTRACT_NUM
+				IIO_EVENT_CODE_EXTRACT_CHAN(event_code)))))
+			chip->c_mask &= ~(1 << (15 - IIO_EVENT_CODE_EXTRACT_CHAN
 				(event_code)));
 		else if (state && (!(chip->c_mask & (1 << (15 -
-				IIO_EVENT_CODE_EXTRACT_NUM(event_code))))))
-			chip->c_mask |= (1 << (15 - IIO_EVENT_CODE_EXTRACT_NUM
+				IIO_EVENT_CODE_EXTRACT_CHAN(event_code))))))
+			chip->c_mask |= (1 << (15 - IIO_EVENT_CODE_EXTRACT_CHAN
 				(event_code)));
 		else
 			break;
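The IIO_EVENT_CODE_EXTRACT_NUM to IIO_EVENT_CODE_EXTRACT_CHAN renames here,
and in the ad799x, max1363 and ad7150 diffs below, follow the event-code
helpers in the IIO core. As a sketch of what the helper extracts (the field
layout is an assumption to verify against the IIO events header):

	/* channel number assumed to live in the low 16 bits of the 64-bit
	 * event code, signed so that -1 can mean "no channel" */
	#define IIO_EVENT_CODE_EXTRACT_CHAN(mask) ((__s16)((mask) & 0xFFFF))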
diff --git a/drivers/staging/iio/adc/ad7298_ring.c b/drivers/staging/iio/adc/ad7298_ring.c
index d1a12dd015e2..feeb0eeba59a 100644
--- a/drivers/staging/iio/adc/ad7298_ring.c
+++ b/drivers/staging/iio/adc/ad7298_ring.c
@@ -131,9 +131,6 @@ int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev)
 		ret = -ENOMEM;
 		goto error_ret;
 	}
-	/* Effectively select the ring buffer implementation */
-	indio_dev->buffer->access = &ring_sw_access_funcs;
-
 	indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
 						 &ad7298_trigger_handler,
 						 IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/ad7476_ring.c b/drivers/staging/iio/adc/ad7476_ring.c
index 4e298b2a05b2..d6af6c05ce1c 100644
--- a/drivers/staging/iio/adc/ad7476_ring.c
+++ b/drivers/staging/iio/adc/ad7476_ring.c
@@ -23,7 +23,7 @@
 /**
  * ad7476_ring_preenable() setup the parameters of the ring before enabling
  *
- * The complex nature of the setting of the nuber of bytes per datum is due
+ * The complex nature of the setting of the number of bytes per datum is due
  * to this driver currently ensuring that the timestamp is stored at an 8
  * byte boundary.
  **/
@@ -98,8 +98,6 @@ int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
 		ret = -ENOMEM;
 		goto error_ret;
 	}
-	/* Effectively select the ring buffer implementation */
-	indio_dev->buffer->access = &ring_sw_access_funcs;
 	indio_dev->pollfunc
 		= iio_alloc_pollfunc(NULL,
 				     &ad7476_trigger_handler,
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index ddb7ef92f5c1..97e8d3d4471e 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -197,7 +197,7 @@ static IIO_DEVICE_ATTR(oversampling_ratio, S_IRUGO | S_IWUSR,
 		       ad7606_store_oversampling_ratio, 0);
 static IIO_CONST_ATTR(oversampling_ratio_available, "0 2 4 8 16 32 64");
 
-static struct attribute *ad7606_attributes[] = {
+static struct attribute *ad7606_attributes_os_and_range[] = {
 	&iio_dev_attr_in_voltage_range.dev_attr.attr,
 	&iio_const_attr_in_voltage_range_available.dev_attr.attr,
 	&iio_dev_attr_oversampling_ratio.dev_attr.attr,
@@ -205,34 +205,28 @@ static struct attribute *ad7606_attributes[] = {
 	NULL,
 };
 
-static umode_t ad7606_attr_is_visible(struct kobject *kobj,
-				      struct attribute *attr, int n)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct iio_dev *indio_dev = dev_get_drvdata(dev);
-	struct ad7606_state *st = iio_priv(indio_dev);
-
-	umode_t mode = attr->mode;
-
-	if (!(gpio_is_valid(st->pdata->gpio_os0) &&
-	      gpio_is_valid(st->pdata->gpio_os1) &&
-	      gpio_is_valid(st->pdata->gpio_os2)) &&
-	    (attr == &iio_dev_attr_oversampling_ratio.dev_attr.attr ||
-	     attr ==
-	     &iio_const_attr_oversampling_ratio_available.dev_attr.attr))
-		mode = 0;
-	else if (!gpio_is_valid(st->pdata->gpio_range) &&
-		 (attr == &iio_dev_attr_in_voltage_range.dev_attr.attr ||
-		  attr ==
-		  &iio_const_attr_in_voltage_range_available.dev_attr.attr))
-		mode = 0;
-
-	return mode;
-}
+static const struct attribute_group ad7606_attribute_group_os_and_range = {
+	.attrs = ad7606_attributes_os_and_range,
+};
 
-static const struct attribute_group ad7606_attribute_group = {
-	.attrs = ad7606_attributes,
-	.is_visible = ad7606_attr_is_visible,
+static struct attribute *ad7606_attributes_os[] = {
+	&iio_dev_attr_oversampling_ratio.dev_attr.attr,
+	&iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad7606_attribute_group_os = {
+	.attrs = ad7606_attributes_os,
+};
+
+static struct attribute *ad7606_attributes_range[] = {
+	&iio_dev_attr_in_voltage_range.dev_attr.attr,
+	&iio_const_attr_in_voltage_range_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad7606_attribute_group_range = {
+	.attrs = ad7606_attributes_range,
 };
 
 #define AD7606_CHANNEL(num)				\
@@ -435,10 +429,27 @@ static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 };
 
-static const struct iio_info ad7606_info = {
+static const struct iio_info ad7606_info_no_os_or_range = {
 	.driver_module = THIS_MODULE,
 	.read_raw = &ad7606_read_raw,
-	.attrs = &ad7606_attribute_group,
+};
+
+static const struct iio_info ad7606_info_os_and_range = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &ad7606_read_raw,
+	.attrs = &ad7606_attribute_group_os_and_range,
+};
+
+static const struct iio_info ad7606_info_os = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &ad7606_read_raw,
+	.attrs = &ad7606_attribute_group_os,
+};
+
+static const struct iio_info ad7606_info_range = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &ad7606_read_raw,
+	.attrs = &ad7606_attribute_group_range,
 };
 
 struct iio_dev *ad7606_probe(struct device *dev, int irq,
@@ -483,7 +494,19 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq,
 	st->chip_info = &ad7606_chip_info_tbl[id];
 
 	indio_dev->dev.parent = dev;
-	indio_dev->info = &ad7606_info;
+	if (gpio_is_valid(st->pdata->gpio_os0) &&
+	    gpio_is_valid(st->pdata->gpio_os1) &&
+	    gpio_is_valid(st->pdata->gpio_os2)) {
+		if (gpio_is_valid(st->pdata->gpio_range))
+			indio_dev->info = &ad7606_info_os_and_range;
+		else
+			indio_dev->info = &ad7606_info_os;
+	} else {
+		if (gpio_is_valid(st->pdata->gpio_range))
+			indio_dev->info = &ad7606_info_range;
+		else
+			indio_dev->info = &ad7606_info_no_os_or_range;
+	}
 	indio_dev->modes = INDIO_DIRECT_MODE;
 	indio_dev->name = st->chip_info->name;
 	indio_dev->channels = st->chip_info->channels;
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index cff97568189e..bb152a8e8c92 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -173,18 +173,7 @@ static struct platform_driver ad7606_driver = {
 	},
 };
 
-static int __init ad7606_init(void)
-{
-	return platform_driver_register(&ad7606_driver);
-}
-
-static void __exit ad7606_cleanup(void)
-{
-	platform_driver_unregister(&ad7606_driver);
-}
-
-module_init(ad7606_init);
-module_exit(ad7606_cleanup);
+module_platform_driver(ad7606_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
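module_platform_driver() generates essentially the init/exit boilerplate
deleted above; per the helper macros in linux/platform_device.h it expands
to roughly (the generated function names are an approximation):

	static int __init ad7606_driver_init(void)
	{
		return platform_driver_register(&ad7606_driver);
	}
	module_init(ad7606_driver_init);

	static void __exit ad7606_driver_exit(void)
	{
		platform_driver_unregister(&ad7606_driver);
	}
	module_exit(ad7606_driver_exit);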
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
index e8f94a18a943..1ef9fbcaf2de 100644
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ b/drivers/staging/iio/adc/ad7606_ring.c
@@ -110,8 +110,6 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
 		goto error_ret;
 	}
 
-	/* Effectively select the ring buffer implementation */
-	indio_dev->buffer->access = &ring_sw_access_funcs;
 	indio_dev->pollfunc = iio_alloc_pollfunc(&ad7606_trigger_handler_th_bh,
 						 &ad7606_trigger_handler_th_bh,
 						 0,
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c
index 6a058b19c49a..84ecde1ad042 100644
--- a/drivers/staging/iio/adc/ad7793.c
+++ b/drivers/staging/iio/adc/ad7793.c
@@ -427,8 +427,6 @@ static int ad7793_register_ring_funcs_and_init(struct iio_dev *indio_dev)
 		ret = -ENOMEM;
 		goto error_ret;
 	}
-	/* Effectively select the ring buffer implementation */
-	indio_dev->buffer->access = &ring_sw_access_funcs;
 	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
 						 &ad7793_trigger_handler,
 						 IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/ad7887_ring.c b/drivers/staging/iio/adc/ad7887_ring.c
index 85076cd962e7..d1809079b63d 100644
--- a/drivers/staging/iio/adc/ad7887_ring.c
+++ b/drivers/staging/iio/adc/ad7887_ring.c
@@ -131,8 +131,6 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
 		ret = -ENOMEM;
 		goto error_ret;
 	}
-	/* Effectively select the ring buffer implementation */
-	indio_dev->buffer->access = &ring_sw_access_funcs;
 	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
 						 &ad7887_trigger_handler,
 						 IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index d5b581d8bc2b..a8458669350f 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -256,7 +256,7 @@ static int ad799x_write_event_value(struct iio_dev *indio_dev,
 	struct ad799x_state *st = iio_priv(indio_dev);
 	int direction = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
 			   IIO_EV_DIR_FALLING);
-	int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+	int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
 
 	mutex_lock(&indio_dev->mlock);
 	ret = ad799x_i2c_write16(st,
@@ -275,7 +275,7 @@ static int ad799x_read_event_value(struct iio_dev *indio_dev,
 	struct ad799x_state *st = iio_priv(indio_dev);
 	int direction = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
 			   IIO_EV_DIR_FALLING);
-	int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+	int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
 	u16 valin;
 
 	mutex_lock(&indio_dev->mlock);
diff --git a/drivers/staging/iio/adc/ad799x_ring.c b/drivers/staging/iio/adc/ad799x_ring.c
index 5dded9e7820a..069765cab275 100644
--- a/drivers/staging/iio/adc/ad799x_ring.c
+++ b/drivers/staging/iio/adc/ad799x_ring.c
@@ -26,7 +26,7 @@
 /**
  * ad799x_ring_preenable() setup the parameters of the ring before enabling
  *
- * The complex nature of the setting of the nuber of bytes per datum is due
+ * The complex nature of the setting of the number of bytes per datum is due
  * to this driver currently ensuring that the timestamp is stored at an 8
  * byte boundary.
  **/
@@ -141,8 +141,6 @@ int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
 		ret = -ENOMEM;
 		goto error_ret;
 	}
-	/* Effectively select the ring buffer implementation */
-	indio_dev->buffer->access = &ring_sw_access_funcs;
 	indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
 						 &ad799x_trigger_handler,
 						 IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/adt7310.c b/drivers/staging/iio/adc/adt7310.c
index eec2f325d549..caf57c1169b1 100644
--- a/drivers/staging/iio/adc/adt7310.c
+++ b/drivers/staging/iio/adc/adt7310.c
@@ -725,32 +725,19 @@ static struct attribute *adt7310_event_int_attributes[] = {
 	&iio_dev_attr_fault_queue.dev_attr.attr,
 	&iio_dev_attr_t_alarm_high.dev_attr.attr,
 	&iio_dev_attr_t_alarm_low.dev_attr.attr,
-	&iio_dev_attr_t_hyst.dev_attr.attr,
-	NULL,
-};
-
-static struct attribute *adt7310_event_ct_attributes[] = {
-	&iio_dev_attr_event_mode.dev_attr.attr,
-	&iio_dev_attr_available_event_modes.dev_attr.attr,
-	&iio_dev_attr_fault_queue.dev_attr.attr,
 	&iio_dev_attr_t_crit.dev_attr.attr,
 	&iio_dev_attr_t_hyst.dev_attr.attr,
 	NULL,
 };
 
-static struct attribute_group adt7310_event_attribute_group[ADT7310_IRQS] = {
-	{
-		.attrs = adt7310_event_int_attributes,
-		.name = "events",
-	}, {
-		.attrs = adt7310_event_ct_attributes,
-		.name = "events",
-	}
+static struct attribute_group adt7310_event_attribute_group = {
+	.attrs = adt7310_event_int_attributes,
+	.name = "events",
 };
 
 static const struct iio_info adt7310_info = {
 	.attrs = &adt7310_attribute_group,
-	.event_attrs = adt7310_event_attribute_group,
+	.event_attrs = &adt7310_event_attribute_group,
 	.driver_module = THIS_MODULE,
 };
 
diff --git a/drivers/staging/iio/adc/adt7410.c b/drivers/staging/iio/adc/adt7410.c
index c62248ceb37a..dff3e8ca2d78 100644
--- a/drivers/staging/iio/adc/adt7410.c
+++ b/drivers/staging/iio/adc/adt7410.c
@@ -693,32 +693,19 @@ static struct attribute *adt7410_event_int_attributes[] = {
 	&iio_dev_attr_fault_queue.dev_attr.attr,
 	&iio_dev_attr_t_alarm_high.dev_attr.attr,
 	&iio_dev_attr_t_alarm_low.dev_attr.attr,
-	&iio_dev_attr_t_hyst.dev_attr.attr,
-	NULL,
-};
-
-static struct attribute *adt7410_event_ct_attributes[] = {
-	&iio_dev_attr_event_mode.dev_attr.attr,
-	&iio_dev_attr_available_event_modes.dev_attr.attr,
-	&iio_dev_attr_fault_queue.dev_attr.attr,
 	&iio_dev_attr_t_crit.dev_attr.attr,
 	&iio_dev_attr_t_hyst.dev_attr.attr,
 	NULL,
 };
 
-static struct attribute_group adt7410_event_attribute_group[ADT7410_IRQS] = {
-	{
-		.attrs = adt7410_event_int_attributes,
-		.name = "events",
-	}, {
-		.attrs = adt7410_event_ct_attributes,
-		.name = "events",
-	}
+static struct attribute_group adt7410_event_attribute_group = {
+	.attrs = adt7410_event_int_attributes,
+	.name = "events",
 };
 
 static const struct iio_info adt7410_info = {
 	.attrs = &adt7410_attribute_group,
-	.event_attrs = adt7410_event_attribute_group,
+	.event_attrs = &adt7410_event_attribute_group,
 	.driver_module = THIS_MODULE,
 };
 
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
new file mode 100644
index 000000000000..dfc9033843a3
--- /dev/null
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c
@@ -0,0 +1,237 @@
1/*
2 * lpc32xx_adc.c - Support for ADC in LPC32XX
3 *
4 * 3-channel, 10-bit ADC
5 *
6 * Copyright (C) 2011, 2012 Roland Stigge <stigge@antcom.de>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/interrupt.h>
26#include <linux/device.h>
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/io.h>
30#include <linux/clk.h>
31#include <linux/err.h>
32#include <linux/completion.h>
33
34#include "../iio.h"
35#include "../sysfs.h"
36
37/*
38 * LPC32XX registers definitions
39 */
40#define LPC32XX_ADC_SELECT(x) ((x) + 0x04)
41#define LPC32XX_ADC_CTRL(x) ((x) + 0x08)
42#define LPC32XX_ADC_VALUE(x) ((x) + 0x48)
43
44/* Bit definitions for LPC32XX_ADC_SELECT: */
45#define AD_REFm 0x00000200 /* constant, always write this value! */
46#define AD_REFp 0x00000080 /* constant, always write this value! */
47#define AD_IN 0x00000010 /* multiple of this is the */
48 /* channel number: 0, 1, 2 */
49#define AD_INTERNAL 0x00000004 /* constant, always write this value! */
50
51/* Bit definitions for LPC32XX_ADC_CTRL: */
52#define AD_STROBE 0x00000002
53#define AD_PDN_CTRL 0x00000004
54
55/* Bit definitions for LPC32XX_ADC_VALUE: */
56#define ADC_VALUE_MASK 0x000003FF
57
58#define MOD_NAME "lpc32xx-adc"
59
60struct lpc32xx_adc_info {
61 void __iomem *adc_base;
62 struct clk *clk;
63 struct completion completion;
64
65 u32 value;
66};
67
68static int lpc32xx_read_raw(struct iio_dev *indio_dev,
69 struct iio_chan_spec const *chan,
70 int *val,
71 int *val2,
72 long mask)
73{
74 struct lpc32xx_adc_info *info = iio_priv(indio_dev);
75
76 if (mask == 0) {
77 mutex_lock(&indio_dev->mlock);
78 clk_enable(info->clk);
79 /* Measurement setup */
80 __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm,
81 LPC32XX_ADC_SELECT(info->adc_base));
82 /* Trigger conversion */
83 __raw_writel(AD_PDN_CTRL | AD_STROBE,
84 LPC32XX_ADC_CTRL(info->adc_base));
85 wait_for_completion(&info->completion); /* set by ISR */
86 clk_disable(info->clk);
87 *val = info->value;
88 mutex_unlock(&indio_dev->mlock);
89
90 return IIO_VAL_INT;
91 }
92
93 return -EINVAL;
94}
95
96static const struct iio_info lpc32xx_adc_iio_info = {
97 .read_raw = &lpc32xx_read_raw,
98 .driver_module = THIS_MODULE,
99};
100
101#define LPC32XX_ADC_CHANNEL(_index) { \
102 .type = IIO_VOLTAGE, \
103 .indexed = 1, \
104 .channel = _index, \
105 .address = AD_IN * _index, \
106 .scan_index = _index, \
107}
108
109static struct iio_chan_spec lpc32xx_adc_iio_channels[] = {
110 LPC32XX_ADC_CHANNEL(0),
111 LPC32XX_ADC_CHANNEL(1),
112 LPC32XX_ADC_CHANNEL(2),
113};
114
115static irqreturn_t lpc32xx_adc_isr(int irq, void *dev_id)
116{
117 struct lpc32xx_adc_info *info = (struct lpc32xx_adc_info *) dev_id;
118
119 /* Read value and clear irq */
120 info->value = __raw_readl(LPC32XX_ADC_VALUE(info->adc_base)) &
121 ADC_VALUE_MASK;
122 complete(&info->completion);
123
124 return IRQ_HANDLED;
125}
126
127static int __devinit lpc32xx_adc_probe(struct platform_device *pdev)
128{
129 struct lpc32xx_adc_info *info = NULL;
130 struct resource *res;
131 int retval = -ENODEV;
132 struct iio_dev *iodev = NULL;
133 int irq;
134
135 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
136 if (!res) {
137 dev_err(&pdev->dev, "failed to get platform I/O memory\n");
138 retval = -EBUSY;
139 goto errout1;
140 }
141
142 iodev = iio_allocate_device(sizeof(struct lpc32xx_adc_info));
143 if (!iodev) {
144 dev_err(&pdev->dev, "failed allocating iio device\n");
145 retval = -ENOMEM;
146 goto errout1;
147 }
148
149 info = iio_priv(iodev);
150
151 info->adc_base = ioremap(res->start, res->end - res->start + 1);
152 if (!info->adc_base) {
153 dev_err(&pdev->dev, "failed mapping memory\n");
154 retval = -EBUSY;
155 goto errout2;
156 }
157
158 info->clk = clk_get(&pdev->dev, NULL);
159 if (IS_ERR(info->clk)) {
160 dev_err(&pdev->dev, "failed getting clock\n");
161 goto errout3;
162 }
163
164 irq = platform_get_irq(pdev, 0);
165 if ((irq < 0) || (irq >= NR_IRQS)) {
166 dev_err(&pdev->dev, "failed getting interrupt resource\n");
167 retval = -EINVAL;
168 goto errout4;
169 }
170
171 retval = request_irq(irq, lpc32xx_adc_isr, 0, MOD_NAME, info);
172 if (retval < 0) {
173 dev_err(&pdev->dev, "failed requesting interrupt\n");
174 goto errout4;
175 }
176
177 platform_set_drvdata(pdev, iodev);
178
179 init_completion(&info->completion);
180
181 iodev->name = MOD_NAME;
182 iodev->dev.parent = &pdev->dev;
183 iodev->info = &lpc32xx_adc_iio_info;
184 iodev->modes = INDIO_DIRECT_MODE;
185 iodev->channels = lpc32xx_adc_iio_channels;
186 iodev->num_channels = ARRAY_SIZE(lpc32xx_adc_iio_channels);
187
188 retval = iio_device_register(iodev);
189 if (retval)
190 goto errout5;
191
192 dev_info(&pdev->dev, "LPC32XX ADC driver loaded, IRQ %d\n", irq);
193
194 return 0;
195
196errout5:
197 free_irq(irq, info); /* dev_id must match the request_irq() cookie */
198errout4:
199 clk_put(info->clk);
200errout3:
201 iounmap(info->adc_base);
202errout2:
203 iio_free_device(iodev);
204errout1:
205 return retval;
206}
207
208static int __devexit lpc32xx_adc_remove(struct platform_device *pdev)
209{
210 struct iio_dev *iodev = platform_get_drvdata(pdev);
211 struct lpc32xx_adc_info *info = iio_priv(iodev);
212 int irq = platform_get_irq(pdev, 0);
213
214 iio_device_unregister(iodev);
215 free_irq(irq, info);
216 platform_set_drvdata(pdev, NULL);
217 clk_put(info->clk);
218 iounmap(info->adc_base);
219 iio_free_device(iodev);
220
221 return 0;
222}
223
224static struct platform_driver lpc32xx_adc_driver = {
225 .probe = lpc32xx_adc_probe,
226 .remove = __devexit_p(lpc32xx_adc_remove),
227 .driver = {
228 .name = MOD_NAME,
229 .owner = THIS_MODULE,
230 },
231};
232
233module_platform_driver(lpc32xx_adc_driver);
234
235MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
236MODULE_DESCRIPTION("LPC32XX ADC driver");
237MODULE_LICENSE("GPL");
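One caveat in the read path of this driver: wait_for_completion() has no
timeout, so a conversion that never completes leaves the reader blocked with
indio_dev->mlock held. A hedged alternative, not part of this patch, bounds
the wait; wait_for_completion_timeout() returns 0 on timeout, and the 100 ms
budget is purely illustrative:

	/* inside lpc32xx_read_raw(), replacing the unbounded wait */
	if (!wait_for_completion_timeout(&info->completion,
					 msecs_to_jiffies(100))) {
		clk_disable(info->clk);
		mutex_unlock(&indio_dev->mlock);
		return -ETIMEDOUT;	/* ISR never signalled completion */
	}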
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
index b92cb4af18ce..cf3e2ca7e314 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -341,7 +341,7 @@ static struct iio_chan_spec max1361_channels[] =
 static struct iio_chan_spec max1363_channels[] =
 	MAX1363_4X_CHANS(12, MAX1363_EV_M);
 
-/* Appies to max1236, max1237 */
+/* Applies to max1236, max1237 */
 static const enum max1363_modes max1236_mode_list[] = {
 	_s0, _s1, _s2, _s3,
 	s0to1, s0to2, s0to3,
@@ -543,9 +543,9 @@ static int max1363_read_thresh(struct iio_dev *indio_dev,
 {
 	struct max1363_state *st = iio_priv(indio_dev);
 	if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_FALLING)
-		*val = st->thresh_low[IIO_EVENT_CODE_EXTRACT_NUM(event_code)];
+		*val = st->thresh_low[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)];
 	else
-		*val = st->thresh_high[IIO_EVENT_CODE_EXTRACT_NUM(event_code)];
+		*val = st->thresh_high[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)];
 	return 0;
 }
 
@@ -568,10 +568,10 @@ static int max1363_write_thresh(struct iio_dev *indio_dev,
 
 	switch (IIO_EVENT_CODE_EXTRACT_DIR(event_code)) {
 	case IIO_EV_DIR_FALLING:
-		st->thresh_low[IIO_EVENT_CODE_EXTRACT_NUM(event_code)] = val;
+		st->thresh_low[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)] = val;
 		break;
 	case IIO_EV_DIR_RISING:
-		st->thresh_high[IIO_EVENT_CODE_EXTRACT_NUM(event_code)] = val;
+		st->thresh_high[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)] = val;
 		break;
 	}
 
@@ -622,7 +622,7 @@ static int max1363_read_event_config(struct iio_dev *indio_dev,
 	struct max1363_state *st = iio_priv(indio_dev);
 
 	int val;
-	int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+	int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
 	mutex_lock(&indio_dev->mlock);
 	if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_FALLING)
 		val = (1 << number) & st->mask_low;
@@ -775,7 +775,7 @@ static int max1363_write_event_config(struct iio_dev *indio_dev,
 	int ret = 0;
 	struct max1363_state *st = iio_priv(indio_dev);
 	u16 unifiedmask;
-	int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+	int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
 
 	mutex_lock(&indio_dev->mlock);
 	unifiedmask = st->mask_low | st->mask_high;
@@ -1245,10 +1245,31 @@ static int max1363_initial_setup(struct max1363_state *st)
 	return max1363_set_scan_mode(st);
 }
 
+static int __devinit max1363_alloc_scan_masks(struct iio_dev *indio_dev)
+{
+	struct max1363_state *st = iio_priv(indio_dev);
+	unsigned long *masks;
+	int i;
+
+	masks = kzalloc(BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*sizeof(long)*
+			(st->chip_info->num_modes + 1), GFP_KERNEL);
+	if (!masks)
+		return -ENOMEM;
+
+	for (i = 0; i < st->chip_info->num_modes; i++)
+		bitmap_copy(masks + BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*i,
+			    max1363_mode_table[st->chip_info->mode_list[i]]
+			    .modemask, MAX1363_MAX_CHANNELS);
+
+	indio_dev->available_scan_masks = masks;
+
+	return 0;
+}
+
 static int __devinit max1363_probe(struct i2c_client *client,
 				   const struct i2c_device_id *id)
 {
-	int ret, i;
+	int ret;
 	struct max1363_state *st;
 	struct iio_dev *indio_dev;
 	struct regulator *reg;
@@ -1276,19 +1297,10 @@ static int __devinit max1363_probe(struct i2c_client *client,
 	st->chip_info = &max1363_chip_info_tbl[id->driver_data];
 	st->client = client;
 
-	indio_dev->available_scan_masks
-		= kzalloc(BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*sizeof(long)*
-			  (st->chip_info->num_modes + 1), GFP_KERNEL);
-	if (!indio_dev->available_scan_masks) {
-		ret = -ENOMEM;
+	ret = max1363_alloc_scan_masks(indio_dev);
+	if (ret)
 		goto error_free_device;
-	}
 
-	for (i = 0; i < st->chip_info->num_modes; i++)
-		bitmap_copy(indio_dev->available_scan_masks +
-			    BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*i,
-			    max1363_mode_table[st->chip_info->mode_list[i]]
-			    .modemask, MAX1363_MAX_CHANNELS);
 	/* Estabilish that the iio_dev is a child of the i2c device */
 	indio_dev->dev.parent = &client->dev;
 	indio_dev->name = id->name;
diff --git a/drivers/staging/iio/adc/max1363_ring.c b/drivers/staging/iio/adc/max1363_ring.c
index f730b3fb971a..d0a60a382930 100644
--- a/drivers/staging/iio/adc/max1363_ring.c
+++ b/drivers/staging/iio/adc/max1363_ring.c
@@ -116,8 +116,6 @@ int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
 		ret = -ENOMEM;
 		goto error_deallocate_sw_rb;
 	}
-	/* Effectively select the ring buffer implementation */
-	indio_dev->buffer->access = &ring_sw_access_funcs;
 	/* Ring buffer functions - here trigger setup related */
 	indio_dev->setup_ops = &max1363_ring_setup_ops;
 
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 2c03a39220e8..9e128dd7d457 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -125,30 +125,14 @@ static const struct i2c_device_id adt7316_i2c_id[] = {
 
 MODULE_DEVICE_TABLE(i2c, adt7316_i2c_id);
 
-#ifdef CONFIG_PM
-static int adt7316_i2c_suspend(struct i2c_client *client, pm_message_t message)
-{
-	return adt7316_disable(&client->dev);
-}
-
-static int adt7316_i2c_resume(struct i2c_client *client)
-{
-	return adt7316_enable(&client->dev);
-}
-#else
-# define adt7316_i2c_suspend NULL
-# define adt7316_i2c_resume NULL
-#endif
-
 static struct i2c_driver adt7316_driver = {
 	.driver = {
 		.name = "adt7316",
+		.pm = ADT7316_PM_OPS,
 		.owner = THIS_MODULE,
 	},
 	.probe = adt7316_i2c_probe,
 	.remove = __devexit_p(adt7316_i2c_remove),
-	.suspend = adt7316_i2c_suspend,
-	.resume = adt7316_i2c_resume,
 	.id_table = adt7316_i2c_id,
 };
 module_i2c_driver(adt7316_driver);
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index 1ea3cd06299d..985f7d8a6eb2 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -133,30 +133,14 @@ static const struct spi_device_id adt7316_spi_id[] = {
 
 MODULE_DEVICE_TABLE(spi, adt7316_spi_id);
 
-#ifdef CONFIG_PM
-static int adt7316_spi_suspend(struct spi_device *spi_dev, pm_message_t message)
-{
-	return adt7316_disable(&spi_dev->dev);
-}
-
-static int adt7316_spi_resume(struct spi_device *spi_dev)
-{
-	return adt7316_enable(&spi_dev->dev);
-}
-#else
-# define adt7316_spi_suspend NULL
-# define adt7316_spi_resume NULL
-#endif
-
 static struct spi_driver adt7316_driver = {
 	.driver = {
 		.name = "adt7316",
+		.pm = ADT7316_PM_OPS,
 		.owner = THIS_MODULE,
 	},
 	.probe = adt7316_spi_probe,
 	.remove = __devexit_p(adt7316_spi_remove),
-	.suspend = adt7316_spi_suspend,
-	.resume = adt7316_spi_resume,
 	.id_table = adt7316_spi_id,
 };
 module_spi_driver(adt7316_driver);
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 13c39292d3f2..fd6a45444058 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -2089,24 +2089,25 @@ static struct attribute_group adt7516_event_attribute_group = {
 	.name = "events",
 };
 
-#ifdef CONFIG_PM
-int adt7316_disable(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int adt7316_disable(struct device *dev)
 {
 	struct iio_dev *dev_info = dev_get_drvdata(dev);
 	struct adt7316_chip_info *chip = iio_priv(dev_info);
 
 	return _adt7316_store_enabled(chip, 0);
 }
-EXPORT_SYMBOL(adt7316_disable);
 
-int adt7316_enable(struct device *dev)
+static int adt7316_enable(struct device *dev)
 {
 	struct iio_dev *dev_info = dev_get_drvdata(dev);
 	struct adt7316_chip_info *chip = iio_priv(dev_info);
 
 	return _adt7316_store_enabled(chip, 1);
 }
-EXPORT_SYMBOL(adt7316_enable);
+
+SIMPLE_DEV_PM_OPS(adt7316_pm_ops, adt7316_disable, adt7316_enable);
+EXPORT_SYMBOL_GPL(adt7316_pm_ops);
 #endif
 
 static const struct iio_info adt7316_info = {
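SIMPLE_DEV_PM_OPS() used above builds a dev_pm_ops that routes all of the
system-sleep transitions through the two callbacks; per linux/pm.h it is
roughly equivalent to (field list taken from SET_SYSTEM_SLEEP_PM_OPS, worth
re-checking there):

	const struct dev_pm_ops adt7316_pm_ops = {
		.suspend = adt7316_disable,
		.resume = adt7316_enable,
		.freeze = adt7316_disable,
		.thaw = adt7316_enable,
		.poweroff = adt7316_disable,
		.restore = adt7316_enable,
	};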
diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h
index d34bd679bb4e..4d3efff46ae7 100644
--- a/drivers/staging/iio/addac/adt7316.h
+++ b/drivers/staging/iio/addac/adt7316.h
@@ -10,6 +10,7 @@
 #define _ADT7316_H_
 
 #include <linux/types.h>
+#include <linux/pm.h>
 
 #define ADT7316_REG_MAX_ADDR	0x3F
 
@@ -23,9 +24,11 @@ struct adt7316_bus {
 	int (*multi_write) (void *client, u8 first_reg, u8 count, u8 *data);
 };
 
-#ifdef CONFIG_PM
-int adt7316_disable(struct device *dev);
-int adt7316_enable(struct device *dev);
+#ifdef CONFIG_PM_SLEEP
+extern const struct dev_pm_ops adt7316_pm_ops;
+#define ADT7316_PM_OPS (&adt7316_pm_ops)
+#else
+#define ADT7316_PM_OPS NULL
 #endif
 int adt7316_probe(struct device *dev, struct adt7316_bus *bus, const char *name);
 int adt7316_remove(struct device *dev);
diff --git a/drivers/staging/iio/buffer.h b/drivers/staging/iio/buffer.h
index 6fb6e64181a5..df2046dcb623 100644
--- a/drivers/staging/iio/buffer.h
+++ b/drivers/staging/iio/buffer.h
@@ -91,8 +91,6 @@ struct iio_buffer {
  **/
 void iio_buffer_init(struct iio_buffer *buffer);
 
-void iio_buffer_deinit(struct iio_buffer *buffer);
-
 /**
  * __iio_update_buffer() - update common elements of buffers
  * @buffer:		buffer that is the event source
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index b73007dcf4b3..e4a08dc9b6f5 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -167,7 +167,7 @@ static int ad7150_write_event_params(struct iio_dev *indio_dev, u64 event_code)
 	u16 value;
 	u8 sens, timeout;
 	struct ad7150_chip_info *chip = iio_priv(indio_dev);
-	int chan = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+	int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
 	int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
 			IIO_EV_DIR_RISING);
 
@@ -279,7 +279,7 @@ static int ad7150_read_event_value(struct iio_dev *indio_dev,
 			u64 event_code,
 			int *val)
 {
-	int chan = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+	int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
 	struct ad7150_chip_info *chip = iio_priv(indio_dev);
 	int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
 			IIO_EV_DIR_RISING);
@@ -309,7 +309,7 @@ static int ad7150_write_event_value(struct iio_dev *indio_dev,
 {
 	int ret;
 	struct ad7150_chip_info *chip = iio_priv(indio_dev);
-	int chan = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+	int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
 	int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
 			IIO_EV_DIR_RISING);
 
@@ -347,7 +347,7 @@ static ssize_t ad7150_show_timeout(struct device *dev,
 	u8 value;
 
 	/* use the event code for consistency reasons */
-	int chan = IIO_EVENT_CODE_EXTRACT_NUM(this_attr->address);
+	int chan = IIO_EVENT_CODE_EXTRACT_CHAN(this_attr->address);
 	int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(this_attr->address)
 			== IIO_EV_DIR_RISING);
 
@@ -373,7 +373,7 @@ static ssize_t ad7150_store_timeout(struct device *dev,
 	struct iio_dev *indio_dev = dev_get_drvdata(dev);
 	struct ad7150_chip_info *chip = iio_priv(indio_dev);
 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-	int chan = IIO_EVENT_CODE_EXTRACT_NUM(this_attr->address);
+	int chan = IIO_EVENT_CODE_EXTRACT_CHAN(this_attr->address);
 	int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(this_attr->address) ==
 			IIO_EV_DIR_RISING);
 	u8 data;
diff --git a/drivers/staging/iio/consumer.h b/drivers/staging/iio/consumer.h
new file mode 100644
index 000000000000..36a060cd3a21
--- /dev/null
+++ b/drivers/staging/iio/consumer.h
@@ -0,0 +1,96 @@
1/*
2 * Industrial I/O in kernel consumer interface
3 *
4 * Copyright (c) 2011 Jonathan Cameron
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 */
10#ifndef _IIO_INKERN_CONSUMER_H_
 11#define _IIO_INKERN_CONSUMER_H_
12#include "types.h"
13
14struct iio_dev;
15struct iio_chan_spec;
16
17/**
18 * struct iio_channel - everything needed for a consumer to use a channel
19 * @indio_dev: Device on which the channel exists.
20 * @channel: Full description of the channel.
21 */
22struct iio_channel {
23 struct iio_dev *indio_dev;
24 const struct iio_chan_spec *channel;
25};
26
27/**
 28 * iio_st_channel_get() - get description of all that is needed to access a channel.
29 * @name: Unique name of the device as provided in the iio_map
30 * with which the desired provider to consumer mapping
31 * was registered.
32 * @consumer_channel: Unique name to identify the channel on the consumer
 33 * side. This typically describes the channel's use within
34 * the consumer. E.g. 'battery_voltage'
35 */
36struct iio_channel *iio_st_channel_get(const char *name,
37 const char *consumer_channel);
38
39/**
40 * iio_st_channel_release() - release channels obtained via iio_st_channel_get
41 * @chan: The channel to be released.
42 */
43void iio_st_channel_release(struct iio_channel *chan);
44
45/**
46 * iio_st_channel_get_all() - get all channels associated with a client
47 * @name: name of consumer device.
48 *
49 * Returns an array of iio_channel structures terminated with one with
 50 * a null iio_dev pointer.
51 * This function is used by fairly generic consumers to get all the
52 * channels registered as having this consumer.
53 */
54struct iio_channel *iio_st_channel_get_all(const char *name);
55
56/**
 57 * iio_st_channel_release_all() - reverse iio_st_channel_get_all()
58 * @chan: Array of channels to be released.
59 */
60void iio_st_channel_release_all(struct iio_channel *chan);
61
62/**
63 * iio_st_read_channel_raw() - read from a given channel
 64 * @chan: The channel being queried.
65 * @val: Value read back.
66 *
67 * Note raw reads from iio channels are in adc counts and hence
68 * scale will need to be applied if standard units required.
69 */
70int iio_st_read_channel_raw(struct iio_channel *chan,
71 int *val);
72
73/**
74 * iio_st_get_channel_type() - get the type of a channel
75 * @channel: The channel being queried.
76 * @type: The type of the channel.
77 *
78 * returns the enum iio_chan_type of the channel
79 */
80int iio_st_get_channel_type(struct iio_channel *channel,
81 enum iio_chan_type *type);
82
83/**
84 * iio_st_read_channel_scale() - read the scale value for a channel
 85 * @chan: The channel being queried.
86 * @val: First part of value read back.
87 * @val2: Second part of value read back.
88 *
89 * Note returns a description of what is in val and val2, such
90 * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val
91 * + val2/1e6
92 */
93int iio_st_read_channel_scale(struct iio_channel *chan, int *val,
94 int *val2);
95
96#endif
diff --git a/drivers/staging/iio/dac/Kconfig b/drivers/staging/iio/dac/Kconfig
index 13e27979df24..a57803a5d1a7 100644
--- a/drivers/staging/iio/dac/Kconfig
+++ b/drivers/staging/iio/dac/Kconfig
@@ -4,11 +4,12 @@
4menu "Digital to analog converters" 4menu "Digital to analog converters"
5 5
6config AD5064 6config AD5064
7 tristate "Analog Devices AD5064/64-1/44/24 DAC driver" 7 tristate "Analog Devices AD5064/64-1/65/44/45/24/25, AD5628/48/66/68 DAC driver"
8 depends on SPI 8 depends on SPI
9 help 9 help
10 Say yes here to build support for Analog Devices AD5064, AD5064-1, 10 Say yes here to build support for Analog Devices AD5024, AD5025, AD5044,
11 AD5044, AD5024 Digital to Analog Converter. 11 AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5648, AD5666, AD5668 Digital
12 to Analog Converter.
12 13
13 To compile this driver as a module, choose M here: the 14 To compile this driver as a module, choose M here: the
14 module will be called ad5064. 15 module will be called ad5064.
diff --git a/drivers/staging/iio/dac/ad5064.c b/drivers/staging/iio/dac/ad5064.c
index 049a855039c2..06b162745a3e 100644
--- a/drivers/staging/iio/dac/ad5064.c
+++ b/drivers/staging/iio/dac/ad5064.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * AD5064, AD5064-1, AD5044, AD5024 Digital to analog converters driver 2 * AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5648,
3 * AD5666, AD5668 Digital to analog converters driver
3 * 4 *
4 * Copyright 2011 Analog Devices Inc. 5 * Copyright 2011 Analog Devices Inc.
5 * 6 *
@@ -19,7 +20,8 @@
19#include "../sysfs.h" 20#include "../sysfs.h"
20#include "dac.h" 21#include "dac.h"
21 22
22#define AD5064_DAC_CHANNELS 4 23#define AD5064_MAX_DAC_CHANNELS 8
24#define AD5064_MAX_VREFS 4
23 25
24#define AD5064_ADDR(x) ((x) << 20) 26#define AD5064_ADDR(x) ((x) << 20)
25#define AD5064_CMD(x) ((x) << 24) 27#define AD5064_CMD(x) ((x) << 24)
@@ -35,7 +37,10 @@
35#define AD5064_CMD_CLEAR 0x5 37#define AD5064_CMD_CLEAR 0x5
36#define AD5064_CMD_LDAC_MASK 0x6 38#define AD5064_CMD_LDAC_MASK 0x6
37#define AD5064_CMD_RESET 0x7 39#define AD5064_CMD_RESET 0x7
38#define AD5064_CMD_DAISY_CHAIN_ENABLE 0x8 40#define AD5064_CMD_CONFIG 0x8
41
42#define AD5064_CONFIG_DAISY_CHAIN_ENABLE BIT(1)
43#define AD5064_CONFIG_INT_VREF_ENABLE BIT(0)
39 44
40#define AD5064_LDAC_PWRDN_NONE 0x0 45#define AD5064_LDAC_PWRDN_NONE 0x0
41#define AD5064_LDAC_PWRDN_1K 0x1 46#define AD5064_LDAC_PWRDN_1K 0x1
@@ -45,12 +50,17 @@
45/** 50/**
46 * struct ad5064_chip_info - chip specific information 51 * struct ad5064_chip_info - chip specific information
47 * @shared_vref: whether the vref supply is shared between channels 52 * @shared_vref: whether the vref supply is shared between channels
53 * @internal_vref: internal reference voltage. 0 if the chip has no internal
54 * vref.
48 * @channel: channel specification 55 * @channel: channel specification
49*/ 56 * @num_channels: number of channels
57 */
50 58
51struct ad5064_chip_info { 59struct ad5064_chip_info {
52 bool shared_vref; 60 bool shared_vref;
53 struct iio_chan_spec channel[AD5064_DAC_CHANNELS]; 61 unsigned long internal_vref;
62 const struct iio_chan_spec *channels;
63 unsigned int num_channels;
54}; 64};
55 65
56/** 66/**
@@ -61,16 +71,19 @@ struct ad5064_chip_info {
61 * @pwr_down: whether channel is powered down 71 * @pwr_down: whether channel is powered down
62 * @pwr_down_mode: channel's current power down mode 72 * @pwr_down_mode: channel's current power down mode
63 * @dac_cache: current DAC raw value (chip does not support readback) 73 * @dac_cache: current DAC raw value (chip does not support readback)
74 * @use_internal_vref: set to true if the internal reference voltage should be
75 * used.
64 * @data: spi transfer buffers 76 * @data: spi transfer buffers
65 */ 77 */
66 78
67struct ad5064_state { 79struct ad5064_state {
68 struct spi_device *spi; 80 struct spi_device *spi;
69 const struct ad5064_chip_info *chip_info; 81 const struct ad5064_chip_info *chip_info;
70 struct regulator_bulk_data vref_reg[AD5064_DAC_CHANNELS]; 82 struct regulator_bulk_data vref_reg[AD5064_MAX_VREFS];
71 bool pwr_down[AD5064_DAC_CHANNELS]; 83 bool pwr_down[AD5064_MAX_DAC_CHANNELS];
72 u8 pwr_down_mode[AD5064_DAC_CHANNELS]; 84 u8 pwr_down_mode[AD5064_MAX_DAC_CHANNELS];
73 unsigned int dac_cache[AD5064_DAC_CHANNELS]; 85 unsigned int dac_cache[AD5064_MAX_DAC_CHANNELS];
86 bool use_internal_vref;
74 87
75 /* 88 /*
76 * DMA (thus cache coherency maintenance) requires the 89 * DMA (thus cache coherency maintenance) requires the
@@ -81,50 +94,20 @@ struct ad5064_state {
81 94
82enum ad5064_type { 95enum ad5064_type {
83 ID_AD5024, 96 ID_AD5024,
97 ID_AD5025,
84 ID_AD5044, 98 ID_AD5044,
99 ID_AD5045,
85 ID_AD5064, 100 ID_AD5064,
86 ID_AD5064_1, 101 ID_AD5064_1,
87}; 102 ID_AD5065,
88 103 ID_AD5628_1,
89#define AD5064_CHANNEL(chan, bits) { \ 104 ID_AD5628_2,
90 .type = IIO_VOLTAGE, \ 105 ID_AD5648_1,
91 .indexed = 1, \ 106 ID_AD5648_2,
92 .output = 1, \ 107 ID_AD5666_1,
93 .channel = (chan), \ 108 ID_AD5666_2,
94 .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \ 109 ID_AD5668_1,
95 .address = AD5064_ADDR_DAC(chan), \ 110 ID_AD5668_2,
96 .scan_type = IIO_ST('u', (bits), 16, 20 - (bits)) \
97}
98
99static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
100 [ID_AD5024] = {
101 .shared_vref = false,
102 .channel[0] = AD5064_CHANNEL(0, 12),
103 .channel[1] = AD5064_CHANNEL(1, 12),
104 .channel[2] = AD5064_CHANNEL(2, 12),
105 .channel[3] = AD5064_CHANNEL(3, 12),
106 },
107 [ID_AD5044] = {
108 .shared_vref = false,
109 .channel[0] = AD5064_CHANNEL(0, 14),
110 .channel[1] = AD5064_CHANNEL(1, 14),
111 .channel[2] = AD5064_CHANNEL(2, 14),
112 .channel[3] = AD5064_CHANNEL(3, 14),
113 },
114 [ID_AD5064] = {
115 .shared_vref = false,
116 .channel[0] = AD5064_CHANNEL(0, 16),
117 .channel[1] = AD5064_CHANNEL(1, 16),
118 .channel[2] = AD5064_CHANNEL(2, 16),
119 .channel[3] = AD5064_CHANNEL(3, 16),
120 },
121 [ID_AD5064_1] = {
122 .shared_vref = true,
123 .channel[0] = AD5064_CHANNEL(0, 16),
124 .channel[1] = AD5064_CHANNEL(1, 16),
125 .channel[2] = AD5064_CHANNEL(2, 16),
126 .channel[3] = AD5064_CHANNEL(3, 16),
127 },
128}; 111};
129 112
130static int ad5064_spi_write(struct ad5064_state *st, unsigned int cmd, 113static int ad5064_spi_write(struct ad5064_state *st, unsigned int cmd,
@@ -160,22 +143,25 @@ static const char ad5064_powerdown_modes[][15] = {
160 [AD5064_LDAC_PWRDN_3STATE] = "three_state", 143 [AD5064_LDAC_PWRDN_3STATE] = "three_state",
161}; 144};
162 145
163static ssize_t ad5064_read_powerdown_mode(struct device *dev, 146static ssize_t ad5064_read_powerdown_mode_available(struct iio_dev *indio_dev,
164 struct device_attribute *attr, char *buf) 147 const struct iio_chan_spec *chan, char *buf)
148{
149 return sprintf(buf, "%s %s %s\n", ad5064_powerdown_modes[1],
150 ad5064_powerdown_modes[2], ad5064_powerdown_modes[3]);
151}
152
153static ssize_t ad5064_read_powerdown_mode(struct iio_dev *indio_dev,
154 const struct iio_chan_spec *chan, char *buf)
165{ 155{
166 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
167 struct iio_dev *indio_dev = dev_get_drvdata(dev);
168 struct ad5064_state *st = iio_priv(indio_dev); 156 struct ad5064_state *st = iio_priv(indio_dev);
169 157
170 return sprintf(buf, "%s\n", 158 return sprintf(buf, "%s\n",
171 ad5064_powerdown_modes[st->pwr_down_mode[this_attr->address]]); 159 ad5064_powerdown_modes[st->pwr_down_mode[chan->channel]]);
172} 160}
173 161
174static ssize_t ad5064_write_powerdown_mode(struct device *dev, 162static ssize_t ad5064_write_powerdown_mode(struct iio_dev *indio_dev,
175 struct device_attribute *attr, const char *buf, size_t len) 163 const struct iio_chan_spec *chan, const char *buf, size_t len)
176{ 164{
177 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
178 struct iio_dev *indio_dev = dev_get_drvdata(dev);
179 struct ad5064_state *st = iio_priv(indio_dev); 165 struct ad5064_state *st = iio_priv(indio_dev);
180 unsigned int mode, i; 166 unsigned int mode, i;
181 int ret; 167 int ret;
@@ -192,31 +178,26 @@ static ssize_t ad5064_write_powerdown_mode(struct device *dev,
192 return -EINVAL; 178 return -EINVAL;
193 179
194 mutex_lock(&indio_dev->mlock); 180 mutex_lock(&indio_dev->mlock);
195 st->pwr_down_mode[this_attr->address] = mode; 181 st->pwr_down_mode[chan->channel] = mode;
196 182
197 ret = ad5064_sync_powerdown_mode(st, this_attr->address); 183 ret = ad5064_sync_powerdown_mode(st, chan->channel);
198 mutex_unlock(&indio_dev->mlock); 184 mutex_unlock(&indio_dev->mlock);
199 185
200 return ret ? ret : len; 186 return ret ? ret : len;
201} 187}
202 188
203static ssize_t ad5064_read_dac_powerdown(struct device *dev, 189static ssize_t ad5064_read_dac_powerdown(struct iio_dev *indio_dev,
204 struct device_attribute *attr, 190 const struct iio_chan_spec *chan, char *buf)
205 char *buf)
206{ 191{
207 struct iio_dev *indio_dev = dev_get_drvdata(dev);
208 struct ad5064_state *st = iio_priv(indio_dev); 192 struct ad5064_state *st = iio_priv(indio_dev);
209 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
210 193
211 return sprintf(buf, "%d\n", st->pwr_down[this_attr->address]); 194 return sprintf(buf, "%d\n", st->pwr_down[chan->channel]);
212} 195}
213 196
214static ssize_t ad5064_write_dac_powerdown(struct device *dev, 197static ssize_t ad5064_write_dac_powerdown(struct iio_dev *indio_dev,
215 struct device_attribute *attr, const char *buf, size_t len) 198 const struct iio_chan_spec *chan, const char *buf, size_t len)
216{ 199{
217 struct iio_dev *indio_dev = dev_get_drvdata(dev);
218 struct ad5064_state *st = iio_priv(indio_dev); 200 struct ad5064_state *st = iio_priv(indio_dev);
219 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
220 bool pwr_down; 201 bool pwr_down;
221 int ret; 202 int ret;
222 203
@@ -225,53 +206,24 @@ static ssize_t ad5064_write_dac_powerdown(struct device *dev,
225 return ret; 206 return ret;
226 207
227 mutex_lock(&indio_dev->mlock); 208 mutex_lock(&indio_dev->mlock);
228 st->pwr_down[this_attr->address] = pwr_down; 209 st->pwr_down[chan->channel] = pwr_down;
229 210
230 ret = ad5064_sync_powerdown_mode(st, this_attr->address); 211 ret = ad5064_sync_powerdown_mode(st, chan->channel);
231 mutex_unlock(&indio_dev->mlock); 212 mutex_unlock(&indio_dev->mlock);
232 return ret ? ret : len; 213 return ret ? ret : len;
233} 214}
234 215
235static IIO_CONST_ATTR(out_voltage_powerdown_mode_available, 216static int ad5064_get_vref(struct ad5064_state *st,
236 "1kohm_to_gnd 100kohm_to_gnd three_state"); 217 struct iio_chan_spec const *chan)
237 218{
238#define IIO_DEV_ATTR_DAC_POWERDOWN_MODE(_chan) \ 219 unsigned int i;
239 IIO_DEVICE_ATTR(out_voltage##_chan##_powerdown_mode, \
240 S_IRUGO | S_IWUSR, \
241 ad5064_read_powerdown_mode, \
242 ad5064_write_powerdown_mode, _chan);
243
244#define IIO_DEV_ATTR_DAC_POWERDOWN(_chan) \
245 IIO_DEVICE_ATTR(out_voltage##_chan##_powerdown, \
246 S_IRUGO | S_IWUSR, \
247 ad5064_read_dac_powerdown, \
248 ad5064_write_dac_powerdown, _chan)
249
250static IIO_DEV_ATTR_DAC_POWERDOWN(0);
251static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(0);
252static IIO_DEV_ATTR_DAC_POWERDOWN(1);
253static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(1);
254static IIO_DEV_ATTR_DAC_POWERDOWN(2);
255static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(2);
256static IIO_DEV_ATTR_DAC_POWERDOWN(3);
257static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(3);
258
259static struct attribute *ad5064_attributes[] = {
260 &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
261 &iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
262 &iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
263 &iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
264 &iio_dev_attr_out_voltage0_powerdown_mode.dev_attr.attr,
265 &iio_dev_attr_out_voltage1_powerdown_mode.dev_attr.attr,
266 &iio_dev_attr_out_voltage2_powerdown_mode.dev_attr.attr,
267 &iio_dev_attr_out_voltage3_powerdown_mode.dev_attr.attr,
268 &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
269 NULL,
270};
271 220
272static const struct attribute_group ad5064_attribute_group = { 221 if (st->use_internal_vref)
273 .attrs = ad5064_attributes, 222 return st->chip_info->internal_vref;
274}; 223
224 i = st->chip_info->shared_vref ? 0 : chan->channel;
225 return regulator_get_voltage(st->vref_reg[i].consumer);
226}
275 227
276static int ad5064_read_raw(struct iio_dev *indio_dev, 228static int ad5064_read_raw(struct iio_dev *indio_dev,
277 struct iio_chan_spec const *chan, 229 struct iio_chan_spec const *chan,
@@ -280,7 +232,6 @@ static int ad5064_read_raw(struct iio_dev *indio_dev,
280 long m) 232 long m)
281{ 233{
282 struct ad5064_state *st = iio_priv(indio_dev); 234 struct ad5064_state *st = iio_priv(indio_dev);
283 unsigned int vref;
284 int scale_uv; 235 int scale_uv;
285 236
286 switch (m) { 237 switch (m) {
@@ -288,8 +239,7 @@ static int ad5064_read_raw(struct iio_dev *indio_dev,
288 *val = st->dac_cache[chan->channel]; 239 *val = st->dac_cache[chan->channel];
289 return IIO_VAL_INT; 240 return IIO_VAL_INT;
290 case IIO_CHAN_INFO_SCALE: 241 case IIO_CHAN_INFO_SCALE:
291 vref = st->chip_info->shared_vref ? 0 : chan->channel; 242 scale_uv = ad5064_get_vref(st, chan);
292 scale_uv = regulator_get_voltage(st->vref_reg[vref].consumer);
293 if (scale_uv < 0) 243 if (scale_uv < 0)
294 return scale_uv; 244 return scale_uv;
295 245
@@ -331,13 +281,144 @@ static int ad5064_write_raw(struct iio_dev *indio_dev,
331static const struct iio_info ad5064_info = { 281static const struct iio_info ad5064_info = {
332 .read_raw = ad5064_read_raw, 282 .read_raw = ad5064_read_raw,
333 .write_raw = ad5064_write_raw, 283 .write_raw = ad5064_write_raw,
334 .attrs = &ad5064_attribute_group,
335 .driver_module = THIS_MODULE, 284 .driver_module = THIS_MODULE,
336}; 285};
337 286
287static struct iio_chan_spec_ext_info ad5064_ext_info[] = {
288 {
289 .name = "powerdown",
290 .read = ad5064_read_dac_powerdown,
291 .write = ad5064_write_dac_powerdown,
292 },
293 {
294 .name = "powerdown_mode",
295 .read = ad5064_read_powerdown_mode,
296 .write = ad5064_write_powerdown_mode,
297 },
298 {
299 .name = "powerdown_mode_available",
300 .shared = true,
301 .read = ad5064_read_powerdown_mode_available,
302 },
303 { },
304};
305
306#define AD5064_CHANNEL(chan, bits) { \
307 .type = IIO_VOLTAGE, \
308 .indexed = 1, \
309 .output = 1, \
310 .channel = (chan), \
311 .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
312 .address = AD5064_ADDR_DAC(chan), \
313 .scan_type = IIO_ST('u', (bits), 16, 20 - (bits)), \
314 .ext_info = ad5064_ext_info, \
315}
316
317#define DECLARE_AD5064_CHANNELS(name, bits) \
318const struct iio_chan_spec name[] = { \
319 AD5064_CHANNEL(0, bits), \
320 AD5064_CHANNEL(1, bits), \
321 AD5064_CHANNEL(2, bits), \
322 AD5064_CHANNEL(3, bits), \
323 AD5064_CHANNEL(4, bits), \
324 AD5064_CHANNEL(5, bits), \
325 AD5064_CHANNEL(6, bits), \
326 AD5064_CHANNEL(7, bits), \
327}
328
329static DECLARE_AD5064_CHANNELS(ad5024_channels, 12);
330static DECLARE_AD5064_CHANNELS(ad5044_channels, 14);
331static DECLARE_AD5064_CHANNELS(ad5064_channels, 16);
332
333static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
334 [ID_AD5024] = {
335 .shared_vref = false,
336 .channels = ad5024_channels,
337 .num_channels = 4,
338 },
339 [ID_AD5025] = {
340 .shared_vref = false,
341 .channels = ad5024_channels,
342 .num_channels = 2,
343 },
344 [ID_AD5044] = {
345 .shared_vref = false,
346 .channels = ad5044_channels,
347 .num_channels = 4,
348 },
349 [ID_AD5045] = {
350 .shared_vref = false,
351 .channels = ad5044_channels,
352 .num_channels = 2,
353 },
354 [ID_AD5064] = {
355 .shared_vref = false,
356 .channels = ad5064_channels,
357 .num_channels = 4,
358 },
359 [ID_AD5064_1] = {
360 .shared_vref = true,
361 .channels = ad5064_channels,
362 .num_channels = 4,
363 },
364 [ID_AD5065] = {
365 .shared_vref = false,
366 .channels = ad5064_channels,
367 .num_channels = 2,
368 },
369 [ID_AD5628_1] = {
370 .shared_vref = true,
371 .internal_vref = 2500000,
372 .channels = ad5024_channels,
373 .num_channels = 8,
374 },
375 [ID_AD5628_2] = {
376 .shared_vref = true,
377 .internal_vref = 5000000,
378 .channels = ad5024_channels,
379 .num_channels = 8,
380 },
381 [ID_AD5648_1] = {
382 .shared_vref = true,
383 .internal_vref = 2500000,
384 .channels = ad5044_channels,
385 .num_channels = 8,
386 },
387 [ID_AD5648_2] = {
388 .shared_vref = true,
389 .internal_vref = 5000000,
390 .channels = ad5044_channels,
391 .num_channels = 8,
392 },
393 [ID_AD5666_1] = {
394 .shared_vref = true,
395 .internal_vref = 2500000,
396 .channels = ad5064_channels,
397 .num_channels = 4,
398 },
399 [ID_AD5666_2] = {
400 .shared_vref = true,
401 .internal_vref = 5000000,
402 .channels = ad5064_channels,
403 .num_channels = 4,
404 },
405 [ID_AD5668_1] = {
406 .shared_vref = true,
407 .internal_vref = 2500000,
408 .channels = ad5064_channels,
409 .num_channels = 8,
410 },
411 [ID_AD5668_2] = {
412 .shared_vref = true,
413 .internal_vref = 5000000,
414 .channels = ad5064_channels,
415 .num_channels = 8,
416 },
417};
418
338static inline unsigned int ad5064_num_vref(struct ad5064_state *st) 419static inline unsigned int ad5064_num_vref(struct ad5064_state *st)
339{ 420{
340 return st->chip_info->shared_vref ? 1 : AD5064_DAC_CHANNELS; 421 return st->chip_info->shared_vref ? 1 : st->chip_info->num_channels;
341} 422}
342 423
343static const char * const ad5064_vref_names[] = { 424static const char * const ad5064_vref_names[] = {
@@ -376,14 +457,24 @@ static int __devinit ad5064_probe(struct spi_device *spi)
376 457
377 ret = regulator_bulk_get(&st->spi->dev, ad5064_num_vref(st), 458 ret = regulator_bulk_get(&st->spi->dev, ad5064_num_vref(st),
378 st->vref_reg); 459 st->vref_reg);
379 if (ret) 460 if (ret) {
380 goto error_free; 461 if (!st->chip_info->internal_vref)
381 462 goto error_free;
382 ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg); 463 st->use_internal_vref = true;
383 if (ret) 464 ret = ad5064_spi_write(st, AD5064_CMD_CONFIG, 0,
384 goto error_free_reg; 465 AD5064_CONFIG_INT_VREF_ENABLE, 0);
466 if (ret) {
467 dev_err(&spi->dev, "Failed to enable internal vref: %d\n",
468 ret);
469 goto error_free;
470 }
471 } else {
472 ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg);
473 if (ret)
474 goto error_free_reg;
475 }
385 476
386 for (i = 0; i < AD5064_DAC_CHANNELS; ++i) { 477 for (i = 0; i < st->chip_info->num_channels; ++i) {
387 st->pwr_down_mode[i] = AD5064_LDAC_PWRDN_1K; 478 st->pwr_down_mode[i] = AD5064_LDAC_PWRDN_1K;
388 st->dac_cache[i] = 0x8000; 479 st->dac_cache[i] = 0x8000;
389 } 480 }
@@ -392,8 +483,8 @@ static int __devinit ad5064_probe(struct spi_device *spi)
392 indio_dev->name = spi_get_device_id(spi)->name; 483 indio_dev->name = spi_get_device_id(spi)->name;
393 indio_dev->info = &ad5064_info; 484 indio_dev->info = &ad5064_info;
394 indio_dev->modes = INDIO_DIRECT_MODE; 485 indio_dev->modes = INDIO_DIRECT_MODE;
395 indio_dev->channels = st->chip_info->channel; 486 indio_dev->channels = st->chip_info->channels;
396 indio_dev->num_channels = AD5064_DAC_CHANNELS; 487 indio_dev->num_channels = st->chip_info->num_channels;
397 488
398 ret = iio_device_register(indio_dev); 489 ret = iio_device_register(indio_dev);
399 if (ret) 490 if (ret)
@@ -402,9 +493,11 @@ static int __devinit ad5064_probe(struct spi_device *spi)
402 return 0; 493 return 0;
403 494
404error_disable_reg: 495error_disable_reg:
405 regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg); 496 if (!st->use_internal_vref)
497 regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
406error_free_reg: 498error_free_reg:
407 regulator_bulk_free(ad5064_num_vref(st), st->vref_reg); 499 if (!st->use_internal_vref)
500 regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
408error_free: 501error_free:
409 iio_free_device(indio_dev); 502 iio_free_device(indio_dev);
410 503
@@ -419,8 +512,10 @@ static int __devexit ad5064_remove(struct spi_device *spi)
419 512
420 iio_device_unregister(indio_dev); 513 iio_device_unregister(indio_dev);
421 514
422 regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg); 515 if (!st->use_internal_vref) {
423 regulator_bulk_free(ad5064_num_vref(st), st->vref_reg); 516 regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
517 regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
518 }
424 519
425 iio_free_device(indio_dev); 520 iio_free_device(indio_dev);
426 521
@@ -429,9 +524,21 @@ static int __devexit ad5064_remove(struct spi_device *spi)
429 524
430static const struct spi_device_id ad5064_id[] = { 525static const struct spi_device_id ad5064_id[] = {
431 {"ad5024", ID_AD5024}, 526 {"ad5024", ID_AD5024},
527 {"ad5025", ID_AD5025},
432 {"ad5044", ID_AD5044}, 528 {"ad5044", ID_AD5044},
529 {"ad5045", ID_AD5045},
433 {"ad5064", ID_AD5064}, 530 {"ad5064", ID_AD5064},
434 {"ad5064-1", ID_AD5064_1}, 531 {"ad5064-1", ID_AD5064_1},
532 {"ad5065", ID_AD5065},
533 {"ad5628-1", ID_AD5628_1},
534 {"ad5628-2", ID_AD5628_2},
535 {"ad5648-1", ID_AD5648_1},
536 {"ad5648-2", ID_AD5648_2},
537 {"ad5666-1", ID_AD5666_1},
538 {"ad5666-2", ID_AD5666_2},
539 {"ad5668-1", ID_AD5668_1},
540 {"ad5668-2", ID_AD5668_2},
541 {"ad5668-3", ID_AD5668_2}, /* similar enough to ad5668-2 */
435 {} 542 {}
436}; 543};
437MODULE_DEVICE_TABLE(spi, ad5064_id); 544MODULE_DEVICE_TABLE(spi, ad5064_id);
@@ -448,5 +555,5 @@ static struct spi_driver ad5064_driver = {
448module_spi_driver(ad5064_driver); 555module_spi_driver(ad5064_driver);
449 556
450MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); 557MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
451MODULE_DESCRIPTION("Analog Devices AD5064/64-1/44/24 DAC"); 558MODULE_DESCRIPTION("Analog Devices AD5024/25/44/45/64/64-1/65, AD5628/48/66/68 DAC");
452MODULE_LICENSE("GPL v2"); 559MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/ad5360.c b/drivers/staging/iio/dac/ad5360.c
index 710b256affcc..cec3693b50a3 100644
--- a/drivers/staging/iio/dac/ad5360.c
+++ b/drivers/staging/iio/dac/ad5360.c
@@ -439,8 +439,8 @@ static int __devinit ad5360_alloc_channels(struct iio_dev *indio_dev)
439 struct iio_chan_spec *channels; 439 struct iio_chan_spec *channels;
440 unsigned int i; 440 unsigned int i;
441 441
442 channels = kcalloc(sizeof(struct iio_chan_spec), 442 channels = kcalloc(st->chip_info->num_channels,
443 st->chip_info->num_channels, GFP_KERNEL); 443 sizeof(struct iio_chan_spec), GFP_KERNEL);
444 444
445 if (!channels) 445 if (!channels)
446 return -ENOMEM; 446 return -ENOMEM;
diff --git a/drivers/staging/iio/dac/ad5380.c b/drivers/staging/iio/dac/ad5380.c
index eff97ae05c4b..4c50716fa801 100644
--- a/drivers/staging/iio/dac/ad5380.c
+++ b/drivers/staging/iio/dac/ad5380.c
@@ -363,8 +363,8 @@ static int __devinit ad5380_alloc_channels(struct iio_dev *indio_dev)
363 struct iio_chan_spec *channels; 363 struct iio_chan_spec *channels;
364 unsigned int i; 364 unsigned int i;
365 365
366 channels = kcalloc(sizeof(struct iio_chan_spec), 366 channels = kcalloc(st->chip_info->num_channels,
367 st->chip_info->num_channels, GFP_KERNEL); 367 sizeof(struct iio_chan_spec), GFP_KERNEL);
368 368
369 if (!channels) 369 if (!channels)
370 return -ENOMEM; 370 return -ENOMEM;
diff --git a/drivers/staging/iio/dac/ad5421.c b/drivers/staging/iio/dac/ad5421.c
index 71ee86824763..0b040b204697 100644
--- a/drivers/staging/iio/dac/ad5421.c
+++ b/drivers/staging/iio/dac/ad5421.c
@@ -536,18 +536,7 @@ static struct spi_driver ad5421_driver = {
536 .probe = ad5421_probe, 536 .probe = ad5421_probe,
537 .remove = __devexit_p(ad5421_remove), 537 .remove = __devexit_p(ad5421_remove),
538}; 538};
539 539module_spi_driver(ad5421_driver);
540static __init int ad5421_init(void)
541{
542 return spi_register_driver(&ad5421_driver);
543}
544module_init(ad5421_init);
545
546static __exit void ad5421_exit(void)
547{
548 spi_unregister_driver(&ad5421_driver);
549}
550module_exit(ad5421_exit);
551 540
552MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); 541MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
553MODULE_DESCRIPTION("Analog Devices AD5421 DAC"); 542MODULE_DESCRIPTION("Analog Devices AD5421 DAC");
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
index 693e7482524c..633ffbb21814 100644
--- a/drivers/staging/iio/dac/ad5446.c
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -149,30 +149,8 @@ static struct attribute *ad5446_attributes[] = {
149 NULL, 149 NULL,
150}; 150};
151 151
152static umode_t ad5446_attr_is_visible(struct kobject *kobj,
153 struct attribute *attr, int n)
154{
155 struct device *dev = container_of(kobj, struct device, kobj);
156 struct iio_dev *indio_dev = dev_get_drvdata(dev);
157 struct ad5446_state *st = iio_priv(indio_dev);
158
159 umode_t mode = attr->mode;
160
161 if (!st->chip_info->store_pwr_down &&
162 (attr == &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr ||
163 attr == &iio_dev_attr_out_voltage_powerdown_mode.
164 dev_attr.attr ||
165 attr ==
166 &iio_const_attr_out_voltage_powerdown_mode_available.
167 dev_attr.attr))
168 mode = 0;
169
170 return mode;
171}
172
173static const struct attribute_group ad5446_attribute_group = { 152static const struct attribute_group ad5446_attribute_group = {
174 .attrs = ad5446_attributes, 153 .attrs = ad5446_attributes,
175 .is_visible = ad5446_attr_is_visible,
176}; 154};
177 155
178#define AD5446_CHANNEL(bits, storage, shift) { \ 156#define AD5446_CHANNEL(bits, storage, shift) { \
@@ -321,6 +299,12 @@ static const struct iio_info ad5446_info = {
321 .driver_module = THIS_MODULE, 299 .driver_module = THIS_MODULE,
322}; 300};
323 301
302static const struct iio_info ad5446_info_no_pwr_down = {
303 .read_raw = ad5446_read_raw,
304 .write_raw = ad5446_write_raw,
305 .driver_module = THIS_MODULE,
306};
307
324static int __devinit ad5446_probe(struct spi_device *spi) 308static int __devinit ad5446_probe(struct spi_device *spi)
325{ 309{
326 struct ad5446_state *st; 310 struct ad5446_state *st;
@@ -350,10 +334,13 @@ static int __devinit ad5446_probe(struct spi_device *spi)
350 st->reg = reg; 334 st->reg = reg;
351 st->spi = spi; 335 st->spi = spi;
352 336
353 /* Estabilish that the iio_dev is a child of the spi device */ 337 /* Establish that the iio_dev is a child of the spi device */
354 indio_dev->dev.parent = &spi->dev; 338 indio_dev->dev.parent = &spi->dev;
355 indio_dev->name = spi_get_device_id(spi)->name; 339 indio_dev->name = spi_get_device_id(spi)->name;
356 indio_dev->info = &ad5446_info; 340 if (st->chip_info->store_pwr_down)
341 indio_dev->info = &ad5446_info;
342 else
343 indio_dev->info = &ad5446_info_no_pwr_down;
357 indio_dev->modes = INDIO_DIRECT_MODE; 344 indio_dev->modes = INDIO_DIRECT_MODE;
358 indio_dev->channels = &st->chip_info->channel; 345 indio_dev->channels = &st->chip_info->channel;
359 indio_dev->num_channels = 1; 346 indio_dev->num_channels = 1;
diff --git a/drivers/staging/iio/dac/ad5764.c b/drivers/staging/iio/dac/ad5764.c
index ff91480ae65c..f73a73079490 100644
--- a/drivers/staging/iio/dac/ad5764.c
+++ b/drivers/staging/iio/dac/ad5764.c
@@ -375,18 +375,7 @@ static struct spi_driver ad5764_driver = {
375 .remove = __devexit_p(ad5764_remove), 375 .remove = __devexit_p(ad5764_remove),
376 .id_table = ad5764_ids, 376 .id_table = ad5764_ids,
377}; 377};
378 378module_spi_driver(ad5764_driver);
379static int __init ad5764_spi_init(void)
380{
381 return spi_register_driver(&ad5764_driver);
382}
383module_init(ad5764_spi_init);
384
385static void __exit ad5764_spi_exit(void)
386{
387 spi_unregister_driver(&ad5764_driver);
388}
389module_exit(ad5764_spi_exit);
390 379
391MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); 380MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
392MODULE_DESCRIPTION("Analog Devices AD5744/AD5744R/AD5764/AD5764R DAC"); 381MODULE_DESCRIPTION("Analog Devices AD5744/AD5744R/AD5764/AD5764R DAC");
diff --git a/drivers/staging/iio/dac/max517.c b/drivers/staging/iio/dac/max517.c
index a4df6d7443c1..41483c72cec1 100644
--- a/drivers/staging/iio/dac/max517.c
+++ b/drivers/staging/iio/dac/max517.c
@@ -179,20 +179,27 @@ static struct attribute_group max518_attribute_group = {
179 .attrs = max518_attributes, 179 .attrs = max518_attributes,
180}; 180};
181 181
182static int max517_suspend(struct i2c_client *client, pm_message_t mesg) 182#ifdef CONFIG_PM_SLEEP
183static int max517_suspend(struct device *dev)
183{ 184{
184 u8 outbuf = COMMAND_PD; 185 u8 outbuf = COMMAND_PD;
185 186
186 return i2c_master_send(client, &outbuf, 1); 187 return i2c_master_send(to_i2c_client(dev), &outbuf, 1);
187} 188}
188 189
189static int max517_resume(struct i2c_client *client) 190static int max517_resume(struct device *dev)
190{ 191{
191 u8 outbuf = 0; 192 u8 outbuf = 0;
192 193
193 return i2c_master_send(client, &outbuf, 1); 194 return i2c_master_send(to_i2c_client(dev), &outbuf, 1);
194} 195}
195 196
197static SIMPLE_DEV_PM_OPS(max517_pm_ops, max517_suspend, max517_resume);
198#define MAX517_PM_OPS (&max517_pm_ops)
199#else
200#define MAX517_PM_OPS NULL
201#endif
202
196static const struct iio_info max517_info = { 203static const struct iio_info max517_info = {
197 .attrs = &max517_attribute_group, 204 .attrs = &max517_attribute_group,
198 .driver_module = THIS_MODULE, 205 .driver_module = THIS_MODULE,
@@ -273,11 +280,10 @@ MODULE_DEVICE_TABLE(i2c, max517_id);
273static struct i2c_driver max517_driver = { 280static struct i2c_driver max517_driver = {
274 .driver = { 281 .driver = {
275 .name = MAX517_DRV_NAME, 282 .name = MAX517_DRV_NAME,
283 .pm = MAX517_PM_OPS,
276 }, 284 },
277 .probe = max517_probe, 285 .probe = max517_probe,
278 .remove = max517_remove, 286 .remove = max517_remove,
279 .suspend = max517_suspend,
280 .resume = max517_resume,
281 .id_table = max517_id, 287 .id_table = max517_id,
282}; 288};
283module_i2c_driver(max517_driver); 289module_i2c_driver(max517_driver);
diff --git a/drivers/staging/iio/dds/ad9834.c b/drivers/staging/iio/dds/ad9834.c
index 5e67104fea18..38a2de08626f 100644
--- a/drivers/staging/iio/dds/ad9834.c
+++ b/drivers/staging/iio/dds/ad9834.c
@@ -281,29 +281,27 @@ static struct attribute *ad9834_attributes[] = {
281 NULL, 281 NULL,
282}; 282};
283 283
284static umode_t ad9834_attr_is_visible(struct kobject *kobj, 284static struct attribute *ad9833_attributes[] = {
285 struct attribute *attr, int n) 285 &iio_dev_attr_dds0_freq0.dev_attr.attr,
286{ 286 &iio_dev_attr_dds0_freq1.dev_attr.attr,
287 struct device *dev = container_of(kobj, struct device, kobj); 287 &iio_const_attr_dds0_freq_scale.dev_attr.attr,
288 struct iio_dev *indio_dev = dev_get_drvdata(dev); 288 &iio_dev_attr_dds0_phase0.dev_attr.attr,
289 struct ad9834_state *st = iio_priv(indio_dev); 289 &iio_dev_attr_dds0_phase1.dev_attr.attr,
290 290 &iio_const_attr_dds0_phase_scale.dev_attr.attr,
291 umode_t mode = attr->mode; 291 &iio_dev_attr_dds0_freqsymbol.dev_attr.attr,
292 292 &iio_dev_attr_dds0_phasesymbol.dev_attr.attr,
293 if (((st->devid == ID_AD9833) || (st->devid == ID_AD9837)) && 293 &iio_dev_attr_dds0_out_enable.dev_attr.attr,
294 ((attr == &iio_dev_attr_dds0_out1_enable.dev_attr.attr) || 294 &iio_dev_attr_dds0_out0_wavetype.dev_attr.attr,
295 (attr == &iio_dev_attr_dds0_out1_wavetype.dev_attr.attr) || 295 &iio_dev_attr_dds0_out0_wavetype_available.dev_attr.attr,
296 (attr == 296 NULL,
297 &iio_dev_attr_dds0_out1_wavetype_available.dev_attr.attr) || 297};
298 (attr == &iio_dev_attr_dds0_pincontrol_en.dev_attr.attr)))
299 mode = 0;
300
301 return mode;
302}
303 298
304static const struct attribute_group ad9834_attribute_group = { 299static const struct attribute_group ad9834_attribute_group = {
305 .attrs = ad9834_attributes, 300 .attrs = ad9834_attributes,
306 .is_visible = ad9834_attr_is_visible, 301};
302
303static const struct attribute_group ad9833_attribute_group = {
304 .attrs = ad9833_attributes,
307}; 305};
308 306
309static const struct iio_info ad9834_info = { 307static const struct iio_info ad9834_info = {
@@ -311,6 +309,11 @@ static const struct iio_info ad9834_info = {
311 .driver_module = THIS_MODULE, 309 .driver_module = THIS_MODULE,
312}; 310};
313 311
312static const struct iio_info ad9833_info = {
313 .attrs = &ad9833_attribute_group,
314 .driver_module = THIS_MODULE,
315};
316
314static int __devinit ad9834_probe(struct spi_device *spi) 317static int __devinit ad9834_probe(struct spi_device *spi)
315{ 318{
316 struct ad9834_platform_data *pdata = spi->dev.platform_data; 319 struct ad9834_platform_data *pdata = spi->dev.platform_data;
@@ -344,7 +347,15 @@ static int __devinit ad9834_probe(struct spi_device *spi)
344 st->reg = reg; 347 st->reg = reg;
345 indio_dev->dev.parent = &spi->dev; 348 indio_dev->dev.parent = &spi->dev;
346 indio_dev->name = spi_get_device_id(spi)->name; 349 indio_dev->name = spi_get_device_id(spi)->name;
347 indio_dev->info = &ad9834_info; 350 switch (st->devid) {
351 case ID_AD9833:
352 case ID_AD9837:
353 indio_dev->info = &ad9833_info;
354 break;
355 default:
356 indio_dev->info = &ad9834_info;
357 break;
358 }
348 indio_dev->modes = INDIO_DIRECT_MODE; 359 indio_dev->modes = INDIO_DIRECT_MODE;
349 360
350 /* Setup default messages */ 361 /* Setup default messages */
diff --git a/drivers/staging/iio/driver.h b/drivers/staging/iio/driver.h
new file mode 100644
index 000000000000..a4f8b2e05af5
--- /dev/null
+++ b/drivers/staging/iio/driver.h
@@ -0,0 +1,34 @@
1/*
2 * Industrial I/O in kernel access map interface.
3 *
4 * Copyright (c) 2011 Jonathan Cameron
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 */
10
11#ifndef _IIO_INKERN_H_
12#define _IIO_INKERN_H_
13
14struct iio_map;
15
16/**
 17 * iio_map_array_register() - tell the core about in-kernel consumers
18 * @indio_dev: provider device
19 * @map: array of mappings specifying association of channel with client
20 */
21int iio_map_array_register(struct iio_dev *indio_dev,
22 struct iio_map *map);
23
24/**
25 * iio_map_array_unregister() - tell the core to remove consumer mappings
26 * @indio_dev: provider device
 27 * @map: array of mappings to remove. Note these must have the same
 28 * memory addresses as those originally added, not just equal
 29 * parameter values.
30 */
31int iio_map_array_unregister(struct iio_dev *indio_dev,
32 struct iio_map *map);
33
34#endif
diff --git a/drivers/staging/iio/events.h b/drivers/staging/iio/events.h
index bfb63400fa60..c25f0e3c92e9 100644
--- a/drivers/staging/iio/events.h
+++ b/drivers/staging/iio/events.h
@@ -96,8 +96,10 @@ enum iio_event_direction {
96 96
97/* Event code number extraction depends on which type of event we have. 97/* Event code number extraction depends on which type of event we have.
 98 * Perhaps review this function in the future. */ 98 * Perhaps review this function in the future. */
99#define IIO_EVENT_CODE_EXTRACT_NUM(mask) ((__s16)(mask & 0xFFFF)) 99#define IIO_EVENT_CODE_EXTRACT_CHAN(mask) ((__s16)(mask & 0xFFFF))
100#define IIO_EVENT_CODE_EXTRACT_CHAN2(mask) ((__s16)(((mask) >> 16) & 0xFFFF))
100 101
101#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 40) & 0xFF) 102#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 40) & 0xFF)
103#define IIO_EVENT_CODE_EXTRACT_DIFF(mask) (((mask) >> 55) & 0x1)
102 104
103#endif 105#endif
diff --git a/drivers/staging/iio/gyro/adis16260_ring.c b/drivers/staging/iio/gyro/adis16260_ring.c
index 699a6152c409..711f15122a08 100644
--- a/drivers/staging/iio/gyro/adis16260_ring.c
+++ b/drivers/staging/iio/gyro/adis16260_ring.c
@@ -115,8 +115,6 @@ int adis16260_configure_ring(struct iio_dev *indio_dev)
115 return ret; 115 return ret;
116 } 116 }
117 indio_dev->buffer = ring; 117 indio_dev->buffer = ring;
118 /* Effectively select the ring buffer implementation */
119 ring->access = &ring_sw_access_funcs;
120 ring->scan_timestamp = true; 118 ring->scan_timestamp = true;
121 indio_dev->setup_ops = &adis16260_ring_setup_ops; 119 indio_dev->setup_ops = &adis16260_ring_setup_ops;
122 120
diff --git a/drivers/staging/iio/iio.h b/drivers/staging/iio/iio.h
index be6ced31f65e..b9cd454f69e2 100644
--- a/drivers/staging/iio/iio.h
+++ b/drivers/staging/iio/iio.h
@@ -26,7 +26,7 @@ enum iio_data_type {
26 26
27/* Could add the raw attributes as well - allowing buffer only devices */ 27/* Could add the raw attributes as well - allowing buffer only devices */
28enum iio_chan_info_enum { 28enum iio_chan_info_enum {
29 /* 0 is reserverd for raw attributes */ 29 /* 0 is reserved for raw attributes */
30 IIO_CHAN_INFO_SCALE = 1, 30 IIO_CHAN_INFO_SCALE = 1,
31 IIO_CHAN_INFO_OFFSET, 31 IIO_CHAN_INFO_OFFSET,
32 IIO_CHAN_INFO_CALIBSCALE, 32 IIO_CHAN_INFO_CALIBSCALE,
@@ -88,10 +88,29 @@ enum iio_endian {
88 IIO_LE, 88 IIO_LE,
89}; 89};
90 90
91struct iio_chan_spec;
92struct iio_dev;
93
94/**
95 * struct iio_chan_spec_ext_info - Extended channel info attribute
96 * @name: Info attribute name
97 * @shared: Whether this attribute is shared between all channels.
98 * @read: Read callback for this info attribute, may be NULL.
99 * @write: Write callback for this info attribute, may be NULL.
100 */
101struct iio_chan_spec_ext_info {
102 const char *name;
103 bool shared;
104 ssize_t (*read)(struct iio_dev *, struct iio_chan_spec const *,
105 char *buf);
106 ssize_t (*write)(struct iio_dev *, struct iio_chan_spec const *,
107 const char *buf, size_t len);
108};
109
91/** 110/**
92 * struct iio_chan_spec - specification of a single channel 111 * struct iio_chan_spec - specification of a single channel
93 * @type: What type of measurement is the channel making. 112 * @type: What type of measurement is the channel making.
94 * @channel: What number or name do we wish to asign the channel. 113 * @channel: What number or name do we wish to assign the channel.
95 * @channel2: If there is a second number for a differential 114 * @channel2: If there is a second number for a differential
96 * channel then this is it. If modified is set then the 115 * channel then this is it. If modified is set then the
97 * value here specifies the modifier. 116 * value here specifies the modifier.
@@ -107,11 +126,14 @@ enum iio_endian {
107 * @info_mask: What information is to be exported about this channel. 126 * @info_mask: What information is to be exported about this channel.
108 * This includes calibbias, scale etc. 127 * This includes calibbias, scale etc.
109 * @event_mask: What events can this channel produce. 128 * @event_mask: What events can this channel produce.
129 * @ext_info: Array of extended info attributes for this channel.
130 * The array is NULL terminated, the last element should
 131 * have its name field set to NULL.
110 * @extend_name: Allows labeling of channel attributes with an 132 * @extend_name: Allows labeling of channel attributes with an
 111 * informative name. Note this has no effect on codes etc, 133 * informative name. Note this has no effect on codes etc,
112 * unlike modifiers. 134 * unlike modifiers.
 113 * @datasheet_name: A name used in in-kernel mapping of channels. It should 135 * @datasheet_name: A name used in in-kernel mapping of channels. It should
114 * corrspond to the first name that the channel is referred 136 * correspond to the first name that the channel is referred
115 * to by in the datasheet (e.g. IND), or the nearest 137 * to by in the datasheet (e.g. IND), or the nearest
116 * possible compound name (e.g. IND-INC). 138 * possible compound name (e.g. IND-INC).
117 * @processed_val: Flag to specify the data access attribute should be 139 * @processed_val: Flag to specify the data access attribute should be
@@ -141,6 +163,7 @@ struct iio_chan_spec {
141 } scan_type; 163 } scan_type;
142 long info_mask; 164 long info_mask;
143 long event_mask; 165 long event_mask;
166 const struct iio_chan_spec_ext_info *ext_info;
144 char *extend_name; 167 char *extend_name;
145 const char *datasheet_name; 168 const char *datasheet_name;
146 unsigned processed_val:1; 169 unsigned processed_val:1;
@@ -197,12 +220,6 @@ static inline s64 iio_get_time_ns(void)
197#define INDIO_ALL_BUFFER_MODES \ 220#define INDIO_ALL_BUFFER_MODES \
198 (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE) 221 (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE)
199 222
200/* Vast majority of this is set by the industrialio subsystem on a
201 * call to iio_device_register. */
202#define IIO_VAL_INT 1
203#define IIO_VAL_INT_PLUS_MICRO 2
204#define IIO_VAL_INT_PLUS_NANO 3
205
206struct iio_trigger; /* forward declaration */ 223struct iio_trigger; /* forward declaration */
207struct iio_dev; 224struct iio_dev;
208 225
@@ -226,7 +243,7 @@ struct iio_dev;
226 * @write_event_config: set if the event is enabled. 243 * @write_event_config: set if the event is enabled.
227 * @read_event_value: read a value associated with the event. Meaning 244 * @read_event_value: read a value associated with the event. Meaning
 228 * is event dependent. event_code specifies which event. 245 * is event dependent. event_code specifies which event.
229 * @write_event_value: write the value associate with the event. 246 * @write_event_value: write the value associated with the event.
230 * Meaning is event dependent. 247 * Meaning is event dependent.
231 * @validate_trigger: function to validate the trigger when the 248 * @validate_trigger: function to validate the trigger when the
232 * current trigger gets changed. 249 * current trigger gets changed.
@@ -269,6 +286,9 @@ struct iio_info {
269 struct iio_trigger *trig); 286 struct iio_trigger *trig);
270 int (*update_scan_mode)(struct iio_dev *indio_dev, 287 int (*update_scan_mode)(struct iio_dev *indio_dev,
271 const unsigned long *scan_mask); 288 const unsigned long *scan_mask);
289 int (*debugfs_reg_access)(struct iio_dev *indio_dev,
290 unsigned reg, unsigned writeval,
291 unsigned *readval);
272}; 292};
273 293
274/** 294/**
@@ -310,11 +330,14 @@ struct iio_buffer_setup_ops {
310 * @chan_attr_group: [INTERN] group for all attrs in base directory 330 * @chan_attr_group: [INTERN] group for all attrs in base directory
311 * @name: [DRIVER] name of the device. 331 * @name: [DRIVER] name of the device.
312 * @info: [DRIVER] callbacks and constant info from driver 332 * @info: [DRIVER] callbacks and constant info from driver
333 * @info_exist_lock: [INTERN] lock to prevent use during removal
313 * @chrdev: [INTERN] associated character device 334 * @chrdev: [INTERN] associated character device
314 * @groups: [INTERN] attribute groups 335 * @groups: [INTERN] attribute groups
315 * @groupcounter: [INTERN] index of next attribute group 336 * @groupcounter: [INTERN] index of next attribute group
316 * @flags: [INTERN] file ops related flags including busy flag. 337 * @flags: [INTERN] file ops related flags including busy flag.
317 **/ 338 * @debugfs_dentry: [INTERN] device specific debugfs dentry.
339 * @cached_reg_addr: [INTERN] cached register address for debugfs reads.
340 */
318struct iio_dev { 341struct iio_dev {
319 int id; 342 int id;
320 343
@@ -327,9 +350,9 @@ struct iio_dev {
327 struct iio_buffer *buffer; 350 struct iio_buffer *buffer;
328 struct mutex mlock; 351 struct mutex mlock;
329 352
330 unsigned long *available_scan_masks; 353 const unsigned long *available_scan_masks;
331 unsigned masklength; 354 unsigned masklength;
332 unsigned long *active_scan_mask; 355 const unsigned long *active_scan_mask;
333 struct iio_trigger *trig; 356 struct iio_trigger *trig;
334 struct iio_poll_func *pollfunc; 357 struct iio_poll_func *pollfunc;
335 358
@@ -340,6 +363,7 @@ struct iio_dev {
340 struct attribute_group chan_attr_group; 363 struct attribute_group chan_attr_group;
341 const char *name; 364 const char *name;
342 const struct iio_info *info; 365 const struct iio_info *info;
366 struct mutex info_exist_lock;
343 const struct iio_buffer_setup_ops *setup_ops; 367 const struct iio_buffer_setup_ops *setup_ops;
344 struct cdev chrdev; 368 struct cdev chrdev;
345#define IIO_MAX_GROUPS 6 369#define IIO_MAX_GROUPS 6
@@ -347,6 +371,10 @@ struct iio_dev {
347 int groupcounter; 371 int groupcounter;
348 372
349 unsigned long flags; 373 unsigned long flags;
374#if defined(CONFIG_DEBUG_FS)
375 struct dentry *debugfs_dentry;
376 unsigned cached_reg_addr;
377#endif
350}; 378};
351 379
352/** 380/**
@@ -424,4 +452,20 @@ static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
424 & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE); 452 & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE);
425}; 453};
426 454
455/**
456 * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
457 * @indio_dev: IIO device info structure for device
458 **/
459#if defined(CONFIG_DEBUG_FS)
460static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
461{
462 return indio_dev->debugfs_dentry;
463};
464#else
465static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
466{
467 return NULL;
468};
469#endif
470
427#endif /* _INDUSTRIAL_IO_H_ */ 471#endif /* _INDUSTRIAL_IO_H_ */
diff --git a/drivers/staging/iio/iio_core.h b/drivers/staging/iio/iio_core.h
index 107cfb1cbb01..c9dfcba0bac8 100644
--- a/drivers/staging/iio/iio_core.h
+++ b/drivers/staging/iio/iio_core.h
@@ -49,4 +49,8 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
49 49
50#endif 50#endif
51 51
52int iio_device_register_eventset(struct iio_dev *indio_dev);
53void iio_device_unregister_eventset(struct iio_dev *indio_dev);
54int iio_event_getfd(struct iio_dev *indio_dev);
55
52#endif 56#endif
diff --git a/drivers/staging/iio/iio_dummy_evgen.c b/drivers/staging/iio/iio_dummy_evgen.c
index cdbf289bfe2d..f39f346bf04f 100644
--- a/drivers/staging/iio/iio_dummy_evgen.c
+++ b/drivers/staging/iio/iio_dummy_evgen.c
@@ -32,7 +32,7 @@
32 * @chip: irq chip we are faking 32 * @chip: irq chip we are faking
33 * @base: base of irq range 33 * @base: base of irq range
34 * @enabled: mask of which irqs are enabled 34 * @enabled: mask of which irqs are enabled
35 * @inuse: mask of which irqs actually have anyone connected 35 * @inuse: mask of which irqs are connected
36 * @lock: protect the evgen state 36 * @lock: protect the evgen state
37 */ 37 */
38struct iio_dummy_eventgen { 38struct iio_dummy_eventgen {
diff --git a/drivers/staging/iio/iio_hwmon.c b/drivers/staging/iio/iio_hwmon.c
new file mode 100644
index 000000000000..a603a5f51f93
--- /dev/null
+++ b/drivers/staging/iio/iio_hwmon.c
@@ -0,0 +1,232 @@
1/* Hwmon client for industrial I/O devices
2 *
3 * Copyright (c) 2011 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9
10#include <linux/kernel.h>
11#include <linux/slab.h>
12#include <linux/module.h>
13#include <linux/err.h>
14#include <linux/platform_device.h>
15#include <linux/hwmon.h>
16#include <linux/hwmon-sysfs.h>
17#include "consumer.h"
18#include "types.h"
19
20/**
21 * struct iio_hwmon_state - device instance state
22 * @channels: filled with array of channels from iio
23 * @num_channels: number of channels in channels (saves counting twice)
24 * @hwmon_dev: associated hwmon device
25 * @attr_group: the group of attributes
26 * @attrs: null terminated array of attribute pointers.
27 */
28struct iio_hwmon_state {
29 struct iio_channel *channels;
30 int num_channels;
31 struct device *hwmon_dev;
32 struct attribute_group attr_group;
33 struct attribute **attrs;
34};
35
36/*
37 * Assumes that IIO and hwmon operate in the same base units.
38 * This is supposed to be true, but needs verification for
39 * new channel types.
40 */
41static ssize_t iio_hwmon_read_val(struct device *dev,
42 struct device_attribute *attr,
43 char *buf)
44{
45 long result;
46 int val, ret, scaleint, scalepart;
47 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
48 struct iio_hwmon_state *state = dev_get_drvdata(dev);
49
50 /*
 51 * No locking between this pair of reads, so it is theoretically
 52 * possible that the scale has changed in between.
53 */
54 ret = iio_st_read_channel_raw(&state->channels[sattr->index],
55 &val);
56 if (ret < 0)
57 return ret;
58
59 ret = iio_st_read_channel_scale(&state->channels[sattr->index],
60 &scaleint, &scalepart);
61 if (ret < 0)
62 return ret;
63 switch (ret) {
64 case IIO_VAL_INT:
65 result = val * scaleint;
66 break;
67 case IIO_VAL_INT_PLUS_MICRO:
68 result = (s64)val * (s64)scaleint +
69 div_s64((s64)val * (s64)scalepart, 1000000LL);
70 break;
71 case IIO_VAL_INT_PLUS_NANO:
72 result = (s64)val * (s64)scaleint +
73 div_s64((s64)val * (s64)scalepart, 1000000000LL);
74 break;
75 default:
76 return -EINVAL;
77 }
78 return sprintf(buf, "%ld\n", result);
79}
80
81static void iio_hwmon_free_attrs(struct iio_hwmon_state *st)
82{
83 int i;
84 struct sensor_device_attribute *a;
85 for (i = 0; i < st->num_channels; i++)
86 if (st->attrs[i]) {
87 a = to_sensor_dev_attr(
88 container_of(st->attrs[i],
89 struct device_attribute,
90 attr));
91 kfree(a);
92 }
93}
94
95static int __devinit iio_hwmon_probe(struct platform_device *pdev)
96{
97 struct iio_hwmon_state *st;
98 struct sensor_device_attribute *a;
99 int ret, i;
100 int in_i = 1, temp_i = 1, curr_i = 1;
101 enum iio_chan_type type;
102
103 st = kzalloc(sizeof(*st), GFP_KERNEL);
104 if (st == NULL) {
105 ret = -ENOMEM;
106 goto error_ret;
107 }
108
109 st->channels = iio_st_channel_get_all(dev_name(&pdev->dev));
110 if (IS_ERR(st->channels)) {
111 ret = PTR_ERR(st->channels);
112 goto error_free_state;
113 }
114
115 /* count how many attributes we have */
116 while (st->channels[st->num_channels].indio_dev)
117 st->num_channels++;
118
 119 st->attrs = kzalloc(sizeof(*st->attrs) * (st->num_channels + 1),
120 GFP_KERNEL);
121 if (st->attrs == NULL) {
122 ret = -ENOMEM;
123 goto error_release_channels;
124 }
125 for (i = 0; i < st->num_channels; i++) {
126 a = kzalloc(sizeof(*a), GFP_KERNEL);
127 if (a == NULL) {
128 ret = -ENOMEM;
129 goto error_free_attrs;
130 }
131
132 sysfs_attr_init(&a->dev_attr.attr);
133 ret = iio_st_get_channel_type(&st->channels[i], &type);
134 if (ret < 0) {
135 kfree(a);
136 goto error_free_attrs;
137 }
138 switch (type) {
139 case IIO_VOLTAGE:
140 a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
141 "in%d_input",
142 in_i++);
143 break;
144 case IIO_TEMP:
145 a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
146 "temp%d_input",
147 temp_i++);
148 break;
149 case IIO_CURRENT:
150 a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
151 "curr%d_input",
152 curr_i++);
153 break;
154 default:
155 ret = -EINVAL;
156 kfree(a);
157 goto error_free_attrs;
158 }
159 if (a->dev_attr.attr.name == NULL) {
160 kfree(a);
161 ret = -ENOMEM;
162 goto error_free_attrs;
163 }
164 a->dev_attr.show = iio_hwmon_read_val;
165 a->dev_attr.attr.mode = S_IRUGO;
166 a->index = i;
167 st->attrs[i] = &a->dev_attr.attr;
168 }
169
170 st->attr_group.attrs = st->attrs;
171 platform_set_drvdata(pdev, st);
172 ret = sysfs_create_group(&pdev->dev.kobj, &st->attr_group);
173 if (ret < 0)
174 goto error_free_attrs;
175
176 st->hwmon_dev = hwmon_device_register(&pdev->dev);
177 if (IS_ERR(st->hwmon_dev)) {
178 ret = PTR_ERR(st->hwmon_dev);
179 goto error_remove_group;
180 }
181 return 0;
182
183error_remove_group:
184 sysfs_remove_group(&pdev->dev.kobj, &st->attr_group);
185error_free_attrs:
186 iio_hwmon_free_attrs(st);
187 kfree(st->attrs);
188error_release_channels:
189 iio_st_channel_release_all(st->channels);
190error_free_state:
191 kfree(st);
192error_ret:
193 return ret;
194}
195
196static int __devexit iio_hwmon_remove(struct platform_device *pdev)
197{
198 struct iio_hwmon_state *st = platform_get_drvdata(pdev);
199
200 hwmon_device_unregister(st->hwmon_dev);
201 sysfs_remove_group(&pdev->dev.kobj, &st->attr_group);
202 iio_hwmon_free_attrs(st);
203 kfree(st->attrs);
204 iio_st_channel_release_all(st->channels);
205
206 return 0;
207}
208
209static struct platform_driver __refdata iio_hwmon_driver = {
210 .driver = {
211 .name = "iio_hwmon",
212 .owner = THIS_MODULE,
213 },
214 .probe = iio_hwmon_probe,
215 .remove = __devexit_p(iio_hwmon_remove),
216};
217
218static int iio_inkern_init(void)
219{
220 return platform_driver_register(&iio_hwmon_driver);
221}
222module_init(iio_inkern_init);
223
224static void iio_inkern_exit(void)
225{
226 platform_driver_unregister(&iio_hwmon_driver);
227}
228module_exit(iio_inkern_exit);
229
230MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
231MODULE_DESCRIPTION("IIO to hwmon driver");
232MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
index d6a1c0e82a5b..bb4daf744362 100644
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -142,8 +142,6 @@ int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
142 } 142 }
143 143
144 indio_dev->buffer = buffer; 144 indio_dev->buffer = buffer;
145 /* Tell the core how to access the buffer */
146 buffer->access = &kfifo_access_funcs;
147 145
148 /* Enable timestamps by default */ 146 /* Enable timestamps by default */
149 buffer->scan_timestamp = true; 147 buffer->scan_timestamp = true;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 9a2ca55625f4..cd82b56d58af 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -607,9 +607,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
607 if (!indio_dev->buffer) 607 if (!indio_dev->buffer)
608 return -ENOMEM; 608 return -ENOMEM;
609 609
610 /* Effectively select the ring buffer implementation */
611 indio_dev->buffer->access = &ring_sw_access_funcs;
612
613 /* Ring buffer functions - here trigger setup related */ 610 /* Ring buffer functions - here trigger setup related */
614 indio_dev->setup_ops = &ad5933_ring_setup_ops; 611 indio_dev->setup_ops = &ad5933_ring_setup_ops;
615 612
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index ac22de573f3e..8daa038b23e6 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -187,8 +187,6 @@ int adis16400_configure_ring(struct iio_dev *indio_dev)
187 return ret; 187 return ret;
188 } 188 }
189 indio_dev->buffer = ring; 189 indio_dev->buffer = ring;
190 /* Effectively select the ring buffer implementation */
191 ring->access = &ring_sw_access_funcs;
192 ring->scan_timestamp = true; 190 ring->scan_timestamp = true;
193 indio_dev->setup_ops = &adis16400_ring_setup_ops; 191 indio_dev->setup_ops = &adis16400_ring_setup_ops;
194 192
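
The three hunks above delete the same line for the same reason: the buffer implementation now installs its own access functions at allocation time (see the kfifo_buf.c hunk further down, where iio_kfifo_allocate() gains the assignment). A sketch of what a driver's buffer setup reduces to after this change, assuming the kfifo back end:

/*
 * Sketch: driver buffer setup after this series. iio_kfifo_allocate()
 * installs the access functions itself, so the explicit
 * buffer->access assignment goes away.
 */
#include "iio.h"
#include "kfifo_buf.h"

static int example_configure_buffer(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	buffer = iio_kfifo_allocate(indio_dev);
	if (!buffer)
		return -ENOMEM;

	indio_dev->buffer = buffer;
	/* no access-funcs assignment needed here any more */
	buffer->scan_timestamp = true;

	return 0;
}
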
diff --git a/drivers/staging/iio/industrialio-buffer.c b/drivers/staging/iio/industrialio-buffer.c
index d7b1e9e435ae..386ba760f3f1 100644
--- a/drivers/staging/iio/industrialio-buffer.c
+++ b/drivers/staging/iio/industrialio-buffer.c
@@ -489,9 +489,9 @@ ssize_t iio_buffer_show_enable(struct device *dev,
489EXPORT_SYMBOL(iio_buffer_show_enable); 489EXPORT_SYMBOL(iio_buffer_show_enable);
490 490
491/* note NULL used as error indicator as it doesn't make sense. */ 491/* note NULL used as error indicator as it doesn't make sense. */
492static unsigned long *iio_scan_mask_match(unsigned long *av_masks, 492static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
493 unsigned int masklength, 493 unsigned int masklength,
494 unsigned long *mask) 494 const unsigned long *mask)
495{ 495{
496 if (bitmap_empty(mask, masklength)) 496 if (bitmap_empty(mask, masklength))
497 return NULL; 497 return NULL;
@@ -554,7 +554,7 @@ EXPORT_SYMBOL(iio_sw_buffer_preenable);
554int iio_scan_mask_set(struct iio_dev *indio_dev, 554int iio_scan_mask_set(struct iio_dev *indio_dev,
555 struct iio_buffer *buffer, int bit) 555 struct iio_buffer *buffer, int bit)
556{ 556{
557 unsigned long *mask; 557 const unsigned long *mask;
558 unsigned long *trialmask; 558 unsigned long *trialmask;
559 559
560 trialmask = kmalloc(sizeof(*trialmask)* 560 trialmask = kmalloc(sizeof(*trialmask)*
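
Const-ifying iio_scan_mask_match() and the mask argument of iio_scan_mask_set() lets drivers keep their scan-mask tables in read-only data. A sketch for a two-channel device, assuming indio_dev->available_scan_masks is (or becomes) a const pointer as part of this change:

/*
 * Sketch: a const scan-mask table consumable by the const-ified
 * helpers above. Assumes available_scan_masks takes a const pointer.
 */
#include <linux/bitops.h>
#include "iio.h"

static const unsigned long example_scan_masks[] = {
	BIT(0),			/* channel 0 alone */
	BIT(1),			/* channel 1 alone */
	BIT(0) | BIT(1),	/* both together */
	0,			/* terminator */
};

static void example_set_masks(struct iio_dev *indio_dev)
{
	indio_dev->available_scan_masks = example_scan_masks;
}
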
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 19f897f3c85e..d303bfbff27f 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -22,6 +22,7 @@
22#include <linux/cdev.h> 22#include <linux/cdev.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/anon_inodes.h> 24#include <linux/anon_inodes.h>
25#include <linux/debugfs.h>
25#include "iio.h" 26#include "iio.h"
26#include "iio_core.h" 27#include "iio_core.h"
27#include "iio_core_trigger.h" 28#include "iio_core_trigger.h"
@@ -39,6 +40,8 @@ struct bus_type iio_bus_type = {
39}; 40};
40EXPORT_SYMBOL(iio_bus_type); 41EXPORT_SYMBOL(iio_bus_type);
41 42
43static struct dentry *iio_debugfs_dentry;
44
42static const char * const iio_data_type_name[] = { 45static const char * const iio_data_type_name[] = {
43 [IIO_RAW] = "raw", 46 [IIO_RAW] = "raw",
44 [IIO_PROCESSED] = "input", 47 [IIO_PROCESSED] = "input",
@@ -100,71 +103,6 @@ const struct iio_chan_spec
100 return NULL; 103 return NULL;
101} 104}
102 105
103/**
104 * struct iio_detected_event_list - list element for events that have occurred
105 * @list: linked list header
106 * @ev: the event itself
107 */
108struct iio_detected_event_list {
109 struct list_head list;
110 struct iio_event_data ev;
111};
112
113/**
114 * struct iio_event_interface - chrdev interface for an event line
115 * @dev: device associated with event interface
116 * @wait: wait queue to allow blocking reads of events
117 * @event_list_lock: mutex to protect the list of detected events
118 * @det_events: list of detected events
119 * @max_events: maximum number of events before new ones are dropped
120 * @current_events: number of events in detected list
121 * @flags: file operations related flags including busy flag.
122 */
123struct iio_event_interface {
124 wait_queue_head_t wait;
125 struct mutex event_list_lock;
126 struct list_head det_events;
127 int max_events;
128 int current_events;
129 struct list_head dev_attr_list;
130 unsigned long flags;
131 struct attribute_group group;
132};
133
134int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
135{
136 struct iio_event_interface *ev_int = indio_dev->event_interface;
137 struct iio_detected_event_list *ev;
138 int ret = 0;
139
140 /* Does anyone care? */
141 mutex_lock(&ev_int->event_list_lock);
142 if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
143 if (ev_int->current_events == ev_int->max_events) {
144 mutex_unlock(&ev_int->event_list_lock);
145 return 0;
146 }
147 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
148 if (ev == NULL) {
149 ret = -ENOMEM;
150 mutex_unlock(&ev_int->event_list_lock);
151 goto error_ret;
152 }
153 ev->ev.id = ev_code;
154 ev->ev.timestamp = timestamp;
155
156 list_add_tail(&ev->list, &ev_int->det_events);
157 ev_int->current_events++;
158 mutex_unlock(&ev_int->event_list_lock);
159 wake_up_interruptible(&ev_int->wait);
160 } else
161 mutex_unlock(&ev_int->event_list_lock);
162
163error_ret:
164 return ret;
165}
166EXPORT_SYMBOL(iio_push_event);
167
168/* This turns up an awful lot */ 106/* This turns up an awful lot */
169ssize_t iio_read_const_attr(struct device *dev, 107ssize_t iio_read_const_attr(struct device *dev,
170 struct device_attribute *attr, 108 struct device_attribute *attr,
@@ -174,143 +112,189 @@ ssize_t iio_read_const_attr(struct device *dev,
174} 112}
175EXPORT_SYMBOL(iio_read_const_attr); 113EXPORT_SYMBOL(iio_read_const_attr);
176 114
177static ssize_t iio_event_chrdev_read(struct file *filep, 115static int __init iio_init(void)
178 char __user *buf,
179 size_t count,
180 loff_t *f_ps)
181{ 116{
182 struct iio_event_interface *ev_int = filep->private_data;
183 struct iio_detected_event_list *el;
184 size_t len = sizeof(el->ev);
185 int ret; 117 int ret;
186 118
187 if (count < len) 119 /* Register sysfs bus */
188 return -EINVAL; 120 ret = bus_register(&iio_bus_type);
189 121 if (ret < 0) {
190 mutex_lock(&ev_int->event_list_lock); 122 printk(KERN_ERR
191 if (list_empty(&ev_int->det_events)) { 123 "%s could not register bus type\n",
192 if (filep->f_flags & O_NONBLOCK) { 124 __FILE__);
193 ret = -EAGAIN; 125 goto error_nothing;
194 goto error_mutex_unlock;
195 }
196 mutex_unlock(&ev_int->event_list_lock);
197 /* Blocking on device; waiting for something to be there */
198 ret = wait_event_interruptible(ev_int->wait,
199 !list_empty(&ev_int
200 ->det_events));
201 if (ret)
202 goto error_ret;
203 /* Single access device so no one else can get the data */
204 mutex_lock(&ev_int->event_list_lock);
205 } 126 }
206 127
207 el = list_first_entry(&ev_int->det_events, 128 ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
208 struct iio_detected_event_list, 129 if (ret < 0) {
209 list); 130 printk(KERN_ERR "%s: failed to allocate char dev region\n",
210 if (copy_to_user(buf, &(el->ev), len)) { 131 __FILE__);
211 ret = -EFAULT; 132 goto error_unregister_bus_type;
212 goto error_mutex_unlock;
213 } 133 }
214 list_del(&el->list);
215 ev_int->current_events--;
216 mutex_unlock(&ev_int->event_list_lock);
217 kfree(el);
218 134
219 return len; 135 iio_debugfs_dentry = debugfs_create_dir("iio", NULL);
220 136
221error_mutex_unlock: 137 return 0;
222 mutex_unlock(&ev_int->event_list_lock);
223error_ret:
224 138
139error_unregister_bus_type:
140 bus_unregister(&iio_bus_type);
141error_nothing:
225 return ret; 142 return ret;
226} 143}
227 144
228static int iio_event_chrdev_release(struct inode *inode, struct file *filep) 145static void __exit iio_exit(void)
229{ 146{
230 struct iio_event_interface *ev_int = filep->private_data; 147 if (iio_devt)
231 struct iio_detected_event_list *el, *t; 148 unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
149 bus_unregister(&iio_bus_type);
150 debugfs_remove(iio_debugfs_dentry);
151}
232 152
233 mutex_lock(&ev_int->event_list_lock); 153#if defined(CONFIG_DEBUG_FS)
234 clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); 154static int iio_debugfs_open(struct inode *inode, struct file *file)
235 /* 155{
236 * In order to maintain a clean state for reopening, 156 if (inode->i_private)
237 * clear out any awaiting events. The mask will prevent 157 file->private_data = inode->i_private;
238 * any new __iio_push_event calls from running.
239 */
240 list_for_each_entry_safe(el, t, &ev_int->det_events, list) {
241 list_del(&el->list);
242 kfree(el);
243 }
244 ev_int->current_events = 0;
245 mutex_unlock(&ev_int->event_list_lock);
246 158
247 return 0; 159 return 0;
248} 160}
249 161
250static const struct file_operations iio_event_chrdev_fileops = { 162static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
251 .read = iio_event_chrdev_read, 163 size_t count, loff_t *ppos)
252 .release = iio_event_chrdev_release,
253 .owner = THIS_MODULE,
254 .llseek = noop_llseek,
255};
256
257static int iio_event_getfd(struct iio_dev *indio_dev)
258{ 164{
259 struct iio_event_interface *ev_int = indio_dev->event_interface; 165 struct iio_dev *indio_dev = file->private_data;
260 int fd; 166 char buf[20];
167 unsigned val = 0;
168 ssize_t len;
169 int ret;
261 170
262 if (ev_int == NULL) 171 ret = indio_dev->info->debugfs_reg_access(indio_dev,
263 return -ENODEV; 172 indio_dev->cached_reg_addr,
173 0, &val);
174 if (ret)
175 dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
264 176
265 mutex_lock(&ev_int->event_list_lock); 177 len = snprintf(buf, sizeof(buf), "0x%X\n", val);
266 if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { 178
267 mutex_unlock(&ev_int->event_list_lock); 179 return simple_read_from_buffer(userbuf, count, ppos, buf, len);
268 return -EBUSY;
269 }
270 mutex_unlock(&ev_int->event_list_lock);
271 fd = anon_inode_getfd("iio:event",
272 &iio_event_chrdev_fileops, ev_int, O_RDONLY);
273 if (fd < 0) {
274 mutex_lock(&ev_int->event_list_lock);
275 clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
276 mutex_unlock(&ev_int->event_list_lock);
277 }
278 return fd;
279} 180}
280 181
281static int __init iio_init(void) 182static ssize_t iio_debugfs_write_reg(struct file *file,
183 const char __user *userbuf, size_t count, loff_t *ppos)
282{ 184{
185 struct iio_dev *indio_dev = file->private_data;
186 unsigned reg, val;
187 char buf[80];
283 int ret; 188 int ret;
284 189
285 /* Register sysfs bus */ 190 count = min_t(size_t, count, (sizeof(buf)-1));
286 ret = bus_register(&iio_bus_type); 191 if (copy_from_user(buf, userbuf, count))
287 if (ret < 0) { 192 return -EFAULT;
288 printk(KERN_ERR 193
289 "%s could not register bus type\n", 194 buf[count] = 0;
290 __FILE__); 195
291 goto error_nothing; 196 ret = sscanf(buf, "%i %i", &reg, &val);
197
198 switch (ret) {
199 case 1:
200 indio_dev->cached_reg_addr = reg;
201 break;
202 case 2:
203 indio_dev->cached_reg_addr = reg;
204 ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
205 val, NULL);
206 if (ret) {
207 dev_err(indio_dev->dev.parent, "%s: write failed\n",
208 __func__);
209 return ret;
210 }
211 break;
212 default:
213 return -EINVAL;
292 } 214 }
293 215
294 ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio"); 216 return count;
295 if (ret < 0) { 217}
296 printk(KERN_ERR "%s: failed to allocate char dev region\n", 218
297 __FILE__); 219static const struct file_operations iio_debugfs_reg_fops = {
298 goto error_unregister_bus_type; 220 .open = iio_debugfs_open,
221 .read = iio_debugfs_read_reg,
222 .write = iio_debugfs_write_reg,
223};
224
225static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
226{
227 debugfs_remove_recursive(indio_dev->debugfs_dentry);
228}
229
230static int iio_device_register_debugfs(struct iio_dev *indio_dev)
231{
232 struct dentry *d;
233
234 if (indio_dev->info->debugfs_reg_access == NULL)
235 return 0;
236
237 if (IS_ERR(iio_debugfs_dentry))
238 return 0;
239
240 indio_dev->debugfs_dentry =
241 debugfs_create_dir(dev_name(&indio_dev->dev),
242 iio_debugfs_dentry);
243 if (IS_ERR(indio_dev->debugfs_dentry))
244 return PTR_ERR(indio_dev->debugfs_dentry);
245
246 if (indio_dev->debugfs_dentry == NULL) {
247 dev_warn(indio_dev->dev.parent,
248 "Failed to create debugfs directory\n");
249 return -EFAULT;
250 }
251
252 d = debugfs_create_file("direct_reg_access", 0644,
253 indio_dev->debugfs_dentry,
254 indio_dev, &iio_debugfs_reg_fops);
255 if (!d) {
256 iio_device_unregister_debugfs(indio_dev);
257 return -ENOMEM;
299 } 258 }
300 259
301 return 0; 260 return 0;
261}
262#else
263static int iio_device_register_debugfs(struct iio_dev *indio_dev)
264{
265 return 0;
266}
302 267
303error_unregister_bus_type: 268static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
304 bus_unregister(&iio_bus_type); 269{
305error_nothing:
306 return ret;
307} 270}
271#endif /* CONFIG_DEBUG_FS */
308 272
309static void __exit iio_exit(void) 273static ssize_t iio_read_channel_ext_info(struct device *dev,
274 struct device_attribute *attr,
275 char *buf)
310{ 276{
311 if (iio_devt) 277 struct iio_dev *indio_dev = dev_get_drvdata(dev);
312 unregister_chrdev_region(iio_devt, IIO_DEV_MAX); 278 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
313 bus_unregister(&iio_bus_type); 279 const struct iio_chan_spec_ext_info *ext_info;
280
281 ext_info = &this_attr->c->ext_info[this_attr->address];
282
283 return ext_info->read(indio_dev, this_attr->c, buf);
284}
285
286static ssize_t iio_write_channel_ext_info(struct device *dev,
287 struct device_attribute *attr,
288 const char *buf,
289 size_t len)
290{
291 struct iio_dev *indio_dev = dev_get_drvdata(dev);
292 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
293 const struct iio_chan_spec_ext_info *ext_info;
294
295 ext_info = &this_attr->c->ext_info[this_attr->address];
296
297 return ext_info->write(indio_dev, this_attr->c, buf, len);
314} 298}
315 299
316static ssize_t iio_read_channel_info(struct device *dev, 300static ssize_t iio_read_channel_info(struct device *dev,
@@ -455,7 +439,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
455 goto error_ret; 439 goto error_ret;
456 } 440 }
457 441
458 if (chan->differential) { /* Differential can not have modifier */ 442 if (chan->differential) { /* Differential can not have modifier */
459 if (generic) 443 if (generic)
460 name_format 444 name_format
461 = kasprintf(GFP_KERNEL, "%s_%s-%s_%s", 445 = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
@@ -592,6 +576,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
592 struct iio_chan_spec const *chan) 576 struct iio_chan_spec const *chan)
593{ 577{
594 int ret, i, attrcount = 0; 578 int ret, i, attrcount = 0;
579 const struct iio_chan_spec_ext_info *ext_info;
595 580
596 if (chan->channel < 0) 581 if (chan->channel < 0)
597 return 0; 582 return 0;
@@ -626,6 +611,31 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
626 goto error_ret; 611 goto error_ret;
627 attrcount++; 612 attrcount++;
628 } 613 }
614
615 if (chan->ext_info) {
616 unsigned int i = 0;
617 for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
618 ret = __iio_add_chan_devattr(ext_info->name,
619 chan,
620 ext_info->read ?
621 &iio_read_channel_ext_info : NULL,
622 ext_info->write ?
623 &iio_write_channel_ext_info : NULL,
624 i,
625 ext_info->shared,
626 &indio_dev->dev,
627 &indio_dev->channel_attr_list);
628 i++;
629 if (ret == -EBUSY && ext_info->shared)
630 continue;
631
632 if (ret)
633 goto error_ret;
634
635 attrcount++;
636 }
637 }
638
629 ret = attrcount; 639 ret = attrcount;
630error_ret: 640error_ret:
631 return ret; 641 return ret;
@@ -663,7 +673,7 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
663 attrcount = attrcount_orig; 673 attrcount = attrcount_orig;
664 /* 674 /*
665 * New channel registration method - relies on the fact a group does 675 * New channel registration method - relies on the fact a group does
666 * not need to be initialized if its name is NULL. 676 * not need to be initialized if its name is NULL.
667 */ 677 */
668 INIT_LIST_HEAD(&indio_dev->channel_attr_list); 678 INIT_LIST_HEAD(&indio_dev->channel_attr_list);
669 if (indio_dev->channels) 679 if (indio_dev->channels)
@@ -726,295 +736,6 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
726 kfree(indio_dev->chan_attr_group.attrs); 736 kfree(indio_dev->chan_attr_group.attrs);
727} 737}
728 738
729static const char * const iio_ev_type_text[] = {
730 [IIO_EV_TYPE_THRESH] = "thresh",
731 [IIO_EV_TYPE_MAG] = "mag",
732 [IIO_EV_TYPE_ROC] = "roc",
733 [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
734 [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
735};
736
737static const char * const iio_ev_dir_text[] = {
738 [IIO_EV_DIR_EITHER] = "either",
739 [IIO_EV_DIR_RISING] = "rising",
740 [IIO_EV_DIR_FALLING] = "falling"
741};
742
743static ssize_t iio_ev_state_store(struct device *dev,
744 struct device_attribute *attr,
745 const char *buf,
746 size_t len)
747{
748 struct iio_dev *indio_dev = dev_get_drvdata(dev);
749 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
750 int ret;
751 bool val;
752
753 ret = strtobool(buf, &val);
754 if (ret < 0)
755 return ret;
756
757 ret = indio_dev->info->write_event_config(indio_dev,
758 this_attr->address,
759 val);
760 return (ret < 0) ? ret : len;
761}
762
763static ssize_t iio_ev_state_show(struct device *dev,
764 struct device_attribute *attr,
765 char *buf)
766{
767 struct iio_dev *indio_dev = dev_get_drvdata(dev);
768 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
769 int val = indio_dev->info->read_event_config(indio_dev,
770 this_attr->address);
771
772 if (val < 0)
773 return val;
774 else
775 return sprintf(buf, "%d\n", val);
776}
777
778static ssize_t iio_ev_value_show(struct device *dev,
779 struct device_attribute *attr,
780 char *buf)
781{
782 struct iio_dev *indio_dev = dev_get_drvdata(dev);
783 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
784 int val, ret;
785
786 ret = indio_dev->info->read_event_value(indio_dev,
787 this_attr->address, &val);
788 if (ret < 0)
789 return ret;
790
791 return sprintf(buf, "%d\n", val);
792}
793
794static ssize_t iio_ev_value_store(struct device *dev,
795 struct device_attribute *attr,
796 const char *buf,
797 size_t len)
798{
799 struct iio_dev *indio_dev = dev_get_drvdata(dev);
800 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
801 unsigned long val;
802 int ret;
803
804 if (!indio_dev->info->write_event_value)
805 return -EINVAL;
806
807 ret = strict_strtoul(buf, 10, &val);
808 if (ret)
809 return ret;
810
811 ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
812 val);
813 if (ret < 0)
814 return ret;
815
816 return len;
817}
818
819static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
820 struct iio_chan_spec const *chan)
821{
822 int ret = 0, i, attrcount = 0;
823 u64 mask = 0;
824 char *postfix;
825 if (!chan->event_mask)
826 return 0;
827
828 for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
829 postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
830 iio_ev_type_text[i/IIO_EV_DIR_MAX],
831 iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
832 if (postfix == NULL) {
833 ret = -ENOMEM;
834 goto error_ret;
835 }
836 if (chan->modified)
837 mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
838 i/IIO_EV_DIR_MAX,
839 i%IIO_EV_DIR_MAX);
840 else if (chan->differential)
841 mask = IIO_EVENT_CODE(chan->type,
842 0, 0,
843 i%IIO_EV_DIR_MAX,
844 i/IIO_EV_DIR_MAX,
845 0,
846 chan->channel,
847 chan->channel2);
848 else
849 mask = IIO_UNMOD_EVENT_CODE(chan->type,
850 chan->channel,
851 i/IIO_EV_DIR_MAX,
852 i%IIO_EV_DIR_MAX);
853
854 ret = __iio_add_chan_devattr(postfix,
855 chan,
856 &iio_ev_state_show,
857 iio_ev_state_store,
858 mask,
859 0,
860 &indio_dev->dev,
861 &indio_dev->event_interface->
862 dev_attr_list);
863 kfree(postfix);
864 if (ret)
865 goto error_ret;
866 attrcount++;
867 postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
868 iio_ev_type_text[i/IIO_EV_DIR_MAX],
869 iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
870 if (postfix == NULL) {
871 ret = -ENOMEM;
872 goto error_ret;
873 }
874 ret = __iio_add_chan_devattr(postfix, chan,
875 iio_ev_value_show,
876 iio_ev_value_store,
877 mask,
878 0,
879 &indio_dev->dev,
880 &indio_dev->event_interface->
881 dev_attr_list);
882 kfree(postfix);
883 if (ret)
884 goto error_ret;
885 attrcount++;
886 }
887 ret = attrcount;
888error_ret:
889 return ret;
890}
891
892static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
893{
894 struct iio_dev_attr *p, *n;
895 list_for_each_entry_safe(p, n,
896 &indio_dev->event_interface->
897 dev_attr_list, l) {
898 kfree(p->dev_attr.attr.name);
899 kfree(p);
900 }
901}
902
903static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
904{
905 int j, ret, attrcount = 0;
906
907 INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
908 /* Dynamically created from the channels array */
909 for (j = 0; j < indio_dev->num_channels; j++) {
910 ret = iio_device_add_event_sysfs(indio_dev,
911 &indio_dev->channels[j]);
912 if (ret < 0)
913 goto error_clear_attrs;
914 attrcount += ret;
915 }
916 return attrcount;
917
918error_clear_attrs:
919 __iio_remove_event_config_attrs(indio_dev);
920
921 return ret;
922}
923
924static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
925{
926 int j;
927
928 for (j = 0; j < indio_dev->num_channels; j++)
929 if (indio_dev->channels[j].event_mask != 0)
930 return true;
931 return false;
932}
933
934static void iio_setup_ev_int(struct iio_event_interface *ev_int)
935{
936 mutex_init(&ev_int->event_list_lock);
937 /* discussion point - make this variable? */
938 ev_int->max_events = 10;
939 ev_int->current_events = 0;
940 INIT_LIST_HEAD(&ev_int->det_events);
941 init_waitqueue_head(&ev_int->wait);
942}
943
944static const char *iio_event_group_name = "events";
945static int iio_device_register_eventset(struct iio_dev *indio_dev)
946{
947 struct iio_dev_attr *p;
948 int ret = 0, attrcount_orig = 0, attrcount, attrn;
949 struct attribute **attr;
950
951 if (!(indio_dev->info->event_attrs ||
952 iio_check_for_dynamic_events(indio_dev)))
953 return 0;
954
955 indio_dev->event_interface =
956 kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
957 if (indio_dev->event_interface == NULL) {
958 ret = -ENOMEM;
959 goto error_ret;
960 }
961
962 iio_setup_ev_int(indio_dev->event_interface);
963 if (indio_dev->info->event_attrs != NULL) {
964 attr = indio_dev->info->event_attrs->attrs;
965 while (*attr++ != NULL)
966 attrcount_orig++;
967 }
968 attrcount = attrcount_orig;
969 if (indio_dev->channels) {
970 ret = __iio_add_event_config_attrs(indio_dev);
971 if (ret < 0)
972 goto error_free_setup_event_lines;
973 attrcount += ret;
974 }
975
976 indio_dev->event_interface->group.name = iio_event_group_name;
977 indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
978 sizeof(indio_dev->event_interface->group.attrs[0]),
979 GFP_KERNEL);
980 if (indio_dev->event_interface->group.attrs == NULL) {
981 ret = -ENOMEM;
982 goto error_free_setup_event_lines;
983 }
984 if (indio_dev->info->event_attrs)
985 memcpy(indio_dev->event_interface->group.attrs,
986 indio_dev->info->event_attrs->attrs,
987 sizeof(indio_dev->event_interface->group.attrs[0])
988 *attrcount_orig);
989 attrn = attrcount_orig;
990 /* Add all elements from the list. */
991 list_for_each_entry(p,
992 &indio_dev->event_interface->dev_attr_list,
993 l)
994 indio_dev->event_interface->group.attrs[attrn++] =
995 &p->dev_attr.attr;
996 indio_dev->groups[indio_dev->groupcounter++] =
997 &indio_dev->event_interface->group;
998
999 return 0;
1000
1001error_free_setup_event_lines:
1002 __iio_remove_event_config_attrs(indio_dev);
1003 kfree(indio_dev->event_interface);
1004error_ret:
1005
1006 return ret;
1007}
1008
1009static void iio_device_unregister_eventset(struct iio_dev *indio_dev)
1010{
1011 if (indio_dev->event_interface == NULL)
1012 return;
1013 __iio_remove_event_config_attrs(indio_dev);
1014 kfree(indio_dev->event_interface->group.attrs);
1015 kfree(indio_dev->event_interface);
1016}
1017
1018static void iio_dev_release(struct device *device) 739static void iio_dev_release(struct device *device)
1019{ 740{
1020 struct iio_dev *indio_dev = container_of(device, struct iio_dev, dev); 741 struct iio_dev *indio_dev = container_of(device, struct iio_dev, dev);
@@ -1023,6 +744,7 @@ static void iio_dev_release(struct device *device)
1023 iio_device_unregister_trigger_consumer(indio_dev); 744 iio_device_unregister_trigger_consumer(indio_dev);
1024 iio_device_unregister_eventset(indio_dev); 745 iio_device_unregister_eventset(indio_dev);
1025 iio_device_unregister_sysfs(indio_dev); 746 iio_device_unregister_sysfs(indio_dev);
747 iio_device_unregister_debugfs(indio_dev);
1026} 748}
1027 749
1028static struct device_type iio_dev_type = { 750static struct device_type iio_dev_type = {
@@ -1052,6 +774,7 @@ struct iio_dev *iio_allocate_device(int sizeof_priv)
1052 device_initialize(&dev->dev); 774 device_initialize(&dev->dev);
1053 dev_set_drvdata(&dev->dev, (void *)dev); 775 dev_set_drvdata(&dev->dev, (void *)dev);
1054 mutex_init(&dev->mlock); 776 mutex_init(&dev->mlock);
777 mutex_init(&dev->info_exist_lock);
1055 778
1056 dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL); 779 dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
1057 if (dev->id < 0) { 780 if (dev->id < 0) {
@@ -1131,6 +854,8 @@ static const struct file_operations iio_buffer_fileops = {
1131 .compat_ioctl = iio_ioctl, 854 .compat_ioctl = iio_ioctl,
1132}; 855};
1133 856
857static const struct iio_buffer_setup_ops noop_ring_setup_ops;
858
1134int iio_device_register(struct iio_dev *indio_dev) 859int iio_device_register(struct iio_dev *indio_dev)
1135{ 860{
1136 int ret; 861 int ret;
@@ -1138,11 +863,17 @@ int iio_device_register(struct iio_dev *indio_dev)
1138 /* configure elements for the chrdev */ 863 /* configure elements for the chrdev */
1139 indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id); 864 indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);
1140 865
866 ret = iio_device_register_debugfs(indio_dev);
867 if (ret) {
868 dev_err(indio_dev->dev.parent,
869 "Failed to register debugfs interfaces\n");
870 goto error_ret;
871 }
1141 ret = iio_device_register_sysfs(indio_dev); 872 ret = iio_device_register_sysfs(indio_dev);
1142 if (ret) { 873 if (ret) {
1143 dev_err(indio_dev->dev.parent, 874 dev_err(indio_dev->dev.parent,
1144 "Failed to register sysfs interfaces\n"); 875 "Failed to register sysfs interfaces\n");
1145 goto error_ret; 876 goto error_unreg_debugfs;
1146 } 877 }
1147 ret = iio_device_register_eventset(indio_dev); 878 ret = iio_device_register_eventset(indio_dev);
1148 if (ret) { 879 if (ret) {
@@ -1153,6 +884,10 @@ int iio_device_register(struct iio_dev *indio_dev)
1153 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) 884 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
1154 iio_device_register_trigger_consumer(indio_dev); 885 iio_device_register_trigger_consumer(indio_dev);
1155 886
887 if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
888 indio_dev->setup_ops == NULL)
889 indio_dev->setup_ops = &noop_ring_setup_ops;
890
1156 ret = device_add(&indio_dev->dev); 891 ret = device_add(&indio_dev->dev);
1157 if (ret < 0) 892 if (ret < 0)
1158 goto error_unreg_eventset; 893 goto error_unreg_eventset;
@@ -1169,6 +904,8 @@ error_unreg_eventset:
1169 iio_device_unregister_eventset(indio_dev); 904 iio_device_unregister_eventset(indio_dev);
1170error_free_sysfs: 905error_free_sysfs:
1171 iio_device_unregister_sysfs(indio_dev); 906 iio_device_unregister_sysfs(indio_dev);
907error_unreg_debugfs:
908 iio_device_unregister_debugfs(indio_dev);
1172error_ret: 909error_ret:
1173 return ret; 910 return ret;
1174} 911}
@@ -1176,6 +913,9 @@ EXPORT_SYMBOL(iio_device_register);
1176 913
1177void iio_device_unregister(struct iio_dev *indio_dev) 914void iio_device_unregister(struct iio_dev *indio_dev)
1178{ 915{
916 mutex_lock(&indio_dev->info_exist_lock);
917 indio_dev->info = NULL;
918 mutex_unlock(&indio_dev->info_exist_lock);
1179 device_unregister(&indio_dev->dev); 919 device_unregister(&indio_dev->dev);
1180} 920}
1181EXPORT_SYMBOL(iio_device_unregister); 921EXPORT_SYMBOL(iio_device_unregister);
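
The new direct_reg_access file is only created when a driver supplies info->debugfs_reg_access(). Per the calls in iio_debugfs_read_reg() and iio_debugfs_write_reg() above, a non-NULL readval selects a read (with reg taken from cached_reg_addr); otherwise writeval is written to reg. A sketch of the driver side, where example_state and the two bus accessors are hypothetical:

/*
 * Sketch: a driver-side debugfs_reg_access() callback. readval != NULL
 * means "read reg"; otherwise writeval is written to reg.
 * example_state and the two accessors are hypothetical.
 */
#include <linux/mutex.h>
#include "iio.h"

struct example_state {
	struct mutex lock;
	/* bus handle etc. */
};

static int example_read_reg(struct example_state *st, unsigned reg,
			    unsigned *val);	/* hypothetical accessor */
static int example_write_reg(struct example_state *st, unsigned reg,
			     unsigned val);	/* hypothetical accessor */

static int example_reg_access(struct iio_dev *indio_dev, unsigned reg,
			      unsigned writeval, unsigned *readval)
{
	struct example_state *st = iio_priv(indio_dev);
	int ret;

	mutex_lock(&st->lock);
	if (readval)
		ret = example_read_reg(st, reg, readval);
	else
		ret = example_write_reg(st, reg, writeval);
	mutex_unlock(&st->lock);

	return ret;
}

From userspace, writing "0x10 0x2" to the file performs a register write, writing just "0x10" caches the address, and a subsequent read of the file returns that register's value, matching the sscanf() cases in iio_debugfs_write_reg().
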
diff --git a/drivers/staging/iio/industrialio-event.c b/drivers/staging/iio/industrialio-event.c
new file mode 100644
index 000000000000..5fdf739e38f9
--- /dev/null
+++ b/drivers/staging/iio/industrialio-event.c
@@ -0,0 +1,453 @@
1/* Industrial I/O event handling
2 *
3 * Copyright (c) 2008 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * Based on elements of hwmon and input subsystems.
10 */
11
12#include <linux/anon_inodes.h>
13#include <linux/device.h>
14#include <linux/fs.h>
15#include <linux/kernel.h>
16#include <linux/kfifo.h>
17#include <linux/module.h>
18#include <linux/poll.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/uaccess.h>
22#include <linux/wait.h>
23#include "iio.h"
24#include "iio_core.h"
25#include "sysfs.h"
26#include "events.h"
27
28/**
29 * struct iio_event_interface - chrdev interface for an event line
30 * @wait: wait queue to allow blocking reads of events
31 * @det_events: kfifo of detected events
32 * @dev_attr_list: list of event interface sysfs attributes
33 * @flags: file operations related flags including busy flag.
34 * @group: event interface sysfs attribute group
35 */
36struct iio_event_interface {
37 wait_queue_head_t wait;
38 DECLARE_KFIFO(det_events, struct iio_event_data, 16);
39
40 struct list_head dev_attr_list;
41 unsigned long flags;
42 struct attribute_group group;
43};
44
45int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
46{
47 struct iio_event_interface *ev_int = indio_dev->event_interface;
48 struct iio_event_data ev;
49 int copied;
50
51 /* Does anyone care? */
52 spin_lock(&ev_int->wait.lock);
53 if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
54
55 ev.id = ev_code;
56 ev.timestamp = timestamp;
57
58 copied = kfifo_put(&ev_int->det_events, &ev);
59 if (copied != 0)
60 wake_up_locked_poll(&ev_int->wait, POLLIN);
61 }
62 spin_unlock(&ev_int->wait.lock);
63
64 return 0;
65}
66EXPORT_SYMBOL(iio_push_event);
67
68/**
69 * iio_event_poll() - poll the event queue to find out if it has data
70 */
71static unsigned int iio_event_poll(struct file *filep,
72 struct poll_table_struct *wait)
73{
74 struct iio_event_interface *ev_int = filep->private_data;
75 unsigned int events = 0;
76
77 poll_wait(filep, &ev_int->wait, wait);
78
79 spin_lock(&ev_int->wait.lock);
80 if (!kfifo_is_empty(&ev_int->det_events))
81 events = POLLIN | POLLRDNORM;
82 spin_unlock(&ev_int->wait.lock);
83
84 return events;
85}
86
87static ssize_t iio_event_chrdev_read(struct file *filep,
88 char __user *buf,
89 size_t count,
90 loff_t *f_ps)
91{
92 struct iio_event_interface *ev_int = filep->private_data;
93 unsigned int copied;
94 int ret;
95
96 if (count < sizeof(struct iio_event_data))
97 return -EINVAL;
98
99 spin_lock(&ev_int->wait.lock);
100 if (kfifo_is_empty(&ev_int->det_events)) {
101 if (filep->f_flags & O_NONBLOCK) {
102 ret = -EAGAIN;
103 goto error_unlock;
104 }
105 /* Blocking on device; waiting for something to be there */
106 ret = wait_event_interruptible_locked(ev_int->wait,
107 !kfifo_is_empty(&ev_int->det_events));
108 if (ret)
109 goto error_unlock;
110 /* Single access device so no one else can get the data */
111 }
112
113 ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
114
115error_unlock:
116 spin_unlock(&ev_int->wait.lock);
117
118 return ret ? ret : copied;
119}
120
121static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
122{
123 struct iio_event_interface *ev_int = filep->private_data;
124
125 spin_lock(&ev_int->wait.lock);
126 __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
127 /*
128 * In order to maintain a clean state for reopening,
129 * clear out any awaiting events. The mask will prevent
130 * any new __iio_push_event calls from running.
131 */
132 kfifo_reset_out(&ev_int->det_events);
133 spin_unlock(&ev_int->wait.lock);
134
135 return 0;
136}
137
138static const struct file_operations iio_event_chrdev_fileops = {
139 .read = iio_event_chrdev_read,
140 .poll = iio_event_poll,
141 .release = iio_event_chrdev_release,
142 .owner = THIS_MODULE,
143 .llseek = noop_llseek,
144};
145
146int iio_event_getfd(struct iio_dev *indio_dev)
147{
148 struct iio_event_interface *ev_int = indio_dev->event_interface;
149 int fd;
150
151 if (ev_int == NULL)
152 return -ENODEV;
153
154 spin_lock(&ev_int->wait.lock);
155 if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
156 spin_unlock(&ev_int->wait.lock);
157 return -EBUSY;
158 }
159 spin_unlock(&ev_int->wait.lock);
160 fd = anon_inode_getfd("iio:event",
161 &iio_event_chrdev_fileops, ev_int, O_RDONLY);
162 if (fd < 0) {
163 spin_lock(&ev_int->wait.lock);
164 __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
165 spin_unlock(&ev_int->wait.lock);
166 }
167 return fd;
168}
169
170static const char * const iio_ev_type_text[] = {
171 [IIO_EV_TYPE_THRESH] = "thresh",
172 [IIO_EV_TYPE_MAG] = "mag",
173 [IIO_EV_TYPE_ROC] = "roc",
174 [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
175 [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
176};
177
178static const char * const iio_ev_dir_text[] = {
179 [IIO_EV_DIR_EITHER] = "either",
180 [IIO_EV_DIR_RISING] = "rising",
181 [IIO_EV_DIR_FALLING] = "falling"
182};
183
184static ssize_t iio_ev_state_store(struct device *dev,
185 struct device_attribute *attr,
186 const char *buf,
187 size_t len)
188{
189 struct iio_dev *indio_dev = dev_get_drvdata(dev);
190 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
191 int ret;
192 bool val;
193
194 ret = strtobool(buf, &val);
195 if (ret < 0)
196 return ret;
197
198 ret = indio_dev->info->write_event_config(indio_dev,
199 this_attr->address,
200 val);
201 return (ret < 0) ? ret : len;
202}
203
204static ssize_t iio_ev_state_show(struct device *dev,
205 struct device_attribute *attr,
206 char *buf)
207{
208 struct iio_dev *indio_dev = dev_get_drvdata(dev);
209 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
210 int val = indio_dev->info->read_event_config(indio_dev,
211 this_attr->address);
212
213 if (val < 0)
214 return val;
215 else
216 return sprintf(buf, "%d\n", val);
217}
218
219static ssize_t iio_ev_value_show(struct device *dev,
220 struct device_attribute *attr,
221 char *buf)
222{
223 struct iio_dev *indio_dev = dev_get_drvdata(dev);
224 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
225 int val, ret;
226
227 ret = indio_dev->info->read_event_value(indio_dev,
228 this_attr->address, &val);
229 if (ret < 0)
230 return ret;
231
232 return sprintf(buf, "%d\n", val);
233}
234
235static ssize_t iio_ev_value_store(struct device *dev,
236 struct device_attribute *attr,
237 const char *buf,
238 size_t len)
239{
240 struct iio_dev *indio_dev = dev_get_drvdata(dev);
241 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
242 unsigned long val;
243 int ret;
244
245 if (!indio_dev->info->write_event_value)
246 return -EINVAL;
247
248 ret = strict_strtoul(buf, 10, &val);
249 if (ret)
250 return ret;
251
252 ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
253 val);
254 if (ret < 0)
255 return ret;
256
257 return len;
258}
259
260static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
261 struct iio_chan_spec const *chan)
262{
263 int ret = 0, i, attrcount = 0;
264 u64 mask = 0;
265 char *postfix;
266 if (!chan->event_mask)
267 return 0;
268
269 for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
270 postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
271 iio_ev_type_text[i/IIO_EV_DIR_MAX],
272 iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
273 if (postfix == NULL) {
274 ret = -ENOMEM;
275 goto error_ret;
276 }
277 if (chan->modified)
278 mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
279 i/IIO_EV_DIR_MAX,
280 i%IIO_EV_DIR_MAX);
281 else if (chan->differential)
282 mask = IIO_EVENT_CODE(chan->type,
283 0, 0,
284 i%IIO_EV_DIR_MAX,
285 i/IIO_EV_DIR_MAX,
286 0,
287 chan->channel,
288 chan->channel2);
289 else
290 mask = IIO_UNMOD_EVENT_CODE(chan->type,
291 chan->channel,
292 i/IIO_EV_DIR_MAX,
293 i%IIO_EV_DIR_MAX);
294
295 ret = __iio_add_chan_devattr(postfix,
296 chan,
297 &iio_ev_state_show,
298 iio_ev_state_store,
299 mask,
300 0,
301 &indio_dev->dev,
302 &indio_dev->event_interface->
303 dev_attr_list);
304 kfree(postfix);
305 if (ret)
306 goto error_ret;
307 attrcount++;
308 postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
309 iio_ev_type_text[i/IIO_EV_DIR_MAX],
310 iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
311 if (postfix == NULL) {
312 ret = -ENOMEM;
313 goto error_ret;
314 }
315 ret = __iio_add_chan_devattr(postfix, chan,
316 iio_ev_value_show,
317 iio_ev_value_store,
318 mask,
319 0,
320 &indio_dev->dev,
321 &indio_dev->event_interface->
322 dev_attr_list);
323 kfree(postfix);
324 if (ret)
325 goto error_ret;
326 attrcount++;
327 }
328 ret = attrcount;
329error_ret:
330 return ret;
331}
332
333static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
334{
335 struct iio_dev_attr *p, *n;
336 list_for_each_entry_safe(p, n,
337 &indio_dev->event_interface->
338 dev_attr_list, l) {
339 kfree(p->dev_attr.attr.name);
340 kfree(p);
341 }
342}
343
344static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
345{
346 int j, ret, attrcount = 0;
347
348 INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
349 /* Dynamically created from the channels array */
350 for (j = 0; j < indio_dev->num_channels; j++) {
351 ret = iio_device_add_event_sysfs(indio_dev,
352 &indio_dev->channels[j]);
353 if (ret < 0)
354 goto error_clear_attrs;
355 attrcount += ret;
356 }
357 return attrcount;
358
359error_clear_attrs:
360 __iio_remove_event_config_attrs(indio_dev);
361
362 return ret;
363}
364
365static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
366{
367 int j;
368
369 for (j = 0; j < indio_dev->num_channels; j++)
370 if (indio_dev->channels[j].event_mask != 0)
371 return true;
372 return false;
373}
374
375static void iio_setup_ev_int(struct iio_event_interface *ev_int)
376{
377 INIT_KFIFO(ev_int->det_events);
378 init_waitqueue_head(&ev_int->wait);
379}
380
381static const char *iio_event_group_name = "events";
382int iio_device_register_eventset(struct iio_dev *indio_dev)
383{
384 struct iio_dev_attr *p;
385 int ret = 0, attrcount_orig = 0, attrcount, attrn;
386 struct attribute **attr;
387
388 if (!(indio_dev->info->event_attrs ||
389 iio_check_for_dynamic_events(indio_dev)))
390 return 0;
391
392 indio_dev->event_interface =
393 kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
394 if (indio_dev->event_interface == NULL) {
395 ret = -ENOMEM;
396 goto error_ret;
397 }
398
399 iio_setup_ev_int(indio_dev->event_interface);
400 if (indio_dev->info->event_attrs != NULL) {
401 attr = indio_dev->info->event_attrs->attrs;
402 while (*attr++ != NULL)
403 attrcount_orig++;
404 }
405 attrcount = attrcount_orig;
406 if (indio_dev->channels) {
407 ret = __iio_add_event_config_attrs(indio_dev);
408 if (ret < 0)
409 goto error_free_setup_event_lines;
410 attrcount += ret;
411 }
412
413 indio_dev->event_interface->group.name = iio_event_group_name;
414 indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
415 sizeof(indio_dev->event_interface->group.attrs[0]),
416 GFP_KERNEL);
417 if (indio_dev->event_interface->group.attrs == NULL) {
418 ret = -ENOMEM;
419 goto error_free_setup_event_lines;
420 }
421 if (indio_dev->info->event_attrs)
422 memcpy(indio_dev->event_interface->group.attrs,
423 indio_dev->info->event_attrs->attrs,
424 sizeof(indio_dev->event_interface->group.attrs[0])
425 *attrcount_orig);
426 attrn = attrcount_orig;
427 /* Add all elements from the list. */
428 list_for_each_entry(p,
429 &indio_dev->event_interface->dev_attr_list,
430 l)
431 indio_dev->event_interface->group.attrs[attrn++] =
432 &p->dev_attr.attr;
433 indio_dev->groups[indio_dev->groupcounter++] =
434 &indio_dev->event_interface->group;
435
436 return 0;
437
438error_free_setup_event_lines:
439 __iio_remove_event_config_attrs(indio_dev);
440 kfree(indio_dev->event_interface);
441error_ret:
442
443 return ret;
444}
445
446void iio_device_unregister_eventset(struct iio_dev *indio_dev)
447{
448 if (indio_dev->event_interface == NULL)
449 return;
450 __iio_remove_event_config_attrs(indio_dev);
451 kfree(indio_dev->event_interface->group.attrs);
452 kfree(indio_dev->event_interface);
453}
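
Drivers feed this kfifo-backed interface through iio_push_event(), using the same event-code macros that generate the sysfs names above. A sketch of a hypothetical threshold interrupt handler for an unmodified voltage channel 0:

/*
 * Sketch: a hypothetical threshold interrupt handler feeding the
 * kfifo-backed event interface. Macro usage mirrors
 * iio_device_add_event_sysfs() above.
 */
#include <linux/interrupt.h>
#include "iio.h"
#include "events.h"

static irqreturn_t example_event_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;

	iio_push_event(indio_dev,
		       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
					    IIO_EV_TYPE_THRESH,
					    IIO_EV_DIR_RISING),
		       iio_get_time_ns());

	return IRQ_HANDLED;
}
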
diff --git a/drivers/staging/iio/inkern.c b/drivers/staging/iio/inkern.c
new file mode 100644
index 000000000000..de2c8ea64965
--- /dev/null
+++ b/drivers/staging/iio/inkern.c
@@ -0,0 +1,292 @@
1/* The industrial I/O core in kernel channel mapping
2 *
3 * Copyright (c) 2011 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9#include <linux/err.h>
10#include <linux/export.h>
11#include <linux/slab.h>
12#include <linux/mutex.h>
13
14#include "iio.h"
15#include "iio_core.h"
16#include "machine.h"
17#include "driver.h"
18#include "consumer.h"
19
20struct iio_map_internal {
21 struct iio_dev *indio_dev;
22 struct iio_map *map;
23 struct list_head l;
24};
25
26static LIST_HEAD(iio_map_list);
27static DEFINE_MUTEX(iio_map_list_lock);
28
29int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
30{
31 int i = 0, ret = 0;
32 struct iio_map_internal *mapi;
33
34 if (maps == NULL)
35 return 0;
36
37 mutex_lock(&iio_map_list_lock);
38 while (maps[i].consumer_dev_name != NULL) {
39 mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
40 if (mapi == NULL) {
41 ret = -ENOMEM;
42 goto error_ret;
43 }
44 mapi->map = &maps[i];
45 mapi->indio_dev = indio_dev;
46 list_add(&mapi->l, &iio_map_list);
47 i++;
48 }
49error_ret:
50 mutex_unlock(&iio_map_list_lock);
51
52 return ret;
53}
54EXPORT_SYMBOL_GPL(iio_map_array_register);
55
56
57/* Assumes the exact same array (e.g. memory locations)
58 * is used at unregistration as was used at registration, rather than
59 * doing more complex checking of contents.
60 */
61int iio_map_array_unregister(struct iio_dev *indio_dev,
62 struct iio_map *maps)
63{
64 int i = 0, ret = 0;
65 bool found_it;
66 struct iio_map_internal *mapi;
67
68 if (maps == NULL)
69 return 0;
70
71 mutex_lock(&iio_map_list_lock);
72 while (maps[i].consumer_dev_name != NULL) {
73 found_it = false;
74 list_for_each_entry(mapi, &iio_map_list, l)
75 if (&maps[i] == mapi->map) {
76 list_del(&mapi->l);
77 kfree(mapi);
78 found_it = true;
79 break;
80 }
81 if (found_it == false) {
82 ret = -ENODEV;
83 goto error_ret;
84 }
85 }
86error_ret:
87 mutex_unlock(&iio_map_list_lock);
88
89 return ret;
90}
91EXPORT_SYMBOL_GPL(iio_map_array_unregister);
92
93static const struct iio_chan_spec
94*iio_chan_spec_from_name(const struct iio_dev *indio_dev,
95 const char *name)
96{
97 int i;
98 const struct iio_chan_spec *chan = NULL;
99
100 for (i = 0; i < indio_dev->num_channels; i++)
101 if (indio_dev->channels[i].datasheet_name &&
102 strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
103 chan = &indio_dev->channels[i];
104 break;
105 }
106 return chan;
107}
108
109
110struct iio_channel *iio_st_channel_get(const char *name,
111 const char *channel_name)
112{
113 struct iio_map_internal *c_i = NULL, *c = NULL;
114 struct iio_channel *channel;
115
116 if (name == NULL && channel_name == NULL)
117 return ERR_PTR(-ENODEV);
118
119 /* first find a matching entry in the channel map */
120 mutex_lock(&iio_map_list_lock);
121 list_for_each_entry(c_i, &iio_map_list, l) {
122 if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
123 (channel_name &&
124 strcmp(channel_name, c_i->map->consumer_channel) != 0))
125 continue;
126 c = c_i;
127 get_device(&c->indio_dev->dev);
128 break;
129 }
130 mutex_unlock(&iio_map_list_lock);
131 if (c == NULL)
132 return ERR_PTR(-ENODEV);
133
134 channel = kmalloc(sizeof(*channel), GFP_KERNEL);
135 if (channel == NULL)
136 return ERR_PTR(-ENOMEM);
137
138 channel->indio_dev = c->indio_dev;
139
140 if (c->map->adc_channel_label)
141 channel->channel =
142 iio_chan_spec_from_name(channel->indio_dev,
143 c->map->adc_channel_label);
144
145 return channel;
146}
147EXPORT_SYMBOL_GPL(iio_st_channel_get);
148
149void iio_st_channel_release(struct iio_channel *channel)
150{
151 put_device(&channel->indio_dev->dev);
152 kfree(channel);
153}
154EXPORT_SYMBOL_GPL(iio_st_channel_release);
155
156struct iio_channel *iio_st_channel_get_all(const char *name)
157{
158 struct iio_channel *chans;
159 struct iio_map_internal *c = NULL;
160 int nummaps = 0;
161 int mapind = 0;
162 int i, ret;
163
164 if (name == NULL)
165 return ERR_PTR(-EINVAL);
166
167 mutex_lock(&iio_map_list_lock);
168 /* first count the matching maps */
169 list_for_each_entry(c, &iio_map_list, l)
170 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
171 continue;
172 else
173 nummaps++;
174
175 if (nummaps == 0) {
176 ret = -ENODEV;
177 goto error_ret;
178 }
179
180 /* NULL terminated array to save passing size */
181 chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
182 if (chans == NULL) {
183 ret = -ENOMEM;
184 goto error_ret;
185 }
186
187 /* for each map fill in the chans element */
188 list_for_each_entry(c, &iio_map_list, l) {
189 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
190 continue;
191 chans[mapind].indio_dev = c->indio_dev;
192 chans[mapind].channel =
193 iio_chan_spec_from_name(chans[mapind].indio_dev,
194 c->map->adc_channel_label);
195 if (chans[mapind].channel == NULL) {
196 ret = -EINVAL;
197 put_device(&chans[mapind].indio_dev->dev);
198 goto error_free_chans;
199 }
200 get_device(&chans[mapind].indio_dev->dev);
201 mapind++;
202 }
203 mutex_unlock(&iio_map_list_lock);
204 if (mapind == 0) {
205 ret = -ENODEV;
206 goto error_free_chans;
207 }
208 return chans;
209
210error_free_chans:
211 for (i = 0; i < nummaps; i++)
212 if (chans[i].indio_dev)
213 put_device(&chans[i].indio_dev->dev);
214 kfree(chans);
215error_ret:
216 mutex_unlock(&iio_map_list_lock);
217
218 return ERR_PTR(ret);
219}
220EXPORT_SYMBOL_GPL(iio_st_channel_get_all);
221
222void iio_st_channel_release_all(struct iio_channel *channels)
223{
224 struct iio_channel *chan = &channels[0];
225
226 while (chan->indio_dev) {
227 put_device(&chan->indio_dev->dev);
228 chan++;
229 }
230 kfree(channels);
231}
232EXPORT_SYMBOL_GPL(iio_st_channel_release_all);
233
234int iio_st_read_channel_raw(struct iio_channel *chan, int *val)
235{
236 int val2, ret;
237
238 mutex_lock(&chan->indio_dev->info_exist_lock);
239 if (chan->indio_dev->info == NULL) {
240 ret = -ENODEV;
241 goto err_unlock;
242 }
243
244 ret = chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
245 val, &val2, 0);
246err_unlock:
247 mutex_unlock(&chan->indio_dev->info_exist_lock);
248
249 return ret;
250}
251EXPORT_SYMBOL_GPL(iio_st_read_channel_raw);
252
253int iio_st_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
254{
255 int ret;
256
257 mutex_lock(&chan->indio_dev->info_exist_lock);
258 if (chan->indio_dev->info == NULL) {
259 ret = -ENODEV;
260 goto err_unlock;
261 }
262
263 ret = chan->indio_dev->info->read_raw(chan->indio_dev,
264 chan->channel,
265 val, val2,
266 IIO_CHAN_INFO_SCALE);
267err_unlock:
268 mutex_unlock(&chan->indio_dev->info_exist_lock);
269
270 return ret;
271}
272EXPORT_SYMBOL_GPL(iio_st_read_channel_scale);
273
274int iio_st_get_channel_type(struct iio_channel *chan,
275 enum iio_chan_type *type)
276{
277 int ret = 0;
278 /* Need to verify underlying driver has not gone away */
279
280 mutex_lock(&chan->indio_dev->info_exist_lock);
281 if (chan->indio_dev->info == NULL) {
282 ret = -ENODEV;
283 goto err_unlock;
284 }
285
286 *type = chan->channel->type;
287err_unlock:
288 mutex_unlock(&chan->indio_dev->info_exist_lock);
289
290 return ret;
291}
292EXPORT_SYMBOL_GPL(iio_st_get_channel_type);
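
A consumer of this API gets a channel by map name, reads it, and releases it; the info_exist_lock taken in the read helpers above is what keeps such a read safe if the producing driver is unregistered in between. A sketch, with hypothetical map names:

/*
 * Sketch: a one-shot consumer read through the new interface.
 * "example-consumer" and "vcc" are hypothetical map names.
 */
#include <linux/err.h>
#include "consumer.h"

static int example_read_vcc(int *raw)
{
	struct iio_channel *chan;
	int ret;

	chan = iio_st_channel_get("example-consumer", "vcc");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_st_read_channel_raw(chan, raw);

	iio_st_channel_release(chan);
	return ret;
}
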
diff --git a/drivers/staging/iio/kfifo_buf.c b/drivers/staging/iio/kfifo_buf.c
index e1e9c06cde4a..9f3bd59c0e72 100644
--- a/drivers/staging/iio/kfifo_buf.c
+++ b/drivers/staging/iio/kfifo_buf.c
@@ -59,21 +59,6 @@ static struct attribute_group iio_kfifo_attribute_group = {
59 .name = "buffer", 59 .name = "buffer",
60}; 60};
61 61
62struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
63{
64 struct iio_kfifo *kf;
65
66 kf = kzalloc(sizeof *kf, GFP_KERNEL);
67 if (!kf)
68 return NULL;
69 kf->update_needed = true;
70 iio_buffer_init(&kf->buffer);
71 kf->buffer.attrs = &iio_kfifo_attribute_group;
72
73 return &kf->buffer;
74}
75EXPORT_SYMBOL(iio_kfifo_allocate);
76
77static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r) 62static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r)
78{ 63{
79 return r->bytes_per_datum; 64 return r->bytes_per_datum;
@@ -104,12 +89,6 @@ static int iio_set_length_kfifo(struct iio_buffer *r, int length)
104 return 0; 89 return 0;
105} 90}
106 91
107void iio_kfifo_free(struct iio_buffer *r)
108{
109 kfree(iio_to_kfifo(r));
110}
111EXPORT_SYMBOL(iio_kfifo_free);
112
113static int iio_store_to_kfifo(struct iio_buffer *r, 92static int iio_store_to_kfifo(struct iio_buffer *r,
114 u8 *data, 93 u8 *data,
115 s64 timestamp) 94 s64 timestamp)
@@ -137,7 +116,7 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
137 return copied; 116 return copied;
138} 117}
139 118
140const struct iio_buffer_access_funcs kfifo_access_funcs = { 119static const struct iio_buffer_access_funcs kfifo_access_funcs = {
141 .store_to = &iio_store_to_kfifo, 120 .store_to = &iio_store_to_kfifo,
142 .read_first_n = &iio_read_first_n_kfifo, 121 .read_first_n = &iio_read_first_n_kfifo,
143 .request_update = &iio_request_update_kfifo, 122 .request_update = &iio_request_update_kfifo,
@@ -146,6 +125,27 @@ const struct iio_buffer_access_funcs kfifo_access_funcs = {
146 .get_length = &iio_get_length_kfifo, 125 .get_length = &iio_get_length_kfifo,
147 .set_length = &iio_set_length_kfifo, 126 .set_length = &iio_set_length_kfifo,
148}; 127};
149EXPORT_SYMBOL(kfifo_access_funcs); 128
129struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
130{
131 struct iio_kfifo *kf;
132
133 kf = kzalloc(sizeof *kf, GFP_KERNEL);
134 if (!kf)
135 return NULL;
136 kf->update_needed = true;
137 iio_buffer_init(&kf->buffer);
138 kf->buffer.attrs = &iio_kfifo_attribute_group;
139 kf->buffer.access = &kfifo_access_funcs;
140
141 return &kf->buffer;
142}
143EXPORT_SYMBOL(iio_kfifo_allocate);
144
145void iio_kfifo_free(struct iio_buffer *r)
146{
147 kfree(iio_to_kfifo(r));
148}
149EXPORT_SYMBOL(iio_kfifo_free);
150 150
151MODULE_LICENSE("GPL"); 151MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/kfifo_buf.h b/drivers/staging/iio/kfifo_buf.h
index cc2bd9a1ccfe..9f7da016af04 100644
--- a/drivers/staging/iio/kfifo_buf.h
+++ b/drivers/staging/iio/kfifo_buf.h
@@ -3,8 +3,6 @@
3#include "iio.h" 3#include "iio.h"
4#include "buffer.h" 4#include "buffer.h"
5 5
6extern const struct iio_buffer_access_funcs kfifo_access_funcs;
7
8struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev); 6struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev);
9void iio_kfifo_free(struct iio_buffer *r); 7void iio_kfifo_free(struct iio_buffer *r);
10 8
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 849d6a564afa..38ec52b65dfa 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -592,11 +592,18 @@ static const struct i2c_device_id isl29018_id[] = {
592 592
593MODULE_DEVICE_TABLE(i2c, isl29018_id); 593MODULE_DEVICE_TABLE(i2c, isl29018_id);
594 594
595static const struct of_device_id isl29018_of_match[] = {
596 { .compatible = "invn,isl29018", },
597 { },
598};
599MODULE_DEVICE_TABLE(of, isl29018_of_match);
600
595static struct i2c_driver isl29018_driver = { 601static struct i2c_driver isl29018_driver = {
596 .class = I2C_CLASS_HWMON, 602 .class = I2C_CLASS_HWMON,
597 .driver = { 603 .driver = {
598 .name = "isl29018", 604 .name = "isl29018",
599 .owner = THIS_MODULE, 605 .owner = THIS_MODULE,
606 .of_match_table = isl29018_of_match,
600 }, 607 },
601 .probe = isl29018_probe, 608 .probe = isl29018_probe,
602 .remove = __devexit_p(isl29018_remove), 609 .remove = __devexit_p(isl29018_remove),
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/staging/iio/light/tsl2563.c
index ffca85e81ef5..546c95a4ea9e 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/staging/iio/light/tsl2563.c
@@ -118,7 +118,7 @@ struct tsl2563_chip {
118 struct delayed_work poweroff_work; 118 struct delayed_work poweroff_work;
119 119
120 /* Remember state for suspend and resume functions */ 120 /* Remember state for suspend and resume functions */
121 pm_message_t state; 121 bool suspended;
122 122
123 struct tsl2563_gainlevel_coeff const *gainlevel; 123 struct tsl2563_gainlevel_coeff const *gainlevel;
124 124
@@ -315,7 +315,7 @@ static int tsl2563_get_adc(struct tsl2563_chip *chip)
315 int retry = 1; 315 int retry = 1;
316 int ret = 0; 316 int ret = 0;
317 317
318 if (chip->state.event != PM_EVENT_ON) 318 if (chip->suspended)
319 goto out; 319 goto out;
320 320
321 if (!chip->int_enabled) { 321 if (!chip->int_enabled) {
@@ -708,7 +708,6 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
708 struct tsl2563_chip *chip; 708 struct tsl2563_chip *chip;
709 struct tsl2563_platform_data *pdata = client->dev.platform_data; 709 struct tsl2563_platform_data *pdata = client->dev.platform_data;
710 int err = 0; 710 int err = 0;
711 int ret;
712 u8 id = 0; 711 u8 id = 0;
713 712
714 indio_dev = iio_allocate_device(sizeof(*chip)); 713 indio_dev = iio_allocate_device(sizeof(*chip));
@@ -722,13 +721,15 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
722 721
723 err = tsl2563_detect(chip); 722 err = tsl2563_detect(chip);
724 if (err) { 723 if (err) {
725 dev_err(&client->dev, "device not found, error %d\n", -err); 724 dev_err(&client->dev, "detect error %d\n", -err);
726 goto fail1; 725 goto fail1;
727 } 726 }
728 727
729 err = tsl2563_read_id(chip, &id); 728 err = tsl2563_read_id(chip, &id);
730 if (err) 729 if (err) {
730 dev_err(&client->dev, "read id error %d\n", -err);
731 goto fail1; 731 goto fail1;
732 }
732 733
733 mutex_init(&chip->lock); 734 mutex_init(&chip->lock);
734 735
@@ -751,40 +752,52 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
751 indio_dev->num_channels = ARRAY_SIZE(tsl2563_channels); 752 indio_dev->num_channels = ARRAY_SIZE(tsl2563_channels);
752 indio_dev->dev.parent = &client->dev; 753 indio_dev->dev.parent = &client->dev;
753 indio_dev->modes = INDIO_DIRECT_MODE; 754 indio_dev->modes = INDIO_DIRECT_MODE;
755
754 if (client->irq) 756 if (client->irq)
755 indio_dev->info = &tsl2563_info; 757 indio_dev->info = &tsl2563_info;
756 else 758 else
757 indio_dev->info = &tsl2563_info_no_irq; 759 indio_dev->info = &tsl2563_info_no_irq;
760
758 if (client->irq) { 761 if (client->irq) {
759 ret = request_threaded_irq(client->irq, 762 err = request_threaded_irq(client->irq,
760 NULL, 763 NULL,
761 &tsl2563_event_handler, 764 &tsl2563_event_handler,
762 IRQF_TRIGGER_RISING | IRQF_ONESHOT, 765 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
763 "tsl2563_event", 766 "tsl2563_event",
764 indio_dev); 767 indio_dev);
765 if (ret) 768 if (err) {
766 goto fail2; 769 dev_err(&client->dev, "irq request error %d\n", -err);
770 goto fail1;
771 }
767 } 772 }
773
768 err = tsl2563_configure(chip); 774 err = tsl2563_configure(chip);
769 if (err) 775 if (err) {
770 goto fail3; 776 dev_err(&client->dev, "configure error %d\n", -err);
777 goto fail2;
778 }
771 779
772 INIT_DELAYED_WORK(&chip->poweroff_work, tsl2563_poweroff_work); 780 INIT_DELAYED_WORK(&chip->poweroff_work, tsl2563_poweroff_work);
781
773 /* The interrupt cannot yet be enabled so this is fine without lock */ 782 /* The interrupt cannot yet be enabled so this is fine without lock */
774 schedule_delayed_work(&chip->poweroff_work, 5 * HZ); 783 schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
775 784
776 ret = iio_device_register(indio_dev); 785 err = iio_device_register(indio_dev);
777 if (ret) 786 if (err) {
787 dev_err(&client->dev, "iio registration error %d\n", -err);
778 goto fail3; 788 goto fail3;
789 }
779 790
780 return 0; 791 return 0;
792
781fail3: 793fail3:
794 cancel_delayed_work(&chip->poweroff_work);
795 flush_scheduled_work();
796fail2:
782 if (client->irq) 797 if (client->irq)
783 free_irq(client->irq, indio_dev); 798 free_irq(client->irq, indio_dev);
784fail2:
785 iio_free_device(indio_dev);
786fail1: 799fail1:
787 kfree(chip); 800 iio_free_device(indio_dev);
788 return err; 801 return err;
789} 802}
790 803
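
The reordered labels above restore the usual probe unwind idiom: each failure label undoes exactly the steps completed before the jump, in reverse order of acquisition, so fail3 now cancels the delayed work before fail2 frees the IRQ, and the iio device is freed last. A minimal self-contained sketch of the idiom; the foo_* helpers are hypothetical stand-ins for the tsl2563 calls:

/* Hypothetical helpers, stubbed so the sketch compiles; they stand in
 * for request_threaded_irq(), tsl2563_configure()/schedule_delayed_work(),
 * iio_device_register() and their teardown counterparts. */
static int foo_request_irq(void)   { return 0; }
static int foo_start_work(void)    { return 0; }
static int foo_register(void)      { return 0; }
static void foo_stop_work(void)    { }
static void foo_free_irq(void)     { }
static void foo_free_device(void)  { }

static int foo_probe(void)
{
	int err;

	err = foo_request_irq();		/* step 1 */
	if (err)
		goto fail1;

	err = foo_start_work();			/* step 2 */
	if (err)
		goto fail2;

	err = foo_register();			/* step 3 */
	if (err)
		goto fail3;

	return 0;

fail3:
	foo_stop_work();	/* undo step 2 */
fail2:
	foo_free_irq();		/* undo step 1 */
fail1:
	foo_free_device();	/* undo the allocation preceding step 1 */
	return err;
}
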
@@ -810,9 +823,10 @@ static int tsl2563_remove(struct i2c_client *client)
810 return 0; 823 return 0;
811} 824}
812 825
813static int tsl2563_suspend(struct i2c_client *client, pm_message_t state) 826#ifdef CONFIG_PM_SLEEP
827static int tsl2563_suspend(struct device *dev)
814{ 828{
815 struct tsl2563_chip *chip = i2c_get_clientdata(client); 829 struct tsl2563_chip *chip = i2c_get_clientdata(to_i2c_client(dev));
816 int ret; 830 int ret;
817 831
818 mutex_lock(&chip->lock); 832 mutex_lock(&chip->lock);
@@ -821,16 +835,16 @@ static int tsl2563_suspend(struct i2c_client *client, pm_message_t state)
821 if (ret) 835 if (ret)
822 goto out; 836 goto out;
823 837
824 chip->state = state; 838 chip->suspended = true;
825 839
826out: 840out:
827 mutex_unlock(&chip->lock); 841 mutex_unlock(&chip->lock);
828 return ret; 842 return ret;
829} 843}
830 844
831static int tsl2563_resume(struct i2c_client *client) 845static int tsl2563_resume(struct device *dev)
832{ 846{
833 struct tsl2563_chip *chip = i2c_get_clientdata(client); 847 struct tsl2563_chip *chip = i2c_get_clientdata(to_i2c_client(dev));
834 int ret; 848 int ret;
835 849
836 mutex_lock(&chip->lock); 850 mutex_lock(&chip->lock);
@@ -843,13 +857,19 @@ static int tsl2563_resume(struct i2c_client *client)
843 if (ret) 857 if (ret)
844 goto out; 858 goto out;
845 859
846 chip->state.event = PM_EVENT_ON; 860 chip->suspended = false;
847 861
848out: 862out:
849 mutex_unlock(&chip->lock); 863 mutex_unlock(&chip->lock);
850 return ret; 864 return ret;
851} 865}
852 866
867static SIMPLE_DEV_PM_OPS(tsl2563_pm_ops, tsl2563_suspend, tsl2563_resume);
868#define TSL2563_PM_OPS (&tsl2563_pm_ops)
869#else
870#define TSL2563_PM_OPS NULL
871#endif
872
853static const struct i2c_device_id tsl2563_id[] = { 873static const struct i2c_device_id tsl2563_id[] = {
854 { "tsl2560", 0 }, 874 { "tsl2560", 0 },
855 { "tsl2561", 1 }, 875 { "tsl2561", 1 },
@@ -862,9 +882,8 @@ MODULE_DEVICE_TABLE(i2c, tsl2563_id);
862static struct i2c_driver tsl2563_i2c_driver = { 882static struct i2c_driver tsl2563_i2c_driver = {
863 .driver = { 883 .driver = {
864 .name = "tsl2563", 884 .name = "tsl2563",
885 .pm = TSL2563_PM_OPS,
865 }, 886 },
866 .suspend = tsl2563_suspend,
867 .resume = tsl2563_resume,
868 .probe = tsl2563_probe, 887 .probe = tsl2563_probe,
869 .remove = __devexit_p(tsl2563_remove), 888 .remove = __devexit_p(tsl2563_remove),
870 .id_table = tsl2563_id, 889 .id_table = tsl2563_id,
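
The tsl2563 hunks above follow the standard dev_pm_ops conversion of this era: the legacy i2c_driver .suspend/.resume pair, which received an i2c_client and a pm_message_t, becomes a pair of struct device callbacks published through SIMPLE_DEV_PM_OPS, and the #ifdef keeps the callbacks from being built at all when CONFIG_PM_SLEEP is configured out. A minimal sketch of the same pattern for a hypothetical driver "foo" (all names illustrative):

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* the i2c_client is recovered from the generic device */
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "entering low-power state\n");
	return 0;
}

static int foo_resume(struct device *dev)
{
	dev_dbg(dev, "restoring previous operating mode\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL
#endif

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= FOO_PM_OPS,	/* replaces legacy .suspend/.resume */
	},
	/* .probe, .remove, .id_table as before */
};

SIMPLE_DEV_PM_OPS points the system suspend/resume, freeze/thaw and poweroff/restore hooks at the same pair of callbacks, which is why a single macro invocation replaces the two legacy fields.
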
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index 5b6455a238d8..8671d98e0448 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -113,7 +113,7 @@ struct taos_lux {
113 113
114/* This structure is intentionally large to accommodate updates via sysfs. */ 114/* This structure is intentionally large to accommodate updates via sysfs. */
115/* Sized to 11 = max 10 segments + 1 termination segment */ 115/* Sized to 11 = max 10 segments + 1 termination segment */
116/* Assumption is is one and only one type of glass used */ 116/* Assumption is one and only one type of glass used */
117static struct taos_lux taos_device_lux[11] = { 117static struct taos_lux taos_device_lux[11] = {
118 { 9830, 8520, 15729 }, 118 { 9830, 8520, 15729 },
119 { 12452, 10807, 23344 }, 119 { 12452, 10807, 23344 },
@@ -884,9 +884,10 @@ fail2:
884 return ret; 884 return ret;
885} 885}
886 886
887static int taos_suspend(struct i2c_client *client, pm_message_t state) 887#ifdef CONFIG_PM_SLEEP
888static int taos_suspend(struct device *dev)
888{ 889{
889 struct iio_dev *indio_dev = i2c_get_clientdata(client); 890 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
890 struct tsl2583_chip *chip = iio_priv(indio_dev); 891 struct tsl2583_chip *chip = iio_priv(indio_dev);
891 int ret = 0; 892 int ret = 0;
892 893
@@ -901,9 +902,9 @@ static int taos_suspend(struct i2c_client *client, pm_message_t state)
901 return ret; 902 return ret;
902} 903}
903 904
904static int taos_resume(struct i2c_client *client) 905static int taos_resume(struct device *dev)
905{ 906{
906 struct iio_dev *indio_dev = i2c_get_clientdata(client); 907 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
907 struct tsl2583_chip *chip = iio_priv(indio_dev); 908 struct tsl2583_chip *chip = iio_priv(indio_dev);
908 int ret = 0; 909 int ret = 0;
909 910
@@ -916,6 +917,11 @@ static int taos_resume(struct i2c_client *client)
916 return ret; 917 return ret;
917} 918}
918 919
920static SIMPLE_DEV_PM_OPS(taos_pm_ops, taos_suspend, taos_resume);
921#define TAOS_PM_OPS (&taos_pm_ops)
922#else
923#define TAOS_PM_OPS NULL
924#endif
919 925
920static int __devexit taos_remove(struct i2c_client *client) 926static int __devexit taos_remove(struct i2c_client *client)
921{ 927{
@@ -937,10 +943,9 @@ MODULE_DEVICE_TABLE(i2c, taos_idtable);
937static struct i2c_driver taos_driver = { 943static struct i2c_driver taos_driver = {
938 .driver = { 944 .driver = {
939 .name = "tsl2583", 945 .name = "tsl2583",
946 .pm = TAOS_PM_OPS,
940 }, 947 },
941 .id_table = taos_idtable, 948 .id_table = taos_idtable,
942 .suspend = taos_suspend,
943 .resume = taos_resume,
944 .probe = taos_probe, 949 .probe = taos_probe,
945 .remove = __devexit_p(taos_remove), 950 .remove = __devexit_p(taos_remove),
946}; 951};
diff --git a/drivers/staging/iio/machine.h b/drivers/staging/iio/machine.h
new file mode 100644
index 000000000000..0b1f19bfdc44
--- /dev/null
+++ b/drivers/staging/iio/machine.h
@@ -0,0 +1,24 @@
1/*
 2 * Industrial I/O in-kernel access map definitions for board files.
3 *
4 * Copyright (c) 2011 Jonathan Cameron
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 */
10
11/**
12 * struct iio_map - description of link between consumer and device channels
13 * @adc_channel_label: Label used to identify the channel on the provider.
14 * This is matched against the datasheet_name element
15 * of struct iio_chan_spec.
16 * @consumer_dev_name: Name to uniquely identify the consumer device.
 17 * @consumer_channel:	Unique name used to identify the channel on the
18 * consumer side.
19 */
20struct iio_map {
21 const char *adc_channel_label;
22 const char *consumer_dev_name;
23 const char *consumer_channel;
24};
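
The new header is consumed by board files rather than drivers: an array of these maps, terminated by an empty entry, tells the IIO core which provider channel (matched by datasheet_name) feeds which named consumer channel. A hedged sketch of such a board-file table; the device and channel names are invented for illustration:

static struct iio_map board_adc_maps[] = {
	{
		/* matched against iio_chan_spec.datasheet_name */
		.adc_channel_label = "AIN0",
		.consumer_dev_name = "example-battery",
		.consumer_channel  = "voltage",
	},
	{ },	/* terminating entry */
};

The terminated array is then handed to the IIO core's in-kernel map registration when the provider device is set up.
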
diff --git a/drivers/staging/iio/magnetometer/ak8975.c b/drivers/staging/iio/magnetometer/ak8975.c
index 3158f12cb051..d5ddac3d8831 100644
--- a/drivers/staging/iio/magnetometer/ak8975.c
+++ b/drivers/staging/iio/magnetometer/ak8975.c
@@ -564,9 +564,17 @@ static const struct i2c_device_id ak8975_id[] = {
564 564
565MODULE_DEVICE_TABLE(i2c, ak8975_id); 565MODULE_DEVICE_TABLE(i2c, ak8975_id);
566 566
567static const struct of_device_id ak8975_of_match[] = {
568 { .compatible = "asahi-kasei,ak8975", },
569 { .compatible = "ak8975", },
570 { }
571};
572MODULE_DEVICE_TABLE(of, ak8975_of_match);
573
567static struct i2c_driver ak8975_driver = { 574static struct i2c_driver ak8975_driver = {
568 .driver = { 575 .driver = {
569 .name = "ak8975", 576 .name = "ak8975",
577 .of_match_table = ak8975_of_match,
570 }, 578 },
571 .probe = ak8975_probe, 579 .probe = ak8975_probe,
572 .remove = __devexit_p(ak8975_remove), 580 .remove = __devexit_p(ak8975_remove),
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index f2e85a9cf196..91dd3da70cb4 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -86,7 +86,7 @@
86#define RATE_NOT_USED 0x07 86#define RATE_NOT_USED 0x07
87 87
88/* 88/*
89 * Device Configutration 89 * Device Configuration
90 */ 90 */
91#define CONF_NORMAL 0x00 91#define CONF_NORMAL 0x00
92#define CONF_POSITIVE_BIAS 0x01 92#define CONF_POSITIVE_BIAS 0x01
@@ -142,7 +142,7 @@ static s32 hmc5843_configure(struct i2c_client *client,
142 (operating_mode & 0x03)); 142 (operating_mode & 0x03));
143} 143}
144 144
145/* Return the measurement value from the specified channel */ 145/* Return the measurement value from the specified channel */
146static int hmc5843_read_measurement(struct iio_dev *indio_dev, 146static int hmc5843_read_measurement(struct iio_dev *indio_dev,
147 int address, 147 int address,
148 int *val) 148 int *val)
@@ -169,7 +169,7 @@ static int hmc5843_read_measurement(struct iio_dev *indio_dev,
169/* 169/*
170 * From the datasheet 170 * From the datasheet
171 * 0 - Continuous-Conversion Mode: In continuous-conversion mode, the 171 * 0 - Continuous-Conversion Mode: In continuous-conversion mode, the
172 * device continuously performs conversions an places the result in the 172 * device continuously performs conversions and places the result in the
173 * data register. 173 * data register.
174 * 174 *
175 * 1 - Single-Conversion Mode : device performs a single measurement, 175 * 1 - Single-Conversion Mode : device performs a single measurement,
@@ -588,19 +588,26 @@ static int hmc5843_remove(struct i2c_client *client)
588 return 0; 588 return 0;
589} 589}
590 590
591static int hmc5843_suspend(struct i2c_client *client, pm_message_t mesg) 591#ifdef CONFIG_PM_SLEEP
592static int hmc5843_suspend(struct device *dev)
592{ 593{
593 hmc5843_configure(client, MODE_SLEEP); 594 hmc5843_configure(to_i2c_client(dev), MODE_SLEEP);
594 return 0; 595 return 0;
595} 596}
596 597
597static int hmc5843_resume(struct i2c_client *client) 598static int hmc5843_resume(struct device *dev)
598{ 599{
599 struct hmc5843_data *data = i2c_get_clientdata(client); 600 struct hmc5843_data *data = i2c_get_clientdata(to_i2c_client(dev));
600 hmc5843_configure(client, data->operating_mode); 601 hmc5843_configure(to_i2c_client(dev), data->operating_mode);
601 return 0; 602 return 0;
602} 603}
603 604
605static SIMPLE_DEV_PM_OPS(hmc5843_pm_ops, hmc5843_suspend, hmc5843_resume);
606#define HMC5843_PM_OPS (&hmc5843_pm_ops)
607#else
608#define HMC5843_PM_OPS NULL
609#endif
610
604static const struct i2c_device_id hmc5843_id[] = { 611static const struct i2c_device_id hmc5843_id[] = {
605 { "hmc5843", 0 }, 612 { "hmc5843", 0 },
606 { } 613 { }
@@ -610,14 +617,13 @@ MODULE_DEVICE_TABLE(i2c, hmc5843_id);
610static struct i2c_driver hmc5843_driver = { 617static struct i2c_driver hmc5843_driver = {
611 .driver = { 618 .driver = {
612 .name = "hmc5843", 619 .name = "hmc5843",
620 .pm = HMC5843_PM_OPS,
613 }, 621 },
614 .id_table = hmc5843_id, 622 .id_table = hmc5843_id,
615 .probe = hmc5843_probe, 623 .probe = hmc5843_probe,
616 .remove = hmc5843_remove, 624 .remove = hmc5843_remove,
617 .detect = hmc5843_detect, 625 .detect = hmc5843_detect,
618 .address_list = normal_i2c, 626 .address_list = normal_i2c,
619 .suspend = hmc5843_suspend,
620 .resume = hmc5843_resume,
621}; 627};
622module_i2c_driver(hmc5843_driver); 628module_i2c_driver(hmc5843_driver);
623 629
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index f29f2b278fe4..c45b23bb1229 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -85,7 +85,7 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
85/** 85/**
86 * ade7758_ring_preenable() setup the parameters of the ring before enabling 86 * ade7758_ring_preenable() setup the parameters of the ring before enabling
87 * 87 *
88 * The complex nature of the setting of the nuber of bytes per datum is due 88 * The complex nature of the setting of the number of bytes per datum is due
89 * to this driver currently ensuring that the timestamp is stored at an 8 89 * to this driver currently ensuring that the timestamp is stored at an 8
90 * byte boundary. 90 * byte boundary.
91 **/ 91 **/
@@ -144,8 +144,6 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
144 return ret; 144 return ret;
145 } 145 }
146 146
147 /* Effectively select the ring buffer implementation */
148 indio_dev->buffer->access = &ring_sw_access_funcs;
149 indio_dev->setup_ops = &ade7758_ring_setup_ops; 147 indio_dev->setup_ops = &ade7758_ring_setup_ops;
150 148
151 indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time, 149 indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
diff --git a/drivers/staging/iio/meter/meter.h b/drivers/staging/iio/meter/meter.h
index 142c50d71fda..6a3db1423631 100644
--- a/drivers/staging/iio/meter/meter.h
+++ b/drivers/staging/iio/meter/meter.h
@@ -362,7 +362,7 @@
362#define IIO_EVENT_ATTR_CYCEND(_evlist, _show, _store, _mask) \ 362#define IIO_EVENT_ATTR_CYCEND(_evlist, _show, _store, _mask) \
363 IIO_EVENT_ATTR_SH(cycend, _evlist, _show, _store, _mask) 363 IIO_EVENT_ATTR_SH(cycend, _evlist, _show, _store, _mask)
364 364
365/* on the rising and falling edge of the the voltage waveform */ 365/* on the rising and falling edge of the voltage waveform */
366#define IIO_EVENT_ATTR_ZERO_CROSS(_evlist, _show, _store, _mask) \ 366#define IIO_EVENT_ATTR_ZERO_CROSS(_evlist, _show, _store, _mask) \
367 IIO_EVENT_ATTR_SH(zero_cross, _evlist, _show, _store, _mask) 367 IIO_EVENT_ATTR_SH(zero_cross, _evlist, _show, _store, _mask)
368 368
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
index 3e24ec455854..b9945ec44faa 100644
--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -147,7 +147,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
147 size_t data_available, buffer_size; 147 size_t data_available, buffer_size;
148 148
149 /* A userspace program has probably made an error if it tries to 149 /* A userspace program has probably made an error if it tries to
150 * read something that is not a whole number of bpds. 150 * read something that is not a whole number of bpds.
151 * Return an error. 151 * Return an error.
152 */ 152 */
153 if (n % ring->buf.bytes_per_datum) { 153 if (n % ring->buf.bytes_per_datum) {
@@ -229,7 +229,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
229 229
230 /* setup the next read position */ 230 /* setup the next read position */
231 /* Beware, this may fail due to concurrency fun and games. 231 /* Beware, this may fail due to concurrency fun and games.
232 * Possible that sufficient fill commands have run to push the read 232 * Possible that sufficient fill commands have run to push the read
233 * pointer past where we would be after the rip. If this occurs, leave 233 * pointer past where we would be after the rip. If this occurs, leave
234 * it be. 234 * it be.
235 */ 235 */
@@ -329,6 +329,16 @@ static struct attribute_group iio_ring_attribute_group = {
329 .name = "buffer", 329 .name = "buffer",
330}; 330};
331 331
332static const struct iio_buffer_access_funcs ring_sw_access_funcs = {
333 .store_to = &iio_store_to_sw_rb,
334 .read_first_n = &iio_read_first_n_sw_rb,
335 .request_update = &iio_request_update_sw_rb,
336 .get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
337 .set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
338 .get_length = &iio_get_length_sw_rb,
339 .set_length = &iio_set_length_sw_rb,
340};
341
332struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev) 342struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
333{ 343{
334 struct iio_buffer *buf; 344 struct iio_buffer *buf;
@@ -341,6 +351,7 @@ struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
341 buf = &ring->buf; 351 buf = &ring->buf;
342 iio_buffer_init(buf); 352 iio_buffer_init(buf);
343 buf->attrs = &iio_ring_attribute_group; 353 buf->attrs = &iio_ring_attribute_group;
354 buf->access = &ring_sw_access_funcs;
344 355
345 return buf; 356 return buf;
346} 357}
@@ -352,16 +363,5 @@ void iio_sw_rb_free(struct iio_buffer *r)
352} 363}
353EXPORT_SYMBOL(iio_sw_rb_free); 364EXPORT_SYMBOL(iio_sw_rb_free);
354 365
355const struct iio_buffer_access_funcs ring_sw_access_funcs = {
356 .store_to = &iio_store_to_sw_rb,
357 .read_first_n = &iio_read_first_n_sw_rb,
358 .request_update = &iio_request_update_sw_rb,
359 .get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
360 .set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
361 .get_length = &iio_get_length_sw_rb,
362 .set_length = &iio_set_length_sw_rb,
363};
364EXPORT_SYMBOL(ring_sw_access_funcs);
365
366MODULE_DESCRIPTION("Industrialio I/O software ring buffer"); 366MODULE_DESCRIPTION("Industrialio I/O software ring buffer");
367MODULE_LICENSE("GPL"); 367MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
index e6a6e2c40960..7556e2122367 100644
--- a/drivers/staging/iio/ring_sw.h
+++ b/drivers/staging/iio/ring_sw.h
@@ -25,11 +25,6 @@
25#define _IIO_RING_SW_H_ 25#define _IIO_RING_SW_H_
26#include "buffer.h" 26#include "buffer.h"
27 27
28/**
29 * ring_sw_access_funcs - access functions for a software ring buffer
30 **/
31extern const struct iio_buffer_access_funcs ring_sw_access_funcs;
32
33struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev); 28struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev);
34void iio_sw_rb_free(struct iio_buffer *ring); 29void iio_sw_rb_free(struct iio_buffer *ring);
35#endif /* _IIO_RING_SW_H_ */ 30#endif /* _IIO_RING_SW_H_ */
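
Taken together, the ring_sw.c and ring_sw.h hunks make the access-function table an implementation detail: iio_sw_rb_allocate() now installs it on the buffer it returns, so a driver no longer selects the ring implementation by hand. Roughly, driver-side buffer setup reduces to:

	indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
	if (!indio_dev->buffer)
		return -ENOMEM;
	/* previously also required:
	 *	indio_dev->buffer->access = &ring_sw_access_funcs;
	 * now done inside iio_sw_rb_allocate()
	 */
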
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 1cbb25dff8b5..665653d79f02 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -232,17 +232,7 @@ static struct platform_driver iio_bfin_tmr_trigger_driver = {
232 .remove = __devexit_p(iio_bfin_tmr_trigger_remove), 232 .remove = __devexit_p(iio_bfin_tmr_trigger_remove),
233}; 233};
234 234
235static int __init iio_bfin_tmr_trig_init(void) 235module_platform_driver(iio_bfin_tmr_trigger_driver);
236{
237 return platform_driver_register(&iio_bfin_tmr_trigger_driver);
238}
239module_init(iio_bfin_tmr_trig_init);
240
241static void __exit iio_bfin_tmr_trig_exit(void)
242{
243 platform_driver_unregister(&iio_bfin_tmr_trigger_driver);
244}
245module_exit(iio_bfin_tmr_trig_exit);
246 236
247MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 237MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
248MODULE_DESCRIPTION("Blackfin system timer based trigger for the iio subsystem"); 238MODULE_DESCRIPTION("Blackfin system timer based trigger for the iio subsystem");
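
This conversion and the two trigger drivers that follow drop the same hand-rolled init/exit pair in favour of module_platform_driver(), whose expansion is essentially the code being deleted. Approximately (via the generic module_driver() helper):

/* module_platform_driver(iio_bfin_tmr_trigger_driver) expands to roughly: */
static int __init iio_bfin_tmr_trigger_driver_init(void)
{
	return platform_driver_register(&iio_bfin_tmr_trigger_driver);
}
module_init(iio_bfin_tmr_trigger_driver_init);

static void __exit iio_bfin_tmr_trigger_driver_exit(void)
{
	platform_driver_unregister(&iio_bfin_tmr_trigger_driver);
}
module_exit(iio_bfin_tmr_trigger_driver_exit);
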
diff --git a/drivers/staging/iio/trigger/iio-trig-gpio.c b/drivers/staging/iio/trigger/iio-trig-gpio.c
index f2a655981622..a3465947235e 100644
--- a/drivers/staging/iio/trigger/iio-trig-gpio.c
+++ b/drivers/staging/iio/trigger/iio-trig-gpio.c
@@ -160,17 +160,7 @@ static struct platform_driver iio_gpio_trigger_driver = {
160 }, 160 },
161}; 161};
162 162
163static int __init iio_gpio_trig_init(void) 163module_platform_driver(iio_gpio_trigger_driver);
164{
165 return platform_driver_register(&iio_gpio_trigger_driver);
166}
167module_init(iio_gpio_trig_init);
168
169static void __exit iio_gpio_trig_exit(void)
170{
171 platform_driver_unregister(&iio_gpio_trigger_driver);
172}
173module_exit(iio_gpio_trig_exit);
174 164
175MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>"); 165MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
176MODULE_DESCRIPTION("Example gpio trigger for the iio subsystem"); 166MODULE_DESCRIPTION("Example gpio trigger for the iio subsystem");
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index bd7416b2c561..a80cf67bf84d 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -195,18 +195,8 @@ static struct platform_driver iio_trig_periodic_rtc_driver = {
195 }, 195 },
196}; 196};
197 197
198static int __init iio_trig_periodic_rtc_init(void) 198module_platform_driver(iio_trig_periodic_rtc_driver);
199{
200 return platform_driver_register(&iio_trig_periodic_rtc_driver);
201}
202
203static void __exit iio_trig_periodic_rtc_exit(void)
204{
205 return platform_driver_unregister(&iio_trig_periodic_rtc_driver);
206}
207 199
208module_init(iio_trig_periodic_rtc_init);
209module_exit(iio_trig_periodic_rtc_exit);
210MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>"); 200MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
211MODULE_DESCRIPTION("Periodic realtime clock trigger for the iio subsystem"); 201MODULE_DESCRIPTION("Periodic realtime clock trigger for the iio subsystem");
212MODULE_LICENSE("GPL v2"); 202MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/types.h b/drivers/staging/iio/types.h
index b7d26474ad06..0c3213666901 100644
--- a/drivers/staging/iio/types.h
+++ b/drivers/staging/iio/types.h
@@ -46,4 +46,8 @@ enum iio_modifier {
46 IIO_MOD_LIGHT_IR, 46 IIO_MOD_LIGHT_IR,
47}; 47};
48 48
49#define IIO_VAL_INT 1
50#define IIO_VAL_INT_PLUS_MICRO 2
51#define IIO_VAL_INT_PLUS_NANO 3
52
49#endif /* _IIO_TYPES_H_ */ 53#endif /* _IIO_TYPES_H_ */
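
These constants are return codes for driver read callbacks: they tell the IIO core whether the reported value is a plain integer in val, or an integer plus a fractional part in val2 scaled by 10^-6 (IIO_VAL_INT_PLUS_MICRO) or 10^-9 (IIO_VAL_INT_PLUS_NANO). A hedged sketch of their use in a hypothetical read_raw implementation against the staging IIO API of this period, where mask 0 selected the raw value:

static int foo_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	if (mask == 0) {		/* raw channel value */
		*val = 512;		/* placeholder sample */
		return IIO_VAL_INT;	/* reported to userspace as "512" */
	}

	/* a scale factor of 0.012500 would be reported as: */
	*val = 0;
	*val2 = 12500;
	return IIO_VAL_INT_PLUS_MICRO;	/* reported as "0.012500" */
}
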
diff --git a/drivers/staging/keucr/TODO b/drivers/staging/keucr/TODO
index 1c48e40e2b2c..d6da656eee1d 100644
--- a/drivers/staging/keucr/TODO
+++ b/drivers/staging/keucr/TODO
@@ -9,4 +9,4 @@ TODO:
9 - smcommon.h & smilsub.c: use kernel hweight8(), hweight16() 9 - smcommon.h & smilsub.c: use kernel hweight8(), hweight16()
10 10
11Please send any patches for this driver to Al Cho <acho@novell.com> and 11Please send any patches for this driver to Al Cho <acho@novell.com> and
12Greg Kroah-Hartman <gregkh@suse.de>. 12Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
diff --git a/drivers/staging/line6/capture.c b/drivers/staging/line6/capture.c
index 127f95247749..c85c5b6bffb7 100644
--- a/drivers/staging/line6/capture.c
+++ b/drivers/staging/line6/capture.c
@@ -107,7 +107,7 @@ void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm)
107 Wait until unlinking of all currently active capture URBs has been 107 Wait until unlinking of all currently active capture URBs has been
108 finished. 108 finished.
109*/ 109*/
110static void wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm) 110void line6_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
111{ 111{
112 int timeout = HZ; 112 int timeout = HZ;
113 unsigned int i; 113 unsigned int i;
@@ -134,7 +134,7 @@ static void wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
134void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm) 134void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
135{ 135{
136 line6_unlink_audio_in_urbs(line6pcm); 136 line6_unlink_audio_in_urbs(line6pcm);
137 wait_clear_audio_in_urbs(line6pcm); 137 line6_wait_clear_audio_in_urbs(line6pcm);
138} 138}
139 139
140/* 140/*
@@ -193,25 +193,6 @@ void line6_capture_check_period(struct snd_line6_pcm *line6pcm, int length)
193 } 193 }
194} 194}
195 195
196int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm)
197{
198 /* We may be invoked multiple times in a row so allocate once only */
199 if (line6pcm->buffer_in)
200 return 0;
201
202 line6pcm->buffer_in =
203 kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
204 line6pcm->max_packet_size, GFP_KERNEL);
205
206 if (!line6pcm->buffer_in) {
207 dev_err(line6pcm->line6->ifcdev,
208 "cannot malloc capture buffer\n");
209 return -ENOMEM;
210 }
211
212 return 0;
213}
214
215void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm) 196void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm)
216{ 197{
217 kfree(line6pcm->buffer_in); 198 kfree(line6pcm->buffer_in);
@@ -273,9 +254,9 @@ static void audio_in_callback(struct urb *urb)
273 line6pcm->prev_fsize = fsize; 254 line6pcm->prev_fsize = fsize;
274 255
275#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE 256#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
276 if (!(line6pcm->flags & MASK_PCM_IMPULSE)) 257 if (!(line6pcm->flags & LINE6_BITS_PCM_IMPULSE))
277#endif 258#endif
278 if (test_bit(BIT_PCM_ALSA_CAPTURE, &line6pcm->flags) 259 if (test_bit(LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM, &line6pcm->flags)
279 && (fsize > 0)) 260 && (fsize > 0))
280 line6_capture_copy(line6pcm, fbuf, fsize); 261 line6_capture_copy(line6pcm, fbuf, fsize);
281 } 262 }
@@ -291,9 +272,9 @@ static void audio_in_callback(struct urb *urb)
291 submit_audio_in_urb(line6pcm); 272 submit_audio_in_urb(line6pcm);
292 273
293#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE 274#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
294 if (!(line6pcm->flags & MASK_PCM_IMPULSE)) 275 if (!(line6pcm->flags & LINE6_BITS_PCM_IMPULSE))
295#endif 276#endif
296 if (test_bit(BIT_PCM_ALSA_CAPTURE, &line6pcm->flags)) 277 if (test_bit(LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM, &line6pcm->flags))
297 line6_capture_check_period(line6pcm, length); 278 line6_capture_check_period(line6pcm, length);
298 } 279 }
299} 280}
@@ -341,17 +322,17 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
341 } 322 }
342 /* -- [FD] end */ 323 /* -- [FD] end */
343 324
344 if ((line6pcm->flags & MASK_CAPTURE) == 0) { 325 ret = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
345 ret = line6_alloc_capture_buffer(line6pcm);
346 326
347 if (ret < 0) 327 if (ret < 0)
348 return ret; 328 return ret;
349 }
350 329
351 ret = snd_pcm_lib_malloc_pages(substream, 330 ret = snd_pcm_lib_malloc_pages(substream,
352 params_buffer_bytes(hw_params)); 331 params_buffer_bytes(hw_params));
353 if (ret < 0) 332 if (ret < 0) {
333 line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
354 return ret; 334 return ret;
335 }
355 336
356 line6pcm->period_in = params_period_bytes(hw_params); 337 line6pcm->period_in = params_period_bytes(hw_params);
357 return 0; 338 return 0;
@@ -361,12 +342,7 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
361static int snd_line6_capture_hw_free(struct snd_pcm_substream *substream) 342static int snd_line6_capture_hw_free(struct snd_pcm_substream *substream)
362{ 343{
363 struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); 344 struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
364 345 line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
365 if ((line6pcm->flags & MASK_CAPTURE) == 0) {
366 line6_unlink_wait_clear_audio_in_urbs(line6pcm);
367 line6_free_capture_buffer(line6pcm);
368 }
369
370 return snd_pcm_lib_free_pages(substream); 346 return snd_pcm_lib_free_pages(substream);
371} 347}
372 348
@@ -380,7 +356,7 @@ int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd)
380#ifdef CONFIG_PM 356#ifdef CONFIG_PM
381 case SNDRV_PCM_TRIGGER_RESUME: 357 case SNDRV_PCM_TRIGGER_RESUME:
382#endif 358#endif
383 err = line6_pcm_start(line6pcm, MASK_PCM_ALSA_CAPTURE); 359 err = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);
384 360
385 if (err < 0) 361 if (err < 0)
386 return err; 362 return err;
@@ -391,7 +367,7 @@ int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd)
391#ifdef CONFIG_PM 367#ifdef CONFIG_PM
392 case SNDRV_PCM_TRIGGER_SUSPEND: 368 case SNDRV_PCM_TRIGGER_SUSPEND:
393#endif 369#endif
394 err = line6_pcm_stop(line6pcm, MASK_PCM_ALSA_CAPTURE); 370 err = line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);
395 371
396 if (err < 0) 372 if (err < 0)
397 return err; 373 return err;
diff --git a/drivers/staging/line6/capture.h b/drivers/staging/line6/capture.h
index 366cbaa7c88d..4157bcb598a9 100644
--- a/drivers/staging/line6/capture.h
+++ b/drivers/staging/line6/capture.h
@@ -19,7 +19,6 @@
19 19
20extern struct snd_pcm_ops snd_line6_capture_ops; 20extern struct snd_pcm_ops snd_line6_capture_ops;
21 21
22extern int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm);
23extern void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf, 22extern void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf,
24 int fsize); 23 int fsize);
25extern void line6_capture_check_period(struct snd_line6_pcm *line6pcm, 24extern void line6_capture_check_period(struct snd_line6_pcm *line6pcm,
@@ -30,6 +29,7 @@ extern int line6_submit_audio_in_all_urbs(struct snd_line6_pcm *line6pcm);
30extern void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm); 29extern void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm);
31extern void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm 30extern void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm
32 *line6pcm); 31 *line6pcm);
32extern void line6_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm);
33extern int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd); 33extern int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd);
34 34
35#endif 35#endif
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 6a1959e16e00..e8023afd3656 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -1346,7 +1346,7 @@ static void __exit line6_exit(void)
1346 if (line6pcm == NULL) 1346 if (line6pcm == NULL)
1347 continue; 1347 continue;
1348 1348
1349 line6_pcm_stop(line6pcm, ~0); 1349 line6_pcm_release(line6pcm, ~0);
1350 } 1350 }
1351 1351
1352 usb_deregister(&line6_driver); 1352 usb_deregister(&line6_driver);
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 37675e66da81..90d2d4475cb4 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -52,9 +52,9 @@ static ssize_t pcm_set_impulse_volume(struct device *dev,
52 line6pcm->impulse_volume = value; 52 line6pcm->impulse_volume = value;
53 53
54 if (value > 0) 54 if (value > 0)
55 line6_pcm_start(line6pcm, MASK_PCM_IMPULSE); 55 line6_pcm_acquire(line6pcm, LINE6_BITS_PCM_IMPULSE);
56 else 56 else
57 line6_pcm_stop(line6pcm, MASK_PCM_IMPULSE); 57 line6_pcm_release(line6pcm, LINE6_BITS_PCM_IMPULSE);
58 58
59 return count; 59 return count;
60} 60}
@@ -92,29 +92,43 @@ static bool test_flags(unsigned long flags0, unsigned long flags1,
92 return ((flags0 & mask) == 0) && ((flags1 & mask) != 0); 92 return ((flags0 & mask) == 0) && ((flags1 & mask) != 0);
93} 93}
94 94
95int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels) 95int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int channels)
96{ 96{
97 unsigned long flags_old = 97 unsigned long flags_old =
98 __sync_fetch_and_or(&line6pcm->flags, channels); 98 __sync_fetch_and_or(&line6pcm->flags, channels);
99 unsigned long flags_new = flags_old | channels; 99 unsigned long flags_new = flags_old | channels;
100 unsigned long flags_final = flags_old;
100 int err = 0; 101 int err = 0;
101 102
102 line6pcm->prev_fbuf = NULL; 103 line6pcm->prev_fbuf = NULL;
103 104
104 if (test_flags(flags_old, flags_new, MASK_CAPTURE)) { 105 if (test_flags(flags_old, flags_new, LINE6_BITS_CAPTURE_BUFFER)) {
106 /* We may be invoked multiple times in a row so allocate once only */
107 if (!line6pcm->buffer_in) {
108 line6pcm->buffer_in =
109 kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
110 line6pcm->max_packet_size, GFP_KERNEL);
111
112 if (!line6pcm->buffer_in) {
113 dev_err(line6pcm->line6->ifcdev,
114 "cannot malloc capture buffer\n");
115 err = -ENOMEM;
116 goto pcm_acquire_error;
117 }
118
119 flags_final |= channels & LINE6_BITS_CAPTURE_BUFFER;
120 }
121 }
122
123 if (test_flags(flags_old, flags_new, LINE6_BITS_CAPTURE_STREAM)) {
105 /* 124 /*
106 Waiting for completion of active URBs in the stop handler is 125 Waiting for completion of active URBs in the stop handler is
107 a bug, we therefore report an error if capturing is restarted 126 a bug, we therefore report an error if capturing is restarted
108 too soon. 127 too soon.
109 */ 128 */
110 if (line6pcm->active_urb_in | line6pcm->unlink_urb_in) 129 if (line6pcm->active_urb_in | line6pcm->unlink_urb_in) {
130 dev_err(line6pcm->line6->ifcdev, "Device not yet ready\n");
111 return -EBUSY; 131 return -EBUSY;
112
113 if (!(flags_new & MASK_PCM_ALSA_CAPTURE)) {
114 err = line6_alloc_capture_buffer(line6pcm);
115
116 if (err < 0)
117 goto pcm_start_error;
118 } 132 }
119 133
120 line6pcm->count_in = 0; 134 line6pcm->count_in = 0;
@@ -122,55 +136,78 @@ int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
122 err = line6_submit_audio_in_all_urbs(line6pcm); 136 err = line6_submit_audio_in_all_urbs(line6pcm);
123 137
124 if (err < 0) 138 if (err < 0)
125 goto pcm_start_error; 139 goto pcm_acquire_error;
140
141 flags_final |= channels & LINE6_BITS_CAPTURE_STREAM;
126 } 142 }
127 143
128 if (test_flags(flags_old, flags_new, MASK_PLAYBACK)) { 144 if (test_flags(flags_old, flags_new, LINE6_BITS_PLAYBACK_BUFFER)) {
129 /* 145 /* We may be invoked multiple times in a row so allocate once only */
130 See comment above regarding PCM restart. 146 if (!line6pcm->buffer_out) {
131 */ 147 line6pcm->buffer_out =
132 if (line6pcm->active_urb_out | line6pcm->unlink_urb_out) 148 kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
133 return -EBUSY; 149 line6pcm->max_packet_size, GFP_KERNEL);
150
151 if (!line6pcm->buffer_out) {
152 dev_err(line6pcm->line6->ifcdev,
153 "cannot malloc playback buffer\n");
154 err = -ENOMEM;
155 goto pcm_acquire_error;
156 }
134 157
135 if (!(flags_new & MASK_PCM_ALSA_PLAYBACK)) { 158 flags_final |= channels & LINE6_BITS_PLAYBACK_BUFFER;
136 err = line6_alloc_playback_buffer(line6pcm); 159 }
160 }
137 161
138 if (err < 0) 162 if (test_flags(flags_old, flags_new, LINE6_BITS_PLAYBACK_STREAM)) {
139 goto pcm_start_error; 163 /*
164 See comment above regarding PCM restart.
165 */
166 if (line6pcm->active_urb_out | line6pcm->unlink_urb_out) {
167 dev_err(line6pcm->line6->ifcdev, "Device not yet ready\n");
168 return -EBUSY;
140 } 169 }
141 170
142 line6pcm->count_out = 0; 171 line6pcm->count_out = 0;
143 err = line6_submit_audio_out_all_urbs(line6pcm); 172 err = line6_submit_audio_out_all_urbs(line6pcm);
144 173
145 if (err < 0) 174 if (err < 0)
146 goto pcm_start_error; 175 goto pcm_acquire_error;
176
177 flags_final |= channels & LINE6_BITS_PLAYBACK_STREAM;
147 } 178 }
148 179
149 return 0; 180 return 0;
150 181
151pcm_start_error: 182pcm_acquire_error:
152 __sync_fetch_and_and(&line6pcm->flags, ~channels); 183 /*
184 If not all requested resources/streams could be obtained, release
185 those which were successfully obtained (if any).
186 */
187 line6_pcm_release(line6pcm, flags_final & channels);
153 return err; 188 return err;
154} 189}
155 190
156int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels) 191int line6_pcm_release(struct snd_line6_pcm *line6pcm, int channels)
157{ 192{
158 unsigned long flags_old = 193 unsigned long flags_old =
159 __sync_fetch_and_and(&line6pcm->flags, ~channels); 194 __sync_fetch_and_and(&line6pcm->flags, ~channels);
160 unsigned long flags_new = flags_old & ~channels; 195 unsigned long flags_new = flags_old & ~channels;
161 196
162 if (test_flags(flags_new, flags_old, MASK_CAPTURE)) { 197 if (test_flags(flags_new, flags_old, LINE6_BITS_CAPTURE_STREAM))
163 line6_unlink_audio_in_urbs(line6pcm); 198 line6_unlink_audio_in_urbs(line6pcm);
164 199
165 if (!(flags_old & MASK_PCM_ALSA_CAPTURE)) 200 if (test_flags(flags_new, flags_old, LINE6_BITS_CAPTURE_BUFFER)) {
166 line6_free_capture_buffer(line6pcm); 201 line6_wait_clear_audio_in_urbs(line6pcm);
202 line6_free_capture_buffer(line6pcm);
167 } 203 }
168 204
169 if (test_flags(flags_new, flags_old, MASK_PLAYBACK)) { 205 if (test_flags(flags_new, flags_old, LINE6_BITS_PLAYBACK_STREAM))
170 line6_unlink_audio_out_urbs(line6pcm); 206 line6_unlink_audio_out_urbs(line6pcm);
171 207
172 if (!(flags_old & MASK_PCM_ALSA_PLAYBACK)) 208 if (test_flags(flags_new, flags_old, LINE6_BITS_PLAYBACK_BUFFER)) {
173 line6_free_playback_buffer(line6pcm); 209 line6_wait_clear_audio_out_urbs(line6pcm);
210 line6_free_playback_buffer(line6pcm);
174 } 211 }
175 212
176 return 0; 213 return 0;
@@ -185,7 +222,7 @@ int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd)
185 unsigned long flags; 222 unsigned long flags;
186 223
187 spin_lock_irqsave(&line6pcm->lock_trigger, flags); 224 spin_lock_irqsave(&line6pcm->lock_trigger, flags);
188 clear_bit(BIT_PREPARED, &line6pcm->flags); 225 clear_bit(LINE6_INDEX_PREPARED, &line6pcm->flags);
189 226
190 snd_pcm_group_for_each_entry(s, substream) { 227 snd_pcm_group_for_each_entry(s, substream) {
191 switch (s->stream) { 228 switch (s->stream) {
@@ -498,13 +535,13 @@ int snd_line6_prepare(struct snd_pcm_substream *substream)
498 535
499 switch (substream->stream) { 536 switch (substream->stream) {
500 case SNDRV_PCM_STREAM_PLAYBACK: 537 case SNDRV_PCM_STREAM_PLAYBACK:
501 if ((line6pcm->flags & MASK_PLAYBACK) == 0) 538 if ((line6pcm->flags & LINE6_BITS_PLAYBACK_STREAM) == 0)
502 line6_unlink_wait_clear_audio_out_urbs(line6pcm); 539 line6_unlink_wait_clear_audio_out_urbs(line6pcm);
503 540
504 break; 541 break;
505 542
506 case SNDRV_PCM_STREAM_CAPTURE: 543 case SNDRV_PCM_STREAM_CAPTURE:
507 if ((line6pcm->flags & MASK_CAPTURE) == 0) 544 if ((line6pcm->flags & LINE6_BITS_CAPTURE_STREAM) == 0)
508 line6_unlink_wait_clear_audio_in_urbs(line6pcm); 545 line6_unlink_wait_clear_audio_in_urbs(line6pcm);
509 546
510 break; 547 break;
@@ -513,7 +550,7 @@ int snd_line6_prepare(struct snd_pcm_substream *substream)
513 MISSING_CASE; 550 MISSING_CASE;
514 } 551 }
515 552
516 if (!test_and_set_bit(BIT_PREPARED, &line6pcm->flags)) { 553 if (!test_and_set_bit(LINE6_INDEX_PREPARED, &line6pcm->flags)) {
517 line6pcm->count_out = 0; 554 line6pcm->count_out = 0;
518 line6pcm->pos_out = 0; 555 line6pcm->pos_out = 0;
519 line6pcm->pos_out_done = 0; 556 line6pcm->pos_out_done = 0;
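
line6_pcm_acquire() above implements a claim-then-roll-back discipline: all requested channel bits are set atomically up front, each resource that thereby went from clear to set is brought up in turn, and flags_final records which bring-ups succeeded so that the error path releases only those. The same shape reduced to two resources; a sketch in which every name is an illustrative stand-in for the driver's buffers and URB streams:

#define RES_BUFFER	(1 << 0)
#define RES_STREAM	(1 << 1)

static unsigned long flags;

/* illustrative stand-ins for buffer allocation and URB submission */
static int grab_buffer(void)  { return 0; }
static int start_stream(void) { return 0; }
static void tear_down(unsigned long channels) { /* free/unlink here */ }

/* true if this caller's atomic OR took `mask` from clear to set */
static int newly_set(unsigned long flags_old, unsigned long flags_new,
		     unsigned long mask)
{
	return ((flags_old & mask) == 0) && ((flags_new & mask) != 0);
}

static void release(unsigned long channels)
{
	__sync_fetch_and_and(&flags, ~channels);
	tear_down(channels);
}

static int acquire(unsigned long channels)
{
	/* claim all requested bits atomically up front */
	unsigned long flags_old = __sync_fetch_and_or(&flags, channels);
	unsigned long flags_new = flags_old | channels;
	unsigned long flags_final = flags_old;	/* successful bring-ups */
	int err;

	if (newly_set(flags_old, flags_new, RES_BUFFER)) {
		err = grab_buffer();
		if (err < 0)
			goto error;
		flags_final |= channels & RES_BUFFER;
	}

	if (newly_set(flags_old, flags_new, RES_STREAM)) {
		err = start_stream();
		if (err < 0)
			goto error;
		flags_final |= channels & RES_STREAM;
	}

	return 0;

error:
	/* undo only the resources this call actually brought up */
	release(flags_final & channels);
	return err;
}
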
diff --git a/drivers/staging/line6/pcm.h b/drivers/staging/line6/pcm.h
index 55d8297dd3d9..5210ec8dbe16 100644
--- a/drivers/staging/line6/pcm.h
+++ b/drivers/staging/line6/pcm.h
@@ -46,57 +46,131 @@
46 (line6pcm->pcm->streams[stream].substream) 46 (line6pcm->pcm->streams[stream].substream)
47 47
48/* 48/*
49 PCM mode bits and masks. 49 PCM mode bits.
50 "ALSA": operations triggered by applications via ALSA 50
51 "MONITOR": software monitoring 51 There are several features of the Line6 USB driver which require PCM
52 "IMPULSE": optional impulse response operation 52 data to be exchanged with the device:
53 *) PCM playback and capture via ALSA
54 *) software monitoring (for devices without hardware monitoring)
55 *) optional impulse response measurement
56 However, from the device's point of view, there is just a single
57 capture and playback stream, which must be shared between these
58 subsystems. It is therefore necessary to maintain the state of the
59 subsystems with respect to PCM usage. We define several constants of
60 the form LINE6_BIT_PCM_<subsystem>_<direction>_<resource> with the
61 following meanings:
62 *) <subsystem> is one of
63 -) ALSA: PCM playback and capture via ALSA
64 -) MONITOR: software monitoring
65 -) IMPULSE: optional impulse response measurement
66 *) <direction> is one of
67 -) PLAYBACK: audio output (from host to device)
68 -) CAPTURE: audio input (from device to host)
69 *) <resource> is one of
70 -) BUFFER: buffer required by PCM data stream
71 -) STREAM: actual PCM data stream
72
73 The subsystems call line6_pcm_acquire() to acquire the (shared)
74 resources needed for a particular operation (e.g., allocate the buffer
75 for ALSA playback or start the capture stream for software monitoring).
76 When a resource is no longer needed, it is released by calling
77 line6_pcm_release(). Buffer allocation and stream startup are handled
78 separately to allow the ALSA kernel driver to perform them at
79 appropriate places (since the callback which starts a PCM stream is not
80 allowed to sleep).
53*/ 81*/
54enum { 82enum {
55 /* individual bits: */ 83 /* individual bit indices: */
56 BIT_PCM_ALSA_PLAYBACK, 84 LINE6_INDEX_PCM_ALSA_PLAYBACK_BUFFER,
57 BIT_PCM_ALSA_CAPTURE, 85 LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM,
58 BIT_PCM_MONITOR_PLAYBACK, 86 LINE6_INDEX_PCM_ALSA_CAPTURE_BUFFER,
59 BIT_PCM_MONITOR_CAPTURE, 87 LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM,
88 LINE6_INDEX_PCM_MONITOR_PLAYBACK_BUFFER,
89 LINE6_INDEX_PCM_MONITOR_PLAYBACK_STREAM,
90 LINE6_INDEX_PCM_MONITOR_CAPTURE_BUFFER,
91 LINE6_INDEX_PCM_MONITOR_CAPTURE_STREAM,
60#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE 92#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
61 BIT_PCM_IMPULSE_PLAYBACK, 93 LINE6_INDEX_PCM_IMPULSE_PLAYBACK_BUFFER,
62 BIT_PCM_IMPULSE_CAPTURE, 94 LINE6_INDEX_PCM_IMPULSE_PLAYBACK_STREAM,
95 LINE6_INDEX_PCM_IMPULSE_CAPTURE_BUFFER,
96 LINE6_INDEX_PCM_IMPULSE_CAPTURE_STREAM,
63#endif 97#endif
64 BIT_PAUSE_PLAYBACK, 98 LINE6_INDEX_PAUSE_PLAYBACK,
65 BIT_PREPARED, 99 LINE6_INDEX_PREPARED,
66 100
67 /* individual masks: */ 101 /* individual bit masks: */
68/* *INDENT-OFF* */ 102 LINE6_BIT(PCM_ALSA_PLAYBACK_BUFFER),
69 MASK_PCM_ALSA_PLAYBACK = 1 << BIT_PCM_ALSA_PLAYBACK, 103 LINE6_BIT(PCM_ALSA_PLAYBACK_STREAM),
70 MASK_PCM_ALSA_CAPTURE = 1 << BIT_PCM_ALSA_CAPTURE, 104 LINE6_BIT(PCM_ALSA_CAPTURE_BUFFER),
71 MASK_PCM_MONITOR_PLAYBACK = 1 << BIT_PCM_MONITOR_PLAYBACK, 105 LINE6_BIT(PCM_ALSA_CAPTURE_STREAM),
72 MASK_PCM_MONITOR_CAPTURE = 1 << BIT_PCM_MONITOR_CAPTURE, 106 LINE6_BIT(PCM_MONITOR_PLAYBACK_BUFFER),
107 LINE6_BIT(PCM_MONITOR_PLAYBACK_STREAM),
108 LINE6_BIT(PCM_MONITOR_CAPTURE_BUFFER),
109 LINE6_BIT(PCM_MONITOR_CAPTURE_STREAM),
73#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE 110#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
74 MASK_PCM_IMPULSE_PLAYBACK = 1 << BIT_PCM_IMPULSE_PLAYBACK, 111 LINE6_BIT(PCM_IMPULSE_PLAYBACK_BUFFER),
75 MASK_PCM_IMPULSE_CAPTURE = 1 << BIT_PCM_IMPULSE_CAPTURE, 112 LINE6_BIT(PCM_IMPULSE_PLAYBACK_STREAM),
113 LINE6_BIT(PCM_IMPULSE_CAPTURE_BUFFER),
114 LINE6_BIT(PCM_IMPULSE_CAPTURE_STREAM),
76#endif 115#endif
77 MASK_PAUSE_PLAYBACK = 1 << BIT_PAUSE_PLAYBACK, 116 LINE6_BIT(PAUSE_PLAYBACK),
78 MASK_PREPARED = 1 << BIT_PREPARED, 117 LINE6_BIT(PREPARED),
79/* *INDENT-ON* */
80 118
81 /* combined masks (by operation): */ 119 /* combined bit masks (by operation): */
82 MASK_PCM_ALSA = MASK_PCM_ALSA_PLAYBACK | MASK_PCM_ALSA_CAPTURE, 120 LINE6_BITS_PCM_ALSA_BUFFER =
83 MASK_PCM_MONITOR = MASK_PCM_MONITOR_PLAYBACK | MASK_PCM_MONITOR_CAPTURE, 121 LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER |
122 LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER,
123
124 LINE6_BITS_PCM_ALSA_STREAM =
125 LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM |
126 LINE6_BIT_PCM_ALSA_CAPTURE_STREAM,
127
128 LINE6_BITS_PCM_MONITOR =
129 LINE6_BIT_PCM_MONITOR_PLAYBACK_BUFFER |
130 LINE6_BIT_PCM_MONITOR_PLAYBACK_STREAM |
131 LINE6_BIT_PCM_MONITOR_CAPTURE_BUFFER |
132 LINE6_BIT_PCM_MONITOR_CAPTURE_STREAM,
133
134#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
135 LINE6_BITS_PCM_IMPULSE =
136 LINE6_BIT_PCM_IMPULSE_PLAYBACK_BUFFER |
137 LINE6_BIT_PCM_IMPULSE_PLAYBACK_STREAM |
138 LINE6_BIT_PCM_IMPULSE_CAPTURE_BUFFER |
139 LINE6_BIT_PCM_IMPULSE_CAPTURE_STREAM,
140#endif
141
142 /* combined bit masks (by direction): */
143 LINE6_BITS_PLAYBACK_BUFFER =
144#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
145 LINE6_BIT_PCM_IMPULSE_PLAYBACK_BUFFER |
146#endif
147 LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER |
 148 LINE6_BIT_PCM_MONITOR_PLAYBACK_BUFFER,
149
150 LINE6_BITS_PLAYBACK_STREAM =
151#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
152 LINE6_BIT_PCM_IMPULSE_PLAYBACK_STREAM |
153#endif
154 LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM |
 155 LINE6_BIT_PCM_MONITOR_PLAYBACK_STREAM,
156
157 LINE6_BITS_CAPTURE_BUFFER =
84#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE 158#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
85 MASK_PCM_IMPULSE = MASK_PCM_IMPULSE_PLAYBACK | MASK_PCM_IMPULSE_CAPTURE, 159 LINE6_BIT_PCM_IMPULSE_CAPTURE_BUFFER |
86#endif 160#endif
161 LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER |
 162 LINE6_BIT_PCM_MONITOR_CAPTURE_BUFFER,
87 163
88 /* combined masks (by direction): */ 164 LINE6_BITS_CAPTURE_STREAM =
89#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE 165#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
90 MASK_PLAYBACK = 166 LINE6_BIT_PCM_IMPULSE_CAPTURE_STREAM |
91 MASK_PCM_ALSA_PLAYBACK | MASK_PCM_MONITOR_PLAYBACK |
92 MASK_PCM_IMPULSE_PLAYBACK,
93 MASK_CAPTURE =
94 MASK_PCM_ALSA_CAPTURE | MASK_PCM_MONITOR_CAPTURE |
95 MASK_PCM_IMPULSE_CAPTURE
96#else
97 MASK_PLAYBACK = MASK_PCM_ALSA_PLAYBACK | MASK_PCM_MONITOR_PLAYBACK,
98 MASK_CAPTURE = MASK_PCM_ALSA_CAPTURE | MASK_PCM_MONITOR_CAPTURE
99#endif 167#endif
168 LINE6_BIT_PCM_ALSA_CAPTURE_STREAM |
169 LINE6_BIT_PCM_MONITOR_CAPTURE_STREAM,
170
171 LINE6_BITS_STREAM =
172 LINE6_BITS_PLAYBACK_STREAM |
173 LINE6_BITS_CAPTURE_STREAM
100}; 174};
101 175
102struct line6_pcm_properties { 176struct line6_pcm_properties {
@@ -290,7 +364,7 @@ struct snd_line6_pcm {
290#endif 364#endif
291 365
292 /** 366 /**
293 Several status bits (see BIT_*). 367 Several status bits (see LINE6_BIT_*).
294 */ 368 */
295 unsigned long flags; 369 unsigned long flags;
296 370
@@ -302,16 +376,7 @@ extern int line6_init_pcm(struct usb_line6 *line6,
302extern int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd); 376extern int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd);
303extern int snd_line6_prepare(struct snd_pcm_substream *substream); 377extern int snd_line6_prepare(struct snd_pcm_substream *substream);
304extern void line6_pcm_disconnect(struct snd_line6_pcm *line6pcm); 378extern void line6_pcm_disconnect(struct snd_line6_pcm *line6pcm);
305extern int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels); 379extern int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int channels);
306extern int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels); 380extern int line6_pcm_release(struct snd_line6_pcm *line6pcm, int channels);
307
308#define PRINT_FRAME_DIFF(op) { \
309 static int diff_prev = 1000; \
310 int diff = line6pcm->last_frame_out - line6pcm->last_frame_in; \
311 if ((diff != diff_prev) && (abs(diff) < 100)) { \
312 printk(KERN_INFO "%s frame diff = %d\n", op, diff); \
313 diff_prev = diff; \
314 } \
315}
316 381
317#endif 382#endif
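
All the LINE6_BIT() lines in the new pcm.h enum rely on the token-pasting helper that the usbdefs.h hunk below retargets from the old LINE6_ID_* names to LINE6_INDEX_*: each mask enumerator is generated from the index enumerator declared a few lines earlier, so the two lists cannot drift apart. One expansion written out by hand:

/* #define LINE6_BIT(x)  LINE6_BIT_ ## x = 1 << LINE6_INDEX_ ## x */

/* so, inside the pcm.h enum, LINE6_BIT(PCM_ALSA_CAPTURE_BUFFER) becomes: */
LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER = 1 << LINE6_INDEX_PCM_ALSA_CAPTURE_BUFFER,
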
diff --git a/drivers/staging/line6/playback.c b/drivers/staging/line6/playback.c
index 4152db2328b7..a0ab9d0493fa 100644
--- a/drivers/staging/line6/playback.c
+++ b/drivers/staging/line6/playback.c
@@ -166,7 +166,7 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
166 struct usb_iso_packet_descriptor *fout = 166 struct usb_iso_packet_descriptor *fout =
167 &urb_out->iso_frame_desc[i]; 167 &urb_out->iso_frame_desc[i];
168 168
169 if (line6pcm->flags & MASK_CAPTURE) 169 if (line6pcm->flags & LINE6_BITS_CAPTURE_STREAM)
170 fsize = line6pcm->prev_fsize; 170 fsize = line6pcm->prev_fsize;
171 171
172 if (fsize == 0) { 172 if (fsize == 0) {
@@ -196,8 +196,8 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
196 urb_out->transfer_buffer_length = urb_size; 196 urb_out->transfer_buffer_length = urb_size;
197 urb_out->context = line6pcm; 197 urb_out->context = line6pcm;
198 198
199 if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags) && 199 if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags) &&
200 !test_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags)) { 200 !test_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags)) {
201 struct snd_pcm_runtime *runtime = 201 struct snd_pcm_runtime *runtime =
202 get_substream(line6pcm, SNDRV_PCM_STREAM_PLAYBACK)->runtime; 202 get_substream(line6pcm, SNDRV_PCM_STREAM_PLAYBACK)->runtime;
203 203
@@ -238,10 +238,10 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
238 238
239 if (line6pcm->prev_fbuf != NULL) { 239 if (line6pcm->prev_fbuf != NULL) {
240#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE 240#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
241 if (line6pcm->flags & MASK_PCM_IMPULSE) { 241 if (line6pcm->flags & LINE6_BITS_PCM_IMPULSE) {
242 create_impulse_test_signal(line6pcm, urb_out, 242 create_impulse_test_signal(line6pcm, urb_out,
243 bytes_per_frame); 243 bytes_per_frame);
244 if (line6pcm->flags & MASK_PCM_ALSA_CAPTURE) { 244 if (line6pcm->flags & LINE6_BIT_PCM_ALSA_CAPTURE_STREAM) {
245 line6_capture_copy(line6pcm, 245 line6_capture_copy(line6pcm,
246 urb_out->transfer_buffer, 246 urb_out->transfer_buffer,
247 urb_out-> 247 urb_out->
@@ -254,8 +254,8 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
254 if (! 254 if (!
255 (line6pcm->line6-> 255 (line6pcm->line6->
256 properties->capabilities & LINE6_BIT_HWMON) 256 properties->capabilities & LINE6_BIT_HWMON)
257&& (line6pcm->flags & MASK_PLAYBACK) 257 && (line6pcm->flags & LINE6_BITS_PLAYBACK_STREAM)
258&& (line6pcm->flags & MASK_CAPTURE)) 258 && (line6pcm->flags & LINE6_BITS_CAPTURE_STREAM))
259 add_monitor_signal(urb_out, line6pcm->prev_fbuf, 259 add_monitor_signal(urb_out, line6pcm->prev_fbuf,
260 line6pcm->volume_monitor, 260 line6pcm->volume_monitor,
261 bytes_per_frame); 261 bytes_per_frame);
@@ -321,7 +321,7 @@ void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm)
321/* 321/*
322 Wait until unlinking of all currently active playback URBs has been finished. 322 Wait until unlinking of all currently active playback URBs has been finished.
323*/ 323*/
324static void wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm) 324void line6_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
325{ 325{
326 int timeout = HZ; 326 int timeout = HZ;
327 unsigned int i; 327 unsigned int i;
@@ -348,26 +348,7 @@ static void wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
348void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm) 348void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
349{ 349{
350 line6_unlink_audio_out_urbs(line6pcm); 350 line6_unlink_audio_out_urbs(line6pcm);
351 wait_clear_audio_out_urbs(line6pcm); 351 line6_wait_clear_audio_out_urbs(line6pcm);
352}
353
354int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm)
355{
356 /* We may be invoked multiple times in a row so allocate once only */
357 if (line6pcm->buffer_out)
358 return 0;
359
360 line6pcm->buffer_out =
361 kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
362 line6pcm->max_packet_size, GFP_KERNEL);
363
364 if (!line6pcm->buffer_out) {
365 dev_err(line6pcm->line6->ifcdev,
366 "cannot malloc playback buffer\n");
367 return -ENOMEM;
368 }
369
370 return 0;
371} 352}
372 353
373void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm) 354void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm)
@@ -407,7 +388,7 @@ static void audio_out_callback(struct urb *urb)
407 388
408 spin_lock_irqsave(&line6pcm->lock_audio_out, flags); 389 spin_lock_irqsave(&line6pcm->lock_audio_out, flags);
409 390
410 if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags)) { 391 if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags)) {
411 struct snd_pcm_runtime *runtime = substream->runtime; 392 struct snd_pcm_runtime *runtime = substream->runtime;
412 line6pcm->pos_out_done += 393 line6pcm->pos_out_done +=
413 length / line6pcm->properties->bytes_per_frame; 394 length / line6pcm->properties->bytes_per_frame;
@@ -432,7 +413,7 @@ static void audio_out_callback(struct urb *urb)
432 if (!shutdown) { 413 if (!shutdown) {
433 submit_audio_out_urb(line6pcm); 414 submit_audio_out_urb(line6pcm);
434 415
435 if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags)) { 416 if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags)) {
436 line6pcm->bytes_out += length; 417 line6pcm->bytes_out += length;
437 if (line6pcm->bytes_out >= line6pcm->period_out) { 418 if (line6pcm->bytes_out >= line6pcm->period_out) {
438 line6pcm->bytes_out %= line6pcm->period_out; 419 line6pcm->bytes_out %= line6pcm->period_out;
@@ -484,17 +465,17 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
484 } 465 }
485 /* -- [FD] end */ 466 /* -- [FD] end */
486 467
487 if ((line6pcm->flags & MASK_PLAYBACK) == 0) { 468 ret = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
488 ret = line6_alloc_playback_buffer(line6pcm);
489 469
490 if (ret < 0) 470 if (ret < 0)
491 return ret; 471 return ret;
492 }
493 472
494 ret = snd_pcm_lib_malloc_pages(substream, 473 ret = snd_pcm_lib_malloc_pages(substream,
495 params_buffer_bytes(hw_params)); 474 params_buffer_bytes(hw_params));
496 if (ret < 0) 475 if (ret < 0) {
476 line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
497 return ret; 477 return ret;
478 }
498 479
499 line6pcm->period_out = params_period_bytes(hw_params); 480 line6pcm->period_out = params_period_bytes(hw_params);
500 return 0; 481 return 0;
@@ -504,12 +485,7 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
504static int snd_line6_playback_hw_free(struct snd_pcm_substream *substream) 485static int snd_line6_playback_hw_free(struct snd_pcm_substream *substream)
505{ 486{
506 struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); 487 struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
507 488 line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
508 if ((line6pcm->flags & MASK_PLAYBACK) == 0) {
509 line6_unlink_wait_clear_audio_out_urbs(line6pcm);
510 line6_free_playback_buffer(line6pcm);
511 }
512
513 return snd_pcm_lib_free_pages(substream); 489 return snd_pcm_lib_free_pages(substream);
514} 490}
515 491
@@ -523,7 +499,7 @@ int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd)
523#ifdef CONFIG_PM 499#ifdef CONFIG_PM
524 case SNDRV_PCM_TRIGGER_RESUME: 500 case SNDRV_PCM_TRIGGER_RESUME:
525#endif 501#endif
526 err = line6_pcm_start(line6pcm, MASK_PCM_ALSA_PLAYBACK); 502 err = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM);
527 503
528 if (err < 0) 504 if (err < 0)
529 return err; 505 return err;
@@ -534,7 +510,7 @@ int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd)
534#ifdef CONFIG_PM 510#ifdef CONFIG_PM
535 case SNDRV_PCM_TRIGGER_SUSPEND: 511 case SNDRV_PCM_TRIGGER_SUSPEND:
536#endif 512#endif
537 err = line6_pcm_stop(line6pcm, MASK_PCM_ALSA_PLAYBACK); 513 err = line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM);
538 514
539 if (err < 0) 515 if (err < 0)
540 return err; 516 return err;
@@ -542,11 +518,11 @@ int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd)
542 break; 518 break;
543 519
544 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 520 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
545 set_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags); 521 set_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags);
546 break; 522 break;
547 523
548 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 524 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
549 clear_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags); 525 clear_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags);
550 break; 526 break;
551 527
552 default: 528 default:
diff --git a/drivers/staging/line6/playback.h b/drivers/staging/line6/playback.h
index 02487ff24538..743bd6f74c57 100644
--- a/drivers/staging/line6/playback.h
+++ b/drivers/staging/line6/playback.h
@@ -29,13 +29,13 @@
29 29
30extern struct snd_pcm_ops snd_line6_playback_ops; 30extern struct snd_pcm_ops snd_line6_playback_ops;
31 31
32extern int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm);
33extern int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm); 32extern int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm);
34extern void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm); 33extern void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm);
35extern int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm); 34extern int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm);
36extern void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm); 35extern void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm);
37extern void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm 36extern void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm
38 *line6pcm); 37 *line6pcm);
38extern void line6_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm);
39extern int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd); 39extern int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd);
40 40
41#endif 41#endif
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index f31057830dbc..b754f69a29c4 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -207,9 +207,9 @@ static int snd_toneport_monitor_put(struct snd_kcontrol *kcontrol,
207 line6pcm->volume_monitor = ucontrol->value.integer.value[0]; 207 line6pcm->volume_monitor = ucontrol->value.integer.value[0];
208 208
209 if (line6pcm->volume_monitor > 0) 209 if (line6pcm->volume_monitor > 0)
210 line6_pcm_start(line6pcm, MASK_PCM_MONITOR); 210 line6_pcm_acquire(line6pcm, LINE6_BITS_PCM_MONITOR);
211 else 211 else
212 line6_pcm_stop(line6pcm, MASK_PCM_MONITOR); 212 line6_pcm_release(line6pcm, LINE6_BITS_PCM_MONITOR);
213 213
214 return 1; 214 return 1;
215} 215}
@@ -264,7 +264,7 @@ static void toneport_start_pcm(unsigned long arg)
264{ 264{
265 struct usb_line6_toneport *toneport = (struct usb_line6_toneport *)arg; 265 struct usb_line6_toneport *toneport = (struct usb_line6_toneport *)arg;
266 struct usb_line6 *line6 = &toneport->line6; 266 struct usb_line6 *line6 = &toneport->line6;
267 line6_pcm_start(line6->line6pcm, MASK_PCM_MONITOR); 267 line6_pcm_acquire(line6->line6pcm, LINE6_BITS_PCM_MONITOR);
268} 268}
269 269
270/* control definition */ 270/* control definition */
@@ -320,7 +320,9 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
320 /* initialize source select: */ 320 /* initialize source select: */
321 switch (usbdev->descriptor.idProduct) { 321 switch (usbdev->descriptor.idProduct) {
322 case LINE6_DEVID_TONEPORT_UX1: 322 case LINE6_DEVID_TONEPORT_UX1:
323 case LINE6_DEVID_TONEPORT_UX2:
323 case LINE6_DEVID_PODSTUDIO_UX1: 324 case LINE6_DEVID_PODSTUDIO_UX1:
325 case LINE6_DEVID_PODSTUDIO_UX2:
324 toneport_send_cmd(usbdev, 326 toneport_send_cmd(usbdev,
325 toneport_source_info[toneport->source].code, 327 toneport_source_info[toneport->source].code,
326 0x0000); 328 0x0000);
@@ -363,7 +365,9 @@ static int toneport_try_init(struct usb_interface *interface,
363 /* register source select control: */ 365 /* register source select control: */
364 switch (usbdev->descriptor.idProduct) { 366 switch (usbdev->descriptor.idProduct) {
365 case LINE6_DEVID_TONEPORT_UX1: 367 case LINE6_DEVID_TONEPORT_UX1:
368 case LINE6_DEVID_TONEPORT_UX2:
366 case LINE6_DEVID_PODSTUDIO_UX1: 369 case LINE6_DEVID_PODSTUDIO_UX1:
370 case LINE6_DEVID_PODSTUDIO_UX2:
367 err = 371 err =
368 snd_ctl_add(line6->card, 372 snd_ctl_add(line6->card,
369 snd_ctl_new1(&toneport_control_source, 373 snd_ctl_new1(&toneport_control_source,
@@ -442,7 +446,7 @@ void line6_toneport_disconnect(struct usb_interface *interface)
442 struct snd_line6_pcm *line6pcm = toneport->line6.line6pcm; 446 struct snd_line6_pcm *line6pcm = toneport->line6.line6pcm;
443 447
444 if (line6pcm != NULL) { 448 if (line6pcm != NULL) {
445 line6_pcm_stop(line6pcm, MASK_PCM_MONITOR); 449 line6_pcm_release(line6pcm, LINE6_BITS_PCM_MONITOR);
446 line6_pcm_disconnect(line6pcm); 450 line6_pcm_disconnect(line6pcm);
447 } 451 }
448 } 452 }
diff --git a/drivers/staging/line6/usbdefs.h b/drivers/staging/line6/usbdefs.h
index aff9e5caea46..353d59d77b04 100644
--- a/drivers/staging/line6/usbdefs.h
+++ b/drivers/staging/line6/usbdefs.h
@@ -39,31 +39,29 @@
39#define LINE6_DEVID_TONEPORT_UX2 0x4142 39#define LINE6_DEVID_TONEPORT_UX2 0x4142
40#define LINE6_DEVID_VARIAX 0x534d 40#define LINE6_DEVID_VARIAX 0x534d
41 41
42enum { 42#define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_INDEX_ ## x
43 LINE6_ID_BASSPODXT,
44 LINE6_ID_BASSPODXTLIVE,
45 LINE6_ID_BASSPODXTPRO,
46 LINE6_ID_GUITARPORT,
47 LINE6_ID_POCKETPOD,
48 LINE6_ID_PODHD300,
49 LINE6_ID_PODHD500,
50 LINE6_ID_PODSTUDIO_GX,
51 LINE6_ID_PODSTUDIO_UX1,
52 LINE6_ID_PODSTUDIO_UX2,
53 LINE6_ID_PODX3,
54 LINE6_ID_PODX3LIVE,
55 LINE6_ID_PODXT,
56 LINE6_ID_PODXTLIVE,
57 LINE6_ID_PODXTPRO,
58 LINE6_ID_TONEPORT_GX,
59 LINE6_ID_TONEPORT_UX1,
60 LINE6_ID_TONEPORT_UX2,
61 LINE6_ID_VARIAX
62};
63
64#define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_ID_ ## x
65 43
66enum { 44enum {
45 LINE6_INDEX_BASSPODXT,
46 LINE6_INDEX_BASSPODXTLIVE,
47 LINE6_INDEX_BASSPODXTPRO,
48 LINE6_INDEX_GUITARPORT,
49 LINE6_INDEX_POCKETPOD,
50 LINE6_INDEX_PODHD300,
51 LINE6_INDEX_PODHD500,
52 LINE6_INDEX_PODSTUDIO_GX,
53 LINE6_INDEX_PODSTUDIO_UX1,
54 LINE6_INDEX_PODSTUDIO_UX2,
55 LINE6_INDEX_PODX3,
56 LINE6_INDEX_PODX3LIVE,
57 LINE6_INDEX_PODXT,
58 LINE6_INDEX_PODXTLIVE,
59 LINE6_INDEX_PODXTPRO,
60 LINE6_INDEX_TONEPORT_GX,
61 LINE6_INDEX_TONEPORT_UX1,
62 LINE6_INDEX_TONEPORT_UX2,
63 LINE6_INDEX_VARIAX,
64
67 LINE6_BIT(BASSPODXT), 65 LINE6_BIT(BASSPODXT),
68 LINE6_BIT(BASSPODXTLIVE), 66 LINE6_BIT(BASSPODXTLIVE),
69 LINE6_BIT(BASSPODXTPRO), 67 LINE6_BIT(BASSPODXTPRO),
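The LINE6_BIT() helper above relies on token pasting, so each LINE6_INDEX_* enumerator and its LINE6_BIT_* mask now live in a single enum instead of two. A minimal standalone sketch of the pattern, trimmed to two device names (the trimming is illustration only, not part of the patch):

#include <stdio.h>

#define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_INDEX_ ## x

enum {
	LINE6_INDEX_BASSPODXT,		/* 0 */
	LINE6_INDEX_PODXT,		/* 1 */

	LINE6_BIT(BASSPODXT),		/* 1 << 0 */
	LINE6_BIT(PODXT),		/* 1 << 1 */
};

int main(void)
{
	printf("%d %d\n", LINE6_BIT_BASSPODXT, LINE6_BIT_PODXT);	/* 1 2 */
	return 0;
}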
diff --git a/drivers/staging/media/easycap/easycap_main.c b/drivers/staging/media/easycap/easycap_main.c
index 8ff5f38ea196..3d439b790cc6 100644
--- a/drivers/staging/media/easycap/easycap_main.c
+++ b/drivers/staging/media/easycap/easycap_main.c
@@ -3825,6 +3825,7 @@ static int easycap_usb_probe(struct usb_interface *intf,
3825/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ 3825/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
3826 pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL); 3826 pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
3827 if (!pdata_urb) { 3827 if (!pdata_urb) {
3828 usb_free_urb(purb);
3828 SAM("ERROR: Could not allocate struct data_urb.\n"); 3829 SAM("ERROR: Could not allocate struct data_urb.\n");
3829 return -ENOMEM; 3830 return -ENOMEM;
3830 } 3831 }
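The one-line easycap fix above plugs a URB leak on the kzalloc() failure path. A runnable sketch of the same error-path rule, with the USB helpers stubbed out as plain allocations (stub names are hypothetical, not from the driver):

#include <stdlib.h>

static void *alloc_urb(void)	/* stand-in for usb_alloc_urb() */
{
	return malloc(64);
}

static void free_urb(void *urb)	/* stand-in for usb_free_urb() */
{
	free(urb);
}

static int demo_probe(void)
{
	void *purb = alloc_urb();
	void *pdata_urb;

	if (!purb)
		return -1;

	pdata_urb = calloc(1, 32);
	if (!pdata_urb) {
		free_urb(purb);	/* the call the patch adds: no URB leak */
		return -1;
	}

	free(pdata_urb);
	free_urb(purb);
	return 0;
}

int main(void)
{
	return demo_probe();
}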
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 7855baa18e75..74421043b954 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -90,11 +90,11 @@ static void __exit sasem_exit(void);
90struct sasem_context { 90struct sasem_context {
91 91
92 struct usb_device *dev; 92 struct usb_device *dev;
93 int vfd_isopen; /* VFD port has been opened */ 93 int vfd_isopen; /* VFD port has been opened */
94 unsigned int vfd_contrast; /* VFD contrast */ 94 unsigned int vfd_contrast; /* VFD contrast */
95 int ir_isopen; /* IR port has been opened */ 95 int ir_isopen; /* IR port has been opened */
96 int dev_present; /* USB device presence */ 96 int dev_present; /* USB device presence */
97 struct mutex ctx_lock; /* to lock this object */ 97 struct mutex ctx_lock; /* to lock this object */
98 wait_queue_head_t remove_ok; /* For unexpected USB disconnects */ 98 wait_queue_head_t remove_ok; /* For unexpected USB disconnects */
99 99
100 struct lirc_driver *driver; 100 struct lirc_driver *driver;
@@ -106,10 +106,11 @@ struct sasem_context {
106 unsigned char usb_tx_buf[8]; 106 unsigned char usb_tx_buf[8];
107 107
108 struct tx_t { 108 struct tx_t {
109 unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data buffer */ 109 unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data
110 * buffer */
110 struct completion finished; /* wait for write to finish */ 111 struct completion finished; /* wait for write to finish */
111 atomic_t busy; /* write in progress */ 112 atomic_t busy; /* write in progress */
112 int status; /* status of tx completion */ 113 int status; /* status of tx completion */
113 } tx; 114 } tx;
114 115
115 /* for dealing with repeat codes (wish there was a toggle bit!) */ 116 /* for dealing with repeat codes (wish there was a toggle bit!) */
diff --git a/drivers/staging/mei/TODO b/drivers/staging/mei/TODO
index 7d9a13b0f2dd..fc266018355e 100644
--- a/drivers/staging/mei/TODO
+++ b/drivers/staging/mei/TODO
@@ -3,5 +3,8 @@ TODO:
3Upon Unstaging: 3Upon Unstaging:
4 - move mei.h to include/linux/mei.h 4 - move mei.h to include/linux/mei.h
5 - Documentation/ioctl/ioctl-number.txt 5 - Documentation/ioctl/ioctl-number.txt
6 - move mei.txt under Documentation/mei/
7 - move mei-amt-version.c under Documentation/mei
8 - add hostprogs-y for mei-amt-version.c
6 - drop mei_version.h 9 - drop mei_version.h
7 - Updated MAINTAINERS 10 - Updated MAINTAINERS
diff --git a/drivers/staging/mei/hw.h b/drivers/staging/mei/hw.h
index 9b9008cb6938..24c4c962819e 100644
--- a/drivers/staging/mei/hw.h
+++ b/drivers/staging/mei/hw.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * 2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver 3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2011, Intel Corporation. 4 * Copyright (c) 2003-2012, Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -141,6 +141,11 @@ access to ME_CBD */
141#define HBM_MAJOR_VERSION 1 141#define HBM_MAJOR_VERSION 1
142#define HBM_TIMEOUT 1 /* 1 second */ 142#define HBM_TIMEOUT 1 /* 1 second */
143 143
144/* Host bus message command opcode */
145#define MEI_HBM_CMD_OP_MSK 0x7f
146/* Host bus message command RESPONSE */
147#define MEI_HBM_CMD_RES_MSK 0x80
148
144/* 149/*
145 * MEI Bus Message Command IDs 150 * MEI Bus Message Command IDs
146 */ 151 */
@@ -164,7 +169,7 @@ access to ME_CBD */
164#define CLIENT_DISCONNECT_REQ_CMD 0x07 169#define CLIENT_DISCONNECT_REQ_CMD 0x07
165#define CLIENT_DISCONNECT_RES_CMD 0x87 170#define CLIENT_DISCONNECT_RES_CMD 0x87
166 171
167#define MEI_FLOW_CONTROL_CMD 0x08 172#define MEI_FLOW_CONTROL_CMD 0x08
168 173
169/* 174/*
170 * MEI Stop Reason 175 * MEI Stop Reason
@@ -213,15 +218,9 @@ struct mei_msg_hdr {
213} __packed; 218} __packed;
214 219
215 220
216struct hbm_cmd {
217 u8 cmd:7;
218 u8 is_response:1;
219} __packed;
220
221
222struct mei_bus_message { 221struct mei_bus_message {
223 struct hbm_cmd cmd; 222 u8 hbm_cmd;
224 u8 command_specific_data[]; 223 u8 data[0];
225} __packed; 224} __packed;
226 225
227struct hbm_version { 226struct hbm_version {
@@ -230,41 +229,41 @@ struct hbm_version {
230} __packed; 229} __packed;
231 230
232struct hbm_host_version_request { 231struct hbm_host_version_request {
233 struct hbm_cmd cmd; 232 u8 hbm_cmd;
234 u8 reserved; 233 u8 reserved;
235 struct hbm_version host_version; 234 struct hbm_version host_version;
236} __packed; 235} __packed;
237 236
238struct hbm_host_version_response { 237struct hbm_host_version_response {
239 struct hbm_cmd cmd; 238 u8 hbm_cmd;
240 int host_version_supported; 239 u8 host_version_supported;
241 struct hbm_version me_max_version; 240 struct hbm_version me_max_version;
242} __packed; 241} __packed;
243 242
244struct hbm_host_stop_request { 243struct hbm_host_stop_request {
245 struct hbm_cmd cmd; 244 u8 hbm_cmd;
246 u8 reason; 245 u8 reason;
247 u8 reserved[2]; 246 u8 reserved[2];
248} __packed; 247} __packed;
249 248
250struct hbm_host_stop_response { 249struct hbm_host_stop_response {
251 struct hbm_cmd cmd; 250 u8 hbm_cmd;
252 u8 reserved[3]; 251 u8 reserved[3];
253} __packed; 252} __packed;
254 253
255struct hbm_me_stop_request { 254struct hbm_me_stop_request {
256 struct hbm_cmd cmd; 255 u8 hbm_cmd;
257 u8 reason; 256 u8 reason;
258 u8 reserved[2]; 257 u8 reserved[2];
259} __packed; 258} __packed;
260 259
261struct hbm_host_enum_request { 260struct hbm_host_enum_request {
262 struct hbm_cmd cmd; 261 u8 hbm_cmd;
263 u8 reserved[3]; 262 u8 reserved[3];
264} __packed; 263} __packed;
265 264
266struct hbm_host_enum_response { 265struct hbm_host_enum_response {
267 struct hbm_cmd cmd; 266 u8 hbm_cmd;
268 u8 reserved[3]; 267 u8 reserved[3];
269 u8 valid_addresses[32]; 268 u8 valid_addresses[32];
270} __packed; 269} __packed;
@@ -279,14 +278,14 @@ struct mei_client_properties {
279} __packed; 278} __packed;
280 279
281struct hbm_props_request { 280struct hbm_props_request {
282 struct hbm_cmd cmd; 281 u8 hbm_cmd;
283 u8 address; 282 u8 address;
284 u8 reserved[2]; 283 u8 reserved[2];
285} __packed; 284} __packed;
286 285
287 286
288struct hbm_props_response { 287struct hbm_props_response {
289 struct hbm_cmd cmd; 288 u8 hbm_cmd;
290 u8 address; 289 u8 address;
291 u8 status; 290 u8 status;
292 u8 reserved[1]; 291 u8 reserved[1];
@@ -294,21 +293,21 @@ struct hbm_props_response {
294} __packed; 293} __packed;
295 294
296struct hbm_client_connect_request { 295struct hbm_client_connect_request {
297 struct hbm_cmd cmd; 296 u8 hbm_cmd;
298 u8 me_addr; 297 u8 me_addr;
299 u8 host_addr; 298 u8 host_addr;
300 u8 reserved; 299 u8 reserved;
301} __packed; 300} __packed;
302 301
303struct hbm_client_connect_response { 302struct hbm_client_connect_response {
304 struct hbm_cmd cmd; 303 u8 hbm_cmd;
305 u8 me_addr; 304 u8 me_addr;
306 u8 host_addr; 305 u8 host_addr;
307 u8 status; 306 u8 status;
308} __packed; 307} __packed;
309 308
310struct hbm_client_disconnect_request { 309struct hbm_client_disconnect_request {
311 struct hbm_cmd cmd; 310 u8 hbm_cmd;
312 u8 me_addr; 311 u8 me_addr;
313 u8 host_addr; 312 u8 host_addr;
314 u8 reserved[1]; 313 u8 reserved[1];
@@ -317,7 +316,7 @@ struct hbm_client_disconnect_request {
317#define MEI_FC_MESSAGE_RESERVED_LENGTH 5 316#define MEI_FC_MESSAGE_RESERVED_LENGTH 5
318 317
319struct hbm_flow_control { 318struct hbm_flow_control {
320 struct hbm_cmd cmd; 319 u8 hbm_cmd;
321 u8 me_addr; 320 u8 me_addr;
322 u8 host_addr; 321 u8 host_addr;
323 u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH]; 322 u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
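With struct hbm_cmd gone, the command byte is a plain u8 decoded through the two mask macros hw.h introduces. A small standalone sketch of the decoding; the command value comes from the list above, the rest is illustration only:

#include <stdio.h>

#define MEI_HBM_CMD_OP_MSK	0x7f	/* command opcode bits */
#define MEI_HBM_CMD_RES_MSK	0x80	/* response flag bit */

#define CLIENT_DISCONNECT_RES_CMD 0x87	/* from the command list above */

int main(void)
{
	unsigned char hbm_cmd = CLIENT_DISCONNECT_RES_CMD;

	printf("opcode=0x%02x response=%d\n",
	       hbm_cmd & MEI_HBM_CMD_OP_MSK,		/* 0x07 */
	       (hbm_cmd & MEI_HBM_CMD_RES_MSK) != 0);	/* 1 */
	return 0;
}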
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index 4ac3696883cb..eab711fb5fc4 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * 2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver 3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2011, Intel Corporation. 4 * Copyright (c) 2003-2012, Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -362,11 +362,11 @@ void mei_host_start_message(struct mei_device *dev)
362 host_start_req = 362 host_start_req =
363 (struct hbm_host_version_request *) &dev->wr_msg_buf[1]; 363 (struct hbm_host_version_request *) &dev->wr_msg_buf[1];
364 memset(host_start_req, 0, sizeof(struct hbm_host_version_request)); 364 memset(host_start_req, 0, sizeof(struct hbm_host_version_request));
365 host_start_req->cmd.cmd = HOST_START_REQ_CMD; 365 host_start_req->hbm_cmd = HOST_START_REQ_CMD;
366 host_start_req->host_version.major_version = HBM_MAJOR_VERSION; 366 host_start_req->host_version.major_version = HBM_MAJOR_VERSION;
367 host_start_req->host_version.minor_version = HBM_MINOR_VERSION; 367 host_start_req->host_version.minor_version = HBM_MINOR_VERSION;
368 dev->recvd_msg = false; 368 dev->recvd_msg = false;
369 if (!mei_write_message(dev, mei_hdr, (unsigned char *)host_start_req, 369 if (mei_write_message(dev, mei_hdr, (unsigned char *)host_start_req,
370 mei_hdr->length)) { 370 mei_hdr->length)) {
371 dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n"); 371 dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
372 dev->mei_state = MEI_RESETING; 372 dev->mei_state = MEI_RESETING;
@@ -398,8 +398,8 @@ void mei_host_enum_clients_message(struct mei_device *dev)
398 398
399 host_enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1]; 399 host_enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
400 memset(host_enum_req, 0, sizeof(struct hbm_host_enum_request)); 400 memset(host_enum_req, 0, sizeof(struct hbm_host_enum_request));
401 host_enum_req->cmd.cmd = HOST_ENUM_REQ_CMD; 401 host_enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
402 if (!mei_write_message(dev, mei_hdr, (unsigned char *)host_enum_req, 402 if (mei_write_message(dev, mei_hdr, (unsigned char *)host_enum_req,
403 mei_hdr->length)) { 403 mei_hdr->length)) {
404 dev->mei_state = MEI_RESETING; 404 dev->mei_state = MEI_RESETING;
405 dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n"); 405 dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
@@ -407,7 +407,7 @@ void mei_host_enum_clients_message(struct mei_device *dev)
407 } 407 }
408 dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE; 408 dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
409 dev->init_clients_timer = INIT_CLIENTS_TIMEOUT; 409 dev->init_clients_timer = INIT_CLIENTS_TIMEOUT;
410 return ; 410 return;
411} 411}
412 412
413 413
@@ -482,10 +482,10 @@ int mei_host_client_properties(struct mei_device *dev)
482 482
483 memset(host_cli_req, 0, sizeof(struct hbm_props_request)); 483 memset(host_cli_req, 0, sizeof(struct hbm_props_request));
484 484
485 host_cli_req->cmd.cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; 485 host_cli_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
486 host_cli_req->address = b; 486 host_cli_req->address = b;
487 487
488 if (!mei_write_message(dev, mei_header, 488 if (mei_write_message(dev, mei_header,
489 (unsigned char *)host_cli_req, 489 (unsigned char *)host_cli_req,
490 mei_header->length)) { 490 mei_header->length)) {
491 dev->mei_state = MEI_RESETING; 491 dev->mei_state = MEI_RESETING;
@@ -608,7 +608,7 @@ void mei_host_init_iamthif(struct mei_device *dev)
608 608
609 dev->iamthif_msg_buf = msg_buf; 609 dev->iamthif_msg_buf = msg_buf;
610 610
611 if (!mei_connect(dev, &dev->iamthif_cl)) { 611 if (mei_connect(dev, &dev->iamthif_cl)) {
612 dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n"); 612 dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n");
613 dev->iamthif_cl.state = MEI_FILE_DISCONNECTED; 613 dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
614 dev->iamthif_cl.host_client_id = 0; 614 dev->iamthif_cl.host_client_id = 0;
@@ -670,14 +670,12 @@ int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
670 if (dev->mei_host_buffer_is_empty) { 670 if (dev->mei_host_buffer_is_empty) {
671 dev->mei_host_buffer_is_empty = false; 671 dev->mei_host_buffer_is_empty = false;
672 if (mei_disconnect(dev, cl)) { 672 if (mei_disconnect(dev, cl)) {
673 mdelay(10); /* Wait for hardware disconnection ready */
674 list_add_tail(&cb->cb_list,
675 &dev->ctrl_rd_list.mei_cb.cb_list);
676 } else {
677 rets = -ENODEV; 673 rets = -ENODEV;
678 dev_dbg(&dev->pdev->dev, "failed to call mei_disconnect.\n"); 674 dev_dbg(&dev->pdev->dev, "failed to call mei_disconnect.\n");
679 goto free; 675 goto free;
680 } 676 }
677 mdelay(10); /* Wait for hardware disconnection ready */
678 list_add_tail(&cb->cb_list, &dev->ctrl_rd_list.mei_cb.cb_list);
681 } else { 679 } else {
682 dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n"); 680 dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
683 list_add_tail(&cb->cb_list, 681 list_add_tail(&cb->cb_list,
diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
index eb5df7fc2269..9a2cfafc52a6 100644
--- a/drivers/staging/mei/interface.c
+++ b/drivers/staging/mei/interface.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * 2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver 3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2011, Intel Corporation. 4 * Copyright (c) 2003-2012, Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -125,7 +125,7 @@ int mei_count_empty_write_slots(struct mei_device *dev)
125 * @write_buffer: message buffer will be written 125 * @write_buffer: message buffer will be written
126 * @write_length: message size will be written 126 * @write_length: message size will be written
127 * 127 *
128 * returns 1 if success, 0 - otherwise. 128 * This function returns -EIO if write has failed
129 */ 129 */
130int mei_write_message(struct mei_device *dev, 130int mei_write_message(struct mei_device *dev,
131 struct mei_msg_hdr *header, 131 struct mei_msg_hdr *header,
@@ -157,7 +157,7 @@ int mei_write_message(struct mei_device *dev,
157 dw_to_write = ((write_length + 3) / 4); 157 dw_to_write = ((write_length + 3) / 4);
158 158
159 if (dw_to_write > empty_slots) 159 if (dw_to_write > empty_slots)
160 return 0; 160 return -EIO;
161 161
162 mei_reg_write(dev, H_CB_WW, *((u32 *) header)); 162 mei_reg_write(dev, H_CB_WW, *((u32 *) header));
163 163
@@ -177,9 +177,9 @@ int mei_write_message(struct mei_device *dev,
177 mei_hcsr_set(dev); 177 mei_hcsr_set(dev);
178 dev->me_hw_state = mei_mecsr_read(dev); 178 dev->me_hw_state = mei_mecsr_read(dev);
179 if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA) 179 if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
180 return 0; 180 return -EIO;
181 181
182 return 1; 182 return 0;
183} 183}
184 184
185/** 185/**
@@ -215,26 +215,17 @@ int mei_count_full_read_slots(struct mei_device *dev)
215 * @buffer: message buffer will be written 215 * @buffer: message buffer will be written
216 * @buffer_length: message size will be read 216 * @buffer_length: message size will be read
217 */ 217 */
218void mei_read_slots(struct mei_device *dev, 218void mei_read_slots(struct mei_device *dev, unsigned char *buffer,
219 unsigned char *buffer, unsigned long buffer_length) 219 unsigned long buffer_length)
220{ 220{
221 u32 i = 0; 221 u32 *reg_buf = (u32 *)buffer;
222 unsigned char temp_buf[sizeof(u32)];
223
224 while (buffer_length >= sizeof(u32)) {
225 ((u32 *) buffer)[i] = mei_mecbrw_read(dev);
226 222
227 dev_dbg(&dev->pdev->dev, 223 for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
228 "buffer[%d]= %d\n", 224 *reg_buf++ = mei_mecbrw_read(dev);
229 i, ((u32 *) buffer)[i]);
230
231 i++;
232 buffer_length -= sizeof(u32);
233 }
234 225
235 if (buffer_length > 0) { 226 if (buffer_length > 0) {
236 *((u32 *) &temp_buf) = mei_mecbrw_read(dev); 227 u32 reg = mei_mecbrw_read(dev);
237 memcpy(&buffer[i * 4], temp_buf, buffer_length); 228 memcpy(reg_buf, &reg, buffer_length);
238 } 229 }
239 230
240 dev->host_hw_state |= H_IG; 231 dev->host_hw_state |= H_IG;
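The rewritten mei_read_slots() copies whole 32-bit words through a u32 pointer and memcpy()s the 1-3 trailing bytes from one final word. A runnable sketch of that tail-handling pattern with the register read stubbed out (the stub names are hypothetical):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint32_t read_reg(void)	/* stands in for mei_mecbrw_read() */
{
	static uint32_t n;
	return ++n;
}

static void read_slots(unsigned char *buffer, unsigned long buffer_length)
{
	uint32_t *reg_buf = (uint32_t *)buffer;

	for (; buffer_length >= sizeof(uint32_t); buffer_length -= sizeof(uint32_t))
		*reg_buf++ = read_reg();

	if (buffer_length > 0) {	/* 1-3 byte tail from one last word */
		uint32_t reg = read_reg();
		memcpy(reg_buf, &reg, buffer_length);
	}
}

int main(void)
{
	uint32_t words[2] = { 0, 0 };	/* aligned backing store */

	read_slots((unsigned char *)words, 7);	/* one full word + 3-byte tail */
	printf("0x%08x 0x%08x\n", (unsigned)words[0], (unsigned)words[1]);
	return 0;
}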
@@ -284,7 +275,7 @@ int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl)
284 * @returns 275 * @returns
285 * 0 on success 276 * 0 on success
286 * -ENOENT when me client is not found 277 * -ENOENT when me client is not found
287 * -EINVAL wehn ctrl credits are <= 0 278 * -EINVAL when ctrl credits are <= 0
288 */ 279 */
289int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl) 280int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
290{ 281{
@@ -317,7 +308,7 @@ int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
317 * @dev: the device structure 308 * @dev: the device structure
318 * @cl: private data of the file object 309 * @cl: private data of the file object
319 * 310 *
320 * returns 1 if success, 0 - otherwise. 311 * This function returns -EIO on write failure
321 */ 312 */
322int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl) 313int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
323{ 314{
@@ -335,18 +326,15 @@ int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
335 memset(mei_flow_control, 0, sizeof(*mei_flow_control)); 326 memset(mei_flow_control, 0, sizeof(*mei_flow_control));
336 mei_flow_control->host_addr = cl->host_client_id; 327 mei_flow_control->host_addr = cl->host_client_id;
337 mei_flow_control->me_addr = cl->me_client_id; 328 mei_flow_control->me_addr = cl->me_client_id;
338 mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD; 329 mei_flow_control->hbm_cmd = MEI_FLOW_CONTROL_CMD;
339 memset(mei_flow_control->reserved, 0, 330 memset(mei_flow_control->reserved, 0,
340 sizeof(mei_flow_control->reserved)); 331 sizeof(mei_flow_control->reserved));
341 dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n", 332 dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
342 cl->host_client_id, cl->me_client_id); 333 cl->host_client_id, cl->me_client_id);
343 if (!mei_write_message(dev, mei_hdr,
344 (unsigned char *) mei_flow_control,
345 sizeof(struct hbm_flow_control)))
346 return 0;
347
348 return 1;
349 334
335 return mei_write_message(dev, mei_hdr,
336 (unsigned char *) mei_flow_control,
337 sizeof(struct hbm_flow_control));
350} 338}
351 339
352/** 340/**
@@ -380,7 +368,7 @@ int mei_other_client_is_connecting(struct mei_device *dev,
380 * @dev: the device structure 368 * @dev: the device structure
381 * @cl: private data of the file object 369 * @cl: private data of the file object
382 * 370 *
383 * returns 1 if success, 0 - otherwise. 371 * This function returns -EIO on write failure
384 */ 372 */
385int mei_disconnect(struct mei_device *dev, struct mei_cl *cl) 373int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
386{ 374{
@@ -399,15 +387,12 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
399 memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect)); 387 memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
400 mei_cli_disconnect->host_addr = cl->host_client_id; 388 mei_cli_disconnect->host_addr = cl->host_client_id;
401 mei_cli_disconnect->me_addr = cl->me_client_id; 389 mei_cli_disconnect->me_addr = cl->me_client_id;
402 mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD; 390 mei_cli_disconnect->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD;
403 mei_cli_disconnect->reserved[0] = 0; 391 mei_cli_disconnect->reserved[0] = 0;
404 392
405 if (!mei_write_message(dev, mei_hdr, 393 return mei_write_message(dev, mei_hdr,
406 (unsigned char *) mei_cli_disconnect, 394 (unsigned char *) mei_cli_disconnect,
407 sizeof(struct hbm_client_disconnect_request))) 395 sizeof(struct hbm_client_disconnect_request));
408 return 0;
409
410 return 1;
411} 396}
412 397
413/** 398/**
@@ -416,7 +401,7 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
416 * @dev: the device structure 401 * @dev: the device structure
417 * @cl: private data of the file object 402 * @cl: private data of the file object
418 * 403 *
419 * returns 1 if success, 0 - otherwise. 404 * This function returns -EIO on write failure
420 */ 405 */
421int mei_connect(struct mei_device *dev, struct mei_cl *cl) 406int mei_connect(struct mei_device *dev, struct mei_cl *cl)
422{ 407{
@@ -434,13 +419,10 @@ int mei_connect(struct mei_device *dev, struct mei_cl *cl)
434 (struct hbm_client_connect_request *) &dev->wr_msg_buf[1]; 419 (struct hbm_client_connect_request *) &dev->wr_msg_buf[1];
435 mei_cli_connect->host_addr = cl->host_client_id; 420 mei_cli_connect->host_addr = cl->host_client_id;
436 mei_cli_connect->me_addr = cl->me_client_id; 421 mei_cli_connect->me_addr = cl->me_client_id;
437 mei_cli_connect->cmd.cmd = CLIENT_CONNECT_REQ_CMD; 422 mei_cli_connect->hbm_cmd = CLIENT_CONNECT_REQ_CMD;
438 mei_cli_connect->reserved = 0; 423 mei_cli_connect->reserved = 0;
439 424
440 if (!mei_write_message(dev, mei_hdr, 425 return mei_write_message(dev, mei_hdr,
441 (unsigned char *) mei_cli_connect, 426 (unsigned char *) mei_cli_connect,
442 sizeof(struct hbm_client_connect_request))) 427 sizeof(struct hbm_client_connect_request));
443 return 0;
444
445 return 1;
446} 428}
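interface.c also flips the return convention from 1-on-success/0-on-failure to 0-on-success/negative-errno, which is why mei_connect() and mei_disconnect() now return mei_write_message() directly and every caller's test above is inverted. A compact sketch of the convention, under assumed simplified signatures:

#include <stdio.h>
#include <errno.h>

static int demo_write_message(int empty_slots, int dw_to_write)
{
	if (dw_to_write > empty_slots)
		return -EIO;	/* was "return 0" under the old 1/0 scheme */
	return 0;
}

static int demo_send(void)
{
	/* errors propagate directly, as mei_connect()/mei_disconnect() do now */
	return demo_write_message(4, 2);
}

int main(void)
{
	printf("%d\n", demo_send());	/* 0 on success */
	return 0;
}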
diff --git a/drivers/staging/mei/interface.h b/drivers/staging/mei/interface.h
index aeae511419c7..fb90c6f8a759 100644
--- a/drivers/staging/mei/interface.h
+++ b/drivers/staging/mei/interface.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * 2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver 3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2011, Intel Corporation. 4 * Copyright (c) 2003-2012, Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -33,7 +33,8 @@
33 33
34 34
35void mei_read_slots(struct mei_device *dev, 35void mei_read_slots(struct mei_device *dev,
36 unsigned char *buffer, unsigned long buffer_length); 36 unsigned char *buffer,
37 unsigned long buffer_length);
37 38
38int mei_write_message(struct mei_device *dev, 39int mei_write_message(struct mei_device *dev,
39 struct mei_msg_hdr *header, 40 struct mei_msg_hdr *header,
@@ -59,7 +60,7 @@ void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout);
59 */ 60 */
60void mei_watchdog_register(struct mei_device *dev); 61void mei_watchdog_register(struct mei_device *dev);
61/* 62/*
62 * mei_watchdog_unregister - Uegistering watchdog interface 63 * mei_watchdog_unregister - Unregistering watchdog interface
63 * @dev - mei device 64 * @dev - mei device
64 */ 65 */
65void mei_watchdog_unregister(struct mei_device *dev); 66void mei_watchdog_unregister(struct mei_device *dev);
diff --git a/drivers/staging/mei/interrupt.c b/drivers/staging/mei/interrupt.c
index 3544fee34e48..2007d2447b1c 100644
--- a/drivers/staging/mei/interrupt.c
+++ b/drivers/staging/mei/interrupt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * 2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver 3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2011, Intel Corporation. 4 * Copyright (c) 2003-2012, Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -123,8 +123,7 @@ static int mei_irq_thread_read_amthi_message(struct mei_io_list *complete_list,
123 BUG_ON(mei_hdr->me_addr != dev->iamthif_cl.me_client_id); 123 BUG_ON(mei_hdr->me_addr != dev->iamthif_cl.me_client_id);
124 BUG_ON(dev->iamthif_state != MEI_IAMTHIF_READING); 124 BUG_ON(dev->iamthif_state != MEI_IAMTHIF_READING);
125 125
126 buffer = (unsigned char *) (dev->iamthif_msg_buf + 126 buffer = dev->iamthif_msg_buf + dev->iamthif_msg_buf_index;
127 dev->iamthif_msg_buf_index);
128 BUG_ON(dev->iamthif_mtu < dev->iamthif_msg_buf_index + mei_hdr->length); 127 BUG_ON(dev->iamthif_mtu < dev->iamthif_msg_buf_index + mei_hdr->length);
129 128
130 mei_read_slots(dev, buffer, mei_hdr->length); 129 mei_read_slots(dev, buffer, mei_hdr->length);
@@ -206,9 +205,7 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
206 cl = (struct mei_cl *)cb_pos->file_private; 205 cl = (struct mei_cl *)cb_pos->file_private;
207 if (cl && _mei_irq_thread_state_ok(cl, mei_hdr)) { 206 if (cl && _mei_irq_thread_state_ok(cl, mei_hdr)) {
208 cl->reading_state = MEI_READING; 207 cl->reading_state = MEI_READING;
209 buffer = (unsigned char *) 208 buffer = cb_pos->response_buffer.data + cb_pos->information;
210 (cb_pos->response_buffer.data +
211 cb_pos->information);
212 209
213 if (cb_pos->response_buffer.size < 210 if (cb_pos->response_buffer.size <
214 mei_hdr->length + cb_pos->information) { 211 mei_hdr->length + cb_pos->information) {
@@ -247,8 +244,7 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
247quit: 244quit:
248 dev_dbg(&dev->pdev->dev, "message read\n"); 245 dev_dbg(&dev->pdev->dev, "message read\n");
249 if (!buffer) { 246 if (!buffer) {
250 mei_read_slots(dev, (unsigned char *) dev->rd_msg_buf, 247 mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
251 mei_hdr->length);
252 dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n", 248 dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n",
253 *(u32 *) dev->rd_msg_buf); 249 *(u32 *) dev->rd_msg_buf);
254 } 250 }
@@ -267,26 +263,25 @@ quit:
267static int _mei_irq_thread_iamthif_read(struct mei_device *dev, s32 *slots) 263static int _mei_irq_thread_iamthif_read(struct mei_device *dev, s32 *slots)
268{ 264{
269 265
270 if (((*slots) * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) 266 if (((*slots) * sizeof(u32)) < (sizeof(struct mei_msg_hdr)
271 + sizeof(struct hbm_flow_control))) { 267 + sizeof(struct hbm_flow_control))) {
272 *slots -= (sizeof(struct mei_msg_hdr) +
273 sizeof(struct hbm_flow_control) + 3) / 4;
274 if (!mei_send_flow_control(dev, &dev->iamthif_cl)) {
275 dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
276 } else {
277 dev_dbg(&dev->pdev->dev, "iamthif flow control success\n");
278 dev->iamthif_state = MEI_IAMTHIF_READING;
279 dev->iamthif_flow_control_pending = false;
280 dev->iamthif_msg_buf_index = 0;
281 dev->iamthif_msg_buf_size = 0;
282 dev->iamthif_stall_timer = IAMTHIF_STALL_TIMER;
283 dev->mei_host_buffer_is_empty =
284 mei_host_buffer_is_empty(dev);
285 }
286 return 0;
287 } else {
288 return -EMSGSIZE; 268 return -EMSGSIZE;
289 } 269 }
270 *slots -= (sizeof(struct mei_msg_hdr) +
271 sizeof(struct hbm_flow_control) + 3) / 4;
272 if (mei_send_flow_control(dev, &dev->iamthif_cl)) {
273 dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
274 return -EIO;
275 }
276
277 dev_dbg(&dev->pdev->dev, "iamthif flow control success\n");
278 dev->iamthif_state = MEI_IAMTHIF_READING;
279 dev->iamthif_flow_control_pending = false;
280 dev->iamthif_msg_buf_index = 0;
281 dev->iamthif_msg_buf_size = 0;
282 dev->iamthif_stall_timer = IAMTHIF_STALL_TIMER;
283 dev->mei_host_buffer_is_empty = mei_host_buffer_is_empty(dev);
284 return 0;
290} 285}
291 286
292/** 287/**
@@ -310,7 +305,7 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
310 *slots -= (sizeof(struct mei_msg_hdr) + 305 *slots -= (sizeof(struct mei_msg_hdr) +
311 sizeof(struct hbm_client_disconnect_request) + 3) / 4; 306 sizeof(struct hbm_client_disconnect_request) + 3) / 4;
312 307
313 if (!mei_disconnect(dev, cl)) { 308 if (mei_disconnect(dev, cl)) {
314 cl->status = 0; 309 cl->status = 0;
315 cb_pos->information = 0; 310 cb_pos->information = 0;
316 list_move_tail(&cb_pos->cb_list, 311 list_move_tail(&cb_pos->cb_list,
@@ -601,8 +596,7 @@ static void mei_client_disconnect_request(struct mei_device *dev,
601 &dev->ext_msg_buf[1]; 596 &dev->ext_msg_buf[1];
602 disconnect_res->host_addr = cl_pos->host_client_id; 597 disconnect_res->host_addr = cl_pos->host_client_id;
603 disconnect_res->me_addr = cl_pos->me_client_id; 598 disconnect_res->me_addr = cl_pos->me_client_id;
604 *(u8 *) (&disconnect_res->cmd) = 599 disconnect_res->hbm_cmd = CLIENT_DISCONNECT_RES_CMD;
605 CLIENT_DISCONNECT_RES_CMD;
606 disconnect_res->status = 0; 600 disconnect_res->status = 0;
607 dev->extra_write_index = 2; 601 dev->extra_write_index = 2;
608 break; 602 break;
@@ -632,15 +626,13 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
632 struct hbm_host_stop_request *host_stop_req; 626 struct hbm_host_stop_request *host_stop_req;
633 int res; 627 int res;
634 628
635 unsigned char *buffer;
636 629
637 /* read the message to our buffer */ 630 /* read the message to our buffer */
638 buffer = (unsigned char *) dev->rd_msg_buf;
639 BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf)); 631 BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf));
640 mei_read_slots(dev, buffer, mei_hdr->length); 632 mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
641 mei_msg = (struct mei_bus_message *) buffer; 633 mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
642 634
643 switch (*(u8 *) mei_msg) { 635 switch (mei_msg->hbm_cmd) {
644 case HOST_START_RES_CMD: 636 case HOST_START_RES_CMD:
645 version_res = (struct hbm_host_version_response *) mei_msg; 637 version_res = (struct hbm_host_version_response *) mei_msg;
646 if (version_res->host_version_supported) { 638 if (version_res->host_version_supported) {
@@ -659,6 +651,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
659 } else { 651 } else {
660 dev->version = version_res->me_max_version; 652 dev->version = version_res->me_max_version;
661 /* send stop message */ 653 /* send stop message */
654 mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
662 mei_hdr->host_addr = 0; 655 mei_hdr->host_addr = 0;
663 mei_hdr->me_addr = 0; 656 mei_hdr->me_addr = 0;
664 mei_hdr->length = sizeof(struct hbm_host_stop_request); 657 mei_hdr->length = sizeof(struct hbm_host_stop_request);
@@ -671,7 +664,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
671 memset(host_stop_req, 664 memset(host_stop_req,
672 0, 665 0,
673 sizeof(struct hbm_host_stop_request)); 666 sizeof(struct hbm_host_stop_request));
674 host_stop_req->cmd.cmd = HOST_STOP_REQ_CMD; 667 host_stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
675 host_stop_req->reason = DRIVER_STOP_REQUEST; 668 host_stop_req->reason = DRIVER_STOP_REQUEST;
676 mei_write_message(dev, mei_hdr, 669 mei_write_message(dev, mei_hdr,
677 (unsigned char *) (host_stop_req), 670 (unsigned char *) (host_stop_req),
@@ -725,7 +718,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
725 dev->me_client_index++; 718 dev->me_client_index++;
726 dev->me_client_presentation_num++; 719 dev->me_client_presentation_num++;
727 720
728 /** Send Client Propeties request **/ 721 /** Send Client Properties request **/
729 res = mei_host_client_properties(dev); 722 res = mei_host_client_properties(dev);
730 if (res < 0) { 723 if (res < 0) {
731 dev_dbg(&dev->pdev->dev, "mei_host_client_properties() failed"); 724 dev_dbg(&dev->pdev->dev, "mei_host_client_properties() failed");
@@ -811,7 +804,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
811 host_stop_req = 804 host_stop_req =
812 (struct hbm_host_stop_request *) &dev->ext_msg_buf[1]; 805 (struct hbm_host_stop_request *) &dev->ext_msg_buf[1];
813 memset(host_stop_req, 0, sizeof(struct hbm_host_stop_request)); 806 memset(host_stop_req, 0, sizeof(struct hbm_host_stop_request));
814 host_stop_req->cmd.cmd = HOST_STOP_REQ_CMD; 807 host_stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
815 host_stop_req->reason = DRIVER_STOP_REQUEST; 808 host_stop_req->reason = DRIVER_STOP_REQUEST;
816 host_stop_req->reserved[0] = 0; 809 host_stop_req->reserved[0] = 0;
817 host_stop_req->reserved[1] = 0; 810 host_stop_req->reserved[1] = 0;
@@ -844,24 +837,21 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
844{ 837{
845 if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) + 838 if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
846 sizeof(struct hbm_flow_control))) { 839 sizeof(struct hbm_flow_control))) {
847 *slots -= (sizeof(struct mei_msg_hdr) +
848 sizeof(struct hbm_flow_control) + 3) / 4;
849 if (!mei_send_flow_control(dev, cl)) {
850 cl->status = -ENODEV;
851 cb_pos->information = 0;
852 list_move_tail(&cb_pos->cb_list,
853 &cmpl_list->mei_cb.cb_list);
854 return -ENODEV;
855 } else {
856 list_move_tail(&cb_pos->cb_list,
857 &dev->read_list.mei_cb.cb_list);
858 }
859 } else {
860 /* return the cancel routine */ 840 /* return the cancel routine */
861 list_del(&cb_pos->cb_list); 841 list_del(&cb_pos->cb_list);
862 return -EBADMSG; 842 return -EBADMSG;
863 } 843 }
864 844
845 *slots -= (sizeof(struct mei_msg_hdr) +
846 sizeof(struct hbm_flow_control) + 3) / 4;
847 if (mei_send_flow_control(dev, cl)) {
848 cl->status = -ENODEV;
849 cb_pos->information = 0;
850 list_move_tail(&cb_pos->cb_list, &cmpl_list->mei_cb.cb_list);
851 return -ENODEV;
852 }
853 list_move_tail(&cb_pos->cb_list, &dev->read_list.mei_cb.cb_list);
854
865 return 0; 855 return 0;
866} 856}
867 857
@@ -887,7 +877,7 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
887 cl->state = MEI_FILE_CONNECTING; 877 cl->state = MEI_FILE_CONNECTING;
888 *slots -= (sizeof(struct mei_msg_hdr) + 878 *slots -= (sizeof(struct mei_msg_hdr) +
889 sizeof(struct hbm_client_connect_request) + 3) / 4; 879 sizeof(struct hbm_client_connect_request) + 3) / 4;
890 if (!mei_connect(dev, cl)) { 880 if (mei_connect(dev, cl)) {
891 cl->status = -ENODEV; 881 cl->status = -ENODEV;
892 cb_pos->information = 0; 882 cb_pos->information = 0;
893 list_del(&cb_pos->cb_list); 883 list_del(&cb_pos->cb_list);
@@ -944,7 +934,7 @@ static int _mei_irq_thread_cmpl(struct mei_device *dev, s32 *slots,
944 mei_hdr->length); 934 mei_hdr->length);
945 *slots -= (sizeof(struct mei_msg_hdr) + 935 *slots -= (sizeof(struct mei_msg_hdr) +
946 mei_hdr->length + 3) / 4; 936 mei_hdr->length + 3) / 4;
947 if (!mei_write_message(dev, mei_hdr, 937 if (mei_write_message(dev, mei_hdr,
948 (unsigned char *) 938 (unsigned char *)
949 (cb_pos->request_buffer.data + 939 (cb_pos->request_buffer.data +
950 cb_pos->information), 940 cb_pos->information),
@@ -973,7 +963,7 @@ static int _mei_irq_thread_cmpl(struct mei_device *dev, s32 *slots,
973 963
974 (*slots) -= (sizeof(struct mei_msg_hdr) + 964 (*slots) -= (sizeof(struct mei_msg_hdr) +
975 mei_hdr->length + 3) / 4; 965 mei_hdr->length + 3) / 4;
976 if (!mei_write_message(dev, mei_hdr, 966 if (mei_write_message(dev, mei_hdr,
977 (unsigned char *) 967 (unsigned char *)
978 (cb_pos->request_buffer.data + 968 (cb_pos->request_buffer.data +
979 cb_pos->information), 969 cb_pos->information),
@@ -1034,7 +1024,7 @@ static int _mei_irq_thread_cmpl_iamthif(struct mei_device *dev, s32 *slots,
1034 *slots -= (sizeof(struct mei_msg_hdr) + 1024 *slots -= (sizeof(struct mei_msg_hdr) +
1035 mei_hdr->length + 3) / 4; 1025 mei_hdr->length + 3) / 4;
1036 1026
1037 if (!mei_write_message(dev, mei_hdr, 1027 if (mei_write_message(dev, mei_hdr,
1038 (dev->iamthif_msg_buf + 1028 (dev->iamthif_msg_buf +
1039 dev->iamthif_msg_buf_index), 1029 dev->iamthif_msg_buf_index),
1040 mei_hdr->length)) { 1030 mei_hdr->length)) {
@@ -1069,7 +1059,7 @@ static int _mei_irq_thread_cmpl_iamthif(struct mei_device *dev, s32 *slots,
1069 *slots -= (sizeof(struct mei_msg_hdr) + 1059 *slots -= (sizeof(struct mei_msg_hdr) +
1070 mei_hdr->length + 3) / 4; 1060 mei_hdr->length + 3) / 4;
1071 1061
1072 if (!mei_write_message(dev, mei_hdr, 1062 if (mei_write_message(dev, mei_hdr,
1073 (dev->iamthif_msg_buf + 1063 (dev->iamthif_msg_buf +
1074 dev->iamthif_msg_buf_index), 1064 dev->iamthif_msg_buf_index),
1075 mei_hdr->length)) { 1065 mei_hdr->length)) {
@@ -1286,7 +1276,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
1286 } 1276 }
1287 } 1277 }
1288 if (dev->stop) 1278 if (dev->stop)
1289 return ~ENODEV; 1279 return -ENODEV;
1290 1280
1291 /* complete control write list CB */ 1281 /* complete control write list CB */
1292 dev_dbg(&dev->pdev->dev, "complete control write list cb.\n"); 1282 dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
@@ -1423,7 +1413,7 @@ void mei_timer(struct work_struct *work)
1423 1413
1424 if (dev->iamthif_stall_timer) { 1414 if (dev->iamthif_stall_timer) {
1425 if (--dev->iamthif_stall_timer == 0) { 1415 if (--dev->iamthif_stall_timer == 0) {
1426 dev_dbg(&dev->pdev->dev, "reseting because of hang to amthi.\n"); 1416 dev_dbg(&dev->pdev->dev, "resetting because of hang to amthi.\n");
1427 mei_reset(dev, 1); 1417 mei_reset(dev, 1);
1428 dev->iamthif_msg_buf_size = 0; 1418 dev->iamthif_msg_buf_size = 0;
1429 dev->iamthif_msg_buf_index = 0; 1419 dev->iamthif_msg_buf_index = 0;
@@ -1513,7 +1503,7 @@ irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
1513 dev->host_hw_state = mei_hcsr_read(dev); 1503 dev->host_hw_state = mei_hcsr_read(dev);
1514 1504
1515 /* Ack the interrupt here 1505 /* Ack the interrupt here
1516 * In case of MSI we don't go throuhg the quick handler */ 1506 * In case of MSI we don't go through the quick handler */
1517 if (pci_dev_msi_enabled(dev->pdev)) 1507 if (pci_dev_msi_enabled(dev->pdev))
1518 mei_reg_write(dev, H_CSR, dev->host_hw_state); 1508 mei_reg_write(dev, H_CSR, dev->host_hw_state);
1519 1509
@@ -1549,7 +1539,7 @@ irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
1549 return IRQ_HANDLED; 1539 return IRQ_HANDLED;
1550 } 1540 }
1551 } 1541 }
1552 /* check slots avalable for reading */ 1542 /* check slots available for reading */
1553 slots = mei_count_full_read_slots(dev); 1543 slots = mei_count_full_read_slots(dev);
1554 dev_dbg(&dev->pdev->dev, "slots =%08x extra_write_index =%08x.\n", 1544 dev_dbg(&dev->pdev->dev, "slots =%08x extra_write_index =%08x.\n",
1555 slots, dev->extra_write_index); 1545 slots, dev->extra_write_index);
diff --git a/drivers/staging/mei/iorw.c b/drivers/staging/mei/iorw.c
index 0752ead4269a..0a80dc4e62f3 100644
--- a/drivers/staging/mei/iorw.c
+++ b/drivers/staging/mei/iorw.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * 2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver 3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2011, Intel Corporation. 4 * Copyright (c) 2003-2012, Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -37,7 +37,6 @@
37#include "hw.h" 37#include "hw.h"
38#include "mei.h" 38#include "mei.h"
39#include "interface.h" 39#include "interface.h"
40#include "mei_version.h"
41 40
42 41
43 42
@@ -109,8 +108,8 @@ int mei_ioctl_connect_client(struct file *file,
109 dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n", 108 dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
110 dev->me_clients[i].props.max_msg_length); 109 dev->me_clients[i].props.max_msg_length);
111 110
112 /* if we're connecting to amthi client so we will use the exist 111 /* if we're connecting to amthi client then we will use the
113 * connection 112 * existing connection
114 */ 113 */
115 if (uuid_le_cmp(data->in_client_uuid, mei_amthi_guid) == 0) { 114 if (uuid_le_cmp(data->in_client_uuid, mei_amthi_guid) == 0) {
116 dev_dbg(&dev->pdev->dev, "FW Client is amthi\n"); 115 dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
@@ -162,7 +161,7 @@ int mei_ioctl_connect_client(struct file *file,
162 && !mei_other_client_is_connecting(dev, cl)) { 161 && !mei_other_client_is_connecting(dev, cl)) {
163 dev_dbg(&dev->pdev->dev, "Sending Connect Message\n"); 162 dev_dbg(&dev->pdev->dev, "Sending Connect Message\n");
164 dev->mei_host_buffer_is_empty = false; 163 dev->mei_host_buffer_is_empty = false;
165 if (!mei_connect(dev, cl)) { 164 if (mei_connect(dev, cl)) {
166 dev_dbg(&dev->pdev->dev, "Sending connect message - failed\n"); 165 dev_dbg(&dev->pdev->dev, "Sending connect message - failed\n");
167 rets = -ENODEV; 166 rets = -ENODEV;
168 goto end; 167 goto end;
@@ -434,13 +433,11 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
434 cl->read_cb = cb; 433 cl->read_cb = cb;
435 if (dev->mei_host_buffer_is_empty) { 434 if (dev->mei_host_buffer_is_empty) {
436 dev->mei_host_buffer_is_empty = false; 435 dev->mei_host_buffer_is_empty = false;
437 if (!mei_send_flow_control(dev, cl)) { 436 if (mei_send_flow_control(dev, cl)) {
438 rets = -ENODEV; 437 rets = -ENODEV;
439 goto unlock; 438 goto unlock;
440 } else {
441 list_add_tail(&cb->cb_list,
442 &dev->read_list.mei_cb.cb_list);
443 } 439 }
440 list_add_tail(&cb->cb_list, &dev->read_list.mei_cb.cb_list);
444 } else { 441 } else {
445 list_add_tail(&cb->cb_list, &dev->ctrl_wr_list.mei_cb.cb_list); 442 list_add_tail(&cb->cb_list, &dev->ctrl_wr_list.mei_cb.cb_list);
446 } 443 }
@@ -500,7 +497,7 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
500 mei_hdr.me_addr = dev->iamthif_cl.me_client_id; 497 mei_hdr.me_addr = dev->iamthif_cl.me_client_id;
501 mei_hdr.reserved = 0; 498 mei_hdr.reserved = 0;
502 dev->iamthif_msg_buf_index += mei_hdr.length; 499 dev->iamthif_msg_buf_index += mei_hdr.length;
503 if (!mei_write_message(dev, &mei_hdr, 500 if (mei_write_message(dev, &mei_hdr,
504 (unsigned char *)(dev->iamthif_msg_buf), 501 (unsigned char *)(dev->iamthif_msg_buf),
505 mei_hdr.length)) 502 mei_hdr.length))
506 return -ENODEV; 503 return -ENODEV;
diff --git a/drivers/staging/mei/main.c b/drivers/staging/mei/main.c
index 1e1a9f996e7c..7c9321fa7bb1 100644
--- a/drivers/staging/mei/main.c
+++ b/drivers/staging/mei/main.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * 2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver 3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2011, Intel Corporation. 4 * Copyright (c) 2003-2012, Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -38,7 +38,6 @@
38#include "mei_dev.h" 38#include "mei_dev.h"
39#include "mei.h" 39#include "mei.h"
40#include "interface.h" 40#include "interface.h"
41#include "mei_version.h"
42 41
43 42
44#define MEI_READ_TIMEOUT 45 43#define MEI_READ_TIMEOUT 45
@@ -50,7 +49,6 @@
50 */ 49 */
51static char mei_driver_name[] = MEI_DRIVER_NAME; 50static char mei_driver_name[] = MEI_DRIVER_NAME;
52static const char mei_driver_string[] = "Intel(R) Management Engine Interface"; 51static const char mei_driver_string[] = "Intel(R) Management Engine Interface";
53static const char mei_driver_version[] = MEI_DRIVER_VERSION;
54 52
55/* The device pointer */ 53/* The device pointer */
56/* Currently this driver works as long as there is only a single AMT device. */ 54/* Currently this driver works as long as there is only a single AMT device. */
@@ -430,7 +428,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
430 goto free; 428 goto free;
431 } else if ((!cl->read_cb || !cl->read_cb->information) && 429 } else if ((!cl->read_cb || !cl->read_cb->information) &&
432 *offset > 0) { 430 *offset > 0) {
433 /*Offset needs to be cleaned for contingous reads*/ 431 /*Offset needs to be cleaned for contiguous reads*/
434 *offset = 0; 432 *offset = 0;
435 rets = 0; 433 rets = 0;
436 goto out; 434 goto out;
@@ -493,7 +491,7 @@ copy_buffer:
493 goto free; 491 goto free;
494 } 492 }
495 493
496 /* length is being turncated to PAGE_SIZE, however, */ 494 /* length is being truncated to PAGE_SIZE, however, */
497 /* information size may be longer */ 495 /* information size may be longer */
498 length = min_t(size_t, length, (cb->information - *offset)); 496 length = min_t(size_t, length, (cb->information - *offset));
499 497
@@ -740,7 +738,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
740 mei_hdr.reserved = 0; 738 mei_hdr.reserved = 0;
741 dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n", 739 dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
742 *((u32 *) &mei_hdr)); 740 *((u32 *) &mei_hdr));
743 if (!mei_write_message(dev, &mei_hdr, 741 if (mei_write_message(dev, &mei_hdr,
744 (unsigned char *) (write_cb->request_buffer.data), 742 (unsigned char *) (write_cb->request_buffer.data),
745 mei_hdr.length)) { 743 mei_hdr.length)) {
746 rets = -ENODEV; 744 rets = -ENODEV;
@@ -1206,8 +1204,7 @@ static int __init mei_init_module(void)
1206{ 1204{
1207 int ret; 1205 int ret;
1208 1206
1209 pr_debug("mei: %s - version %s\n", 1207 pr_debug("mei: %s\n", mei_driver_string);
1210 mei_driver_string, mei_driver_version);
1211 /* init pci module */ 1208 /* init pci module */
1212 ret = pci_register_driver(&mei_driver); 1209 ret = pci_register_driver(&mei_driver);
1213 if (ret < 0) 1210 if (ret < 0)
@@ -1238,4 +1235,3 @@ module_exit(mei_exit_module);
1238MODULE_AUTHOR("Intel Corporation"); 1235MODULE_AUTHOR("Intel Corporation");
1239MODULE_DESCRIPTION("Intel(R) Management Engine Interface"); 1236MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
1240MODULE_LICENSE("GPL v2"); 1237MODULE_LICENSE("GPL v2");
1241MODULE_VERSION(MEI_DRIVER_VERSION);
diff --git a/drivers/staging/mei/mei-amt-version.c b/drivers/staging/mei/mei-amt-version.c
new file mode 100644
index 000000000000..ac2a507be253
--- /dev/null
+++ b/drivers/staging/mei/mei-amt-version.c
@@ -0,0 +1,481 @@
1/******************************************************************************
2 * Intel Management Engine Interface (Intel MEI) Linux driver
3 * Intel MEI Interface Header
4 *
5 * This file is provided under a dual BSD/GPLv2 license. When using or
6 * redistributing this file, you may do so under either license.
7 *
8 * GPL LICENSE SUMMARY
9 *
10 * Copyright(c) 2012 Intel Corporation. All rights reserved.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called LICENSE.GPL.
28 *
29 * Contact Information:
30 * Intel Corporation.
31 * linux-mei@linux.intel.com
32 * http://www.intel.com
33 *
34 * BSD LICENSE
35 *
36 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
37 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66
67#include <stdio.h>
68#include <stdlib.h>
69#include <string.h>
70#include <fcntl.h>
71#include <sys/ioctl.h>
72#include <unistd.h>
73#include <errno.h>
74#include <stdint.h>
75#include <stdbool.h>
76#include <bits/wordsize.h>
77#include "mei.h"
78
79/*****************************************************************************
80 * Intel Management Engine Interface
81 *****************************************************************************/
82
83#define mei_msg(_me, fmt, ARGS...) do { \
84 if (_me->verbose) \
85 fprintf(stderr, fmt, ##ARGS); \
86} while (0)
87
88#define mei_err(_me, fmt, ARGS...) do { \
89 fprintf(stderr, "Error: " fmt, ##ARGS); \
90} while (0)
91
92struct mei {
93 uuid_le guid;
94 bool initialized;
95 bool verbose;
96 unsigned int buf_size;
97 unsigned char prot_ver;
98 int fd;
99};
100
101static void mei_deinit(struct mei *cl)
102{
103 if (cl->fd != -1)
104 close(cl->fd);
105 cl->fd = -1;
106 cl->buf_size = 0;
107 cl->prot_ver = 0;
108 cl->initialized = false;
109}
110
111static bool mei_init(struct mei *me, const uuid_le *guid,
112 unsigned char req_protocol_version, bool verbose)
113{
114 int result;
115 struct mei_client *cl;
116 struct mei_connect_client_data data;
117
118 mei_deinit(me);
119
120 me->verbose = verbose;
121
122 me->fd = open("/dev/mei", O_RDWR);
123 if (me->fd == -1) {
124 mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
125 goto err;
126 }
127 memcpy(&me->guid, guid, sizeof(*guid));
128 memset(&data, 0, sizeof(data));
129 me->initialized = true;
130
131 memcpy(&data.in_client_uuid, &me->guid, sizeof(me->guid));
132 result = ioctl(me->fd, IOCTL_MEI_CONNECT_CLIENT, &data);
133 if (result) {
134 mei_err(me, "IOCTL_MEI_CONNECT_CLIENT receive message. err=%d\n", result);
135 goto err;
136 }
137 cl = &data.out_client_properties;
138 mei_msg(me, "max_message_length %d\n", cl->max_msg_length);
139 mei_msg(me, "protocol_version %d\n", cl->protocol_version);
140
141 if ((req_protocol_version > 0) &&
142 (cl->protocol_version != req_protocol_version)) {
143 mei_err(me, "Intel MEI protocol version not supported\n");
144 goto err;
145 }
146
147 me->buf_size = cl->max_msg_length;
148 me->prot_ver = cl->protocol_version;
149
150 return true;
151err:
152 mei_deinit(me);
153 return false;
154}
155
156static ssize_t mei_recv_msg(struct mei *me, unsigned char *buffer,
157 ssize_t len, unsigned long timeout)
158{
159 ssize_t rc;
160
161 mei_msg(me, "call read length = %zd\n", len);
162
163 rc = read(me->fd, buffer, len);
164 if (rc < 0) {
165 mei_err(me, "read failed with status %zd %s\n",
166 rc, strerror(errno));
167 mei_deinit(me);
168 } else {
169 mei_msg(me, "read succeeded with result %zd\n", rc);
170 }
171 return rc;
172}
173
174static ssize_t mei_send_msg(struct mei *me, const unsigned char *buffer,
175 ssize_t len, unsigned long timeout)
176{
177 struct timeval tv;
178 ssize_t written;
179 ssize_t rc;
180 fd_set set;
181
182 tv.tv_sec = timeout / 1000;
	183	tv.tv_usec = (timeout % 1000) * 1000; /* ms remainder -> microseconds */
184
185 mei_msg(me, "call write length = %zd\n", len);
186
187 written = write(me->fd, buffer, len);
188 if (written < 0) {
189 rc = -errno;
190 mei_err(me, "write failed with status %zd %s\n",
191 written, strerror(errno));
192 goto out;
193 }
194
195 FD_ZERO(&set);
196 FD_SET(me->fd, &set);
	197	rc = select(me->fd + 1, &set, NULL, NULL, &tv);
198 if (rc > 0 && FD_ISSET(me->fd, &set)) {
199 mei_msg(me, "write success\n");
200 } else if (rc == 0) {
	201		mei_err(me, "write failed on timeout\n");
202 goto out;
203 } else { /* rc < 0 */
204 mei_err(me, "write failed on select with status %zd\n", rc);
205 goto out;
206 }
207
208 rc = written;
209out:
210 if (rc < 0)
211 mei_deinit(me);
212
213 return rc;
214}
215
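/*
 * Minimal usage sketch for the helpers above. SOME_CLIENT_UUID, req and
 * req_len are placeholders; a real caller would use a known firmware
 * client GUID such as MEI_IAMTHIF defined further down:
 *
 *	struct mei me;
 *	unsigned char buf[512];
 *
 *	if (mei_init(&me, &SOME_CLIENT_UUID, 0, true)) {
 *		if (mei_send_msg(&me, req, req_len, 5000) == (ssize_t)req_len)
 *			mei_recv_msg(&me, buf, sizeof(buf), 2000);
 *		mei_deinit(&me);
 *	}
 */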
216/***************************************************************************
	217 * Intel Advanced Management Technology ME Client
218 ***************************************************************************/
219
220#define AMT_MAJOR_VERSION 1
221#define AMT_MINOR_VERSION 1
222
223#define AMT_STATUS_SUCCESS 0x0
224#define AMT_STATUS_INTERNAL_ERROR 0x1
225#define AMT_STATUS_NOT_READY 0x2
226#define AMT_STATUS_INVALID_AMT_MODE 0x3
227#define AMT_STATUS_INVALID_MESSAGE_LENGTH 0x4
228
229#define AMT_STATUS_HOST_IF_EMPTY_RESPONSE 0x4000
230#define AMT_STATUS_SDK_RESOURCES 0x1004
231
232
233#define AMT_BIOS_VERSION_LEN 65
234#define AMT_VERSIONS_NUMBER 50
235#define AMT_UNICODE_STRING_LEN 20
236
237struct amt_unicode_string {
238 uint16_t length;
239 char string[AMT_UNICODE_STRING_LEN];
240} __attribute__((packed));
241
242struct amt_version_type {
243 struct amt_unicode_string description;
244 struct amt_unicode_string version;
245} __attribute__((packed));
246
247struct amt_version {
248 uint8_t major;
249 uint8_t minor;
250} __attribute__((packed));
251
252struct amt_code_versions {
253 uint8_t bios[AMT_BIOS_VERSION_LEN];
254 uint32_t count;
255 struct amt_version_type versions[AMT_VERSIONS_NUMBER];
256} __attribute__((packed));
257
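/*
 * Sanity check on the wire layout (a sketch added for illustration, not
 * part of the protocol headers): with the packed attribute,
 * struct amt_unicode_string is 2 + 20 = 22 bytes and
 * struct amt_version_type is 44 bytes, which is what the length math in
 * amt_verify_code_versions() below relies on.
 */
typedef char assert_amt_unicode_string_size
	[sizeof(struct amt_unicode_string) == 22 ? 1 : -1];
typedef char assert_amt_version_type_size
	[sizeof(struct amt_version_type) == 44 ? 1 : -1];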
258/***************************************************************************
	259 * Intel Advanced Management Technology Host Interface
260 ***************************************************************************/
261
262struct amt_host_if_msg_header {
263 struct amt_version version;
264 uint16_t _reserved;
265 uint32_t command;
266 uint32_t length;
267} __attribute__((packed));
268
269struct amt_host_if_resp_header {
270 struct amt_host_if_msg_header header;
271 uint32_t status;
272 unsigned char data[0];
273} __attribute__((packed));
274
	275const uuid_le MEI_IAMTHIF = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,
276 0xac, 0xa8, 0x46, 0xe0, 0xff, 0x65, 0x81, 0x4c);
277
278#define AMT_HOST_IF_CODE_VERSIONS_REQUEST 0x0400001A
279#define AMT_HOST_IF_CODE_VERSIONS_RESPONSE 0x0480001A
280
281const struct amt_host_if_msg_header CODE_VERSION_REQ = {
282 .version = {AMT_MAJOR_VERSION, AMT_MINOR_VERSION},
283 ._reserved = 0,
284 .command = AMT_HOST_IF_CODE_VERSIONS_REQUEST,
285 .length = 0
286};
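/*
 * On a little-endian host the packed 12-byte request above serializes as
 * 01 01 00 00 1a 00 00 04 00 00 00 00: major, minor, the reserved word,
 * then the command and length as 32-bit little-endian values.
 */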
287
288
289struct amt_host_if {
290 struct mei mei_cl;
291 unsigned long send_timeout;
292 bool initialized;
293};
294
295
296static bool amt_host_if_init(struct amt_host_if *acmd,
297 unsigned long send_timeout, bool verbose)
298{
299 acmd->send_timeout = (send_timeout) ? send_timeout : 20000;
300 acmd->initialized = mei_init(&acmd->mei_cl, &MEI_IAMTHIF, 0, verbose);
301 return acmd->initialized;
302}
303
304static void amt_host_if_deinit(struct amt_host_if *acmd)
305{
306 mei_deinit(&acmd->mei_cl);
307 acmd->initialized = false;
308}
309
310static uint32_t amt_verify_code_versions(const struct amt_host_if_resp_header *resp)
311{
312 uint32_t status = AMT_STATUS_SUCCESS;
313 struct amt_code_versions *code_ver;
314 size_t code_ver_len;
315 uint32_t ver_type_cnt;
316 uint32_t len;
317 uint32_t i;
318
319 code_ver = (struct amt_code_versions *)resp->data;
320 /* length - sizeof(status) */
321 code_ver_len = resp->header.length - sizeof(uint32_t);
322 ver_type_cnt = code_ver_len -
323 sizeof(code_ver->bios) -
324 sizeof(code_ver->count);
325 if (code_ver->count != ver_type_cnt / sizeof(struct amt_version_type)) {
326 status = AMT_STATUS_INTERNAL_ERROR;
327 goto out;
328 }
329
330 for (i = 0; i < code_ver->count; i++) {
331 len = code_ver->versions[i].description.length;
332
333 if (len > AMT_UNICODE_STRING_LEN) {
334 status = AMT_STATUS_INTERNAL_ERROR;
335 goto out;
336 }
337
	338		len = code_ver->versions[i].version.length;
	339		if (len >= AMT_UNICODE_STRING_LEN || /* would index past the array */
	340		    code_ver->versions[i].version.string[len] != '\0' ||
			    len != strlen(code_ver->versions[i].version.string)) {
341 status = AMT_STATUS_INTERNAL_ERROR;
342 goto out;
343 }
344 }
345out:
346 return status;
347}
348
349static uint32_t amt_verify_response_header(uint32_t command,
350 const struct amt_host_if_msg_header *resp_hdr,
351 uint32_t response_size)
352{
353 if (response_size < sizeof(struct amt_host_if_resp_header)) {
354 return AMT_STATUS_INTERNAL_ERROR;
355 } else if (response_size != (resp_hdr->length +
356 sizeof(struct amt_host_if_msg_header))) {
357 return AMT_STATUS_INTERNAL_ERROR;
358 } else if (resp_hdr->command != command) {
359 return AMT_STATUS_INTERNAL_ERROR;
360 } else if (resp_hdr->_reserved != 0) {
361 return AMT_STATUS_INTERNAL_ERROR;
362 } else if (resp_hdr->version.major != AMT_MAJOR_VERSION ||
363 resp_hdr->version.minor < AMT_MINOR_VERSION) {
364 return AMT_STATUS_INTERNAL_ERROR;
365 }
366 return AMT_STATUS_SUCCESS;
367}
368
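/*
 * Once the allocation below succeeds, *read_buf stays allocated on every
 * return path, success or failure; the caller owns the buffer and must
 * free() it, as amt_get_code_versions() does.
 */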
369static uint32_t amt_host_if_call(struct amt_host_if *acmd,
370 const unsigned char *command, ssize_t command_sz,
371 uint8_t **read_buf, uint32_t rcmd,
372 unsigned int expected_sz)
373{
374 uint32_t in_buf_sz;
	375	ssize_t out_buf_sz;	/* signed: mei_recv_msg() may return -1 */
376 ssize_t written;
377 uint32_t status;
378 struct amt_host_if_resp_header *msg_hdr;
379
380 in_buf_sz = acmd->mei_cl.buf_size;
381 *read_buf = (uint8_t *)malloc(sizeof(uint8_t) * in_buf_sz);
382 if (*read_buf == NULL)
383 return AMT_STATUS_SDK_RESOURCES;
384 memset(*read_buf, 0, in_buf_sz);
385 msg_hdr = (struct amt_host_if_resp_header *)*read_buf;
386
387 written = mei_send_msg(&acmd->mei_cl,
388 command, command_sz, acmd->send_timeout);
389 if (written != command_sz)
390 return AMT_STATUS_INTERNAL_ERROR;
391
392 out_buf_sz = mei_recv_msg(&acmd->mei_cl, *read_buf, in_buf_sz, 2000);
393 if (out_buf_sz <= 0)
394 return AMT_STATUS_HOST_IF_EMPTY_RESPONSE;
395
396 status = msg_hdr->status;
397 if (status != AMT_STATUS_SUCCESS)
398 return status;
399
400 status = amt_verify_response_header(rcmd,
401 &msg_hdr->header, out_buf_sz);
402 if (status != AMT_STATUS_SUCCESS)
403 return status;
404
405 if (expected_sz && expected_sz != out_buf_sz)
406 return AMT_STATUS_INTERNAL_ERROR;
407
408 return AMT_STATUS_SUCCESS;
409}
410
411
412static uint32_t amt_get_code_versions(struct amt_host_if *cmd,
413 struct amt_code_versions *versions)
414{
415 struct amt_host_if_resp_header *response = NULL;
416 uint32_t status;
417
418 status = amt_host_if_call(cmd,
419 (const unsigned char *)&CODE_VERSION_REQ,
420 sizeof(CODE_VERSION_REQ),
421 (uint8_t **)&response,
422 AMT_HOST_IF_CODE_VERSIONS_RESPONSE, 0);
423
424 if (status != AMT_STATUS_SUCCESS)
425 goto out;
426
427 status = amt_verify_code_versions(response);
428 if (status != AMT_STATUS_SUCCESS)
429 goto out;
430
431 memcpy(versions, response->data, sizeof(struct amt_code_versions));
432out:
433 if (response != NULL)
434 free(response);
435
436 return status;
437}
438
439/************************** end of amt_host_if_command ***********************/
440int main(int argc, char **argv)
441{
442 struct amt_code_versions ver;
443 struct amt_host_if acmd;
444 unsigned int i;
445 uint32_t status;
446 int ret;
447 bool verbose;
448
449 verbose = (argc > 1 && strcmp(argv[1], "-v") == 0);
450
451 if (!amt_host_if_init(&acmd, 5000, verbose)) {
452 ret = 1;
453 goto out;
454 }
455
456 status = amt_get_code_versions(&acmd, &ver);
457
458 amt_host_if_deinit(&acmd);
459
460 switch (status) {
461 case AMT_STATUS_HOST_IF_EMPTY_RESPONSE:
462 printf("Intel AMT: DISABLED\n");
463 ret = 0;
464 break;
465 case AMT_STATUS_SUCCESS:
466 printf("Intel AMT: ENABLED\n");
467 for (i = 0; i < ver.count; i++) {
468 printf("%s:\t%s\n", ver.versions[i].description.string,
469 ver.versions[i].version.string);
470 }
471 ret = 0;
472 break;
473 default:
474 printf("An error has occurred\n");
475 ret = 1;
476 break;
477 }
478
479out:
480 return ret;
481}
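/*
 * Build/run sketch (the include path is an assumption and may need
 * adjusting so that mei.h is found; the staging mei driver must be
 * loaded and expose /dev/mei):
 *
 *	cc -I. -o mei-amt-version mei-amt-version.c
 *	sudo ./mei-amt-version -v
 */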
diff --git a/drivers/staging/mei/mei.h b/drivers/staging/mei/mei.h
index 6da7c4f33f91..bc0d8b69c49e 100644
--- a/drivers/staging/mei/mei.h
+++ b/drivers/staging/mei/mei.h
@@ -1,63 +1,68 @@
1/* 1/******************************************************************************
2 2 * Intel Management Engine Interface (Intel MEI) Linux driver
3 Intel Management Engine Interface (Intel MEI) Linux driver 3 * Intel MEI Interface Header
4 Intel MEI Interface Header 4 *
5 5 * This file is provided under a dual BSD/GPLv2 license. When using or
6 This file is provided under a dual BSD/GPLv2 license. When using or 6 * redistributing this file, you may do so under either license.
7 redistributing this file, you may do so under either license. 7 *
8 8 * GPL LICENSE SUMMARY
9 GPL LICENSE SUMMARY 9 *
10 10 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
11 Copyright(c) 2003-2011 Intel Corporation. All rights reserved. 11 *
12 12 * This program is free software; you can redistribute it and/or modify
13 This program is free software; you can redistribute it and/or modify 13 * it under the terms of version 2 of the GNU General Public License as
14 it under the terms of version 2 of the GNU General Public License as 14 * published by the Free Software Foundation.
15 published by the Free Software Foundation. 15 *
16 16 * This program is distributed in the hope that it will be useful, but
17 This program is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details.
20 General Public License for more details. 20 *
21 21 * You should have received a copy of the GNU General Public License
22 Contact Information: 22 * along with this program; if not, write to the Free Software
23 Intel Corporation. 23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 linux-mei@linux.intel.com 24 * USA
25 http://www.intel.com 25 *
26 26 * The full GNU General Public License is included in this distribution
27 27 * in the file called LICENSE.GPL.
28 BSD LICENSE 28 *
29 29 * Contact Information:
30 Copyright(c) 2003-2011 Intel Corporation. All rights reserved. 30 * Intel Corporation.
31 All rights reserved. 31 * linux-mei@linux.intel.com
32 32 * http://www.intel.com
33 Redistribution and use in source and binary forms, with or without 33 *
34 modification, are permitted provided that the following conditions 34 * BSD LICENSE
35 are met: 35 *
36 36 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
37 * Redistributions of source code must retain the above copyright 37 * All rights reserved.
38 notice, this list of conditions and the following disclaimer. 38 *
39 * Redistributions in binary form must reproduce the above copyright 39 * Redistribution and use in source and binary forms, with or without
40 notice, this list of conditions and the following disclaimer in 40 * modification, are permitted provided that the following conditions
41 the documentation and/or other materials provided with the 41 * are met:
42 distribution. 42 *
43 * Neither the name of Intel Corporation nor the names of its 43 * * Redistributions of source code must retain the above copyright
44 contributors may be used to endorse or promote products derived 44 * notice, this list of conditions and the following disclaimer.
45 from this software without specific prior written permission. 45 * * Redistributions in binary form must reproduce the above copyright
46 46 * notice, this list of conditions and the following disclaimer in
47 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 47 * the documentation and/or other materials provided with the
48 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 48 * distribution.
49 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 49 * * Neither the name Intel Corporation nor the names of its
50 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 50 * contributors may be used to endorse or promote products derived
51 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 51 * from this software without specific prior written permission.
52 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 52 *
53 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59*/ 59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
61 66
62#ifndef _LINUX_MEI_H 67#ifndef _LINUX_MEI_H
63#define _LINUX_MEI_H 68#define _LINUX_MEI_H
@@ -72,7 +77,7 @@
72 * Only in close() (file_operation release()) the communication between 77 * Only in close() (file_operation release()) the communication between
73 * the clients is disconnected 78 * the clients is disconnected
74 * 79 *
75 * The IOCTL argument is a struct with a union the contains 80 * The IOCTL argument is a struct with a union that contains
76 * the input parameter and the output parameter for this IOCTL. 81 * the input parameter and the output parameter for this IOCTL.
77 * 82 *
78 * The input parameter is UUID of the FW Client. 83 * The input parameter is UUID of the FW Client.
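The union mentioned in this hunk means the same bytes carry the request on
entry and the reply on return. A minimal sketch of that handshake, reusing
the field names from the sample program above (fd and guid stand in for an
open /dev/mei descriptor and a firmware client UUID):

	struct mei_connect_client_data data;

	memset(&data, 0, sizeof(data));
	memcpy(&data.in_client_uuid, &guid, sizeof(guid));	/* input */
	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) == 0)	/* output */
		printf("max msg %u\n",
		       data.out_client_properties.max_msg_length);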
diff --git a/drivers/staging/mei/mei.txt b/drivers/staging/mei/mei.txt
index 516bfe7319a6..2785697da59d 100644
--- a/drivers/staging/mei/mei.txt
+++ b/drivers/staging/mei/mei.txt
@@ -4,7 +4,7 @@ Intel(R) Management Engine Interface (Intel(R) MEI)
4Introduction 4Introduction
5======================= 5=======================
6 6
7The Intel Management Engine (Intel ME) is an isolated andprotected computing 7The Intel Management Engine (Intel ME) is an isolated and protected computing
8resource (Co-processor) residing inside certain Intel chipsets. The Intel ME 8resource (Co-processor) residing inside certain Intel chipsets. The Intel ME
9provides support for computer/IT management features. The feature set 9provides support for computer/IT management features. The feature set
10depends on the Intel chipset SKU. 10depends on the Intel chipset SKU.
@@ -176,8 +176,8 @@ Intel AMT OS Health Watchdog:
176============================= 176=============================
177The Intel AMT Watchdog is an OS Health (Hang/Crash) watchdog. 177The Intel AMT Watchdog is an OS Health (Hang/Crash) watchdog.
178Whenever the OS hangs or crashes, Intel AMT will send an event 178Whenever the OS hangs or crashes, Intel AMT will send an event
179to any subsciber to this event. This mechanism means that 179to any subscriber to this event. This mechanism means that
180IT knows when a platform crashes even when there is a hard failureon the host. 180IT knows when a platform crashes even when there is a hard failure on the host.
181 181
182The Intel AMT Watchdog is composed of two parts: 182The Intel AMT Watchdog is composed of two parts:
183 1) Firmware feature - receives the heartbeats 183 1) Firmware feature - receives the heartbeats
diff --git a/drivers/staging/mei/mei_dev.h b/drivers/staging/mei/mei_dev.h
index 82bacfc624c5..10b1b4e2f8ac 100644
--- a/drivers/staging/mei/mei_dev.h
+++ b/drivers/staging/mei/mei_dev.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * 2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver 3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2011, Intel Corporation. 4 * Copyright (c) 2003-2012, Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,8 @@
30#define MEI_WD_PARAMS_SIZE 4 30#define MEI_WD_PARAMS_SIZE 4
31#define MEI_WD_STATE_INDEPENDENCE_MSG_SENT (1 << 0) 31#define MEI_WD_STATE_INDEPENDENCE_MSG_SENT (1 << 0)
32 32
33#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32))
34
33/* 35/*
34 * MEI PCI Device object 36 * MEI PCI Device object
35 */ 37 */
@@ -87,7 +89,7 @@ enum mei_states {
87 MEI_POWER_UP 89 MEI_POWER_UP
88}; 90};
89 91
90/* init clients states*/ 92/* init clients states*/
91enum mei_init_clients_states { 93enum mei_init_clients_states {
92 MEI_START_MESSAGE = 0, 94 MEI_START_MESSAGE = 0,
93 MEI_ENUM_CLIENTS_MESSAGE, 95 MEI_ENUM_CLIENTS_MESSAGE,
@@ -125,7 +127,7 @@ enum mei_cb_major_types {
125 */ 127 */
126struct mei_message_data { 128struct mei_message_data {
127 u32 size; 129 u32 size;
128 char *data; 130 unsigned char *data;
129} __packed; 131} __packed;
130 132
131 133
@@ -219,7 +221,7 @@ struct mei_device {
219 bool need_reset; 221 bool need_reset;
220 222
221 u32 extra_write_index; 223 u32 extra_write_index;
222 u32 rd_msg_buf[128]; /* used for control messages */ 224 unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */
223 u32 wr_msg_buf[128]; /* used for control messages */ 225 u32 wr_msg_buf[128]; /* used for control messages */
224 u32 ext_msg_buf[8]; /* for control responses */ 226 u32 ext_msg_buf[8]; /* for control responses */
225 u32 rd_msg_hdr; 227 u32 rd_msg_hdr;
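Worth noting about the rd_msg_buf change above: MEI_RD_MSG_BUF_SIZE is
128 * sizeof(u32) = 512 bytes, so the buffer keeps its original size; the
change only retypes it from u32[] to unsigned char[] for byte-granular
access to control messages.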
diff --git a/drivers/staging/mei/mei_version.h b/drivers/staging/mei/mei_version.h
deleted file mode 100644
index 075bad8f0bf5..000000000000
--- a/drivers/staging/mei/mei_version.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2011, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17
18#ifndef MEI_VERSION_H
19#define MEI_VERSION_H
20
21#define MAJOR_VERSION 7
22#define MINOR_VERSION 1
23#define QUICK_FIX_NUMBER 20
24#define VER_BUILD 1
25
26#define MEI_DRV_VER1 __stringify(MAJOR_VERSION) "." __stringify(MINOR_VERSION)
27#define MEI_DRV_VER2 __stringify(QUICK_FIX_NUMBER) "." __stringify(VER_BUILD)
28
29#define MEI_DRIVER_VERSION MEI_DRV_VER1 "." MEI_DRV_VER2
30
31#endif
diff --git a/drivers/staging/mei/wd.c b/drivers/staging/mei/wd.c
index 8094941a98f1..a6910da78a64 100644
--- a/drivers/staging/mei/wd.c
+++ b/drivers/staging/mei/wd.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * 2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver 3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2011, Intel Corporation. 4 * Copyright (c) 2003-2012, Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -74,7 +74,7 @@ bool mei_wd_host_init(struct mei_device *dev)
74 74
75 dev_dbg(&dev->pdev->dev, "check wd_cl\n"); 75 dev_dbg(&dev->pdev->dev, "check wd_cl\n");
76 if (MEI_FILE_CONNECTING == dev->wd_cl.state) { 76 if (MEI_FILE_CONNECTING == dev->wd_cl.state) {
77 if (!mei_connect(dev, &dev->wd_cl)) { 77 if (mei_connect(dev, &dev->wd_cl)) {
78 dev_dbg(&dev->pdev->dev, "Failed to connect to WD client\n"); 78 dev_dbg(&dev->pdev->dev, "Failed to connect to WD client\n");
79 dev->wd_cl.state = MEI_FILE_DISCONNECTED; 79 dev->wd_cl.state = MEI_FILE_DISCONNECTED;
80 dev->wd_cl.host_client_id = 0; 80 dev->wd_cl.host_client_id = 0;
@@ -119,9 +119,7 @@ int mei_wd_send(struct mei_device *dev)
119 else 119 else
120 return -EINVAL; 120 return -EINVAL;
121 121
122 if (mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length)) 122 return mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length);
123 return 0;
124 return -EIO;
125} 123}
126 124
127/** 125/**
diff --git a/drivers/staging/nvec/Kconfig b/drivers/staging/nvec/Kconfig
index 86a8b8c418c0..731301f524a6 100644
--- a/drivers/staging/nvec/Kconfig
+++ b/drivers/staging/nvec/Kconfig
@@ -7,21 +7,21 @@ config MFD_NVEC
7 7
8config KEYBOARD_NVEC 8config KEYBOARD_NVEC
9 bool "Keyboard on nVidia compliant EC" 9 bool "Keyboard on nVidia compliant EC"
10 depends on MFD_NVEC && INPUT=y 10 depends on MFD_NVEC && INPUT
11 help 11 help
12 Say Y here to enable support for a keyboard connected to 12 Say Y here to enable support for a keyboard connected to
13 a nVidia compliant embedded controller. 13 a nVidia compliant embedded controller.
14 14
15config SERIO_NVEC_PS2 15config SERIO_NVEC_PS2
16 bool "PS2 on nVidia EC" 16 bool "PS2 on nVidia EC"
17 depends on MFD_NVEC && MOUSE_PS2 17 depends on MFD_NVEC && SERIO
18 help 18 help
19 Say Y here to enable support for a Touchpad / Mouse connected 19 Say Y here to enable support for a Touchpad / Mouse connected
20 to a nVidia compliant embedded controller. 20 to a nVidia compliant embedded controller.
21 21
22config NVEC_POWER 22config NVEC_POWER
23 bool "NVEC charger and battery" 23 bool "NVEC charger and battery"
24 depends on MFD_NVEC && POWER_SUPPLY=y 24 depends on MFD_NVEC && POWER_SUPPLY
25 help 25 help
26 Say Y to enable support for battery and charger interface for 26 Say Y to enable support for battery and charger interface for
27 nVidia compliant embedded controllers. 27 nVidia compliant embedded controllers.
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index fafdfa25e139..3c60088871e0 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -49,7 +49,7 @@
49#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12 49#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
50 50
51#define I2C_SL_CNFG 0x20 51#define I2C_SL_CNFG 0x20
52#define I2C_SL_NEWL (1<<2) 52#define I2C_SL_NEWSL (1<<2)
53#define I2C_SL_NACK (1<<1) 53#define I2C_SL_NACK (1<<1)
54#define I2C_SL_RESP (1<<0) 54#define I2C_SL_RESP (1<<0)
55#define I2C_SL_IRQ (1<<3) 55#define I2C_SL_IRQ (1<<3)
@@ -687,7 +687,7 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
687 687
688 clk_set_rate(nvec->i2c_clk, 8 * 80000); 688 clk_set_rate(nvec->i2c_clk, 8 * 80000);
689 689
690 writel(I2C_SL_NEWL, nvec->base + I2C_SL_CNFG); 690 writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
691 writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT); 691 writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);
692 692
693 writel(nvec->i2c_addr>>1, nvec->base + I2C_SL_ADDR1); 693 writel(nvec->i2c_addr>>1, nvec->base + I2C_SL_ADDR1);
@@ -701,7 +701,7 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
701static void nvec_disable_i2c_slave(struct nvec_chip *nvec) 701static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
702{ 702{
703 disable_irq(nvec->irq); 703 disable_irq(nvec->irq);
704 writel(I2C_SL_NEWL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG); 704 writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
705 clk_disable(nvec->i2c_clk); 705 clk_disable(nvec->i2c_clk);
706} 706}
707 707
@@ -784,11 +784,6 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
784 nvec->i2c_clk = i2c_clk; 784 nvec->i2c_clk = i2c_clk;
785 nvec->rx = &nvec->msg_pool[0]; 785 nvec->rx = &nvec->msg_pool[0];
786 786
787 /* Set the gpio to low when we've got something to say */
788 err = gpio_request(nvec->gpio, "nvec gpio");
789 if (err < 0)
790 dev_err(nvec->dev, "couldn't request gpio\n");
791
792 ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list); 787 ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);
793 788
794 init_completion(&nvec->sync_write); 789 init_completion(&nvec->sync_write);
@@ -802,6 +797,12 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
802 INIT_WORK(&nvec->tx_work, nvec_request_master); 797 INIT_WORK(&nvec->tx_work, nvec_request_master);
803 nvec->wq = alloc_workqueue("nvec", WQ_NON_REENTRANT, 2); 798 nvec->wq = alloc_workqueue("nvec", WQ_NON_REENTRANT, 2);
804 799
800 err = gpio_request_one(nvec->gpio, GPIOF_OUT_INIT_HIGH, "nvec gpio");
801 if (err < 0) {
802 dev_err(nvec->dev, "couldn't request gpio\n");
803 goto failed;
804 }
805
805 err = request_irq(nvec->irq, nvec_interrupt, 0, "nvec", nvec); 806 err = request_irq(nvec->irq, nvec_interrupt, 0, "nvec", nvec);
806 if (err) { 807 if (err) {
807 dev_err(nvec->dev, "couldn't request irq\n"); 808 dev_err(nvec->dev, "couldn't request irq\n");
@@ -813,8 +814,6 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
813 814
814 clk_enable(i2c_clk); 815 clk_enable(i2c_clk);
815 816
816 gpio_direction_output(nvec->gpio, 1);
817 gpio_set_value(nvec->gpio, 1);
818 817
819 /* enable event reporting */ 818 /* enable event reporting */
820 nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING, 819 nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
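The probe rework above also folds three GPIO calls into one and finally
checks the result. gpio_request_one() with GPIOF_OUT_INIT_HIGH is roughly
equivalent to the removed sequence (sketch):

	err = gpio_request(nvec->gpio, "nvec gpio");
	if (!err)
		err = gpio_direction_output(nvec->gpio, 1);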
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index 742f5ccfe763..14a6f687cf75 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -21,10 +21,18 @@
21 21
22#include "nvec.h" 22#include "nvec.h"
23 23
24#define START_STREAMING {'\x06', '\x03', '\x04'} 24#define START_STREAMING {'\x06', '\x03', '\x06'}
25#define STOP_STREAMING {'\x06', '\x04'} 25#define STOP_STREAMING {'\x06', '\x04'}
26#define SEND_COMMAND {'\x06', '\x01', '\xf4', '\x01'} 26#define SEND_COMMAND {'\x06', '\x01', '\xf4', '\x01'}
27 27
28#ifdef NVEC_PS2_DEBUG
29#define NVEC_PHD(str, buf, len) \
30 print_hex_dump(KERN_DEBUG, str, DUMP_PREFIX_NONE, \
31 16, 1, buf, len, false)
32#else
33#define NVEC_PHD(str, buf, len)
34#endif
35
28static const unsigned char MOUSE_RESET[] = {'\x06', '\x01', '\xff', '\x03'}; 36static const unsigned char MOUSE_RESET[] = {'\x06', '\x01', '\xff', '\x03'};
29 37
30struct nvec_ps2 { 38struct nvec_ps2 {
@@ -67,18 +75,18 @@ static int nvec_ps2_notifier(struct notifier_block *nb,
67 case NVEC_PS2_EVT: 75 case NVEC_PS2_EVT:
68 for (i = 0; i < msg[1]; i++) 76 for (i = 0; i < msg[1]; i++)
69 serio_interrupt(ps2_dev.ser_dev, msg[2 + i], 0); 77 serio_interrupt(ps2_dev.ser_dev, msg[2 + i], 0);
78 NVEC_PHD("ps/2 mouse event: ", &msg[2], msg[1]);
70 return NOTIFY_STOP; 79 return NOTIFY_STOP;
71 80
72 case NVEC_PS2: 81 case NVEC_PS2:
73 if (msg[2] == 1) 82 if (msg[2] == 1) {
74 for (i = 0; i < (msg[1] - 2); i++) 83 for (i = 0; i < (msg[1] - 2); i++)
75 serio_interrupt(ps2_dev.ser_dev, msg[i + 4], 0); 84 serio_interrupt(ps2_dev.ser_dev, msg[i + 4], 0);
76 else if (msg[1] != 2) { /* !ack */ 85 NVEC_PHD("ps/2 mouse reply: ", &msg[4], msg[1] - 2);
77 print_hex_dump(KERN_WARNING, "unhandled mouse event: ",
78 DUMP_PREFIX_NONE, 16, 1,
79 msg, msg[1] + 2, true);
80 } 86 }
81 87
88 else if (msg[1] != 2) /* !ack */
89 NVEC_PHD("unhandled mouse event: ", msg, msg[1] + 2);
82 return NOTIFY_STOP; 90 return NOTIFY_STOP;
83 } 91 }
84 92
@@ -90,10 +98,10 @@ static int __devinit nvec_mouse_probe(struct platform_device *pdev)
90 struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); 98 struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
91 struct serio *ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL); 99 struct serio *ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
92 100
93 ser_dev->id.type = SERIO_8042; 101 ser_dev->id.type = SERIO_PS_PSTHRU;
94 ser_dev->write = ps2_sendcommand; 102 ser_dev->write = ps2_sendcommand;
95 ser_dev->open = ps2_startstreaming; 103 ser_dev->start = ps2_startstreaming;
96 ser_dev->close = ps2_stopstreaming; 104 ser_dev->stop = ps2_stopstreaming;
97 105
98 strlcpy(ser_dev->name, "nvec mouse", sizeof(ser_dev->name)); 106 strlcpy(ser_dev->name, "nvec mouse", sizeof(ser_dev->name));
99 strlcpy(ser_dev->phys, "nvec", sizeof(ser_dev->phys)); 107 strlcpy(ser_dev->phys, "nvec", sizeof(ser_dev->phys));
@@ -111,8 +119,35 @@ static int __devinit nvec_mouse_probe(struct platform_device *pdev)
111 return 0; 119 return 0;
112} 120}
113 121
122static int nvec_mouse_suspend(struct platform_device *pdev, pm_message_t state)
123{
124 struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
125
126 /* disable mouse */
127 nvec_write_async(nvec, "\x06\xf4", 2);
128
129 /* send cancel autoreceive */
130 nvec_write_async(nvec, "\x06\x04", 2);
131
132 return 0;
133}
134
135static int nvec_mouse_resume(struct platform_device *pdev)
136{
137 struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
138
139 ps2_startstreaming(ps2_dev.ser_dev);
140
141 /* enable mouse */
142 nvec_write_async(nvec, "\x06\xf5", 2);
143
144 return 0;
145}
146
114static struct platform_driver nvec_mouse_driver = { 147static struct platform_driver nvec_mouse_driver = {
115 .probe = nvec_mouse_probe, 148 .probe = nvec_mouse_probe,
149 .suspend = nvec_mouse_suspend,
150 .resume = nvec_mouse_resume,
116 .driver = { 151 .driver = {
117 .name = "nvec-mouse", 152 .name = "nvec-mouse",
118 .owner = THIS_MODULE, 153 .owner = THIS_MODULE,
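One caveat in the new suspend/resume hooks: in standard PS/2, command 0xf4
enables data reporting and 0xf5 disables it, so pairing 0xf4 with the
"disable mouse" comment (and 0xf5 with "enable mouse") looks inverted and
may warrant a follow-up.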
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 63800ba71d06..e31949c9c87e 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -164,9 +164,9 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
164 164
165 int phy_addr = cvmx_helper_board_get_mii_address(priv->port); 165 int phy_addr = cvmx_helper_board_get_mii_address(priv->port);
166 if (phy_addr != -1) { 166 if (phy_addr != -1) {
167 char phy_id[20]; 167 char phy_id[MII_BUS_ID_SIZE + 3];
168 168
169 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", phy_addr); 169 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "mdio-octeon-0", phy_addr);
170 170
171 priv->phydev = phy_connect(dev, phy_id, cvm_oct_adjust_link, 0, 171 priv->phydev = phy_connect(dev, phy_id, cvm_oct_adjust_link, 0,
172 PHY_INTERFACE_MODE_GMII); 172 PHY_INTERFACE_MODE_GMII);
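The new phy_id sizing follows from PHY_ID_FMT ("%s:%02x"): a bus name of at
most MII_BUS_ID_SIZE - 1 characters plus ':', two hex digits, and the
terminating NUL needs exactly MII_BUS_ID_SIZE + 3 bytes.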
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
index 17ca163e5896..490a7f15604b 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -118,29 +118,35 @@ static void omap_crtc_load_lut(struct drm_crtc *crtc)
118{ 118{
119} 119}
120 120
121static void page_flip_cb(void *arg) 121static void vblank_cb(void *arg)
122{ 122{
123 static uint32_t sequence = 0;
123 struct drm_crtc *crtc = arg; 124 struct drm_crtc *crtc = arg;
124 struct drm_device *dev = crtc->dev; 125 struct drm_device *dev = crtc->dev;
125 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 126 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
126 struct drm_pending_vblank_event *event = omap_crtc->event; 127 struct drm_pending_vblank_event *event = omap_crtc->event;
127 struct drm_framebuffer *old_fb = omap_crtc->old_fb;
128 struct timeval now;
129 unsigned long flags; 128 unsigned long flags;
129 struct timeval now;
130 130
131 WARN_ON(!event); 131 WARN_ON(!event);
132 132
133 omap_crtc->event = NULL; 133 omap_crtc->event = NULL;
134 omap_crtc->old_fb = NULL;
135
136 omap_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
137 134
138 /* wakeup userspace */ 135 /* wakeup userspace */
139 /* TODO: this should happen *after* flip in vsync IRQ handler */
140 if (event) { 136 if (event) {
137 do_gettimeofday(&now);
138
141 spin_lock_irqsave(&dev->event_lock, flags); 139 spin_lock_irqsave(&dev->event_lock, flags);
140 /* TODO: we can't yet use the vblank time accounting,
141 * because omapdss lower layer is the one that knows
142 * the irq # and registers the handler, which more or
143 * less defeats how drm_irq works.. for now just fake
144 * the sequence number and use gettimeofday..
145 *
142 event->event.sequence = drm_vblank_count_and_time( 146 event->event.sequence = drm_vblank_count_and_time(
143 dev, omap_crtc->id, &now); 147 dev, omap_crtc->id, &now);
148 */
149 event->event.sequence = sequence++;
144 event->event.tv_sec = now.tv_sec; 150 event->event.tv_sec = now.tv_sec;
145 event->event.tv_usec = now.tv_usec; 151 event->event.tv_usec = now.tv_usec;
146 list_add_tail(&event->base.link, 152 list_add_tail(&event->base.link,
@@ -150,6 +156,23 @@ static void page_flip_cb(void *arg)
150 } 156 }
151} 157}
152 158
159static void page_flip_cb(void *arg)
160{
161 struct drm_crtc *crtc = arg;
162 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
163 struct drm_framebuffer *old_fb = omap_crtc->old_fb;
164
165 omap_crtc->old_fb = NULL;
166
167 omap_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
168
169 /* really we'd like to setup the callback atomically w/ setting the
170 * new scanout buffer to avoid getting stuck waiting an extra vblank
171 * cycle.. for now go for correctness and later figure out speed..
172 */
173 omap_plane_on_endwin(omap_crtc->plane, vblank_cb, crtc);
174}
175
153static int omap_crtc_page_flip_locked(struct drm_crtc *crtc, 176static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
154 struct drm_framebuffer *fb, 177 struct drm_framebuffer *fb,
155 struct drm_pending_vblank_event *event) 178 struct drm_pending_vblank_event *event)
diff --git a/drivers/staging/omapdrm/omap_debugfs.c b/drivers/staging/omapdrm/omap_debugfs.c
index da920dfdc59c..2f122e00b51d 100644
--- a/drivers/staging/omapdrm/omap_debugfs.c
+++ b/drivers/staging/omapdrm/omap_debugfs.c
@@ -20,23 +20,118 @@
20#include "omap_drv.h" 20#include "omap_drv.h"
21#include "omap_dmm_tiler.h" 21#include "omap_dmm_tiler.h"
22 22
23#include "drm_fb_helper.h"
24
25
23#ifdef CONFIG_DEBUG_FS 26#ifdef CONFIG_DEBUG_FS
24 27
28static int gem_show(struct seq_file *m, void *arg)
29{
30 struct drm_info_node *node = (struct drm_info_node *) m->private;
31 struct drm_device *dev = node->minor->dev;
32 struct omap_drm_private *priv = dev->dev_private;
33 int ret;
34
35 ret = mutex_lock_interruptible(&dev->struct_mutex);
36 if (ret)
37 return ret;
38
39 seq_printf(m, "All Objects:\n");
40 omap_gem_describe_objects(&priv->obj_list, m);
41
42 mutex_unlock(&dev->struct_mutex);
43
44 return 0;
45}
46
47static int mm_show(struct seq_file *m, void *arg)
48{
49 struct drm_info_node *node = (struct drm_info_node *) m->private;
50 struct drm_device *dev = node->minor->dev;
51 return drm_mm_dump_table(m, dev->mm_private);
52}
53
54static int fb_show(struct seq_file *m, void *arg)
55{
56 struct drm_info_node *node = (struct drm_info_node *) m->private;
57 struct drm_device *dev = node->minor->dev;
58 struct omap_drm_private *priv = dev->dev_private;
59 struct drm_framebuffer *fb;
60 int ret;
61
62 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
63 if (ret)
64 return ret;
65
66 ret = mutex_lock_interruptible(&dev->struct_mutex);
67 if (ret) {
68 mutex_unlock(&dev->mode_config.mutex);
69 return ret;
70 }
71
72 seq_printf(m, "fbcon ");
73 omap_framebuffer_describe(priv->fbdev->fb, m);
74
75 list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
76 if (fb == priv->fbdev->fb)
77 continue;
78
79 seq_printf(m, "user ");
80 omap_framebuffer_describe(fb, m);
81 }
82
83 mutex_unlock(&dev->struct_mutex);
84 mutex_unlock(&dev->mode_config.mutex);
85
86 return 0;
87}
88
 89/* list of debugfs files that are applicable to all devices */
25static struct drm_info_list omap_debugfs_list[] = { 90static struct drm_info_list omap_debugfs_list[] = {
91 {"gem", gem_show, 0},
92 {"mm", mm_show, 0},
93 {"fb", fb_show, 0},
94};
95
96/* list of debugfs files that are specific to devices with dmm/tiler */
97static struct drm_info_list omap_dmm_debugfs_list[] = {
26 {"tiler_map", tiler_map_show, 0}, 98 {"tiler_map", tiler_map_show, 0},
27}; 99};
28 100
29int omap_debugfs_init(struct drm_minor *minor) 101int omap_debugfs_init(struct drm_minor *minor)
30{ 102{
31 return drm_debugfs_create_files(omap_debugfs_list, 103 struct drm_device *dev = minor->dev;
104 int ret;
105
106 ret = drm_debugfs_create_files(omap_debugfs_list,
32 ARRAY_SIZE(omap_debugfs_list), 107 ARRAY_SIZE(omap_debugfs_list),
33 minor->debugfs_root, minor); 108 minor->debugfs_root, minor);
109
110 if (ret) {
111 dev_err(dev->dev, "could not install omap_debugfs_list\n");
112 return ret;
113 }
114
115 if (dmm_is_available())
116 ret = drm_debugfs_create_files(omap_dmm_debugfs_list,
117 ARRAY_SIZE(omap_dmm_debugfs_list),
118 minor->debugfs_root, minor);
119
120 if (ret) {
121 dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n");
122 return ret;
123 }
124
125 return ret;
34} 126}
35 127
36void omap_debugfs_cleanup(struct drm_minor *minor) 128void omap_debugfs_cleanup(struct drm_minor *minor)
37{ 129{
38 drm_debugfs_remove_files(omap_debugfs_list, 130 drm_debugfs_remove_files(omap_debugfs_list,
39 ARRAY_SIZE(omap_debugfs_list), minor); 131 ARRAY_SIZE(omap_debugfs_list), minor);
132 if (dmm_is_available())
133 drm_debugfs_remove_files(omap_dmm_debugfs_list,
134 ARRAY_SIZE(omap_dmm_debugfs_list), minor);
40} 135}
41 136
42#endif 137#endif
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/staging/omapdrm/omap_dmm_tiler.c
index 852d9440f725..1ecb6a73d790 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -34,6 +34,8 @@
34#include "omap_dmm_tiler.h" 34#include "omap_dmm_tiler.h"
35#include "omap_dmm_priv.h" 35#include "omap_dmm_priv.h"
36 36
37#define DMM_DRIVER_NAME "dmm"
38
37/* mappings for associating views to luts */ 39/* mappings for associating views to luts */
38static struct tcm *containers[TILFMT_NFORMATS]; 40static struct tcm *containers[TILFMT_NFORMATS];
39static struct dmm *omap_dmm; 41static struct dmm *omap_dmm;
@@ -465,7 +467,12 @@ size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
465 return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h; 467 return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
466} 468}
467 469
468int omap_dmm_remove(void) 470bool dmm_is_initialized(void)
471{
472 return omap_dmm ? true : false;
473}
474
475static int omap_dmm_remove(struct platform_device *dev)
469{ 476{
470 struct tiler_block *block, *_block; 477 struct tiler_block *block, *_block;
471 int i; 478 int i;
@@ -499,40 +506,49 @@ int omap_dmm_remove(void)
499 if (omap_dmm->irq != -1) 506 if (omap_dmm->irq != -1)
500 free_irq(omap_dmm->irq, omap_dmm); 507 free_irq(omap_dmm->irq, omap_dmm);
501 508
509 iounmap(omap_dmm->base);
502 kfree(omap_dmm); 510 kfree(omap_dmm);
511 omap_dmm = NULL;
503 } 512 }
504 513
505 return 0; 514 return 0;
506} 515}
507 516
508int omap_dmm_init(struct drm_device *dev) 517static int omap_dmm_probe(struct platform_device *dev)
509{ 518{
510 int ret = -EFAULT, i; 519 int ret = -EFAULT, i;
511 struct tcm_area area = {0}; 520 struct tcm_area area = {0};
512 u32 hwinfo, pat_geom, lut_table_size; 521 u32 hwinfo, pat_geom, lut_table_size;
513 struct omap_drm_platform_data *pdata = dev->dev->platform_data; 522 struct resource *mem;
514
515 if (!pdata || !pdata->dmm_pdata) {
516 dev_err(dev->dev, "dmm platform data not present, skipping\n");
517 return ret;
518 }
519 523
520 omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL); 524 omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
521 if (!omap_dmm) { 525 if (!omap_dmm) {
522 dev_err(dev->dev, "failed to allocate driver data section\n"); 526 dev_err(&dev->dev, "failed to allocate driver data section\n");
523 goto fail; 527 goto fail;
524 } 528 }
525 529
526 /* lookup hwmod data - base address and irq */ 530 /* lookup hwmod data - base address and irq */
527 omap_dmm->base = pdata->dmm_pdata->base; 531 mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
528 omap_dmm->irq = pdata->dmm_pdata->irq; 532 if (!mem) {
529 omap_dmm->dev = dev->dev; 533 dev_err(&dev->dev, "failed to get base address resource\n");
534 goto fail;
535 }
536
537 omap_dmm->base = ioremap(mem->start, SZ_2K);
530 538
531 if (!omap_dmm->base) { 539 if (!omap_dmm->base) {
532 dev_err(dev->dev, "failed to get dmm base address\n"); 540 dev_err(&dev->dev, "failed to get dmm base address\n");
541 goto fail;
542 }
543
544 omap_dmm->irq = platform_get_irq(dev, 0);
545 if (omap_dmm->irq < 0) {
546 dev_err(&dev->dev, "failed to get IRQ resource\n");
533 goto fail; 547 goto fail;
534 } 548 }
535 549
550 omap_dmm->dev = &dev->dev;
551
536 hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO); 552 hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
537 omap_dmm->num_engines = (hwinfo >> 24) & 0x1F; 553 omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
538 omap_dmm->num_lut = (hwinfo >> 16) & 0x1F; 554 omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
@@ -556,7 +572,7 @@ int omap_dmm_init(struct drm_device *dev)
556 "omap_dmm_irq_handler", omap_dmm); 572 "omap_dmm_irq_handler", omap_dmm);
557 573
558 if (ret) { 574 if (ret) {
559 dev_err(dev->dev, "couldn't register IRQ %d, error %d\n", 575 dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
560 omap_dmm->irq, ret); 576 omap_dmm->irq, ret);
561 omap_dmm->irq = -1; 577 omap_dmm->irq = -1;
562 goto fail; 578 goto fail;
@@ -575,25 +591,30 @@ int omap_dmm_init(struct drm_device *dev)
575 591
576 omap_dmm->lut = vmalloc(lut_table_size * sizeof(*omap_dmm->lut)); 592 omap_dmm->lut = vmalloc(lut_table_size * sizeof(*omap_dmm->lut));
577 if (!omap_dmm->lut) { 593 if (!omap_dmm->lut) {
578 dev_err(dev->dev, "could not allocate lut table\n"); 594 dev_err(&dev->dev, "could not allocate lut table\n");
579 ret = -ENOMEM; 595 ret = -ENOMEM;
580 goto fail; 596 goto fail;
581 } 597 }
582 598
583 omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32); 599 omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
584 if (!omap_dmm->dummy_page) { 600 if (!omap_dmm->dummy_page) {
585 dev_err(dev->dev, "could not allocate dummy page\n"); 601 dev_err(&dev->dev, "could not allocate dummy page\n");
586 ret = -ENOMEM; 602 ret = -ENOMEM;
587 goto fail; 603 goto fail;
588 } 604 }
605
606 /* set dma mask for device */
607 /* NOTE: this is a workaround for the hwmod not initializing properly */
608 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
609
589 omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page); 610 omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
590 611
591 /* alloc refill memory */ 612 /* alloc refill memory */
592 omap_dmm->refill_va = dma_alloc_coherent(dev->dev, 613 omap_dmm->refill_va = dma_alloc_coherent(&dev->dev,
593 REFILL_BUFFER_SIZE * omap_dmm->num_engines, 614 REFILL_BUFFER_SIZE * omap_dmm->num_engines,
594 &omap_dmm->refill_pa, GFP_KERNEL); 615 &omap_dmm->refill_pa, GFP_KERNEL);
595 if (!omap_dmm->refill_va) { 616 if (!omap_dmm->refill_va) {
596 dev_err(dev->dev, "could not allocate refill memory\n"); 617 dev_err(&dev->dev, "could not allocate refill memory\n");
597 goto fail; 618 goto fail;
598 } 619 }
599 620
@@ -602,7 +623,7 @@ int omap_dmm_init(struct drm_device *dev)
602 omap_dmm->num_engines * sizeof(struct refill_engine), 623 omap_dmm->num_engines * sizeof(struct refill_engine),
603 GFP_KERNEL); 624 GFP_KERNEL);
604 if (!omap_dmm->engines) { 625 if (!omap_dmm->engines) {
605 dev_err(dev->dev, "could not allocate engines\n"); 626 dev_err(&dev->dev, "could not allocate engines\n");
606 ret = -ENOMEM; 627 ret = -ENOMEM;
607 goto fail; 628 goto fail;
608 } 629 }
@@ -624,7 +645,7 @@ int omap_dmm_init(struct drm_device *dev)
624 omap_dmm->tcm = kzalloc(omap_dmm->num_lut * sizeof(*omap_dmm->tcm), 645 omap_dmm->tcm = kzalloc(omap_dmm->num_lut * sizeof(*omap_dmm->tcm),
625 GFP_KERNEL); 646 GFP_KERNEL);
626 if (!omap_dmm->tcm) { 647 if (!omap_dmm->tcm) {
627 dev_err(dev->dev, "failed to allocate lut ptrs\n"); 648 dev_err(&dev->dev, "failed to allocate lut ptrs\n");
628 ret = -ENOMEM; 649 ret = -ENOMEM;
629 goto fail; 650 goto fail;
630 } 651 }
@@ -636,7 +657,7 @@ int omap_dmm_init(struct drm_device *dev)
636 NULL); 657 NULL);
637 658
638 if (!omap_dmm->tcm[i]) { 659 if (!omap_dmm->tcm[i]) {
639 dev_err(dev->dev, "failed to allocate container\n"); 660 dev_err(&dev->dev, "failed to allocate container\n");
640 ret = -ENOMEM; 661 ret = -ENOMEM;
641 goto fail; 662 goto fail;
642 } 663 }
@@ -676,7 +697,7 @@ int omap_dmm_init(struct drm_device *dev)
676 return 0; 697 return 0;
677 698
678fail: 699fail:
679 omap_dmm_remove(); 700 omap_dmm_remove(dev);
680 return ret; 701 return ret;
681} 702}
682 703
@@ -766,10 +787,18 @@ int tiler_map_show(struct seq_file *s, void *arg)
766 const char *a2d = special; 787 const char *a2d = special;
767 const char *m2dp = m2d, *a2dp = a2d; 788 const char *m2dp = m2d, *a2dp = a2d;
768 char nice[128]; 789 char nice[128];
769 int h_adj = omap_dmm->lut_height / ydiv; 790 int h_adj;
770 int w_adj = omap_dmm->lut_width / xdiv; 791 int w_adj;
771 unsigned long flags; 792 unsigned long flags;
772 793
794 if (!omap_dmm) {
795 /* early return if dmm/tiler device is not initialized */
796 return 0;
797 }
798
799 h_adj = omap_dmm->lut_height / ydiv;
800 w_adj = omap_dmm->lut_width / xdiv;
801
773 map = kzalloc(h_adj * sizeof(*map), GFP_KERNEL); 802 map = kzalloc(h_adj * sizeof(*map), GFP_KERNEL);
774 global_map = kzalloc((w_adj + 1) * h_adj, GFP_KERNEL); 803 global_map = kzalloc((w_adj + 1) * h_adj, GFP_KERNEL);
775 804
@@ -828,3 +857,17 @@ error:
828 return 0; 857 return 0;
829} 858}
830#endif 859#endif
860
861struct platform_driver omap_dmm_driver = {
862 .probe = omap_dmm_probe,
863 .remove = omap_dmm_remove,
864 .driver = {
865 .owner = THIS_MODULE,
866 .name = DMM_DRIVER_NAME,
867 },
868};
869
870MODULE_LICENSE("GPL v2");
871MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
872MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
873MODULE_ALIAS("platform:" DMM_DRIVER_NAME);
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.h b/drivers/staging/omapdrm/omap_dmm_tiler.h
index f87cb657d683..7b1052a329e4 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.h
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.h
@@ -16,6 +16,7 @@
16#ifndef OMAP_DMM_TILER_H 16#ifndef OMAP_DMM_TILER_H
17#define OMAP_DMM_TILER_H 17#define OMAP_DMM_TILER_H
18 18
19#include <plat/cpu.h>
19#include "omap_drv.h" 20#include "omap_drv.h"
20#include "tcm.h" 21#include "tcm.h"
21 22
@@ -72,10 +73,6 @@ struct tiler_block {
72#define TIL_ADDR(x, orient, a)\ 73#define TIL_ADDR(x, orient, a)\
73 ((u32) (x) | (orient) | ((a) << SHIFT_ACC_MODE)) 74 ((u32) (x) | (orient) | ((a) << SHIFT_ACC_MODE))
74 75
75/* externally accessible functions */
76int omap_dmm_init(struct drm_device *dev);
77int omap_dmm_remove(void);
78
79#ifdef CONFIG_DEBUG_FS 76#ifdef CONFIG_DEBUG_FS
80int tiler_map_show(struct seq_file *s, void *arg); 77int tiler_map_show(struct seq_file *s, void *arg);
81#endif 78#endif
@@ -97,7 +94,9 @@ uint32_t tiler_stride(enum tiler_fmt fmt);
97size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h); 94size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
98size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h); 95size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
99void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h); 96void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
97bool dmm_is_initialized(void);
100 98
99extern struct platform_driver omap_dmm_driver;
101 100
102/* GEM bo flags -> tiler fmt */ 101/* GEM bo flags -> tiler fmt */
103static inline enum tiler_fmt gem2fmt(uint32_t flags) 102static inline enum tiler_fmt gem2fmt(uint32_t flags)
@@ -127,9 +126,9 @@ static inline bool validfmt(enum tiler_fmt fmt)
127 } 126 }
128} 127}
129 128
130struct omap_dmm_platform_data { 129static inline int dmm_is_available(void)
131 void __iomem *base; 130{
132 int irq; 131 return cpu_is_omap44xx();
133}; 132}
134 133
135#endif 134#endif
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index 3bbea9aac404..3df5b4c58ecd 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -21,6 +21,7 @@
21 21
22#include "drm_crtc_helper.h" 22#include "drm_crtc_helper.h"
23#include "drm_fb_helper.h" 23#include "drm_fb_helper.h"
24#include "omap_dmm_tiler.h"
24 25
25#define DRIVER_NAME MODULE_NAME 26#define DRIVER_NAME MODULE_NAME
26#define DRIVER_DESC "OMAP DRM" 27#define DRIVER_DESC "OMAP DRM"
@@ -570,6 +571,11 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
570 571
571 dev->dev_private = priv; 572 dev->dev_private = priv;
572 573
574 priv->wq = alloc_workqueue("omapdrm",
575 WQ_UNBOUND | WQ_NON_REENTRANT, 1);
576
577 INIT_LIST_HEAD(&priv->obj_list);
578
573 omap_gem_init(dev); 579 omap_gem_init(dev);
574 580
575 ret = omap_modeset_init(dev); 581 ret = omap_modeset_init(dev);
@@ -598,6 +604,8 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
598 604
599static int dev_unload(struct drm_device *dev) 605static int dev_unload(struct drm_device *dev)
600{ 606{
607 struct omap_drm_private *priv = dev->dev_private;
608
601 DBG("unload: dev=%p", dev); 609 DBG("unload: dev=%p", dev);
602 610
603 drm_vblank_cleanup(dev); 611 drm_vblank_cleanup(dev);
@@ -607,6 +615,9 @@ static int dev_unload(struct drm_device *dev)
607 omap_modeset_free(dev); 615 omap_modeset_free(dev);
608 omap_gem_deinit(dev); 616 omap_gem_deinit(dev);
609 617
618 flush_workqueue(priv->wq);
619 destroy_workqueue(priv->wq);
620
610 kfree(dev->dev_private); 621 kfree(dev->dev_private);
611 dev->dev_private = NULL; 622 dev->dev_private = NULL;
612 623
@@ -792,6 +803,9 @@ static void pdev_shutdown(struct platform_device *device)
792static int pdev_probe(struct platform_device *device) 803static int pdev_probe(struct platform_device *device)
793{ 804{
794 DBG("%s", device->name); 805 DBG("%s", device->name);
806 if (platform_driver_register(&omap_dmm_driver))
807 dev_err(&device->dev, "DMM registration failed\n");
808
795 return drm_platform_init(&omap_drm_driver, device); 809 return drm_platform_init(&omap_drm_driver, device);
796} 810}
797 811
@@ -799,6 +813,8 @@ static int pdev_remove(struct platform_device *device)
799{ 813{
800 DBG(""); 814 DBG("");
801 drm_platform_exit(&omap_drm_driver, device); 815 drm_platform_exit(&omap_drm_driver, device);
816
817 platform_driver_unregister(&omap_dmm_driver);
802 return 0; 818 return 0;
803} 819}
804 820
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/staging/omapdrm/omap_drv.h
index 61fe022dda5b..b7e0f0773003 100644
--- a/drivers/staging/omapdrm/omap_drv.h
+++ b/drivers/staging/omapdrm/omap_drv.h
@@ -42,21 +42,31 @@
42struct omap_drm_private { 42struct omap_drm_private {
43 unsigned int num_crtcs; 43 unsigned int num_crtcs;
44 struct drm_crtc *crtcs[8]; 44 struct drm_crtc *crtcs[8];
45
45 unsigned int num_planes; 46 unsigned int num_planes;
46 struct drm_plane *planes[8]; 47 struct drm_plane *planes[8];
48
47 unsigned int num_encoders; 49 unsigned int num_encoders;
48 struct drm_encoder *encoders[8]; 50 struct drm_encoder *encoders[8];
51
49 unsigned int num_connectors; 52 unsigned int num_connectors;
50 struct drm_connector *connectors[8]; 53 struct drm_connector *connectors[8];
51 54
52 struct drm_fb_helper *fbdev; 55 struct drm_fb_helper *fbdev;
53 56
57 struct workqueue_struct *wq;
58
59 struct list_head obj_list;
60
54 bool has_dmm; 61 bool has_dmm;
55}; 62};
56 63
57#ifdef CONFIG_DEBUG_FS 64#ifdef CONFIG_DEBUG_FS
58int omap_debugfs_init(struct drm_minor *minor); 65int omap_debugfs_init(struct drm_minor *minor);
59void omap_debugfs_cleanup(struct drm_minor *minor); 66void omap_debugfs_cleanup(struct drm_minor *minor);
67void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
68void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
69void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
60#endif 70#endif
61 71
62struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev); 72struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
@@ -75,6 +85,8 @@ int omap_plane_mode_set(struct drm_plane *plane,
75 unsigned int crtc_w, unsigned int crtc_h, 85 unsigned int crtc_w, unsigned int crtc_h,
76 uint32_t src_x, uint32_t src_y, 86 uint32_t src_x, uint32_t src_y,
77 uint32_t src_w, uint32_t src_h); 87 uint32_t src_w, uint32_t src_h);
88void omap_plane_on_endwin(struct drm_plane *plane,
89 void (*fxn)(void *), void *arg);
78 90
79struct drm_encoder *omap_encoder_init(struct drm_device *dev, 91struct drm_encoder *omap_encoder_init(struct drm_device *dev,
80 struct omap_overlay_manager *mgr); 92 struct omap_overlay_manager *mgr);
@@ -92,13 +104,16 @@ void omap_connector_mode_set(struct drm_connector *connector,
92void omap_connector_flush(struct drm_connector *connector, 104void omap_connector_flush(struct drm_connector *connector,
93 int x, int y, int w, int h); 105 int x, int y, int w, int h);
94 106
107uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
108 uint32_t max_formats, enum omap_color_mode supported_modes);
95struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, 109struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
96 struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd); 110 struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
97struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, 111struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
98 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); 112 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
99struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p); 113struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
100int omap_framebuffer_pin(struct drm_framebuffer *fb); 114int omap_framebuffer_replace(struct drm_framebuffer *a,
101void omap_framebuffer_unpin(struct drm_framebuffer *fb); 115 struct drm_framebuffer *b, void *arg,
116 void (*unpin)(void *arg, struct drm_gem_object *bo));
102void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y, 117void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
103 struct omap_overlay_info *info); 118 struct omap_overlay_info *info);
104struct drm_connector *omap_framebuffer_get_next_connector( 119struct drm_connector *omap_framebuffer_get_next_connector(
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/staging/omapdrm/omap_fb.c
index d021a7ec58df..04b235b6724a 100644
--- a/drivers/staging/omapdrm/omap_fb.c
+++ b/drivers/staging/omapdrm/omap_fb.c
@@ -59,6 +59,20 @@ static const struct format formats[] = {
59 { OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY, {{2, 1}}, true }, 59 { OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY, {{2, 1}}, true },
60}; 60};
61 61
62/* convert from overlay's pixel formats bitmask to an array of fourcc's */
63uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
64 uint32_t max_formats, enum omap_color_mode supported_modes)
65{
66 uint32_t nformats = 0;
67 int i = 0;
68
69 for (i = 0; i < ARRAY_SIZE(formats) && nformats < max_formats; i++)
70 if (formats[i].dss_format & supported_modes)
71 pixel_formats[nformats++] = formats[i].pixel_format;
72
73 return nformats;
74}
75
62/* per-plane info for the fb: */ 76/* per-plane info for the fb: */
63struct plane { 77struct plane {
64 struct drm_gem_object *bo; 78 struct drm_gem_object *bo;
@@ -87,7 +101,7 @@ static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
87static void omap_framebuffer_destroy(struct drm_framebuffer *fb) 101static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
88{ 102{
89 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); 103 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
90 int i, n = drm_format_num_planes(omap_fb->format->pixel_format); 104 int i, n = drm_format_num_planes(fb->pixel_format);
91 105
92 DBG("destroy: FB ID: %d (%p)", fb->base.id, fb); 106 DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
93 107
@@ -123,41 +137,6 @@ static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
123 .dirty = omap_framebuffer_dirty, 137 .dirty = omap_framebuffer_dirty,
124}; 138};
125 139
126/* pins buffer in preparation for scanout */
127int omap_framebuffer_pin(struct drm_framebuffer *fb)
128{
129 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
130 int ret, i, n = drm_format_num_planes(omap_fb->format->pixel_format);
131
132 for (i = 0; i < n; i++) {
133 struct plane *plane = &omap_fb->planes[i];
134 ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
135 if (ret)
136 goto fail;
137 }
138
139 return 0;
140
141fail:
142 while (--i > 0) {
143 struct plane *plane = &omap_fb->planes[i];
144 omap_gem_put_paddr(plane->bo);
145 }
146 return ret;
147}
148
149/* releases buffer when done with scanout */
150void omap_framebuffer_unpin(struct drm_framebuffer *fb)
151{
152 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
153 int i, n = drm_format_num_planes(omap_fb->format->pixel_format);
154
155 for (i = 0; i < n; i++) {
156 struct plane *plane = &omap_fb->planes[i];
157 omap_gem_put_paddr(plane->bo);
158 }
159}
160
161/* update ovl info for scanout, handles cases of multi-planar fb's, etc. 140/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
162 */ 141 */
163void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y, 142void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
@@ -187,10 +166,59 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
187 } 166 }
188} 167}
189 168
169/* Call to unpin 'a' (if not NULL) and pin 'b' (if not NULL). Buffers to
170 * unpin are just pushed to the unpin fifo so that the caller can defer
171 * the unpin until vblank.
172 *
173 * Note if this fails (i.e. something went very wrong!), all buffers are
174 * unpinned, and the caller disables the overlay. We could have tried
175 * to revert to the previous set of pinned buffers, but if things are
176 * hosed there is no guarantee that would succeed.
177 */
178int omap_framebuffer_replace(struct drm_framebuffer *a,
179 struct drm_framebuffer *b, void *arg,
180 void (*unpin)(void *arg, struct drm_gem_object *bo))
181{
182 int ret = 0, i, na, nb;
183 struct omap_framebuffer *ofba = to_omap_framebuffer(a);
184 struct omap_framebuffer *ofbb = to_omap_framebuffer(b);
185
186 na = a ? drm_format_num_planes(a->pixel_format) : 0;
187 nb = b ? drm_format_num_planes(b->pixel_format) : 0;
188
189 for (i = 0; i < max(na, nb); i++) {
190 struct plane *pa, *pb;
191
192 pa = (i < na) ? &ofba->planes[i] : NULL;
193 pb = (i < nb) ? &ofbb->planes[i] : NULL;
194
195 if (pa) {
196 unpin(arg, pa->bo);
197 pa->paddr = 0;
198 }
199
200 if (pb && !ret)
201 ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true);
202 }
203
204 if (ret) {
205 /* something went wrong.. unpin what has been pinned */
206 for (i = 0; i < nb; i++) {
207			struct plane *pb = &ofbb->planes[i];
208 if (pb->paddr) {
209 unpin(arg, pb->bo);
210 pb->paddr = 0;
211 }
212 }
213 }
214
215 return ret;
216}
217
190struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p) 218struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
191{ 219{
192 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); 220 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
193 if (p >= drm_format_num_planes(omap_fb->format->pixel_format)) 221 if (p >= drm_format_num_planes(fb->pixel_format))
194 return NULL; 222 return NULL;
195 return omap_fb->planes[p].bo; 223 return omap_fb->planes[p].bo;
196} 224}
@@ -249,6 +277,24 @@ void omap_framebuffer_flush(struct drm_framebuffer *fb,
249 } 277 }
250} 278}
251 279
280#ifdef CONFIG_DEBUG_FS
281void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
282{
283 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
284 int i, n = drm_format_num_planes(fb->pixel_format);
285
286 seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
287 (char *)&fb->pixel_format);
288
289 for (i = 0; i < n; i++) {
290 struct plane *plane = &omap_fb->planes[i];
291 seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
292 i, plane->offset, plane->pitch);
293 omap_gem_describe(plane->bo, m);
294 }
295}
296#endif
297
252struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, 298struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
253 struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd) 299 struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
254{ 300{
@@ -337,8 +383,8 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
337 383
338 plane->bo = bos[i]; 384 plane->bo = bos[i];
339 plane->offset = mode_cmd->offsets[i]; 385 plane->offset = mode_cmd->offsets[i];
340 plane->pitch = mode_cmd->pitches[i]; 386 plane->pitch = pitch;
341 plane->paddr = pitch; 387 plane->paddr = 0;
342 } 388 }
343 389
344 drm_helper_mode_fill_fb_struct(fb, mode_cmd); 390 drm_helper_mode_fill_fb_struct(fb, mode_cmd);
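
The new omap_framebuffer_get_formats() above is a straight bitmask filter over the driver's format table. Below is a minimal standalone sketch of the same idea; the three-entry table and its bit values are hypothetical stand-ins for the real omap_color_mode/DRM fourcc definitions, and the FOURCC macro mirrors DRM's fourcc_code() packing:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
/* same byte packing as DRM's fourcc_code() */
#define FOURCC(a, b, c, d) \
	((uint32_t)(a) | ((uint32_t)(b) << 8) | \
	 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

/* hypothetical dss-bitmask -> fourcc table (stand-in for the real one) */
static const struct {
	uint32_t dss_format;	/* one bit per supported color mode */
	uint32_t pixel_format;	/* fourcc */
} formats[] = {
	{ 1 << 0, FOURCC('R', 'G', '1', '6') },
	{ 1 << 1, FOURCC('X', 'R', '2', '4') },
	{ 1 << 2, FOURCC('N', 'V', '1', '2') },
};

static uint32_t get_formats(uint32_t *out, uint32_t max, uint32_t supported)
{
	uint32_t n = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(formats) && n < max; i++)
		if (formats[i].dss_format & supported)
			out[n++] = formats[i].pixel_format;
	return n;
}

int main(void)
{
	uint32_t fmts[8];
	/* an overlay that claims to support modes 0 and 2 */
	uint32_t n = get_formats(fmts, ARRAY_SIZE(fmts), (1 << 0) | (1 << 2));
	for (uint32_t i = 0; i < n; i++)
		printf("%.4s\n", (const char *)&fmts[i]); /* little-endian */
	return 0;
}

Each plane can then hand the filtered array straight to drm_plane_init(), which is exactly what the omap_plane.c hunk later in this series does.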
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
index 96940bbfc6f4..11acd4c35ed2 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -37,6 +37,9 @@ struct omap_fbdev {
37 struct drm_framebuffer *fb; 37 struct drm_framebuffer *fb;
38 struct drm_gem_object *bo; 38 struct drm_gem_object *bo;
39 bool ywrap_enabled; 39 bool ywrap_enabled;
40
41 /* for deferred dmm roll when getting called in atomic ctx */
42 struct work_struct work;
40}; 43};
41 44
42static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h); 45static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
@@ -75,12 +78,22 @@ static void omap_fbdev_imageblit(struct fb_info *fbi,
75 image->width, image->height); 78 image->width, image->height);
76} 79}
77 80
81static void pan_worker(struct work_struct *work)
82{
83 struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
84 struct fb_info *fbi = fbdev->base.fbdev;
85 int npages;
86
87 /* DMM roll shifts in 4K pages: */
88 npages = fbi->fix.line_length >> PAGE_SHIFT;
89 omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages);
90}
91
78static int omap_fbdev_pan_display(struct fb_var_screeninfo *var, 92static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
79 struct fb_info *fbi) 93 struct fb_info *fbi)
80{ 94{
81 struct drm_fb_helper *helper = get_fb(fbi); 95 struct drm_fb_helper *helper = get_fb(fbi);
82 struct omap_fbdev *fbdev = to_omap_fbdev(helper); 96 struct omap_fbdev *fbdev = to_omap_fbdev(helper);
83 int npages;
84 97
85 if (!helper) 98 if (!helper)
86 goto fallback; 99 goto fallback;
@@ -88,9 +101,12 @@ static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
88 if (!fbdev->ywrap_enabled) 101 if (!fbdev->ywrap_enabled)
89 goto fallback; 102 goto fallback;
90 103
91 /* DMM roll shifts in 4K pages: */ 104 if (drm_can_sleep()) {
92 npages = fbi->fix.line_length >> PAGE_SHIFT; 105 pan_worker(&fbdev->work);
93 omap_gem_roll(fbdev->bo, var->yoffset * npages); 106 } else {
107 struct omap_drm_private *priv = helper->dev->dev_private;
108 queue_work(priv->wq, &fbdev->work);
109 }
94 110
95 return 0; 111 return 0;
96 112
@@ -336,6 +352,8 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
336 goto fail; 352 goto fail;
337 } 353 }
338 354
355 INIT_WORK(&fbdev->work, pan_worker);
356
339 helper = &fbdev->base; 357 helper = &fbdev->base;
340 358
341 helper->funcs = &omap_fb_helper_funcs; 359 helper->funcs = &omap_fb_helper_funcs;
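
The pan_display change above is the standard defer-if-atomic pattern: run the work synchronously when sleeping is allowed, otherwise queue it to a workqueue. A minimal kernel-style sketch of the same shape — struct panner, its wq member, and the bool parameter are illustrative stand-ins for the driver's private workqueue and its drm_can_sleep() check:

#include <linux/workqueue.h>

struct panner {
	struct work_struct work;
	struct workqueue_struct *wq;	/* assumed allocated at init */
};

static void pan_worker(struct work_struct *work)
{
	struct panner *p = container_of(work, struct panner, work);
	/* ... the part that may block, e.g. reprogramming the DMM ... */
}

static void request_pan(struct panner *p, bool can_sleep)
{
	if (can_sleep)
		pan_worker(&p->work);		/* do it synchronously */
	else
		queue_work(p->wq, &p->work);	/* atomic ctx: defer */
}

/* at init time: INIT_WORK(&p->work, pan_worker); */

The payoff is visible in the omap_gem.c hunk below: once fbcon's atomic callers are routed through the worker, omap_gem_roll() can drop its in_atomic() special case entirely.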
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
index b7d6f886c5cf..921f058cc6a4 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/staging/omapdrm/omap_gem.c
@@ -45,6 +45,8 @@ int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
45struct omap_gem_object { 45struct omap_gem_object {
46 struct drm_gem_object base; 46 struct drm_gem_object base;
47 47
48 struct list_head mm_list;
49
48 uint32_t flags; 50 uint32_t flags;
49 51
50 /** width/height for tiled formats (rounded up to slot boundaries) */ 52 /** width/height for tiled formats (rounded up to slot boundaries) */
@@ -151,10 +153,23 @@ static void evict_entry(struct drm_gem_object *obj,
151 enum tiler_fmt fmt, struct usergart_entry *entry) 153 enum tiler_fmt fmt, struct usergart_entry *entry)
152{ 154{
153 if (obj->dev->dev_mapping) { 155 if (obj->dev->dev_mapping) {
154 size_t size = PAGE_SIZE * usergart[fmt].height; 156 struct omap_gem_object *omap_obj = to_omap_bo(obj);
157 int n = usergart[fmt].height;
158 size_t size = PAGE_SIZE * n;
155 loff_t off = mmap_offset(obj) + 159 loff_t off = mmap_offset(obj) +
156 (entry->obj_pgoff << PAGE_SHIFT); 160 (entry->obj_pgoff << PAGE_SHIFT);
157 unmap_mapping_range(obj->dev->dev_mapping, off, size, 1); 161 const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
162 if (m > 1) {
163 int i;
164 /* if stride > than PAGE_SIZE then sparse mapping: */
165 for (i = n; i > 0; i--) {
166 unmap_mapping_range(obj->dev->dev_mapping,
167 off, PAGE_SIZE, 1);
168 off += PAGE_SIZE * m;
169 }
170 } else {
171 unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
172 }
158 } 173 }
159 174
160 entry->obj = NULL; 175 entry->obj = NULL;
@@ -254,13 +269,17 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
254/** get mmap offset */ 269/** get mmap offset */
255static uint64_t mmap_offset(struct drm_gem_object *obj) 270static uint64_t mmap_offset(struct drm_gem_object *obj)
256{ 271{
272 struct drm_device *dev = obj->dev;
273
274 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
275
257 if (!obj->map_list.map) { 276 if (!obj->map_list.map) {
258 /* Make it mmapable */ 277 /* Make it mmapable */
259 size_t size = omap_gem_mmap_size(obj); 278 size_t size = omap_gem_mmap_size(obj);
260 int ret = _drm_gem_create_mmap_offset_size(obj, size); 279 int ret = _drm_gem_create_mmap_offset_size(obj, size);
261 280
262 if (ret) { 281 if (ret) {
263 dev_err(obj->dev->dev, "could not allocate mmap offset"); 282 dev_err(dev->dev, "could not allocate mmap offset\n");
264 return 0; 283 return 0;
265 } 284 }
266 } 285 }
@@ -336,26 +355,39 @@ static int fault_2d(struct drm_gem_object *obj,
336 void __user *vaddr; 355 void __user *vaddr;
337 int i, ret, slots; 356 int i, ret, slots;
338 357
339 if (!usergart) 358 /*
340 return -EFAULT; 359 * Note the height of the slot is also equal to the number of pages
341		360	 * that need to be mapped in to fill a 4kb-wide CPU page. If the slot
342 /* TODO: this fxn might need a bit tweaking to deal w/ tiled buffers 361 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
343 * that are wider than 4kb 362 */
363 const int n = usergart[fmt].height;
364 const int n_shift = usergart[fmt].height_shift;
365
366 /*
367 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
368	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
369 * into account in some of the math, so figure out virtual stride
370 * in pages
344 */ 371 */
372 const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
345 373
346 /* We don't use vmf->pgoff since that has the fake offset: */ 374 /* We don't use vmf->pgoff since that has the fake offset: */
347 pgoff = ((unsigned long)vmf->virtual_address - 375 pgoff = ((unsigned long)vmf->virtual_address -
348 vma->vm_start) >> PAGE_SHIFT; 376 vma->vm_start) >> PAGE_SHIFT;
349 377
350 /* actual address we start mapping at is rounded down to previous slot 378 /*
379 * Actual address we start mapping at is rounded down to previous slot
351 * boundary in the y direction: 380 * boundary in the y direction:
352 */ 381 */
353 base_pgoff = round_down(pgoff, usergart[fmt].height); 382 base_pgoff = round_down(pgoff, m << n_shift);
354 vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
355 entry = &usergart[fmt].entry[usergart[fmt].last];
356 383
384 /* figure out buffer width in slots */
357 slots = omap_obj->width >> usergart[fmt].slot_shift; 385 slots = omap_obj->width >> usergart[fmt].slot_shift;
358 386
387 vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
388
389 entry = &usergart[fmt].entry[usergart[fmt].last];
390
359 /* evict previous buffer using this usergart entry, if any: */ 391 /* evict previous buffer using this usergart entry, if any: */
360 if (entry->obj) 392 if (entry->obj)
361 evict_entry(entry->obj, fmt, entry); 393 evict_entry(entry->obj, fmt, entry);
@@ -363,23 +395,30 @@ static int fault_2d(struct drm_gem_object *obj,
363 entry->obj = obj; 395 entry->obj = obj;
364 entry->obj_pgoff = base_pgoff; 396 entry->obj_pgoff = base_pgoff;
365 397
366 /* now convert base_pgoff to phys offset from virt offset: 398 /* now convert base_pgoff to phys offset from virt offset: */
367 */ 399 base_pgoff = (base_pgoff >> n_shift) * slots;
368 base_pgoff = (base_pgoff >> usergart[fmt].height_shift) * slots; 400
369 401 /* for wider-than 4k.. figure out which part of the slot-row we want: */
370 /* map in pages. Note the height of the slot is also equal to the 402 if (m > 1) {
371 * number of pages that need to be mapped in to fill 4kb wide CPU page. 403 int off = pgoff % m;
372 * If the height is 64, then 64 pages fill a 4kb wide by 64 row region. 404 entry->obj_pgoff += off;
373 * Beyond the valid pixel part of the buffer, we set pages[i] to NULL to 405 base_pgoff /= m;
374 * get a dummy page mapped in.. if someone reads/writes it they will get 406 slots = min(slots - (off << n_shift), n);
375 * random/undefined content, but at least it won't be corrupting 407 base_pgoff += off << n_shift;
376 * whatever other random page used to be mapped in, or other undefined 408 vaddr += off << PAGE_SHIFT;
377 * behavior. 409 }
410
411 /*
412 * Map in pages. Beyond the valid pixel part of the buffer, we set
413 * pages[i] to NULL to get a dummy page mapped in.. if someone
414 * reads/writes it they will get random/undefined content, but at
415 * least it won't be corrupting whatever other random page used to
416 * be mapped in, or other undefined behavior.
378 */ 417 */
379 memcpy(pages, &omap_obj->pages[base_pgoff], 418 memcpy(pages, &omap_obj->pages[base_pgoff],
380 sizeof(struct page *) * slots); 419 sizeof(struct page *) * slots);
381 memset(pages + slots, 0, 420 memset(pages + slots, 0,
382 sizeof(struct page *) * (usergart[fmt].height - slots)); 421 sizeof(struct page *) * (n - slots));
383 422
384 ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true); 423 ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
385 if (ret) { 424 if (ret) {
@@ -387,16 +426,15 @@ static int fault_2d(struct drm_gem_object *obj,
387 return ret; 426 return ret;
388 } 427 }
389 428
390 i = usergart[fmt].height;
391 pfn = entry->paddr >> PAGE_SHIFT; 429 pfn = entry->paddr >> PAGE_SHIFT;
392 430
393 VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, 431 VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
394 pfn, pfn << PAGE_SHIFT); 432 pfn, pfn << PAGE_SHIFT);
395 433
396 while (i--) { 434 for (i = n; i > 0; i--) {
397 vm_insert_mixed(vma, (unsigned long)vaddr, pfn); 435 vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
398 pfn += usergart[fmt].stride_pfn; 436 pfn += usergart[fmt].stride_pfn;
399 vaddr += PAGE_SIZE; 437 vaddr += PAGE_SIZE * m;
400 } 438 }
401 439
402 /* simple round-robin: */ 440 /* simple round-robin: */
@@ -566,6 +604,8 @@ fail:
566 604
567/* Set scrolling position. This allows us to implement fast scrolling 605/* Set scrolling position. This allows us to implement fast scrolling
568 * for console. 606 * for console.
607 *
608 * Call only from non-atomic contexts.
569 */ 609 */
570int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll) 610int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
571{ 611{
@@ -580,18 +620,6 @@ int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
580 620
581 omap_obj->roll = roll; 621 omap_obj->roll = roll;
582 622
583 if (in_atomic() || mutex_is_locked(&obj->dev->struct_mutex)) {
584 /* this can get called from fbcon in atomic context.. so
585 * just ignore it and wait for next time called from
586 * interruptible context to update the PAT.. the result
587 * may be that user sees wrap-around instead of scrolling
588 * momentarily on the screen. If we wanted to be fancier
589 * we could perhaps schedule some workqueue work at this
590 * point.
591 */
592 return 0;
593 }
594
595 mutex_lock(&obj->dev->struct_mutex); 623 mutex_lock(&obj->dev->struct_mutex);
596 624
597 /* if we aren't mapped yet, we don't need to do anything */ 625 /* if we aren't mapped yet, we don't need to do anything */
@@ -774,6 +802,56 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
774 return omap_obj->vaddr; 802 return omap_obj->vaddr;
775} 803}
776 804
805#ifdef CONFIG_DEBUG_FS
806void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
807{
808 struct drm_device *dev = obj->dev;
809 struct omap_gem_object *omap_obj = to_omap_bo(obj);
810 uint64_t off = 0;
811
812	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
813
814 if (obj->map_list.map)
815 off = (uint64_t)obj->map_list.hash.key;
816
817 seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
818 omap_obj->flags, obj->name, obj->refcount.refcount.counter,
819 off, omap_obj->paddr, omap_obj->paddr_cnt,
820 omap_obj->vaddr, omap_obj->roll);
821
822 if (omap_obj->flags & OMAP_BO_TILED) {
823 seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
824 if (omap_obj->block) {
825 struct tcm_area *area = &omap_obj->block->area;
826 seq_printf(m, " (%dx%d, %dx%d)",
827 area->p0.x, area->p0.y,
828 area->p1.x, area->p1.y);
829 }
830 } else {
831		seq_printf(m, " %zu", obj->size);
832 }
833
834 seq_printf(m, "\n");
835}
836
837void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
838{
839 struct omap_gem_object *omap_obj;
840 int count = 0;
841 size_t size = 0;
842
843 list_for_each_entry(omap_obj, list, mm_list) {
844 struct drm_gem_object *obj = &omap_obj->base;
845 seq_printf(m, " ");
846 omap_gem_describe(obj, m);
847 count++;
848 size += obj->size;
849 }
850
851 seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
852}
853#endif
854
777/* Buffer Synchronization: 855/* Buffer Synchronization:
778 */ 856 */
779 857
@@ -1040,6 +1118,10 @@ void omap_gem_free_object(struct drm_gem_object *obj)
1040 1118
1041 evict(obj); 1119 evict(obj);
1042 1120
1121 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1122
1123 list_del(&omap_obj->mm_list);
1124
1043 if (obj->map_list.map) { 1125 if (obj->map_list.map) {
1044 drm_gem_free_mmap_offset(obj); 1126 drm_gem_free_mmap_offset(obj);
1045 } 1127 }
@@ -1140,6 +1222,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1140 goto fail; 1222 goto fail;
1141 } 1223 }
1142 1224
1225 list_add(&omap_obj->mm_list, &priv->obj_list);
1226
1143 obj = &omap_obj->base; 1227 obj = &omap_obj->base;
1144 1228
1145 if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) { 1229 if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
@@ -1186,12 +1270,11 @@ void omap_gem_init(struct drm_device *dev)
1186 const enum tiler_fmt fmts[] = { 1270 const enum tiler_fmt fmts[] = {
1187 TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT 1271 TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1188 }; 1272 };
1189 int i, j, ret; 1273 int i, j;
1190 1274
1191 ret = omap_dmm_init(dev); 1275 if (!dmm_is_initialized()) {
1192 if (ret) {
1193 /* DMM only supported on OMAP4 and later, so this isn't fatal */ 1276 /* DMM only supported on OMAP4 and later, so this isn't fatal */
1194 dev_warn(dev->dev, "omap_dmm_init failed, disabling DMM\n"); 1277 dev_warn(dev->dev, "DMM not available, disable DMM support\n");
1195 return; 1278 return;
1196 } 1279 }
1197 1280
@@ -1241,6 +1324,5 @@ void omap_gem_deinit(struct drm_device *dev)
1241 /* I believe we can rely on there being no more outstanding GEM 1324 /* I believe we can rely on there being no more outstanding GEM
1242 * objects which could depend on usergart/dmm at this point. 1325 * objects which could depend on usergart/dmm at this point.
1243 */ 1326 */
1244 omap_dmm_remove();
1245 kfree(usergart); 1327 kfree(usergart);
1246} 1328}
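
The fault_2d() rework above hinges on two derived quantities: n, the slot height in rows, and m, the buffer's virtual stride in pages (greater than 1 for buffers wider than 4kb). A standalone sketch of that arithmetic with made-up geometry (n = 64, a 5000-pixel-wide 16-bit buffer), showing how a faulting page offset is snapped back to a slot-row boundary and split into a row and a 4kb column:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long width = 5000;	/* pixels (hypothetical) */
	unsigned fmt = 1;		/* 16-bit: bytes = width << fmt */
	unsigned n_shift = 6;		/* slot height 64 (hypothetical) */

	/* virtual stride in pages, as computed in fault_2d() */
	unsigned long m = 1 + ((width << fmt) / PAGE_SIZE);

	unsigned long pgoff = 1000;	/* page offset of the fault */
	/* round down to the previous slot-row boundary (m pages per row) */
	unsigned long base = pgoff - (pgoff % (m << n_shift));
	unsigned long off = pgoff % m;	/* which 4kb column of the row */

	printf("m=%lu base_pgoff=%lu column=%lu row=%lu\n",
	       m, base, off, (pgoff - base) / m);
	return 0;
}

For these values m is 3, so each slot row spans 192 page offsets; a fault at offset 1000 snaps to base 960, column 1. This is the "wider-than 4k" bookkeeping that the m > 1 branch in the diff applies to entry->obj_pgoff and vaddr.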
diff --git a/drivers/staging/omapdrm/omap_gem_helpers.c b/drivers/staging/omapdrm/omap_gem_helpers.c
index 29275c7209e9..f895363a5e54 100644
--- a/drivers/staging/omapdrm/omap_gem_helpers.c
+++ b/drivers/staging/omapdrm/omap_gem_helpers.c
@@ -84,7 +84,7 @@ fail:
84 page_cache_release(pages[i]); 84 page_cache_release(pages[i]);
85 } 85 }
86 drm_free_large(pages); 86 drm_free_large(pages);
87 return ERR_PTR(PTR_ERR(p)); 87 return ERR_CAST(p);
88} 88}
89 89
90/** 90/**
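
The one-liner above replaces ERR_PTR(PTR_ERR(p)) with ERR_CAST(p): both yield the same encoded-errno pointer, but ERR_CAST states directly that an error pointer of one type is being passed through as another. A standalone sketch of the encoding, mirroring the kernel's err.h helpers:

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
/* pass an error pointer through unchanged, just re-typed */
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

int main(void)
{
	void *p = ERR_PTR(-12);	/* -ENOMEM */
	void *q = ERR_CAST(p);	/* same bits as ERR_PTR(PTR_ERR(p)) */
	printf("IS_ERR=%d err=%ld same=%d\n",
	       IS_ERR(q), PTR_ERR(q), q == ERR_PTR(PTR_ERR(p)));
	return 0;
}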
diff --git a/drivers/staging/omapdrm/omap_plane.c b/drivers/staging/omapdrm/omap_plane.c
index 97909124a1fe..7997be74010d 100644
--- a/drivers/staging/omapdrm/omap_plane.c
+++ b/drivers/staging/omapdrm/omap_plane.c
@@ -17,6 +17,8 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/kfifo.h>
21
20#include "omap_drv.h" 22#include "omap_drv.h"
21 23
22/* some hackery because omapdss has an 'enum omap_plane' (which would be 24/* some hackery because omapdss has an 'enum omap_plane' (which would be
@@ -29,6 +31,11 @@
29 * plane funcs 31 * plane funcs
30 */ 32 */
31 33
34struct callback {
35 void (*fxn)(void *);
36 void *arg;
37};
38
32#define to_omap_plane(x) container_of(x, struct omap_plane, base) 39#define to_omap_plane(x) container_of(x, struct omap_plane, base)
33 40
34struct omap_plane { 41struct omap_plane {
@@ -43,8 +50,84 @@ struct omap_plane {
43 50
44 /* last fb that we pinned: */ 51 /* last fb that we pinned: */
45 struct drm_framebuffer *pinned_fb; 52 struct drm_framebuffer *pinned_fb;
53
54 uint32_t nformats;
55 uint32_t formats[32];
56
57 /* for synchronizing access to unpins fifo */
58 struct mutex unpin_mutex;
59
60 /* set of bo's pending unpin until next END_WIN irq */
61 DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
62 int num_unpins, pending_num_unpins;
63
64 /* for deferred unpin when we need to wait for scanout complete irq */
65 struct work_struct work;
66
67 /* callback on next endwin irq */
68 struct callback endwin;
46}; 69};
47 70
71/* map from ovl->id to the irq we are interested in for scanout-done */
72static const uint32_t id2irq[] = {
73 [OMAP_DSS_GFX] = DISPC_IRQ_GFX_END_WIN,
74 [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_END_WIN,
75 [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_END_WIN,
76 [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_END_WIN,
77};
78
79static void dispc_isr(void *arg, uint32_t mask)
80{
81 struct drm_plane *plane = arg;
82 struct omap_plane *omap_plane = to_omap_plane(plane);
83 struct omap_drm_private *priv = plane->dev->dev_private;
84
85 omap_dispc_unregister_isr(dispc_isr, plane,
86 id2irq[omap_plane->ovl->id]);
87
88 queue_work(priv->wq, &omap_plane->work);
89}
90
91static void unpin_worker(struct work_struct *work)
92{
93 struct omap_plane *omap_plane =
94 container_of(work, struct omap_plane, work);
95 struct callback endwin;
96
97 mutex_lock(&omap_plane->unpin_mutex);
98 DBG("unpinning %d of %d", omap_plane->num_unpins,
99 omap_plane->num_unpins + omap_plane->pending_num_unpins);
100 while (omap_plane->num_unpins > 0) {
101 struct drm_gem_object *bo = NULL;
102 int ret = kfifo_get(&omap_plane->unpin_fifo, &bo);
103 WARN_ON(!ret);
104 omap_gem_put_paddr(bo);
105 drm_gem_object_unreference_unlocked(bo);
106 omap_plane->num_unpins--;
107 }
108 endwin = omap_plane->endwin;
109 omap_plane->endwin.fxn = NULL;
110 mutex_unlock(&omap_plane->unpin_mutex);
111
112 if (endwin.fxn)
113 endwin.fxn(endwin.arg);
114}
115
116static void install_irq(struct drm_plane *plane)
117{
118 struct omap_plane *omap_plane = to_omap_plane(plane);
119 struct omap_overlay *ovl = omap_plane->ovl;
120 int ret;
121
122 ret = omap_dispc_register_isr(dispc_isr, plane, id2irq[ovl->id]);
123
124 /*
125 * omapdss has upper limit on # of registered irq handlers,
126 * which we shouldn't hit.. but if we do the limit should
127 * be raised or bad things happen:
128 */
129 WARN_ON(ret == -EBUSY);
130}
48 131
49/* push changes down to dss2 */ 132/* push changes down to dss2 */
50static int commit(struct drm_plane *plane) 133static int commit(struct drm_plane *plane)
@@ -71,6 +154,11 @@ static int commit(struct drm_plane *plane)
71 return ret; 154 return ret;
72 } 155 }
73 156
157 mutex_lock(&omap_plane->unpin_mutex);
158 omap_plane->num_unpins += omap_plane->pending_num_unpins;
159 omap_plane->pending_num_unpins = 0;
160 mutex_unlock(&omap_plane->unpin_mutex);
161
74 /* our encoder doesn't necessarily get a commit() after this, in 162 /* our encoder doesn't necessarily get a commit() after this, in
75 * particular in the dpms() and mode_set_base() cases, so force the 163 * particular in the dpms() and mode_set_base() cases, so force the
76 * manager to update: 164 * manager to update:
@@ -83,8 +171,20 @@ static int commit(struct drm_plane *plane)
83 dev_err(dev->dev, "could not apply settings\n"); 171 dev_err(dev->dev, "could not apply settings\n");
84 return ret; 172 return ret;
85 } 173 }
174
175 /*
176 * NOTE: really this should be atomic w/ mgr->apply() but
177 * omapdss does not expose such an API
178 */
179 if (omap_plane->num_unpins > 0)
180 install_irq(plane);
181
182 } else {
183 struct omap_drm_private *priv = dev->dev_private;
184 queue_work(priv->wq, &omap_plane->work);
86 } 185 }
87 186
187
88 if (ovl->is_enabled(ovl)) { 188 if (ovl->is_enabled(ovl)) {
89 omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y, 189 omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
90 info->out_width, info->out_height); 190 info->out_width, info->out_height);
@@ -137,21 +237,48 @@ static void update_manager(struct drm_plane *plane)
137 } 237 }
138} 238}
139 239
240static void unpin(void *arg, struct drm_gem_object *bo)
241{
242 struct drm_plane *plane = arg;
243 struct omap_plane *omap_plane = to_omap_plane(plane);
244
245 if (kfifo_put(&omap_plane->unpin_fifo,
246 (const struct drm_gem_object **)&bo)) {
247 omap_plane->pending_num_unpins++;
248 /* also hold a ref so it isn't free'd while pinned */
249 drm_gem_object_reference(bo);
250 } else {
251 dev_err(plane->dev->dev, "unpin fifo full!\n");
252 omap_gem_put_paddr(bo);
253 }
254}
255
140/* update which fb (if any) is pinned for scanout */ 256/* update which fb (if any) is pinned for scanout */
141static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb) 257static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
142{ 258{
143 struct omap_plane *omap_plane = to_omap_plane(plane); 259 struct omap_plane *omap_plane = to_omap_plane(plane);
144 int ret = 0; 260 struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
261
262 if (pinned_fb != fb) {
263 int ret;
264
265 DBG("%p -> %p", pinned_fb, fb);
266
267 mutex_lock(&omap_plane->unpin_mutex);
268 ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
269 mutex_unlock(&omap_plane->unpin_mutex);
270
271 if (ret) {
272 dev_err(plane->dev->dev, "could not swap %p -> %p\n",
273 omap_plane->pinned_fb, fb);
274 omap_plane->pinned_fb = NULL;
275 return ret;
276 }
145 277
146 if (omap_plane->pinned_fb != fb) {
147 if (omap_plane->pinned_fb)
148 omap_framebuffer_unpin(omap_plane->pinned_fb);
149 omap_plane->pinned_fb = fb; 278 omap_plane->pinned_fb = fb;
150 if (fb)
151 ret = omap_framebuffer_pin(fb);
152 } 279 }
153 280
154 return ret; 281 return 0;
155} 282}
156 283
157/* update parameters that are dependent on the framebuffer dimensions and 284/* update parameters that are dependent on the framebuffer dimensions and
@@ -241,6 +368,8 @@ static void omap_plane_destroy(struct drm_plane *plane)
241 DBG("%s", omap_plane->ovl->name); 368 DBG("%s", omap_plane->ovl->name);
242 omap_plane_disable(plane); 369 omap_plane_disable(plane);
243 drm_plane_cleanup(plane); 370 drm_plane_cleanup(plane);
371 WARN_ON(omap_plane->pending_num_unpins + omap_plane->num_unpins > 0);
372 kfifo_free(&omap_plane->unpin_fifo);
244 kfree(omap_plane); 373 kfree(omap_plane);
245} 374}
246 375
@@ -258,37 +387,34 @@ int omap_plane_dpms(struct drm_plane *plane, int mode)
258 if (!r) 387 if (!r)
259 r = ovl->enable(ovl); 388 r = ovl->enable(ovl);
260 } else { 389 } else {
390 struct omap_drm_private *priv = plane->dev->dev_private;
261 r = ovl->disable(ovl); 391 r = ovl->disable(ovl);
262 update_pin(plane, NULL); 392 update_pin(plane, NULL);
393 queue_work(priv->wq, &omap_plane->work);
263 } 394 }
264 395
265 return r; 396 return r;
266} 397}
267 398
399void omap_plane_on_endwin(struct drm_plane *plane,
400 void (*fxn)(void *), void *arg)
401{
402 struct omap_plane *omap_plane = to_omap_plane(plane);
403
404 mutex_lock(&omap_plane->unpin_mutex);
405 omap_plane->endwin.fxn = fxn;
406 omap_plane->endwin.arg = arg;
407 mutex_unlock(&omap_plane->unpin_mutex);
408
409 install_irq(plane);
410}
411
268static const struct drm_plane_funcs omap_plane_funcs = { 412static const struct drm_plane_funcs omap_plane_funcs = {
269 .update_plane = omap_plane_update, 413 .update_plane = omap_plane_update,
270 .disable_plane = omap_plane_disable, 414 .disable_plane = omap_plane_disable,
271 .destroy = omap_plane_destroy, 415 .destroy = omap_plane_destroy,
272}; 416};
273 417
274static const uint32_t formats[] = {
275 DRM_FORMAT_RGB565,
276 DRM_FORMAT_RGBX4444,
277 DRM_FORMAT_XRGB4444,
278 DRM_FORMAT_RGBA4444,
279 DRM_FORMAT_ABGR4444,
280 DRM_FORMAT_XRGB1555,
281 DRM_FORMAT_ARGB1555,
282 DRM_FORMAT_RGB888,
283 DRM_FORMAT_RGBX8888,
284 DRM_FORMAT_XRGB8888,
285 DRM_FORMAT_RGBA8888,
286 DRM_FORMAT_ARGB8888,
287 DRM_FORMAT_NV12,
288 DRM_FORMAT_YUYV,
289 DRM_FORMAT_UYVY,
290};
291
292/* initialize plane */ 418/* initialize plane */
293struct drm_plane *omap_plane_init(struct drm_device *dev, 419struct drm_plane *omap_plane_init(struct drm_device *dev,
294 struct omap_overlay *ovl, unsigned int possible_crtcs, 420 struct omap_overlay *ovl, unsigned int possible_crtcs,
@@ -296,21 +422,38 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
296{ 422{
297 struct drm_plane *plane = NULL; 423 struct drm_plane *plane = NULL;
298 struct omap_plane *omap_plane; 424 struct omap_plane *omap_plane;
425 int ret;
299 426
300 DBG("%s: possible_crtcs=%08x, priv=%d", ovl->name, 427 DBG("%s: possible_crtcs=%08x, priv=%d", ovl->name,
301 possible_crtcs, priv); 428 possible_crtcs, priv);
302 429
430 /* friendly reminder to update table for future hw: */
431 WARN_ON(ovl->id >= ARRAY_SIZE(id2irq));
432
303 omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL); 433 omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
304 if (!omap_plane) { 434 if (!omap_plane) {
305 dev_err(dev->dev, "could not allocate plane\n"); 435 dev_err(dev->dev, "could not allocate plane\n");
306 goto fail; 436 goto fail;
307 } 437 }
308 438
439 mutex_init(&omap_plane->unpin_mutex);
440
441 ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
442 if (ret) {
443 dev_err(dev->dev, "could not allocate unpin FIFO\n");
444 goto fail;
445 }
446
447 INIT_WORK(&omap_plane->work, unpin_worker);
448
449 omap_plane->nformats = omap_framebuffer_get_formats(
450 omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
451 ovl->supported_modes);
309 omap_plane->ovl = ovl; 452 omap_plane->ovl = ovl;
310 plane = &omap_plane->base; 453 plane = &omap_plane->base;
311 454
312 drm_plane_init(dev, plane, possible_crtcs, &omap_plane_funcs, 455 drm_plane_init(dev, plane, possible_crtcs, &omap_plane_funcs,
313 formats, ARRAY_SIZE(formats), priv); 456 omap_plane->formats, omap_plane->nformats, priv);
314 457
315 /* get our starting configuration, set defaults for parameters 458 /* get our starting configuration, set defaults for parameters
316 * we don't currently use, etc: 459 * we don't currently use, etc:
@@ -330,7 +473,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
330 if (priv) 473 if (priv)
331 omap_plane->info.zorder = 0; 474 omap_plane->info.zorder = 0;
332 else 475 else
333 omap_plane->info.zorder = 1; 476 omap_plane->info.zorder = ovl->id;
334 477
335 update_manager(plane); 478 update_manager(plane);
336 479
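
The plane changes above defer unpins through a kfifo: unpin() only queues the buffer (taking a reference so it stays alive), and unpin_worker() drains the fifo once the END_WIN irq says scanout is done. A condensed kernel-style sketch of that producer/consumer shape, using the same 3.x-era kfifo calls as the diff; struct obj and the obj_* helpers are hypothetical stand-ins for struct drm_gem_object and its pin/ref functions:

#include <linux/kfifo.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct obj;			/* stand-in for struct drm_gem_object */
void obj_get(struct obj *o);	/* take a reference */
void obj_put_paddr(struct obj *o);	/* drop the scanout pin */

struct deferred_unpin {
	struct mutex lock;
	DECLARE_KFIFO_PTR(fifo, struct obj *);
	struct work_struct work;
};

/* producer: called while swapping scanout buffers */
static void defer_unpin(struct deferred_unpin *d, struct obj *o)
{
	mutex_lock(&d->lock);
	if (kfifo_put(&d->fifo, (const struct obj **)&o))
		obj_get(o);		/* hold a ref until drained */
	else
		obj_put_paddr(o);	/* fifo full: unpin immediately */
	mutex_unlock(&d->lock);
}

/* consumer: queued from the scanout-done irq handler */
static void drain_worker(struct work_struct *work)
{
	struct deferred_unpin *d =
		container_of(work, struct deferred_unpin, work);
	struct obj *o;

	mutex_lock(&d->lock);
	while (kfifo_get(&d->fifo, &o))
		obj_put_paddr(o);	/* matching ref drop elided */
	mutex_unlock(&d->lock);
}

/* init: mutex_init(&d->lock); kfifo_alloc(&d->fifo, 16, GFP_KERNEL);
 * INIT_WORK(&d->work, drain_worker); */

The key property, as the commit's comment notes, is that the fifo decouples "stop scanning out buffer A" from "release buffer A", so the release can wait for the hardware to confirm the window is done.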
diff --git a/drivers/staging/ozwpan/Kbuild b/drivers/staging/ozwpan/Kbuild
new file mode 100644
index 000000000000..6cc84cb3f0a6
--- /dev/null
+++ b/drivers/staging/ozwpan/Kbuild
@@ -0,0 +1,19 @@
1# -----------------------------------------------------------------------------
2# Copyright (c) 2011 Ozmo Inc
3# Released under the GNU General Public License Version 2 (GPLv2).
4# -----------------------------------------------------------------------------
5obj-$(CONFIG_USB_WPAN_HCD) += ozwpan.o
6ozwpan-y := \
7 ozmain.o \
8 ozpd.o \
9 ozusbsvc.o \
10 ozusbsvc1.o \
11 ozhcd.o \
12 ozeltbuf.o \
13 ozproto.o \
14 ozcdev.o \
15 ozurbparanoia.o \
16 oztrace.o \
17 ozevent.o
18
19
diff --git a/drivers/staging/ozwpan/Kconfig b/drivers/staging/ozwpan/Kconfig
new file mode 100644
index 000000000000..7904caec546a
--- /dev/null
+++ b/drivers/staging/ozwpan/Kconfig
@@ -0,0 +1,9 @@
1config USB_WPAN_HCD
2 tristate "USB over WiFi Host Controller"
3 depends on USB && NET
4 help
5 A driver for USB Host Controllers that are compatible with
6 Ozmo Devices USB over WiFi technology.
7
8	  To compile this driver as a module, choose M here: the module
9 will be called "ozwpan".
diff --git a/drivers/staging/ozwpan/README b/drivers/staging/ozwpan/README
new file mode 100644
index 000000000000..bb1a69b94541
--- /dev/null
+++ b/drivers/staging/ozwpan/README
@@ -0,0 +1,25 @@
1OZWPAN USB Host Controller Driver
2---------------------------------
3This driver is a USB HCD driver that does not have an associated physical
4device but instead uses Wi-Fi to communicate with the wireless peripheral.
5The USB requests are converted into a layer 2 network protocol and transmitted
6on the network using an ethertype (0x892e) registered to Ozmo Devices Inc.
7This driver is compatible with existing wireless devices that use Ozmo Devices
8technology.
9
10To operate, the driver must be bound to a suitable network interface. This can
11be done when the module is loaded (specifying the name of the network interface
12as a parameter - e.g. 'insmod ozwpan g_net_dev=go0') or after loading using an
13ioctl call. See the ozappif.h file and the ioctls OZ_IOCTL_ADD_BINDING and
14OZ_IOCTL_REMOVE_BINDING; a minimal usage sketch follows this README.
15
16The devices that connect to the host use Wi-Fi Direct, so a network card that
17supports Wi-Fi Direct is required. A recent version (0.8.x or later) of
18wpa_supplicant can be used to set up the network interface to create a
19persistent autonomous group (for older pre-WFD peripherals) or to put it in a
20listen state to allow group negotiation for more recent devices that support WFD.
21
22The protocol used over the network does not directly mimic the USB bus
23transactions, as this would be rather chatty and inefficient. Instead, the
24chapter 9 requests are converted into a request/response pair of messages. (See
25ozprotocol.h for the data structures used in the protocol.)
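
As referenced above, a minimal userspace sketch of the post-load binding path, using OZ_IOCTL_ADD_BINDING and struct oz_binding_info from ozappif.h. The /dev/ozwpan node path and the wlan0 interface name are assumptions (the node corresponds to the chrdev the driver registers as "ozwpan"; the actual path depends on your udev setup):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ozappif.h"	/* OZ_IOCTL_ADD_BINDING, struct oz_binding_info */

int main(void)
{
	struct oz_binding_info b;
	int fd = open("/dev/ozwpan", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&b, 0, sizeof(b));
	strncpy(b.name, "wlan0", OZ_MAX_BINDING_LEN - 1); /* your WFD iface */
	if (ioctl(fd, OZ_IOCTL_ADD_BINDING, &b) < 0)
		perror("OZ_IOCTL_ADD_BINDING");
	close(fd);
	return 0;
}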
diff --git a/drivers/staging/ozwpan/TODO b/drivers/staging/ozwpan/TODO
new file mode 100644
index 000000000000..f7a9c122f596
--- /dev/null
+++ b/drivers/staging/ozwpan/TODO
@@ -0,0 +1,12 @@
1TODO:
2 - review user mode interface and determine if ioctls can be replaced
3	  with something better. Correctly export data structures to user mode
4 if ioctls are still required and allocate ioctl numbers from
5 ioctl-number.txt.
6 - check USB HCD implementation is complete and correct.
7 - remove any debug and trace code.
8 - code review by USB developer community.
9 - testing with as many devices as possible.
10
11Please send any patches for this driver to Chris Kelly <ckelly@ozmodevices.com>
12and Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
diff --git a/drivers/staging/ozwpan/ozappif.h b/drivers/staging/ozwpan/ozappif.h
new file mode 100644
index 000000000000..af0273293872
--- /dev/null
+++ b/drivers/staging/ozwpan/ozappif.h
@@ -0,0 +1,46 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#ifndef _OZAPPIF_H
7#define _OZAPPIF_H
8
9#include "ozeventdef.h"
10
11#define OZ_IOCTL_MAGIC 0xf4
12
13struct oz_mac_addr {
14 unsigned char a[6];
15};
16
17#define OZ_MAX_PDS 8
18
19struct oz_pd_list {
20 int count;
21 struct oz_mac_addr addr[OZ_MAX_PDS];
22};
23
24#define OZ_MAX_BINDING_LEN 32
25
26struct oz_binding_info {
27 char name[OZ_MAX_BINDING_LEN];
28};
29
30struct oz_test {
31 int action;
32};
33
34#define OZ_IOCTL_GET_PD_LIST _IOR(OZ_IOCTL_MAGIC, 0, struct oz_pd_list)
35#define OZ_IOCTL_SET_ACTIVE_PD _IOW(OZ_IOCTL_MAGIC, 1, struct oz_mac_addr)
36#define OZ_IOCTL_GET_ACTIVE_PD _IOR(OZ_IOCTL_MAGIC, 2, struct oz_mac_addr)
37#define OZ_IOCTL_CLEAR_EVENTS _IO(OZ_IOCTL_MAGIC, 3)
38#define OZ_IOCTL_GET_EVENTS _IOR(OZ_IOCTL_MAGIC, 4, struct oz_evtlist)
39#define OZ_IOCTL_ADD_BINDING _IOW(OZ_IOCTL_MAGIC, 5, struct oz_binding_info)
40#define OZ_IOCTL_TEST _IOWR(OZ_IOCTL_MAGIC, 6, struct oz_test)
41#define OZ_IOCTL_SET_EVENT_MASK _IOW(OZ_IOCTL_MAGIC, 7, unsigned long)
42#define OZ_IOCTL_REMOVE_BINDING _IOW(OZ_IOCTL_MAGIC, 8, struct oz_binding_info)
43#define OZ_IOCTL_MAX 9
44
45
46#endif /* _OZAPPIF_H */
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
new file mode 100644
index 000000000000..1c380d687963
--- /dev/null
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -0,0 +1,521 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#include <linux/module.h>
7#include <linux/fs.h>
8#include <linux/cdev.h>
9#include <linux/uaccess.h>
10#include <linux/netdevice.h>
11#include <linux/poll.h>
12#include <linux/sched.h>
13#include "ozconfig.h"
14#include "ozprotocol.h"
15#include "oztrace.h"
16#include "ozappif.h"
17#include "ozeltbuf.h"
18#include "ozpd.h"
19#include "ozproto.h"
20#include "ozevent.h"
21/*------------------------------------------------------------------------------
22 */
23#define OZ_RD_BUF_SZ 256
24struct oz_cdev {
25 dev_t devnum;
26 struct cdev cdev;
27 wait_queue_head_t rdq;
28 spinlock_t lock;
29 u8 active_addr[ETH_ALEN];
30 struct oz_pd *active_pd;
31};
32
33/* Per PD context for the serial service stored in the PD. */
34struct oz_serial_ctx {
35 atomic_t ref_count;
36 u8 tx_seq_num;
37 u8 rx_seq_num;
38 u8 rd_buf[OZ_RD_BUF_SZ];
39 int rd_in;
40 int rd_out;
41};
42/*------------------------------------------------------------------------------
43 */
44int g_taction;
45/*------------------------------------------------------------------------------
46 */
47static struct oz_cdev g_cdev;
48/*------------------------------------------------------------------------------
49 * Context: process and softirq
50 */
51static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
52{
53 struct oz_serial_ctx *ctx;
54 spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
55 ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
56 if (ctx)
57 atomic_inc(&ctx->ref_count);
58 spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
59 return ctx;
60}
61/*------------------------------------------------------------------------------
62 * Context: softirq or process
63 */
64static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
65{
66 if (atomic_dec_and_test(&ctx->ref_count)) {
67 oz_trace("Dealloc serial context.\n");
68 kfree(ctx);
69 }
70}
71/*------------------------------------------------------------------------------
72 * Context: process
73 */
74int oz_cdev_open(struct inode *inode, struct file *filp)
75{
76 struct oz_cdev *dev;
77 oz_trace("oz_cdev_open()\n");
78 oz_trace("major = %d minor = %d\n", imajor(inode), iminor(inode));
79 dev = container_of(inode->i_cdev, struct oz_cdev, cdev);
80 filp->private_data = dev;
81 return 0;
82}
83/*------------------------------------------------------------------------------
84 * Context: process
85 */
86int oz_cdev_release(struct inode *inode, struct file *filp)
87{
88 oz_trace("oz_cdev_release()\n");
89 return 0;
90}
91/*------------------------------------------------------------------------------
92 * Context: process
93 */
94ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
95 loff_t *fpos)
96{
97 int n;
98 int ix;
99
100 struct oz_pd *pd;
101 struct oz_serial_ctx *ctx = 0;
102
103 spin_lock_bh(&g_cdev.lock);
104 pd = g_cdev.active_pd;
105 if (pd)
106 oz_pd_get(pd);
107 spin_unlock_bh(&g_cdev.lock);
108 if (pd == 0)
109 return -1;
110 ctx = oz_cdev_claim_ctx(pd);
111 if (ctx == 0)
112 goto out2;
113 n = ctx->rd_in - ctx->rd_out;
114 if (n < 0)
115 n += OZ_RD_BUF_SZ;
116 if (count > n)
117 count = n;
118 ix = ctx->rd_out;
119 n = OZ_RD_BUF_SZ - ix;
120 if (n > count)
121 n = count;
122 if (copy_to_user(buf, &ctx->rd_buf[ix], n)) {
123 count = 0;
124 goto out1;
125 }
126 ix += n;
127 if (ix == OZ_RD_BUF_SZ)
128 ix = 0;
129 if (n < count) {
130 if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) {
131 count = 0;
132 goto out1;
133 }
134 ix = count-n;
135 }
136 ctx->rd_out = ix;
137out1:
138 oz_cdev_release_ctx(ctx);
139out2:
140 oz_pd_put(pd);
141 return count;
142}
143/*------------------------------------------------------------------------------
144 * Context: process
145 */
146ssize_t oz_cdev_write(struct file *filp, const char __user *buf, size_t count,
147 loff_t *fpos)
148{
149 struct oz_pd *pd;
150 struct oz_elt_buf *eb;
151 struct oz_elt_info *ei = 0;
152 struct oz_elt *elt;
153 struct oz_app_hdr *app_hdr;
154 struct oz_serial_ctx *ctx;
155
156 spin_lock_bh(&g_cdev.lock);
157 pd = g_cdev.active_pd;
158 if (pd)
159 oz_pd_get(pd);
160 spin_unlock_bh(&g_cdev.lock);
161 if (pd == 0)
162 return -1;
163 eb = &pd->elt_buff;
164 ei = oz_elt_info_alloc(eb);
165 if (ei == 0) {
166 count = 0;
167 goto out;
168 }
169 elt = (struct oz_elt *)ei->data;
170 app_hdr = (struct oz_app_hdr *)(elt+1);
171 elt->length = sizeof(struct oz_app_hdr) + count;
172 elt->type = OZ_ELT_APP_DATA;
173 ei->app_id = OZ_APPID_SERIAL;
174 ei->length = elt->length + sizeof(struct oz_elt);
175 app_hdr->app_id = OZ_APPID_SERIAL;
176 if (copy_from_user(app_hdr+1, buf, count))
177 goto out;
178	spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
179 ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
180 if (ctx) {
181 app_hdr->elt_seq_num = ctx->tx_seq_num++;
182 if (ctx->tx_seq_num == 0)
183 ctx->tx_seq_num = 1;
184 spin_lock(&eb->lock);
185 if (oz_queue_elt_info(eb, 0, 0, ei) == 0)
186 ei = 0;
187 spin_unlock(&eb->lock);
188 }
189	spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
190out:
191 if (ei) {
192 count = 0;
193 spin_lock_bh(&eb->lock);
194 oz_elt_info_free(eb, ei);
195 spin_unlock_bh(&eb->lock);
196 }
197 oz_pd_put(pd);
198 return count;
199}
200/*------------------------------------------------------------------------------
201 * Context: process
202 */
203static int oz_set_active_pd(u8 *addr)
204{
205 int rc = 0;
206 struct oz_pd *pd;
207 struct oz_pd *old_pd;
208 pd = oz_pd_find(addr);
209 if (pd) {
210 spin_lock_bh(&g_cdev.lock);
211 memcpy(g_cdev.active_addr, addr, ETH_ALEN);
212 old_pd = g_cdev.active_pd;
213 g_cdev.active_pd = pd;
214 spin_unlock_bh(&g_cdev.lock);
215 if (old_pd)
216 oz_pd_put(old_pd);
217 } else {
218		if (!memcmp(addr, "\0\0\0\0\0\0", ETH_ALEN)) {
219 spin_lock_bh(&g_cdev.lock);
220 pd = g_cdev.active_pd;
221 g_cdev.active_pd = 0;
222 memset(g_cdev.active_addr, 0,
223 sizeof(g_cdev.active_addr));
224 spin_unlock_bh(&g_cdev.lock);
225 if (pd)
226 oz_pd_put(pd);
227 } else {
228 rc = -1;
229 }
230 }
231 return rc;
232}
233/*------------------------------------------------------------------------------
234 * Context: process
235 */
236long oz_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
237{
238 int rc = 0;
239 if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC)
240 return -ENOTTY;
241 if (_IOC_NR(cmd) > OZ_IOCTL_MAX)
242 return -ENOTTY;
243 if (_IOC_DIR(cmd) & _IOC_READ)
244 rc = !access_ok(VERIFY_WRITE, (void __user *)arg,
245 _IOC_SIZE(cmd));
246 else if (_IOC_DIR(cmd) & _IOC_WRITE)
247 rc = !access_ok(VERIFY_READ, (void __user *)arg,
248 _IOC_SIZE(cmd));
249 if (rc)
250 return -EFAULT;
251 switch (cmd) {
252 case OZ_IOCTL_GET_PD_LIST: {
253 struct oz_pd_list list;
254 oz_trace("OZ_IOCTL_GET_PD_LIST\n");
255 list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS);
256 if (copy_to_user((void __user *)arg, &list,
257 sizeof(list)))
258 return -EFAULT;
259 }
260 break;
261 case OZ_IOCTL_SET_ACTIVE_PD: {
262 u8 addr[ETH_ALEN];
263 oz_trace("OZ_IOCTL_SET_ACTIVE_PD\n");
264 if (copy_from_user(addr, (void __user *)arg, ETH_ALEN))
265 return -EFAULT;
266 rc = oz_set_active_pd(addr);
267 }
268 break;
269 case OZ_IOCTL_GET_ACTIVE_PD: {
270 u8 addr[ETH_ALEN];
271 oz_trace("OZ_IOCTL_GET_ACTIVE_PD\n");
272 spin_lock_bh(&g_cdev.lock);
273 memcpy(addr, g_cdev.active_addr, ETH_ALEN);
274 spin_unlock_bh(&g_cdev.lock);
275 if (copy_to_user((void __user *)arg, addr, ETH_ALEN))
276 return -EFAULT;
277 }
278 break;
279#ifdef WANT_EVENT_TRACE
280 case OZ_IOCTL_CLEAR_EVENTS:
281 oz_events_clear();
282 break;
283 case OZ_IOCTL_GET_EVENTS:
284 rc = oz_events_copy((void __user *)arg);
285 break;
286 case OZ_IOCTL_SET_EVENT_MASK:
287 if (copy_from_user(&g_evt_mask, (void __user *)arg,
288 sizeof(unsigned long))) {
289 return -EFAULT;
290 }
291 break;
292#endif /* WANT_EVENT_TRACE */
293 case OZ_IOCTL_ADD_BINDING:
294 case OZ_IOCTL_REMOVE_BINDING: {
295 struct oz_binding_info b;
296 if (copy_from_user(&b, (void __user *)arg,
297 sizeof(struct oz_binding_info))) {
298 return -EFAULT;
299 }
300 /* Make sure name is null terminated. */
301 b.name[OZ_MAX_BINDING_LEN-1] = 0;
302 if (cmd == OZ_IOCTL_ADD_BINDING)
303 oz_binding_add(b.name);
304 else
305 oz_binding_remove(b.name);
306 }
307 break;
308 }
309 return rc;
310}
311/*------------------------------------------------------------------------------
312 * Context: process
313 */
314unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
315{
316 unsigned int ret = 0;
317 struct oz_cdev *dev = filp->private_data;
318 oz_trace("Poll called wait = %p\n", wait);
319 spin_lock_bh(&dev->lock);
320 if (dev->active_pd) {
321 struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd);
322 if (ctx) {
323 if (ctx->rd_in != ctx->rd_out)
324 ret |= POLLIN | POLLRDNORM;
325 oz_cdev_release_ctx(ctx);
326 }
327 }
328 spin_unlock_bh(&dev->lock);
329 if (wait)
330 poll_wait(filp, &dev->rdq, wait);
331 return ret;
332}
333/*------------------------------------------------------------------------------
334 */
335const struct file_operations oz_fops = {
336 .owner = THIS_MODULE,
337 .open = oz_cdev_open,
338 .release = oz_cdev_release,
339 .read = oz_cdev_read,
340 .write = oz_cdev_write,
341 .unlocked_ioctl = oz_cdev_ioctl,
342 .poll = oz_cdev_poll
343};
344/*------------------------------------------------------------------------------
345 * Context: process
346 */
347int oz_cdev_register(void)
348{
349 int err;
350 memset(&g_cdev, 0, sizeof(g_cdev));
351 err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan");
352 if (err < 0)
353 return err;
354 oz_trace("Alloc dev number %d:%d\n", MAJOR(g_cdev.devnum),
355 MINOR(g_cdev.devnum));
356 cdev_init(&g_cdev.cdev, &oz_fops);
357 g_cdev.cdev.owner = THIS_MODULE;
358 g_cdev.cdev.ops = &oz_fops;
359 spin_lock_init(&g_cdev.lock);
360 init_waitqueue_head(&g_cdev.rdq);
361 err = cdev_add(&g_cdev.cdev, g_cdev.devnum, 1);
362	return err;
363}
364/*------------------------------------------------------------------------------
365 * Context: process
366 */
367int oz_cdev_deregister(void)
368{
369 cdev_del(&g_cdev.cdev);
370 unregister_chrdev_region(g_cdev.devnum, 1);
371 return 0;
372}
373/*------------------------------------------------------------------------------
374 * Context: process
375 */
376int oz_cdev_init(void)
377{
378 oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_SERIAL, 0, 0);
379 oz_app_enable(OZ_APPID_SERIAL, 1);
380 return 0;
381}
382/*------------------------------------------------------------------------------
383 * Context: process
384 */
385void oz_cdev_term(void)
386{
387 oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_SERIAL, 0, 0);
388 oz_app_enable(OZ_APPID_SERIAL, 0);
389}
390/*------------------------------------------------------------------------------
391 * Context: softirq-serialized
392 */
393int oz_cdev_start(struct oz_pd *pd, int resume)
394{
395 struct oz_serial_ctx *ctx;
396 struct oz_serial_ctx *old_ctx = 0;
397 oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_SERIAL, 0, resume);
398 if (resume) {
399 oz_trace("Serial service resumed.\n");
400 return 0;
401 }
402 ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC);
403 if (ctx == 0)
404 return -ENOMEM;
405 atomic_set(&ctx->ref_count, 1);
406 ctx->tx_seq_num = 1;
407 spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
408 old_ctx = pd->app_ctx[OZ_APPID_SERIAL-1];
409 if (old_ctx) {
410 spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
411 kfree(ctx);
412 } else {
413 pd->app_ctx[OZ_APPID_SERIAL-1] = ctx;
414 spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
415 }
416 spin_lock(&g_cdev.lock);
417 if ((g_cdev.active_pd == 0) &&
418 (memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) {
419 oz_pd_get(pd);
420 g_cdev.active_pd = pd;
421 oz_trace("Active PD arrived.\n");
422 }
423 spin_unlock(&g_cdev.lock);
424 oz_trace("Serial service started.\n");
425 return 0;
426}
427/*------------------------------------------------------------------------------
428 * Context: softirq or process
429 */
430void oz_cdev_stop(struct oz_pd *pd, int pause)
431{
432 struct oz_serial_ctx *ctx;
433 oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_SERIAL, 0, pause);
434 if (pause) {
435 oz_trace("Serial service paused.\n");
436 return;
437 }
438 spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
439 ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
440 pd->app_ctx[OZ_APPID_SERIAL-1] = 0;
441 spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
442 if (ctx)
443 oz_cdev_release_ctx(ctx);
444 spin_lock(&g_cdev.lock);
445 if (pd == g_cdev.active_pd)
446 g_cdev.active_pd = 0;
447 else
448 pd = 0;
449 spin_unlock(&g_cdev.lock);
450 if (pd) {
451 oz_pd_put(pd);
452 oz_trace("Active PD departed.\n");
453 }
454 oz_trace("Serial service stopped.\n");
455}
456/*------------------------------------------------------------------------------
457 * Context: softirq-serialized
458 */
459void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
460{
461 struct oz_serial_ctx *ctx;
462 struct oz_app_hdr *app_hdr;
463 u8 *data;
464 int len;
465 int space;
466 int copy_sz;
467 int ix;
468
469 ctx = oz_cdev_claim_ctx(pd);
470 if (ctx == 0) {
471 oz_trace("Cannot claim serial context.\n");
472 return;
473 }
474
475 app_hdr = (struct oz_app_hdr *)(elt+1);
476 /* If sequence number is non-zero then check it is not a duplicate.
477 */
478 if (app_hdr->elt_seq_num != 0) {
479 if (((ctx->rx_seq_num - app_hdr->elt_seq_num) & 0x80) == 0) {
480 /* Reject duplicate element. */
481 oz_trace("Duplicate element:%02x %02x\n",
482 app_hdr->elt_seq_num, ctx->rx_seq_num);
483 goto out;
484 }
485 }
486 ctx->rx_seq_num = app_hdr->elt_seq_num;
487 len = elt->length - sizeof(struct oz_app_hdr);
488 data = ((u8 *)(elt+1)) + sizeof(struct oz_app_hdr);
489 if (len <= 0)
490 goto out;
491 space = ctx->rd_out - ctx->rd_in - 1;
492 if (space < 0)
493 space += OZ_RD_BUF_SZ;
494 if (len > space) {
495 oz_trace("Not enough space:%d %d\n", len, space);
496 len = space;
497 }
498 ix = ctx->rd_in;
499 copy_sz = OZ_RD_BUF_SZ - ix;
500 if (copy_sz > len)
501 copy_sz = len;
502 memcpy(&ctx->rd_buf[ix], data, copy_sz);
503 len -= copy_sz;
504 ix += copy_sz;
505 if (ix == OZ_RD_BUF_SZ)
506 ix = 0;
507 if (len) {
508 memcpy(ctx->rd_buf, data+copy_sz, len);
509 ix = len;
510 }
511 ctx->rd_in = ix;
512 wake_up(&g_cdev.rdq);
513out:
514 oz_cdev_release_ctx(ctx);
515}
516/*------------------------------------------------------------------------------
517 * Context: softirq
518 */
519void oz_cdev_heartbeat(struct oz_pd *pd)
520{
521}
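
oz_cdev_read() and oz_cdev_rx() above share a classic single-producer/single-consumer ring: rd_in and rd_out chase each other around OZ_RD_BUF_SZ, the fill level is in - out (plus the size on wrap), the free space is one byte less so that full and empty stay distinguishable, and every copy is split in two at the wrap point. A standalone sketch of the same index arithmetic on a deliberately tiny ring:

#include <stdio.h>
#include <string.h>

#define SZ 8	/* tiny ring so the wrap is visible */
static char buf[SZ];
static int in, out;	/* producer writes at in, consumer reads at out */

static int ring_write(const char *data, int len)
{
	int space = out - in - 1;
	if (space < 0)
		space += SZ;		/* indices have wrapped */
	if (len > space)
		len = space;		/* drop what doesn't fit */
	int first = SZ - in;
	if (first > len)
		first = len;
	memcpy(&buf[in], data, first);		/* up to the end */
	memcpy(buf, data + first, len - first);	/* wrapped remainder */
	in = (in + len) % SZ;
	return len;
}

static int ring_read(char *data, int len)
{
	int avail = in - out;
	if (avail < 0)
		avail += SZ;
	if (len > avail)
		len = avail;
	int first = SZ - out;
	if (first > len)
		first = len;
	memcpy(data, &buf[out], first);
	memcpy(data + first, buf, len - first);
	out = (out + len) % SZ;
	return len;
}

int main(void)
{
	char tmp[SZ];
	printf("wrote %d\n", ring_write("abcdefgh", 8));	/* only 7 fit */
	printf("read  %d\n", ring_read(tmp, 4));
	printf("wrote %d\n", ring_write("wxyz", 4));		/* wraps */
	printf("got   %.4s\n", tmp);
	return 0;
}

The "- 1" in the space calculation is why the driver can tell a full ring (in one behind out) from an empty one (in == out) without a separate count field.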
diff --git a/drivers/staging/ozwpan/ozcdev.h b/drivers/staging/ozwpan/ozcdev.h
new file mode 100644
index 000000000000..698014bb8d72
--- /dev/null
+++ b/drivers/staging/ozwpan/ozcdev.h
@@ -0,0 +1,18 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#ifndef _OZCDEV_H
7#define _OZCDEV_H
8
9int oz_cdev_register(void);
10int oz_cdev_deregister(void);
11int oz_cdev_init(void);
12void oz_cdev_term(void);
13int oz_cdev_start(struct oz_pd *pd, int resume);
14void oz_cdev_stop(struct oz_pd *pd, int pause);
15void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt);
16void oz_cdev_heartbeat(struct oz_pd *pd);
17
18#endif /* _OZCDEV_H */
diff --git a/drivers/staging/ozwpan/ozconfig.h b/drivers/staging/ozwpan/ozconfig.h
new file mode 100644
index 000000000000..43e6373a009c
--- /dev/null
+++ b/drivers/staging/ozwpan/ozconfig.h
@@ -0,0 +1,27 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * ---------------------------------------------------------------------------*/
5#ifndef _OZCONFIG_H
6#define _OZCONFIG_H
7
8/* #define WANT_TRACE */
9#ifdef WANT_TRACE
10#define WANT_VERBOSE_TRACE
11#endif /* #ifdef WANT_TRACE */
12/* #define WANT_URB_PARANOIA */
13
14/* #define WANT_PRE_2_6_39 */
15#define WANT_EVENT_TRACE
16
17/* These defines determine what verbose trace is displayed. */
18#ifdef WANT_VERBOSE_TRACE
19/* #define WANT_TRACE_STREAM */
20/* #define WANT_TRACE_URB */
21/* #define WANT_TRACE_CTRL_DETAIL */
22#define WANT_TRACE_HUB
23/* #define WANT_TRACE_RX_FRAMES */
24/* #define WANT_TRACE_TX_FRAMES */
25#endif /* WANT_VERBOSE_TRACE */
26
27#endif /* _OZCONFIG_H */
diff --git a/drivers/staging/ozwpan/ozeltbuf.c b/drivers/staging/ozwpan/ozeltbuf.c
new file mode 100644
index 000000000000..988f522475d9
--- /dev/null
+++ b/drivers/staging/ozwpan/ozeltbuf.c
@@ -0,0 +1,339 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#include <linux/init.h>
7#include <linux/module.h>
8#include <linux/netdevice.h>
9#include "ozconfig.h"
10#include "ozprotocol.h"
11#include "ozeltbuf.h"
12#include "ozpd.h"
13#include "oztrace.h"
14/*------------------------------------------------------------------------------
15 */
16#define OZ_ELT_INFO_MAGIC_USED 0x35791057
17#define OZ_ELT_INFO_MAGIC_FREE 0x78940102
18/*------------------------------------------------------------------------------
19 * Context: softirq-serialized
20 */
21int oz_elt_buf_init(struct oz_elt_buf *buf)
22{
23 memset(buf, 0, sizeof(struct oz_elt_buf));
24 INIT_LIST_HEAD(&buf->stream_list);
25 INIT_LIST_HEAD(&buf->order_list);
26 INIT_LIST_HEAD(&buf->isoc_list);
27 buf->max_free_elts = 32;
28 spin_lock_init(&buf->lock);
29 return 0;
30}
31/*------------------------------------------------------------------------------
32 * Context: softirq or process
33 */
34void oz_elt_buf_term(struct oz_elt_buf *buf)
35{
36 struct list_head *e;
37 int i;
38 /* Free any elements in the order or isoc lists. */
39 for (i = 0; i < 2; i++) {
40 struct list_head *list;
41 if (i)
42 list = &buf->order_list;
43 else
44 list = &buf->isoc_list;
45 e = list->next;
46 while (e != list) {
47 struct oz_elt_info *ei =
48 container_of(e, struct oz_elt_info, link_order);
49 e = e->next;
50 kfree(ei);
51 }
52 }
53	/* Free any elements in the pool. */
54 while (buf->elt_pool) {
55 struct oz_elt_info *ei =
56 container_of(buf->elt_pool, struct oz_elt_info, link);
57 buf->elt_pool = buf->elt_pool->next;
58 kfree(ei);
59 }
60 buf->free_elts = 0;
61}
62/*------------------------------------------------------------------------------
63 * Context: softirq or process
64 */
65struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
66{
67 struct oz_elt_info *ei = 0;
68 spin_lock_bh(&buf->lock);
69 if (buf->free_elts && buf->elt_pool) {
70 ei = container_of(buf->elt_pool, struct oz_elt_info, link);
71 buf->elt_pool = ei->link.next;
72 buf->free_elts--;
73 spin_unlock_bh(&buf->lock);
74 if (ei->magic != OZ_ELT_INFO_MAGIC_FREE) {
75 oz_trace("oz_elt_info_alloc: ei with bad magic: 0x%x\n",
76 ei->magic);
77 }
78 } else {
79 spin_unlock_bh(&buf->lock);
80 ei = kmalloc(sizeof(struct oz_elt_info), GFP_ATOMIC);
81 }
82 if (ei) {
83 ei->flags = 0;
84 ei->app_id = 0;
85 ei->callback = 0;
86 ei->context = 0;
87 ei->stream = 0;
88 ei->magic = OZ_ELT_INFO_MAGIC_USED;
89 INIT_LIST_HEAD(&ei->link);
90 INIT_LIST_HEAD(&ei->link_order);
91 }
92 return ei;
93}
94/*------------------------------------------------------------------------------
95 * Precondition: oz_elt_buf.lock must be held.
96 * Context: softirq or process
97 */
98void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei)
99{
100 if (ei) {
101 if (ei->magic == OZ_ELT_INFO_MAGIC_USED) {
102 buf->free_elts++;
103 ei->link.next = buf->elt_pool;
104 buf->elt_pool = &ei->link;
105 ei->magic = OZ_ELT_INFO_MAGIC_FREE;
106 } else {
107 oz_trace("oz_elt_info_free: bad magic ei: %p"
108 " magic: 0x%x\n",
109 ei, ei->magic);
110 }
111 }
112}
113/*------------------------------------------------------------------------------
114 * Context: softirq
115 */
116void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list)
117{
118 struct list_head *e;
119 e = list->next;
120 spin_lock_bh(&buf->lock);
121 while (e != list) {
122 struct oz_elt_info *ei;
123 ei = container_of(e, struct oz_elt_info, link);
124 e = e->next;
125 oz_elt_info_free(buf, ei);
126 }
127 spin_unlock_bh(&buf->lock);
128}
129/*------------------------------------------------------------------------------
130 */
131int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
132{
133 struct oz_elt_stream *st;
134
135 oz_trace("oz_elt_stream_create(0x%x)\n", id);
136
 137	st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC);
138 if (st == 0)
139 return -ENOMEM;
140 atomic_set(&st->ref_count, 1);
141 st->id = id;
142 st->max_buf_count = max_buf_count;
143 INIT_LIST_HEAD(&st->elt_list);
144 spin_lock_bh(&buf->lock);
145 list_add_tail(&st->link, &buf->stream_list);
146 spin_unlock_bh(&buf->lock);
147 return 0;
148}
149/*------------------------------------------------------------------------------
150 */
151int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
152{
153 struct list_head *e;
 154	struct oz_elt_stream *st = 0;
155 oz_trace("oz_elt_stream_delete(0x%x)\n", id);
156 spin_lock_bh(&buf->lock);
157 e = buf->stream_list.next;
 158	for (; e != &buf->stream_list; e = e->next) {
159 st = container_of(e, struct oz_elt_stream, link);
160 if (st->id == id) {
161 list_del(e);
162 break;
163 }
164 st = 0;
165 }
166 if (!st) {
167 spin_unlock_bh(&buf->lock);
168 return -1;
169 }
170 e = st->elt_list.next;
171 while (e != &st->elt_list) {
172 struct oz_elt_info *ei =
173 container_of(e, struct oz_elt_info, link);
174 e = e->next;
175 list_del_init(&ei->link);
176 list_del_init(&ei->link_order);
177 st->buf_count -= ei->length;
178 oz_trace2(OZ_TRACE_STREAM, "Stream down: %d %d %d\n",
179 st->buf_count,
180 ei->length, atomic_read(&st->ref_count));
181 oz_elt_stream_put(st);
182 oz_elt_info_free(buf, ei);
183 }
184 spin_unlock_bh(&buf->lock);
185 oz_elt_stream_put(st);
186 return 0;
187}
188/*------------------------------------------------------------------------------
189 */
190void oz_elt_stream_get(struct oz_elt_stream *st)
191{
192 atomic_inc(&st->ref_count);
193}
194/*------------------------------------------------------------------------------
195 */
196void oz_elt_stream_put(struct oz_elt_stream *st)
197{
198 if (atomic_dec_and_test(&st->ref_count)) {
199 oz_trace("Stream destroyed\n");
200 kfree(st);
201 }
202}
203/*------------------------------------------------------------------------------
204 * Precondition: Element buffer lock must be held.
205 * If this function fails the caller is responsible for deallocating the elt
206 * info structure.
207 */
208int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
209 struct oz_elt_info *ei)
210{
211 struct oz_elt_stream *st = 0;
212 struct list_head *e;
213 if (id) {
214 list_for_each(e, &buf->stream_list) {
215 st = container_of(e, struct oz_elt_stream, link);
216 if (st->id == id)
217 break;
218 }
219 if (e == &buf->stream_list) {
220 /* Stream specified but stream not known so fail.
221 * Caller deallocates element info. */
222 return -1;
223 }
224 }
225 if (st) {
226 /* If this is an ISOC fixed element that needs a frame number
227 * then insert that now. Earlier we stored the unit count in
228 * this field.
229 */
230 struct oz_isoc_fixed *body = (struct oz_isoc_fixed *)
231 &ei->data[sizeof(struct oz_elt)];
232 if ((body->app_id == OZ_APPID_USB) && (body->type
233 == OZ_USB_ENDPOINT_DATA) &&
234 (body->format == OZ_DATA_F_ISOC_FIXED)) {
235 u8 unit_count = body->frame_number;
236 body->frame_number = st->frame_number;
237 st->frame_number += unit_count;
238 }
239 /* Claim stream and update accounts */
240 oz_elt_stream_get(st);
241 ei->stream = st;
242 st->buf_count += ei->length;
243 /* Add to list in stream. */
244 list_add_tail(&ei->link, &st->elt_list);
245 oz_trace2(OZ_TRACE_STREAM, "Stream up: %d %d\n",
246 st->buf_count, ei->length);
247 /* Check if we have too much buffered for this stream. If so
248 * start dropping elements until we are back in bounds.
249 */
250 while ((st->buf_count > st->max_buf_count) &&
251 !list_empty(&st->elt_list)) {
252 struct oz_elt_info *ei2 =
253 list_first_entry(&st->elt_list,
254 struct oz_elt_info, link);
255 list_del_init(&ei2->link);
256 list_del_init(&ei2->link_order);
257 st->buf_count -= ei2->length;
258 oz_elt_info_free(buf, ei2);
259 oz_elt_stream_put(st);
260 }
261 }
262 list_add_tail(&ei->link_order, isoc ?
263 &buf->isoc_list : &buf->order_list);
264 return 0;
265}
266/*------------------------------------------------------------------------------
267 */
268int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
269 unsigned max_len, struct list_head *list)
270{
271 int count = 0;
272 struct list_head *e;
273 struct list_head *el;
274 struct oz_elt_info *ei;
275 spin_lock_bh(&buf->lock);
276 if (isoc)
277 el = &buf->isoc_list;
278 else
279 el = &buf->order_list;
280 e = el->next;
281 while (e != el) {
282 struct oz_app_hdr *app_hdr;
283 ei = container_of(e, struct oz_elt_info, link_order);
284 e = e->next;
285 if ((*len + ei->length) <= max_len) {
286 app_hdr = (struct oz_app_hdr *)
287 &ei->data[sizeof(struct oz_elt)];
288 app_hdr->elt_seq_num = buf->tx_seq_num[ei->app_id]++;
289 if (buf->tx_seq_num[ei->app_id] == 0)
290 buf->tx_seq_num[ei->app_id] = 1;
291 *len += ei->length;
292 list_del(&ei->link);
293 list_del(&ei->link_order);
294 if (ei->stream) {
295 ei->stream->buf_count -= ei->length;
296 oz_trace2(OZ_TRACE_STREAM,
297 "Stream down: %d %d\n",
298 ei->stream->buf_count, ei->length);
299 oz_elt_stream_put(ei->stream);
300 ei->stream = 0;
301 }
302 INIT_LIST_HEAD(&ei->link_order);
303 list_add_tail(&ei->link, list);
304 count++;
305 } else {
306 break;
307 }
308 }
309 spin_unlock_bh(&buf->lock);
310 return count;
311}
312/*------------------------------------------------------------------------------
313 */
314int oz_are_elts_available(struct oz_elt_buf *buf)
315{
316 return buf->order_list.next != &buf->order_list;
317}
318/*------------------------------------------------------------------------------
319 */
320void oz_trim_elt_pool(struct oz_elt_buf *buf)
321{
322 struct list_head *free = 0;
323 struct list_head *e;
324 spin_lock_bh(&buf->lock);
325 while (buf->free_elts > buf->max_free_elts) {
326 e = buf->elt_pool;
327 buf->elt_pool = e->next;
328 e->next = free;
329 free = e;
330 buf->free_elts--;
331 }
332 spin_unlock_bh(&buf->lock);
333 while (free) {
334 struct oz_elt_info *ei =
335 container_of(free, struct oz_elt_info, link);
336 free = free->next;
337 kfree(ei);
338 }
339}
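
One detail of ozeltbuf.c worth calling out: buf->elt_pool uses only the .next field of struct list_head, treating it as a singly linked LIFO free stack rather than a circular list, which is why the push in oz_elt_info_free() and the pop in oz_elt_info_alloc() never touch .prev:

	/* push (lock held) */
	ei->link.next = buf->elt_pool;
	buf->elt_pool = &ei->link;

	/* pop (lock held) */
	ei = container_of(buf->elt_pool, struct oz_elt_info, link);
	buf->elt_pool = ei->link.next;

The MAGIC_USED/MAGIC_FREE values act as a cheap double-free and use-after-free detector around this pool.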
diff --git a/drivers/staging/ozwpan/ozeltbuf.h b/drivers/staging/ozwpan/ozeltbuf.h
new file mode 100644
index 000000000000..03c12f57b9bb
--- /dev/null
+++ b/drivers/staging/ozwpan/ozeltbuf.h
@@ -0,0 +1,70 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#ifndef _OZELTBUF_H
7#define _OZELTBUF_H
8
9#include "ozprotocol.h"
10
11/*-----------------------------------------------------------------------------
12 */
13struct oz_pd;
14typedef void (*oz_elt_callback_t)(struct oz_pd *pd, long context);
15
16struct oz_elt_stream {
17 struct list_head link;
18 struct list_head elt_list;
19 atomic_t ref_count;
20 unsigned buf_count;
21 unsigned max_buf_count;
22 u8 frame_number;
23 u8 id;
24};
25
26#define OZ_MAX_ELT_PAYLOAD 255
27struct oz_elt_info {
28 struct list_head link;
29 struct list_head link_order;
30 u8 flags;
31 u8 app_id;
32 oz_elt_callback_t callback;
33 long context;
34 struct oz_elt_stream *stream;
35 u8 data[sizeof(struct oz_elt) + OZ_MAX_ELT_PAYLOAD];
36 int length;
37 unsigned magic;
38};
39/* Flags values */
40#define OZ_EI_F_MARKED 0x1
41
42struct oz_elt_buf {
43 spinlock_t lock;
44 struct list_head stream_list;
45 struct list_head order_list;
46 struct list_head isoc_list;
47 struct list_head *elt_pool;
48 int free_elts;
49 int max_free_elts;
50 u8 tx_seq_num[OZ_NB_APPS];
51};
52
53int oz_elt_buf_init(struct oz_elt_buf *buf);
54void oz_elt_buf_term(struct oz_elt_buf *buf);
55struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf);
56void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei);
57void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list);
58int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count);
59int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id);
60void oz_elt_stream_get(struct oz_elt_stream *st);
61void oz_elt_stream_put(struct oz_elt_stream *st);
62int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
63 struct oz_elt_info *ei);
64int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
65 unsigned max_len, struct list_head *list);
66int oz_are_elts_available(struct oz_elt_buf *buf);
67void oz_trim_elt_pool(struct oz_elt_buf *buf);
68
69#endif /* _OZELTBUF_H */
70
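
A minimal sketch, not part of the patch, of how this API fits together on a transmit path. It follows the preconditions documented in ozeltbuf.c: oz_queue_elt_info() and oz_elt_info_free() expect buf->lock to be held, while oz_elt_info_alloc(), oz_select_elts_for_tx() and oz_elt_info_free_chain() take the lock themselves. The stream size and frame budget are made-up numbers:

	static int example_tx_path(struct oz_elt_buf *eb, u8 stream_id)
	{
		struct oz_elt_info *ei;
		struct list_head selected;
		unsigned total = 0;

		/* eb is assumed to have been set up with oz_elt_buf_init(). */
		if (oz_elt_stream_create(eb, stream_id, 4096))
			return -ENOMEM;
		ei = oz_elt_info_alloc(eb);
		if (ei == 0)
			return -ENOMEM;
		/* Fill ei->data with a struct oz_elt header plus payload and
		 * set ei->length accordingly (elided). */
		spin_lock_bh(&eb->lock);
		if (oz_queue_elt_info(eb, 0, stream_id, ei)) {
			/* On failure the caller still owns ei. */
			oz_elt_info_free(eb, ei);
			spin_unlock_bh(&eb->lock);
			return -EIO;
		}
		spin_unlock_bh(&eb->lock);
		INIT_LIST_HEAD(&selected);
		if (oz_select_elts_for_tx(eb, 0, &total, 1500, &selected)) {
			/* ...copy the selected elements into a frame and
			 * hand it to the network layer (elided)... */
			oz_elt_info_free_chain(eb, &selected);
		}
		return 0;
	}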
diff --git a/drivers/staging/ozwpan/ozevent.c b/drivers/staging/ozwpan/ozevent.c
new file mode 100644
index 000000000000..73703d3e96bd
--- /dev/null
+++ b/drivers/staging/ozwpan/ozevent.c
@@ -0,0 +1,116 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#include "ozconfig.h"
7#ifdef WANT_EVENT_TRACE
8#include <linux/jiffies.h>
9#include <linux/uaccess.h>
10#include "oztrace.h"
11#include "ozevent.h"
12/*------------------------------------------------------------------------------
13 */
14unsigned long g_evt_mask = 0xffffffff;
15/*------------------------------------------------------------------------------
16 */
17#define OZ_MAX_EVTS 2048 /* Must be power of 2 */
18DEFINE_SPINLOCK(g_eventlock);
19static int g_evt_in;
20static int g_evt_out;
21static int g_missed_events;
22static struct oz_event g_events[OZ_MAX_EVTS];
23/*------------------------------------------------------------------------------
24 * Context: process
25 */
26void oz_event_init(void)
27{
28 oz_trace("Event tracing initialized\n");
29 g_evt_in = g_evt_out = 0;
30 g_missed_events = 0;
31}
32/*------------------------------------------------------------------------------
33 * Context: process
34 */
35void oz_event_term(void)
36{
37 oz_trace("Event tracing terminated\n");
38}
39/*------------------------------------------------------------------------------
40 * Context: any
41 */
42void oz_event_log2(u8 evt, u8 ctx1, u16 ctx2, void *ctx3, unsigned ctx4)
43{
44 unsigned long irqstate;
45 int ix;
46 spin_lock_irqsave(&g_eventlock, irqstate);
47 ix = (g_evt_in + 1) & (OZ_MAX_EVTS - 1);
48 if (ix != g_evt_out) {
49 struct oz_event *e = &g_events[g_evt_in];
50 e->jiffies = jiffies;
51 e->evt = evt;
52 e->ctx1 = ctx1;
53 e->ctx2 = ctx2;
54 e->ctx3 = ctx3;
55 e->ctx4 = ctx4;
56 g_evt_in = ix;
57 } else {
58 g_missed_events++;
59 }
60 spin_unlock_irqrestore(&g_eventlock, irqstate);
61}
62/*------------------------------------------------------------------------------
63 * Context: process
64 */
65int oz_events_copy(struct oz_evtlist __user *lst)
66{
67 int first;
68 int ix;
69 struct hdr {
70 int count;
71 int missed;
72 } hdr;
73 ix = g_evt_out;
74 hdr.count = g_evt_in - ix;
75 if (hdr.count < 0)
76 hdr.count += OZ_MAX_EVTS;
77 if (hdr.count > OZ_EVT_LIST_SZ)
78 hdr.count = OZ_EVT_LIST_SZ;
79 hdr.missed = g_missed_events;
80 g_missed_events = 0;
81 if (copy_to_user((void __user *)lst, &hdr, sizeof(hdr)))
82 return -EFAULT;
83 first = OZ_MAX_EVTS - ix;
84 if (first > hdr.count)
85 first = hdr.count;
86 if (first) {
87 int sz = first*sizeof(struct oz_event);
88 void __user *p = (void __user *)lst->evts;
89 if (copy_to_user(p, &g_events[ix], sz))
90 return -EFAULT;
91 if (hdr.count > first) {
92 p = (void __user *)&lst->evts[first];
93 sz = (hdr.count-first)*sizeof(struct oz_event);
94 if (copy_to_user(p, g_events, sz))
95 return -EFAULT;
96 }
97 }
98 ix += hdr.count;
99 if (ix >= OZ_MAX_EVTS)
100 ix -= OZ_MAX_EVTS;
101 g_evt_out = ix;
102 return 0;
103}
104/*------------------------------------------------------------------------------
105 * Context: process
106 */
107void oz_events_clear(void)
108{
109 unsigned long irqstate;
110 spin_lock_irqsave(&g_eventlock, irqstate);
111 g_evt_in = g_evt_out = 0;
112 g_missed_events = 0;
113 spin_unlock_irqrestore(&g_eventlock, irqstate);
114}
115#endif /* WANT_EVENT_TRACE */
116
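
Two properties of this ring are worth noting. Because OZ_MAX_EVTS is a power of two, all wrapping is a single AND rather than a modulo, and one slot is always left unused so that g_evt_in == g_evt_out unambiguously means empty; that is why oz_event_log2() computes the next write index first and drops the event (counting it in g_missed_events) if that index would land on g_evt_out. The occupancy calculation, as a hypothetical helper:

	static inline int oz_evt_count(void)
	{
		return (g_evt_in - g_evt_out) & (OZ_MAX_EVTS - 1);
	}

Note also that oz_events_copy() reads the indices without taking g_eventlock; it appears to rely on there being a single process-context reader and on aligned int loads being atomic.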
diff --git a/drivers/staging/ozwpan/ozevent.h b/drivers/staging/ozwpan/ozevent.h
new file mode 100644
index 000000000000..f033d014c6f3
--- /dev/null
+++ b/drivers/staging/ozwpan/ozevent.h
@@ -0,0 +1,31 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#ifndef _OZEVENT_H
7#define _OZEVENT_H
8#include "ozconfig.h"
9#include "ozeventdef.h"
10
11#ifdef WANT_EVENT_TRACE
12extern unsigned long g_evt_mask;
13void oz_event_init(void);
14void oz_event_term(void);
15void oz_event_log2(u8 evt, u8 ctx1, u16 ctx2, void *ctx3, unsigned ctx4);
16#define oz_event_log(__evt, __ctx1, __ctx2, __ctx3, __ctx4) \
17 do { \
18 if ((1<<(__evt)) & g_evt_mask) \
19 oz_event_log2(__evt, __ctx1, __ctx2, __ctx3, __ctx4); \
20 } while (0)
21int oz_events_copy(struct oz_evtlist __user *lst);
22void oz_events_clear(void);
23#else
24#define oz_event_init()
25#define oz_event_term()
26#define oz_event_log(__evt, __ctx1, __ctx2, __ctx3, __ctx4)
27#define oz_events_copy(__lst)
28#define oz_events_clear()
29#endif /* WANT_EVENT_TRACE */
30
31#endif /* _OZEVENT_H */
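
As used in ozhcd.c below, a call site looks like:

	oz_event_log(OZ_EVT_URB_DONE, 0, 0, urb, status);

Because the wrapper tests (1<<evt) against g_evt_mask before calling oz_event_log2(), a masked-out event class costs one AND and a compare. The scheme does cap event codes at 31; the codes defined in ozeventdef.h stop at 20 (OZ_EVT_DEBUG), so there is headroom.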
diff --git a/drivers/staging/ozwpan/ozeventdef.h b/drivers/staging/ozwpan/ozeventdef.h
new file mode 100644
index 000000000000..a880288bab11
--- /dev/null
+++ b/drivers/staging/ozwpan/ozeventdef.h
@@ -0,0 +1,47 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#ifndef _OZEVENTDEF_H
7#define _OZEVENTDEF_H
8
9#define OZ_EVT_RX_FRAME 0
10#define OZ_EVT_RX_PROCESS 1
11#define OZ_EVT_TX_FRAME 2
12#define OZ_EVT_TX_ISOC 3
13#define OZ_EVT_URB_SUBMIT 4
14#define OZ_EVT_URB_DONE 5
15#define OZ_EVT_URB_CANCEL 6
16#define OZ_EVT_CTRL_REQ 7
17#define OZ_EVT_CTRL_CNF 8
18#define OZ_EVT_CTRL_LOCAL 9
19#define OZ_EVT_CONNECT_REQ 10
20#define OZ_EVT_CONNECT_RSP 11
21#define OZ_EVT_EP_CREDIT 12
22#define OZ_EVT_EP_BUFFERING 13
23#define OZ_EVT_TX_ISOC_DONE 14
24#define OZ_EVT_TX_ISOC_DROP 15
25#define OZ_EVT_TIMER_CTRL 16
26#define OZ_EVT_TIMER 17
27#define OZ_EVT_PD_STATE 18
28#define OZ_EVT_SERVICE 19
29#define OZ_EVT_DEBUG 20
30
31struct oz_event {
32 unsigned long jiffies;
33 unsigned char evt;
34 unsigned char ctx1;
35 unsigned short ctx2;
36 void *ctx3;
37 unsigned ctx4;
38};
39
40#define OZ_EVT_LIST_SZ 64
41struct oz_evtlist {
42 int count;
43 int missed;
44 struct oz_event evts[OZ_EVT_LIST_SZ];
45};
46
47#endif /* _OZEVENTDEF_H */
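
A user-space sketch of walking a filled struct oz_evtlist. How the structure is fetched from the driver (the char-device plumbing around oz_events_copy()) is outside this hunk, so that part is assumed:

	#include <stdio.h>

	static void dump_evtlist(const struct oz_evtlist *lst)
	{
		int i;

		if (lst->missed)
			printf("missed %d events\n", lst->missed);
		for (i = 0; i < lst->count; i++) {
			const struct oz_event *e = &lst->evts[i];

			printf("%lu: evt=%u ctx1=%u ctx2=%u ctx4=%u\n",
			       e->jiffies, e->evt, e->ctx1, e->ctx2,
			       e->ctx4);
		}
	}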
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
new file mode 100644
index 000000000000..750b14eb505e
--- /dev/null
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -0,0 +1,2256 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 *
5 * This file provides the implementation of a USB host controller device that
6 * does not have any associated hardware. Instead the virtual device is
7 * connected to the WiFi network and emulates the operation of a USB hcd by
8 * receiving and sending network frames.
9 * Note:
10 * We take great pains to reduce the amount of code where interrupts need to be
 11 * disabled, and in this respect we are different from standard HCDs. In
12 * particular we don't want in_irq() code bleeding over to the protocol side of
13 * the driver.
14 * The troublesome functions are the urb enqueue and dequeue functions both of
15 * which can be called in_irq(). So for these functions we put the urbs into a
16 * queue and request a tasklet to process them. This means that a spinlock with
 17 * interrupts disabled must be held for insertion and removal, but most code
 18 * is in tasklet or softirq context. The lock that protects this list is called
19 * the tasklet lock and serves the purpose of the 'HCD lock' which must be held
20 * when calling the following functions.
21 * usb_hcd_link_urb_to_ep()
22 * usb_hcd_unlink_urb_from_ep()
23 * usb_hcd_flush_endpoint()
24 * usb_hcd_check_unlink_urb()
25 * -----------------------------------------------------------------------------
26 */
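/* Editorial sketch, not part of the patch: the enqueue pattern the comment
 * above describes, reduced to its essentials.  urb_pending_list,
 * g_tasklet_lock and g_urb_process_tasklet are all defined later in this
 * file; error handling and per-port checks are elided.
 *
 *	spin_lock_irqsave(&g_tasklet_lock, irq_state);
 *	rc = usb_hcd_link_urb_to_ep(hcd, urb);	// needs the 'HCD lock'
 *	if (rc == 0)
 *		list_add_tail(&urbl->link, &ozhcd->urb_pending_list);
 *	spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
 *	tasklet_schedule(&g_urb_process_tasklet);	// work runs in softirq
 */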
27#include <linux/platform_device.h>
28#include <linux/usb.h>
29#include <linux/jiffies.h>
30#include <linux/slab.h>
31#include <linux/export.h>
32#include <linux/usb/hcd.h>
33#include <asm/unaligned.h>
34#include "ozconfig.h"
35#include "ozusbif.h"
36#include "oztrace.h"
37#include "ozurbparanoia.h"
38#include "ozevent.h"
39/*------------------------------------------------------------------------------
40 * Number of units of buffering to capture for an isochronous IN endpoint before
41 * allowing data to be indicated up.
42 */
43#define OZ_IN_BUFFERING_UNITS 50
44/* Name of our platform device.
45 */
46#define OZ_PLAT_DEV_NAME "ozwpan"
47/* Maximum number of free urb links that can be kept in the pool.
48 */
49#define OZ_MAX_LINK_POOL_SIZE 16
50/* Get endpoint object from the containing link.
51 */
52#define ep_from_link(__e) container_of((__e), struct oz_endpoint, link)
53/*------------------------------------------------------------------------------
54 * Used to link urbs together and also store some status information for each
55 * urb.
 56 * A cache of these is kept in a pool to reduce the number of calls to kmalloc.
57 */
58struct oz_urb_link {
59 struct list_head link;
60 struct urb *urb;
61 struct oz_port *port;
62 u8 req_id;
63 u8 ep_num;
64 unsigned long submit_jiffies;
65};
66
67/* Holds state information about a USB endpoint.
68 */
69struct oz_endpoint {
70 struct list_head urb_list; /* List of oz_urb_link items. */
71 struct list_head link; /* For isoc ep, links in to isoc
72 lists of oz_port. */
73 unsigned long last_jiffies;
74 int credit;
75 int credit_ceiling;
76 u8 ep_num;
77 u8 attrib;
78 u8 *buffer;
79 int buffer_size;
80 int in_ix;
81 int out_ix;
82 int buffered_units;
83 unsigned flags;
84 int start_frame;
85};
86/* Bits in the flags field. */
87#define OZ_F_EP_BUFFERING 0x1
88#define OZ_F_EP_HAVE_STREAM 0x2
89
90/* Holds state information about a USB interface.
91 */
92struct oz_interface {
93 unsigned ep_mask;
94 u8 alt;
95};
96
97/* Holds state information about an hcd port.
98 */
99#define OZ_NB_ENDPOINTS 16
100struct oz_port {
101 unsigned flags;
102 unsigned status;
103 void *hpd;
104 struct oz_hcd *ozhcd;
105 spinlock_t port_lock;
106 u8 bus_addr;
107 u8 next_req_id;
108 u8 config_num;
109 int num_iface;
110 struct oz_interface *iface;
111 struct oz_endpoint *out_ep[OZ_NB_ENDPOINTS];
112 struct oz_endpoint *in_ep[OZ_NB_ENDPOINTS];
113 struct list_head isoc_out_ep;
114 struct list_head isoc_in_ep;
115};
116#define OZ_PORT_F_PRESENT 0x1
117#define OZ_PORT_F_CHANGED 0x2
118#define OZ_PORT_F_DYING 0x4
119
120/* Data structure in the private context area of struct usb_hcd.
121 */
122#define OZ_NB_PORTS 8
123struct oz_hcd {
124 spinlock_t hcd_lock;
125 struct list_head urb_pending_list;
126 struct list_head urb_cancel_list;
127 struct list_head orphanage;
128 int conn_port; /* Port that is currently connecting, -1 if none.*/
129 struct oz_port ports[OZ_NB_PORTS];
130 uint flags;
131 struct usb_hcd *hcd;
132};
133/* Bits in flags field.
134 */
135#define OZ_HDC_F_SUSPENDED 0x1
136
137/*------------------------------------------------------------------------------
138 * Static function prototypes.
139 */
140static int oz_hcd_start(struct usb_hcd *hcd);
141static void oz_hcd_stop(struct usb_hcd *hcd);
142static void oz_hcd_shutdown(struct usb_hcd *hcd);
143static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
144 gfp_t mem_flags);
145static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
146static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
147 struct usb_host_endpoint *ep);
148static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
149 struct usb_host_endpoint *ep);
150static int oz_hcd_get_frame_number(struct usb_hcd *hcd);
151static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf);
152static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
153 u16 windex, char *buf, u16 wlength);
154static int oz_hcd_bus_suspend(struct usb_hcd *hcd);
155static int oz_hcd_bus_resume(struct usb_hcd *hcd);
156static int oz_plat_probe(struct platform_device *dev);
157static int oz_plat_remove(struct platform_device *dev);
158static void oz_plat_shutdown(struct platform_device *dev);
159static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg);
160static int oz_plat_resume(struct platform_device *dev);
161static void oz_urb_process_tasklet(unsigned long unused);
162static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
163 struct oz_port *port, struct usb_host_config *config,
164 gfp_t mem_flags);
165static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
166 struct oz_port *port);
167static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
168 struct oz_port *port,
169 struct usb_host_interface *intf, gfp_t mem_flags);
170static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
171 struct oz_port *port, int if_ix);
172static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
173 gfp_t mem_flags);
174static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
175 struct urb *urb);
176static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status);
177/*------------------------------------------------------------------------------
 178 * File-scope (static) variables.
179 */
180static struct platform_device *g_plat_dev;
181static struct oz_hcd *g_ozhcd;
182static DEFINE_SPINLOCK(g_hcdlock); /* Guards g_ozhcd. */
183static const char g_hcd_name[] = "Ozmo WPAN";
184static struct list_head *g_link_pool;
185static int g_link_pool_size;
186static DEFINE_SPINLOCK(g_link_lock);
187static DEFINE_SPINLOCK(g_tasklet_lock);
188static struct tasklet_struct g_urb_process_tasklet;
189static struct tasklet_struct g_urb_cancel_tasklet;
190static atomic_t g_pending_urbs = ATOMIC_INIT(0);
191static const struct hc_driver g_oz_hc_drv = {
192 .description = g_hcd_name,
193 .product_desc = "Ozmo Devices WPAN",
194 .hcd_priv_size = sizeof(struct oz_hcd),
195 .flags = HCD_USB11,
196 .start = oz_hcd_start,
197 .stop = oz_hcd_stop,
198 .shutdown = oz_hcd_shutdown,
199 .urb_enqueue = oz_hcd_urb_enqueue,
200 .urb_dequeue = oz_hcd_urb_dequeue,
201 .endpoint_disable = oz_hcd_endpoint_disable,
202 .endpoint_reset = oz_hcd_endpoint_reset,
203 .get_frame_number = oz_hcd_get_frame_number,
204 .hub_status_data = oz_hcd_hub_status_data,
205 .hub_control = oz_hcd_hub_control,
206 .bus_suspend = oz_hcd_bus_suspend,
207 .bus_resume = oz_hcd_bus_resume,
208};
209
210static struct platform_driver g_oz_plat_drv = {
211 .probe = oz_plat_probe,
212 .remove = oz_plat_remove,
213 .shutdown = oz_plat_shutdown,
214 .suspend = oz_plat_suspend,
215 .resume = oz_plat_resume,
216 .driver = {
217 .name = OZ_PLAT_DEV_NAME,
218 .owner = THIS_MODULE,
219 },
220};
221/*------------------------------------------------------------------------------
222 * Gets our private context area (which is of type struct oz_hcd) from the
223 * usb_hcd structure.
224 * Context: any
225 */
226static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd)
227{
228 return (struct oz_hcd *)hcd->hcd_priv;
229}
230/*------------------------------------------------------------------------------
231 * Searches list of ports to find the index of the one with a specified USB
232 * bus address. If none of the ports has the bus address then the connection
 233 * port is returned if there is one, or -1 otherwise.
234 * Context: any
235 */
236static int oz_get_port_from_addr(struct oz_hcd *ozhcd, u8 bus_addr)
237{
238 int i;
239 for (i = 0; i < OZ_NB_PORTS; i++) {
240 if (ozhcd->ports[i].bus_addr == bus_addr)
241 return i;
242 }
243 return ozhcd->conn_port;
244}
245/*------------------------------------------------------------------------------
246 * Allocates an urb link, first trying the pool but going to heap if empty.
247 * Context: any
248 */
249static struct oz_urb_link *oz_alloc_urb_link(void)
250{
251 struct oz_urb_link *urbl = 0;
252 unsigned long irq_state;
253 spin_lock_irqsave(&g_link_lock, irq_state);
254 if (g_link_pool) {
255 urbl = container_of(g_link_pool, struct oz_urb_link, link);
256 g_link_pool = urbl->link.next;
257 --g_link_pool_size;
258 }
259 spin_unlock_irqrestore(&g_link_lock, irq_state);
260 if (urbl == 0)
261 urbl = kmalloc(sizeof(struct oz_urb_link), GFP_ATOMIC);
262 return urbl;
263}
264/*------------------------------------------------------------------------------
265 * Frees an urb link by putting it in the pool if there is enough space or
 266 * freeing it back to the heap otherwise.
267 * Context: any
268 */
269static void oz_free_urb_link(struct oz_urb_link *urbl)
270{
271 if (urbl) {
272 unsigned long irq_state;
273 spin_lock_irqsave(&g_link_lock, irq_state);
274 if (g_link_pool_size < OZ_MAX_LINK_POOL_SIZE) {
275 urbl->link.next = g_link_pool;
276 g_link_pool = &urbl->link;
277 urbl = 0;
278 g_link_pool_size++;
279 }
280 spin_unlock_irqrestore(&g_link_lock, irq_state);
281 if (urbl)
282 kfree(urbl);
283 }
284}
285/*------------------------------------------------------------------------------
286 * Deallocates all the urb links in the pool.
287 * Context: unknown
288 */
289static void oz_empty_link_pool(void)
290{
291 struct list_head *e;
292 unsigned long irq_state;
293 spin_lock_irqsave(&g_link_lock, irq_state);
294 e = g_link_pool;
295 g_link_pool = 0;
296 g_link_pool_size = 0;
297 spin_unlock_irqrestore(&g_link_lock, irq_state);
298 while (e) {
299 struct oz_urb_link *urbl =
300 container_of(e, struct oz_urb_link, link);
301 e = e->next;
302 kfree(urbl);
303 }
304}
305/*------------------------------------------------------------------------------
306 * Allocates endpoint structure and optionally a buffer. If a buffer is
307 * allocated it immediately follows the endpoint structure.
308 * Context: softirq
309 */
310static struct oz_endpoint *oz_ep_alloc(gfp_t mem_flags, int buffer_size)
311{
312 struct oz_endpoint *ep =
313 kzalloc(sizeof(struct oz_endpoint)+buffer_size, mem_flags);
314 if (ep) {
315 INIT_LIST_HEAD(&ep->urb_list);
316 INIT_LIST_HEAD(&ep->link);
317 ep->credit = -1;
318 if (buffer_size) {
319 ep->buffer_size = buffer_size;
320 ep->buffer = (u8 *)(ep+1);
321 }
322 }
323 return ep;
324}
325/*------------------------------------------------------------------------------
326 * Pre-condition: Must be called with g_tasklet_lock held and interrupts
327 * disabled.
328 * Context: softirq or process
329 */
330struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb)
331{
332 struct oz_urb_link *urbl;
333 struct list_head *e;
334 list_for_each(e, &ozhcd->urb_cancel_list) {
335 urbl = container_of(e, struct oz_urb_link, link);
336 if (urb == urbl->urb) {
337 list_del_init(e);
338 return urbl;
339 }
340 }
341 return 0;
342}
343/*------------------------------------------------------------------------------
344 * This is called when we have finished processing an urb. It unlinks it from
345 * the ep and returns it to the core.
346 * Context: softirq or process
347 */
348static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
349 int status, unsigned long submit_jiffies)
350{
351 struct oz_hcd *ozhcd = oz_hcd_private(hcd);
352 unsigned long irq_state;
353 struct oz_urb_link *cancel_urbl = 0;
354 spin_lock_irqsave(&g_tasklet_lock, irq_state);
355 usb_hcd_unlink_urb_from_ep(hcd, urb);
356 /* Clear hcpriv which will prevent it being put in the cancel list
357 * in the event that an attempt is made to cancel it.
358 */
359 urb->hcpriv = 0;
360 /* Walk the cancel list in case the urb is already sitting there.
361 * Since we process the cancel list in a tasklet rather than in
362 * the dequeue function this could happen.
363 */
364 cancel_urbl = oz_uncancel_urb(ozhcd, urb);
365 /* Note: we release lock but do not enable local irqs.
366 * It appears that usb_hcd_giveback_urb() expects irqs to be disabled,
367 * or at least other host controllers disable interrupts at this point
368 * so we do the same. We must, however, release the lock otherwise a
369 * deadlock will occur if an urb is submitted to our driver in the urb
370 * completion function. Because we disable interrupts it is possible
371 * that the urb_enqueue function can be called with them disabled.
372 */
373 spin_unlock(&g_tasklet_lock);
374 if (oz_forget_urb(urb)) {
375 oz_trace("OZWPAN: ERROR Unknown URB %p\n", urb);
376 } else {
377 static unsigned long last_time;
378 atomic_dec(&g_pending_urbs);
379 oz_trace2(OZ_TRACE_URB,
380 "%lu: giveback_urb(%p,%x) %lu %lu pending:%d\n",
381 jiffies, urb, status, jiffies-submit_jiffies,
382 jiffies-last_time, atomic_read(&g_pending_urbs));
383 last_time = jiffies;
384 oz_event_log(OZ_EVT_URB_DONE, 0, 0, urb, status);
385 usb_hcd_giveback_urb(hcd, urb, status);
386 }
387 spin_lock(&g_tasklet_lock);
388 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
389 if (cancel_urbl)
390 oz_free_urb_link(cancel_urbl);
391}
392/*------------------------------------------------------------------------------
393 * Deallocates an endpoint including deallocating any associated stream and
394 * returning any queued urbs to the core.
395 * Context: softirq
396 */
397static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
398{
399 oz_trace("oz_ep_free()\n");
400 if (port) {
401 struct list_head list;
402 struct oz_hcd *ozhcd = port->ozhcd;
403 INIT_LIST_HEAD(&list);
404 if (ep->flags & OZ_F_EP_HAVE_STREAM)
405 oz_usb_stream_delete(port->hpd, ep->ep_num);
406 /* Transfer URBs to the orphanage while we hold the lock. */
407 spin_lock_bh(&ozhcd->hcd_lock);
408 /* Note: this works even if ep->urb_list is empty.*/
409 list_replace_init(&ep->urb_list, &list);
410 /* Put the URBs in the orphanage. */
411 list_splice_tail(&list, &ozhcd->orphanage);
412 spin_unlock_bh(&ozhcd->hcd_lock);
413 }
414 oz_trace("Freeing endpoint memory\n");
415 kfree(ep);
416}
417/*------------------------------------------------------------------------------
418 * Context: softirq
419 */
420static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
421 struct urb *urb, u8 req_id)
422{
423 struct oz_urb_link *urbl;
424 struct oz_endpoint *ep;
425 int err = 0;
426 if (ep_addr >= OZ_NB_ENDPOINTS) {
427 oz_trace("Invalid endpoint number in oz_enqueue_ep_urb().\n");
428 return -EINVAL;
429 }
430 urbl = oz_alloc_urb_link();
431 if (!urbl)
432 return -ENOMEM;
433 urbl->submit_jiffies = jiffies;
434 urbl->urb = urb;
435 urbl->req_id = req_id;
436 urbl->ep_num = ep_addr;
437 /* Hold lock while we insert the URB into the list within the
438 * endpoint structure.
439 */
440 spin_lock_bh(&port->ozhcd->hcd_lock);
441 /* If the urb has been unlinked while out of any list then
442 * complete it now.
443 */
444 if (urb->unlinked) {
445 spin_unlock_bh(&port->ozhcd->hcd_lock);
446 oz_trace("urb %p unlinked so complete immediately\n", urb);
447 oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
448 oz_free_urb_link(urbl);
449 return 0;
450 }
451 if (in_dir)
452 ep = port->in_ep[ep_addr];
453 else
454 ep = port->out_ep[ep_addr];
455 if (ep && port->hpd) {
456 list_add_tail(&urbl->link, &ep->urb_list);
457 if (!in_dir && ep_addr && (ep->credit < 0)) {
458 ep->last_jiffies = jiffies;
459 ep->credit = 0;
460 oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num,
461 0, 0, ep->credit);
462 }
463 } else {
464 err = -EPIPE;
465 }
466 spin_unlock_bh(&port->ozhcd->hcd_lock);
467 if (err)
468 oz_free_urb_link(urbl);
469 return err;
470}
471/*------------------------------------------------------------------------------
472 * Removes an urb from the queue in the endpoint.
473 * Returns 0 if it is found and -EIDRM otherwise.
474 * Context: softirq
475 */
476static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
477 struct urb *urb)
478{
479 struct oz_urb_link *urbl = 0;
480 struct oz_endpoint *ep;
481 spin_lock_bh(&port->ozhcd->hcd_lock);
482 if (in_dir)
483 ep = port->in_ep[ep_addr];
484 else
485 ep = port->out_ep[ep_addr];
486 if (ep) {
487 struct list_head *e;
488 list_for_each(e, &ep->urb_list) {
489 urbl = container_of(e, struct oz_urb_link, link);
490 if (urbl->urb == urb) {
491 list_del_init(e);
492 break;
493 }
494 urbl = 0;
495 }
496 }
497 spin_unlock_bh(&port->ozhcd->hcd_lock);
498 if (urbl)
499 oz_free_urb_link(urbl);
500 return urbl ? 0 : -EIDRM;
501}
502/*------------------------------------------------------------------------------
503 * Finds an urb given its request id.
504 * Context: softirq
505 */
506static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
507 u8 req_id)
508{
509 struct oz_hcd *ozhcd = port->ozhcd;
510 struct urb *urb = 0;
511 struct oz_urb_link *urbl = 0;
512 struct oz_endpoint *ep;
513
514 spin_lock_bh(&ozhcd->hcd_lock);
515 ep = port->out_ep[ep_ix];
516 if (ep) {
517 struct list_head *e;
518 list_for_each(e, &ep->urb_list) {
519 urbl = container_of(e, struct oz_urb_link, link);
520 if (urbl->req_id == req_id) {
521 urb = urbl->urb;
522 list_del_init(e);
523 break;
524 }
525 }
526 }
527 spin_unlock_bh(&ozhcd->hcd_lock);
 528	/* If urb is non-zero then we must have an urb link to delete.
529 */
530 if (urb)
531 oz_free_urb_link(urbl);
532 return urb;
533}
534/*------------------------------------------------------------------------------
535 * Pre-condition: Port lock must be held.
536 * Context: softirq
537 */
538static void oz_acquire_port(struct oz_port *port, void *hpd)
539{
540 INIT_LIST_HEAD(&port->isoc_out_ep);
541 INIT_LIST_HEAD(&port->isoc_in_ep);
542 port->flags |= OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED;
543 port->status |= USB_PORT_STAT_CONNECTION |
544 (USB_PORT_STAT_C_CONNECTION << 16);
545 oz_usb_get(hpd);
546 port->hpd = hpd;
547}
548/*------------------------------------------------------------------------------
549 * Context: softirq
550 */
551static struct oz_hcd *oz_hcd_claim(void)
552{
553 struct oz_hcd *ozhcd;
554 spin_lock_bh(&g_hcdlock);
555 ozhcd = g_ozhcd;
556 if (ozhcd)
557 usb_get_hcd(ozhcd->hcd);
558 spin_unlock_bh(&g_hcdlock);
559 return ozhcd;
560}
561/*------------------------------------------------------------------------------
562 * Context: softirq
563 */
564static inline void oz_hcd_put(struct oz_hcd *ozhcd)
565{
566 if (ozhcd)
567 usb_put_hcd(ozhcd->hcd);
568}
569/*------------------------------------------------------------------------------
570 * This is called by the protocol handler to notify that a PD has arrived.
571 * We allocate a port to associate with the PD and create a structure for
572 * endpoint 0. This port is made the connection port.
 573 * In the event that one of the other ports is already a connection port then
574 * we fail.
 575 * TODO We should be able to do better than fail and should be able to remember
576 * that this port needs configuring and make it the connection port once the
577 * current connection port has been assigned an address. Collisions here are
578 * probably very rare indeed.
579 * Context: softirq
580 */
581void *oz_hcd_pd_arrived(void *hpd)
582{
583 int i;
584 void *hport = 0;
585 struct oz_hcd *ozhcd = 0;
586 struct oz_endpoint *ep;
587 oz_trace("oz_hcd_pd_arrived()\n");
588 ozhcd = oz_hcd_claim();
589 if (ozhcd == 0)
590 return 0;
591 /* Allocate an endpoint object in advance (before holding hcd lock) to
592 * use for out endpoint 0.
593 */
594 ep = oz_ep_alloc(GFP_ATOMIC, 0);
595 spin_lock_bh(&ozhcd->hcd_lock);
596 if (ozhcd->conn_port >= 0) {
597 spin_unlock_bh(&ozhcd->hcd_lock);
598 oz_trace("conn_port >= 0\n");
599 goto out;
600 }
601 for (i = 0; i < OZ_NB_PORTS; i++) {
602 struct oz_port *port = &ozhcd->ports[i];
603 spin_lock(&port->port_lock);
604 if ((port->flags & OZ_PORT_F_PRESENT) == 0) {
605 oz_acquire_port(port, hpd);
606 spin_unlock(&port->port_lock);
607 break;
608 }
609 spin_unlock(&port->port_lock);
610 }
611 if (i < OZ_NB_PORTS) {
612 oz_trace("Setting conn_port = %d\n", i);
613 ozhcd->conn_port = i;
614 /* Attach out endpoint 0.
615 */
616 ozhcd->ports[i].out_ep[0] = ep;
617 ep = 0;
618 hport = &ozhcd->ports[i];
619 spin_unlock_bh(&ozhcd->hcd_lock);
620 if (ozhcd->flags & OZ_HDC_F_SUSPENDED) {
621 oz_trace("Resuming root hub\n");
622 usb_hcd_resume_root_hub(ozhcd->hcd);
623 }
624 usb_hcd_poll_rh_status(ozhcd->hcd);
625 } else {
626 spin_unlock_bh(&ozhcd->hcd_lock);
627 }
628out:
629 if (ep) /* ep is non-null if not used. */
630 oz_ep_free(0, ep);
631 oz_hcd_put(ozhcd);
632 return hport;
633}
634/*------------------------------------------------------------------------------
635 * This is called by the protocol handler to notify that the PD has gone away.
636 * We need to deallocate all resources and then request that the root hub is
637 * polled. We release the reference we hold on the PD.
638 * Context: softirq
639 */
640void oz_hcd_pd_departed(void *hport)
641{
642 struct oz_port *port = (struct oz_port *)hport;
643 struct oz_hcd *ozhcd;
644 void *hpd;
645 struct oz_endpoint *ep = 0;
646
647 oz_trace("oz_hcd_pd_departed()\n");
648 if (port == 0) {
649 oz_trace("oz_hcd_pd_departed() port = 0\n");
650 return;
651 }
652 ozhcd = port->ozhcd;
653 if (ozhcd == 0)
654 return;
655 /* Check if this is the connection port - if so clear it.
656 */
657 spin_lock_bh(&ozhcd->hcd_lock);
658 if ((ozhcd->conn_port >= 0) &&
659 (port == &ozhcd->ports[ozhcd->conn_port])) {
660 oz_trace("Clearing conn_port\n");
661 ozhcd->conn_port = -1;
662 }
663 spin_lock(&port->port_lock);
664 port->flags |= OZ_PORT_F_DYING;
665 spin_unlock(&port->port_lock);
666 spin_unlock_bh(&ozhcd->hcd_lock);
667
668 oz_clean_endpoints_for_config(ozhcd->hcd, port);
669 spin_lock_bh(&port->port_lock);
670 hpd = port->hpd;
671 port->hpd = 0;
672 port->bus_addr = 0xff;
673 port->flags &= ~(OZ_PORT_F_PRESENT | OZ_PORT_F_DYING);
674 port->flags |= OZ_PORT_F_CHANGED;
675 port->status &= ~USB_PORT_STAT_CONNECTION;
676 port->status |= (USB_PORT_STAT_C_CONNECTION << 16);
 677	/* If there is an endpoint 0 then clear the pointer while we hold
 678	 * the spinlock but deallocate it after releasing the lock.
679 */
680 if (port->out_ep[0]) {
681 ep = port->out_ep[0];
682 port->out_ep[0] = 0;
683 }
684 spin_unlock_bh(&port->port_lock);
685 if (ep)
686 oz_ep_free(port, ep);
687 usb_hcd_poll_rh_status(ozhcd->hcd);
688 oz_usb_put(hpd);
689}
690/*------------------------------------------------------------------------------
691 * Context: softirq
692 */
693void oz_hcd_pd_reset(void *hpd, void *hport)
694{
695 /* Cleanup the current configuration and report reset to the core.
696 */
697 struct oz_port *port = (struct oz_port *)hport;
698 struct oz_hcd *ozhcd = port->ozhcd;
699 oz_trace("PD Reset\n");
700 spin_lock_bh(&port->port_lock);
701 port->flags |= OZ_PORT_F_CHANGED;
702 port->status |= USB_PORT_STAT_RESET;
703 port->status |= (USB_PORT_STAT_C_RESET << 16);
704 spin_unlock_bh(&port->port_lock);
705 oz_clean_endpoints_for_config(ozhcd->hcd, port);
706 usb_hcd_poll_rh_status(ozhcd->hcd);
707}
708/*------------------------------------------------------------------------------
709 * Context: softirq
710 */
711void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, u8 *desc,
712 int length, int offset, int total_size)
713{
714 struct oz_port *port = (struct oz_port *)hport;
715 struct urb *urb;
716 int err = 0;
717
718 oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, 0, status);
719 oz_trace("oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
720 length, offset, total_size);
721 urb = oz_find_urb_by_id(port, 0, req_id);
722 if (!urb)
723 return;
724 if (status == 0) {
725 int copy_len;
726 int required_size = urb->transfer_buffer_length;
727 if (required_size > total_size)
728 required_size = total_size;
729 copy_len = required_size-offset;
730 if (length <= copy_len)
731 copy_len = length;
732 memcpy(urb->transfer_buffer+offset, desc, copy_len);
733 offset += copy_len;
734 if (offset < required_size) {
735 struct usb_ctrlrequest *setup =
736 (struct usb_ctrlrequest *)urb->setup_packet;
737 unsigned wvalue = le16_to_cpu(setup->wValue);
738 if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
739 err = -ENOMEM;
740 else if (oz_usb_get_desc_req(port->hpd, req_id,
741 setup->bRequestType, (u8)(wvalue>>8),
742 (u8)wvalue, setup->wIndex, offset,
743 required_size-offset)) {
744 oz_dequeue_ep_urb(port, 0, 0, urb);
745 err = -ENOMEM;
746 }
747 if (err == 0)
748 return;
749 }
750 }
751 urb->actual_length = total_size;
752 oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
753}
754/*------------------------------------------------------------------------------
755 * Context: softirq
756 */
757#ifdef WANT_TRACE
758static void oz_display_conf_type(u8 t)
759{
760 switch (t) {
761 case USB_REQ_GET_STATUS:
762 oz_trace("USB_REQ_GET_STATUS - cnf\n");
763 break;
764 case USB_REQ_CLEAR_FEATURE:
765 oz_trace("USB_REQ_CLEAR_FEATURE - cnf\n");
766 break;
767 case USB_REQ_SET_FEATURE:
768 oz_trace("USB_REQ_SET_FEATURE - cnf\n");
769 break;
770 case USB_REQ_SET_ADDRESS:
771 oz_trace("USB_REQ_SET_ADDRESS - cnf\n");
772 break;
773 case USB_REQ_GET_DESCRIPTOR:
774 oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
775 break;
776 case USB_REQ_SET_DESCRIPTOR:
777 oz_trace("USB_REQ_SET_DESCRIPTOR - cnf\n");
778 break;
779 case USB_REQ_GET_CONFIGURATION:
780 oz_trace("USB_REQ_GET_CONFIGURATION - cnf\n");
781 break;
782 case USB_REQ_SET_CONFIGURATION:
783 oz_trace("USB_REQ_SET_CONFIGURATION - cnf\n");
784 break;
785 case USB_REQ_GET_INTERFACE:
786 oz_trace("USB_REQ_GET_INTERFACE - cnf\n");
787 break;
788 case USB_REQ_SET_INTERFACE:
789 oz_trace("USB_REQ_SET_INTERFACE - cnf\n");
790 break;
791 case USB_REQ_SYNCH_FRAME:
792 oz_trace("USB_REQ_SYNCH_FRAME - cnf\n");
793 break;
794 }
795}
796#else
797#define oz_display_conf_type(__x)
798#endif /* WANT_TRACE */
799/*------------------------------------------------------------------------------
800 * Context: softirq
801 */
802static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
803 u8 rcode, u8 config_num)
804{
805 int rc = 0;
806 struct usb_hcd *hcd = port->ozhcd->hcd;
807 if (rcode == 0) {
808 port->config_num = config_num;
809 oz_clean_endpoints_for_config(hcd, port);
810 if (oz_build_endpoints_for_config(hcd, port,
811 &urb->dev->config[port->config_num-1], GFP_ATOMIC)) {
812 rc = -ENOMEM;
813 }
814 } else {
815 rc = -ENOMEM;
816 }
817 oz_complete_urb(hcd, urb, rc, 0);
818}
819/*------------------------------------------------------------------------------
820 * Context: softirq
821 */
822static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
823 u8 rcode, u8 if_num, u8 alt)
824{
825 struct usb_hcd *hcd = port->ozhcd->hcd;
826 int rc = 0;
827 if (rcode == 0) {
828 struct usb_host_config *config;
829 struct usb_host_interface *intf;
830 oz_trace("Set interface %d alt %d\n", if_num, alt);
831 oz_clean_endpoints_for_interface(hcd, port, if_num);
832 config = &urb->dev->config[port->config_num-1];
833 intf = &config->intf_cache[if_num]->altsetting[alt];
834 if (oz_build_endpoints_for_interface(hcd, port, intf,
835 GFP_ATOMIC))
836 rc = -ENOMEM;
837 else
838 port->iface[if_num].alt = alt;
839 } else {
840 rc = -ENOMEM;
841 }
842 oz_complete_urb(hcd, urb, rc, 0);
843}
844/*------------------------------------------------------------------------------
845 * Context: softirq
846 */
847void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, u8 *data,
848 int data_len)
849{
850 struct oz_port *port = (struct oz_port *)hport;
851 struct urb *urb;
852 struct usb_ctrlrequest *setup;
853 struct usb_hcd *hcd = port->ozhcd->hcd;
854 unsigned windex;
855 unsigned wvalue;
856
857 oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, 0, rcode);
858 oz_trace("oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
859 urb = oz_find_urb_by_id(port, 0, req_id);
860 if (!urb) {
861 oz_trace("URB not found\n");
862 return;
863 }
864 setup = (struct usb_ctrlrequest *)urb->setup_packet;
865 windex = le16_to_cpu(setup->wIndex);
866 wvalue = le16_to_cpu(setup->wValue);
867 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
868 /* Standard requests */
869 oz_display_conf_type(setup->bRequest);
870 switch (setup->bRequest) {
871 case USB_REQ_SET_CONFIGURATION:
872 oz_hcd_complete_set_config(port, urb, rcode,
873 (u8)wvalue);
874 break;
875 case USB_REQ_SET_INTERFACE:
876 oz_hcd_complete_set_interface(port, urb, rcode,
877 (u8)windex, (u8)wvalue);
878 break;
879 default:
880 oz_complete_urb(hcd, urb, 0, 0);
881 }
882
883 } else {
884 int copy_len;
885 oz_trace("VENDOR-CLASS - cnf\n");
886 if (data_len <= urb->transfer_buffer_length)
887 copy_len = data_len;
888 else
889 copy_len = urb->transfer_buffer_length;
890 if (copy_len)
891 memcpy(urb->transfer_buffer, data, copy_len);
892 urb->actual_length = copy_len;
893 oz_complete_urb(hcd, urb, 0, 0);
894 }
895}
896/*------------------------------------------------------------------------------
897 * Context: softirq-serialized
898 */
899static int oz_hcd_buffer_data(struct oz_endpoint *ep, u8 *data, int data_len)
900{
901 int space;
902 int copy_len;
903 if (!ep->buffer)
904 return -1;
905 space = ep->out_ix-ep->in_ix-1;
906 if (space < 0)
907 space += ep->buffer_size;
908 if (space < (data_len+1)) {
909 oz_trace("Buffer full\n");
910 return -1;
911 }
912 ep->buffer[ep->in_ix] = (u8)data_len;
913 if (++ep->in_ix == ep->buffer_size)
914 ep->in_ix = 0;
915 copy_len = ep->buffer_size - ep->in_ix;
916 if (copy_len > data_len)
917 copy_len = data_len;
918 memcpy(&ep->buffer[ep->in_ix], data, copy_len);
919
920 if (copy_len < data_len) {
921 memcpy(ep->buffer, data+copy_len, data_len-copy_len);
922 ep->in_ix = data_len-copy_len;
923 } else {
924 ep->in_ix += copy_len;
925 }
926 if (ep->in_ix == ep->buffer_size)
927 ep->in_ix = 0;
928 ep->buffered_units++;
929 return 0;
930}
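/* Editorial note, not part of the patch: the endpoint buffer above holds
 * length-prefixed units in circular fashion -- one length byte followed by
 * that many data bytes, wrapping at buffer_size:
 *
 *	[len][d0][d1]...[d(len-1)][len][d0]...
 *
 * in_ix == out_ix is the empty state, so storing a unit needs len+1 bytes of
 * free space, which is the (data_len+1) check above.  The matching consumer
 * is in oz_hcd_heartbeat() below, which reads the length byte at out_ix and
 * copies with the same wrapping rules.
 */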
931/*------------------------------------------------------------------------------
932 * Context: softirq-serialized
933 */
934void oz_hcd_data_ind(void *hport, u8 endpoint, u8 *data, int data_len)
935{
936 struct oz_port *port = (struct oz_port *)hport;
937 struct oz_endpoint *ep;
938 struct oz_hcd *ozhcd = port->ozhcd;
939 spin_lock_bh(&ozhcd->hcd_lock);
940 ep = port->in_ep[endpoint & USB_ENDPOINT_NUMBER_MASK];
941 if (ep == 0)
942 goto done;
943 switch (ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) {
944 case USB_ENDPOINT_XFER_INT:
945 case USB_ENDPOINT_XFER_BULK:
946 if (!list_empty(&ep->urb_list)) {
947 struct oz_urb_link *urbl =
948 list_first_entry(&ep->urb_list,
949 struct oz_urb_link, link);
950 struct urb *urb;
951 int copy_len;
952 list_del_init(&urbl->link);
953 spin_unlock_bh(&ozhcd->hcd_lock);
954 urb = urbl->urb;
955 oz_free_urb_link(urbl);
956 if (data_len <= urb->transfer_buffer_length)
957 copy_len = data_len;
958 else
959 copy_len = urb->transfer_buffer_length;
960 memcpy(urb->transfer_buffer, data, copy_len);
961 urb->actual_length = copy_len;
962 oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
963 return;
964 }
965 break;
966 case USB_ENDPOINT_XFER_ISOC:
967 oz_hcd_buffer_data(ep, data, data_len);
968 break;
969 }
970done:
971 spin_unlock_bh(&ozhcd->hcd_lock);
972}
973/*------------------------------------------------------------------------------
974 * Context: unknown
975 */
976static inline int oz_usb_get_frame_number(void)
977{
978 return jiffies_to_msecs(get_jiffies_64());
979}
980/*------------------------------------------------------------------------------
981 * Context: softirq
982 */
983int oz_hcd_heartbeat(void *hport)
984{
985 int rc = 0;
986 struct oz_port *port = (struct oz_port *)hport;
987 struct oz_hcd *ozhcd = port->ozhcd;
988 struct oz_urb_link *urbl;
989 struct list_head xfr_list;
990 struct list_head *e;
991 struct list_head *n;
992 struct urb *urb;
993 struct oz_endpoint *ep;
994 unsigned long now = jiffies;
995 INIT_LIST_HEAD(&xfr_list);
996 /* Check the OUT isoc endpoints to see if any URB data can be sent.
997 */
998 spin_lock_bh(&ozhcd->hcd_lock);
999 list_for_each(e, &port->isoc_out_ep) {
1000 ep = ep_from_link(e);
1001 if (ep->credit < 0)
1002 continue;
1003 ep->credit += (now - ep->last_jiffies);
1004 if (ep->credit > ep->credit_ceiling)
1005 ep->credit = ep->credit_ceiling;
1006 oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, 0, ep->credit);
1007 ep->last_jiffies = now;
1008 while (ep->credit && !list_empty(&ep->urb_list)) {
1009 urbl = list_first_entry(&ep->urb_list,
1010 struct oz_urb_link, link);
1011 urb = urbl->urb;
1012 if (ep->credit < urb->number_of_packets)
1013 break;
1014 ep->credit -= urb->number_of_packets;
1015 oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, 0,
1016 ep->credit);
1017 list_del(&urbl->link);
1018 list_add_tail(&urbl->link, &xfr_list);
1019 }
1020 }
1021 spin_unlock_bh(&ozhcd->hcd_lock);
1022 /* Send to PD and complete URBs.
1023 */
1024 list_for_each_safe(e, n, &xfr_list) {
1025 unsigned long t;
1026 urbl = container_of(e, struct oz_urb_link, link);
1027 urb = urbl->urb;
1028 t = urbl->submit_jiffies;
1029 list_del_init(e);
1030 urb->error_count = 0;
1031 urb->start_frame = oz_usb_get_frame_number();
1032 oz_usb_send_isoc(port->hpd, urbl->ep_num, urb);
1033 oz_free_urb_link(urbl);
1034 oz_complete_urb(port->ozhcd->hcd, urb, 0, t);
1035 }
1036 /* Check the IN isoc endpoints to see if any URBs can be completed.
1037 */
1038 spin_lock_bh(&ozhcd->hcd_lock);
1039 list_for_each(e, &port->isoc_in_ep) {
1040 struct oz_endpoint *ep = ep_from_link(e);
1041 if (ep->flags & OZ_F_EP_BUFFERING) {
 1042			if (ep->buffered_units >= OZ_IN_BUFFERING_UNITS) {
1043 ep->flags &= ~OZ_F_EP_BUFFERING;
1044 ep->credit = 0;
1045 oz_event_log(OZ_EVT_EP_CREDIT,
1046 ep->ep_num | USB_DIR_IN,
1047 0, 0, ep->credit);
1048 ep->last_jiffies = now;
1049 ep->start_frame = 0;
1050 oz_event_log(OZ_EVT_EP_BUFFERING,
1051 ep->ep_num | USB_DIR_IN, 0, 0, 0);
1052 }
1053 continue;
1054 }
1055 ep->credit += (now - ep->last_jiffies);
1056 oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num | USB_DIR_IN,
1057 0, 0, ep->credit);
1058 ep->last_jiffies = now;
1059 while (!list_empty(&ep->urb_list)) {
1060 struct oz_urb_link *urbl =
1061 list_first_entry(&ep->urb_list,
1062 struct oz_urb_link, link);
1063 struct urb *urb = urbl->urb;
1064 int len = 0;
1065 int copy_len;
1066 int i;
1067 if (ep->credit < urb->number_of_packets)
1068 break;
1069 if (ep->buffered_units < urb->number_of_packets)
1070 break;
1071 urb->actual_length = 0;
1072 for (i = 0; i < urb->number_of_packets; i++) {
1073 len = ep->buffer[ep->out_ix];
1074 if (++ep->out_ix == ep->buffer_size)
1075 ep->out_ix = 0;
1076 copy_len = ep->buffer_size - ep->out_ix;
1077 if (copy_len > len)
1078 copy_len = len;
1079 memcpy(urb->transfer_buffer,
1080 &ep->buffer[ep->out_ix], copy_len);
1081 if (copy_len < len) {
1082 memcpy(urb->transfer_buffer+copy_len,
1083 ep->buffer, len-copy_len);
1084 ep->out_ix = len-copy_len;
1085 } else
1086 ep->out_ix += copy_len;
1087 if (ep->out_ix == ep->buffer_size)
1088 ep->out_ix = 0;
1089 urb->iso_frame_desc[i].offset =
1090 urb->actual_length;
1091 urb->actual_length += len;
1092 urb->iso_frame_desc[i].actual_length = len;
1093 urb->iso_frame_desc[i].status = 0;
1094 }
1095 ep->buffered_units -= urb->number_of_packets;
1096 urb->error_count = 0;
1097 urb->start_frame = ep->start_frame;
1098 ep->start_frame += urb->number_of_packets;
1099 list_del(&urbl->link);
1100 list_add_tail(&urbl->link, &xfr_list);
1101 ep->credit -= urb->number_of_packets;
1102 oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num | USB_DIR_IN,
1103 0, 0, ep->credit);
1104 }
1105 }
1106 if (!list_empty(&port->isoc_out_ep) || !list_empty(&port->isoc_in_ep))
1107 rc = 1;
1108 spin_unlock_bh(&ozhcd->hcd_lock);
1109 /* Complete the filled URBs.
1110 */
1111 list_for_each_safe(e, n, &xfr_list) {
1112 urbl = container_of(e, struct oz_urb_link, link);
1113 urb = urbl->urb;
1114 list_del_init(e);
1115 oz_free_urb_link(urbl);
1116 oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
1117 }
1118 /* Check if there are any ep0 requests that have timed out.
 1119	 * If so, resend them to the PD.
1120 */
1121 ep = port->out_ep[0];
1122 if (ep) {
1123 struct list_head *e;
1124 struct list_head *n;
1125 spin_lock_bh(&ozhcd->hcd_lock);
1126 list_for_each_safe(e, n, &ep->urb_list) {
1127 urbl = container_of(e, struct oz_urb_link, link);
1128 if (time_after(now, urbl->submit_jiffies+HZ/2)) {
 1129				oz_trace("%lu: Request 0x%p timeout\n",
1130 now, urbl->urb);
1131 urbl->submit_jiffies = now;
1132 list_del(e);
1133 list_add_tail(e, &xfr_list);
1134 }
1135 }
1136 if (!list_empty(&ep->urb_list))
1137 rc = 1;
1138 spin_unlock_bh(&ozhcd->hcd_lock);
1139 e = xfr_list.next;
1140 while (e != &xfr_list) {
1141 urbl = container_of(e, struct oz_urb_link, link);
1142 e = e->next;
1143 oz_trace("Resending request to PD.\n");
1144 oz_process_ep0_urb(ozhcd, urbl->urb, GFP_ATOMIC);
1145 oz_free_urb_link(urbl);
1146 }
1147 }
1148 return rc;
1149}
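/* Editorial note, not part of the patch: the credit arithmetic above treats
 * one elapsed jiffy as one unit of isoc bandwidth (one packet per tick,
 * which presumes HZ in the region of 1000 so a unit is roughly 1ms).  OUT
 * endpoints accrue credit up to credit_ceiling and spend
 * urb->number_of_packets per URB sent; IN endpoints additionally hold off
 * until OZ_IN_BUFFERING_UNITS units have accumulated, presumably to smooth
 * over network jitter before data is indicated up.
 */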
1150/*------------------------------------------------------------------------------
1151 * Context: softirq
1152 */
1153static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
1154 struct oz_port *port,
1155 struct usb_host_interface *intf, gfp_t mem_flags)
1156{
1157 struct oz_hcd *ozhcd = port->ozhcd;
1158 int i;
1159 int if_ix = intf->desc.bInterfaceNumber;
1160 int request_heartbeat = 0;
1161 oz_trace("interface[%d] = %p\n", if_ix, intf);
1162 for (i = 0; i < intf->desc.bNumEndpoints; i++) {
1163 struct usb_host_endpoint *hep = &intf->endpoint[i];
1164 u8 ep_addr = hep->desc.bEndpointAddress;
1165 u8 ep_num = ep_addr & USB_ENDPOINT_NUMBER_MASK;
1166 struct oz_endpoint *ep;
1167 int buffer_size = 0;
1168
1169 oz_trace("%d bEndpointAddress = %x\n", i, ep_addr);
1170 if ((ep_addr & USB_ENDPOINT_DIR_MASK) &&
1171 ((hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
1172 == USB_ENDPOINT_XFER_ISOC)) {
1173 buffer_size = 24*1024;
1174 }
1175
1176 ep = oz_ep_alloc(mem_flags, buffer_size);
1177 if (!ep) {
1178 oz_clean_endpoints_for_interface(hcd, port, if_ix);
1179 return -ENOMEM;
1180 }
1181 ep->attrib = hep->desc.bmAttributes;
1182 ep->ep_num = ep_num;
1183 if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
1184 == USB_ENDPOINT_XFER_ISOC) {
1185 oz_trace("wMaxPacketSize = %d\n",
1186 hep->desc.wMaxPacketSize);
1187 ep->credit_ceiling = 200;
1188 if (ep_addr & USB_ENDPOINT_DIR_MASK) {
1189 ep->flags |= OZ_F_EP_BUFFERING;
1190 oz_event_log(OZ_EVT_EP_BUFFERING,
1191 ep->ep_num | USB_DIR_IN, 1, 0, 0);
1192 } else {
1193 ep->flags |= OZ_F_EP_HAVE_STREAM;
1194 if (oz_usb_stream_create(port->hpd, ep_num))
1195 ep->flags &= ~OZ_F_EP_HAVE_STREAM;
1196 }
1197 }
1198 spin_lock_bh(&ozhcd->hcd_lock);
1199 if (ep_addr & USB_ENDPOINT_DIR_MASK) {
1200 port->in_ep[ep_num] = ep;
1201 port->iface[if_ix].ep_mask |=
1202 (1<<(ep_num+OZ_NB_ENDPOINTS));
1203 if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
1204 == USB_ENDPOINT_XFER_ISOC) {
1205 list_add_tail(&ep->link, &port->isoc_in_ep);
1206 request_heartbeat = 1;
1207 }
1208 } else {
1209 port->out_ep[ep_num] = ep;
1210 port->iface[if_ix].ep_mask |= (1<<ep_num);
1211 if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
1212 == USB_ENDPOINT_XFER_ISOC) {
1213 list_add_tail(&ep->link, &port->isoc_out_ep);
1214 request_heartbeat = 1;
1215 }
1216 }
1217 spin_unlock_bh(&ozhcd->hcd_lock);
1218 if (request_heartbeat && port->hpd)
1219 oz_usb_request_heartbeat(port->hpd);
1220 }
1221 return 0;
1222}
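/* Note (descriptive, based on the code above): the per-interface ep_mask
 * packs OUT endpoint n into bit n and IN endpoint n into bit
 * (n + OZ_NB_ENDPOINTS); oz_clean_endpoints_for_interface() below unpacks
 * the mask the same way when tearing the endpoints down.
 */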
1223/*------------------------------------------------------------------------------
1224 * Context: softirq
1225 */
1226static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
1227 struct oz_port *port, int if_ix)
1228{
1229 struct oz_hcd *ozhcd = port->ozhcd;
1230 unsigned mask;
1231 int i;
1232 struct list_head ep_list;
1233
1234 oz_trace("Deleting endpoints for interface %d\n", if_ix);
1235 if (if_ix >= port->num_iface)
1236 return;
1237 INIT_LIST_HEAD(&ep_list);
1238 spin_lock_bh(&ozhcd->hcd_lock);
1239 mask = port->iface[if_ix].ep_mask;
1240 port->iface[if_ix].ep_mask = 0;
1241 for (i = 0; i < OZ_NB_ENDPOINTS; i++) {
1242 struct list_head *e;
1243 /* Gather OUT endpoints.
1244 */
1245 if ((mask & (1<<i)) && port->out_ep[i]) {
1246 e = &port->out_ep[i]->link;
1247 port->out_ep[i] = 0;
1248 /* Remove from isoc list if present.
1249 */
1250 list_del(e);
1251 list_add_tail(e, &ep_list);
1252 }
1253 /* Gather IN endpoints.
1254 */
1255 if ((mask & (1<<(i+OZ_NB_ENDPOINTS))) && port->in_ep[i]) {
1256 e = &port->in_ep[i]->link;
1257 port->in_ep[i] = 0;
1258 list_del(e);
1259 list_add_tail(e, &ep_list);
1260 }
1261 }
1262 spin_unlock_bh(&ozhcd->hcd_lock);
1263 while (!list_empty(&ep_list)) {
1264 struct oz_endpoint *ep =
1265 list_first_entry(&ep_list, struct oz_endpoint, link);
1266 list_del_init(&ep->link);
1267 oz_ep_free(port, ep);
1268 }
1269}
1270/*------------------------------------------------------------------------------
1271 * Context: softirq
1272 */
1273static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
1274 struct oz_port *port, struct usb_host_config *config,
1275 gfp_t mem_flags)
1276{
1277 struct oz_hcd *ozhcd = port->ozhcd;
1278 int i;
1279 int num_iface = config->desc.bNumInterfaces;
1280 if (num_iface) {
1281 struct oz_interface *iface;
1282
1283 iface = kmalloc(num_iface*sizeof(struct oz_interface),
1284 mem_flags | __GFP_ZERO);
1285 if (!iface)
1286 return -ENOMEM;
1287 spin_lock_bh(&ozhcd->hcd_lock);
1288 port->iface = iface;
1289 port->num_iface = num_iface;
1290 spin_unlock_bh(&ozhcd->hcd_lock);
1291 }
1292 for (i = 0; i < num_iface; i++) {
1293 struct usb_host_interface *intf =
1294 &config->intf_cache[i]->altsetting[0];
1295 if (oz_build_endpoints_for_interface(hcd, port, intf,
1296 mem_flags))
1297 goto fail;
1298 }
1299 return 0;
1300fail:
1301 oz_clean_endpoints_for_config(hcd, port);
1302 return -1;
1303}
1304/*------------------------------------------------------------------------------
1305 * Context: softirq
1306 */
1307static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
1308 struct oz_port *port)
1309{
1310 struct oz_hcd *ozhcd = port->ozhcd;
1311 int i;
1312 oz_trace("Deleting endpoints for configuration.\n");
1313 for (i = 0; i < port->num_iface; i++)
1314 oz_clean_endpoints_for_interface(hcd, port, i);
1315 spin_lock_bh(&ozhcd->hcd_lock);
1316 if (port->iface) {
1317 oz_trace("Freeing interfaces object.\n");
1318 kfree(port->iface);
1319 port->iface = 0;
1320 }
1321 port->num_iface = 0;
1322 spin_unlock_bh(&ozhcd->hcd_lock);
1323}
1324/*------------------------------------------------------------------------------
1325 * Context: tasklet
1326 */
1327static void *oz_claim_hpd(struct oz_port *port)
1328{
1329 void *hpd = 0;
1330 struct oz_hcd *ozhcd = port->ozhcd;
1331 spin_lock_bh(&ozhcd->hcd_lock);
1332 hpd = port->hpd;
1333 if (hpd)
1334 oz_usb_get(hpd);
1335 spin_unlock_bh(&ozhcd->hcd_lock);
1336 return hpd;
1337}
1338/*------------------------------------------------------------------------------
1339 * Context: tasklet
1340 */
1341static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
1342 gfp_t mem_flags)
1343{
1344 struct usb_ctrlrequest *setup;
1345 unsigned windex;
1346 unsigned wvalue;
1347 unsigned wlength;
1348 void *hpd = 0;
1349 u8 req_id;
1350 int rc = 0;
1351 unsigned complete = 0;
1352
1353 int port_ix = -1;
1354 struct oz_port *port = 0;
1355
1356 oz_trace2(OZ_TRACE_URB, "%lu: oz_process_ep0_urb(%p)\n", jiffies, urb);
1357 port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
1358 if (port_ix < 0) {
1359 rc = -EPIPE;
1360 goto out;
1361 }
1362 port = &ozhcd->ports[port_ix];
1363 if (((port->flags & OZ_PORT_F_PRESENT) == 0)
1364 || (port->flags & OZ_PORT_F_DYING)) {
1365 oz_trace("Refusing URB port_ix = %d devnum = %d\n",
1366 port_ix, urb->dev->devnum);
1367 rc = -EPIPE;
1368 goto out;
1369 }
1370 /* Store port in private context data.
1371 */
1372 urb->hcpriv = port;
1373 setup = (struct usb_ctrlrequest *)urb->setup_packet;
1374 windex = le16_to_cpu(setup->wIndex);
1375 wvalue = le16_to_cpu(setup->wValue);
1376 wlength = le16_to_cpu(setup->wLength);
1377 oz_trace2(OZ_TRACE_CTRL_DETAIL, "bRequestType = %x\n",
1378 setup->bRequestType);
1379 oz_trace2(OZ_TRACE_CTRL_DETAIL, "bRequest = %x\n", setup->bRequest);
1380 oz_trace2(OZ_TRACE_CTRL_DETAIL, "wValue = %x\n", wvalue);
1381 oz_trace2(OZ_TRACE_CTRL_DETAIL, "wIndex = %x\n", windex);
1382 oz_trace2(OZ_TRACE_CTRL_DETAIL, "wLength = %x\n", wlength);
1383
1384 req_id = port->next_req_id++;
1385 hpd = oz_claim_hpd(port);
1386 if (hpd == 0) {
1387 oz_trace("Cannot claim port\n");
1388 rc = -EPIPE;
1389 goto out;
1390 }
1391
1392 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1393 /* Standard requests
1394 */
1395 switch (setup->bRequest) {
1396 case USB_REQ_GET_DESCRIPTOR:
1397 oz_trace("USB_REQ_GET_DESCRIPTOR - req\n");
1398 break;
1399 case USB_REQ_SET_ADDRESS:
1400 oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest,
1401 0, 0, setup->bRequestType);
1402 oz_trace("USB_REQ_SET_ADDRESS - req\n");
1403 oz_trace("Port %d address is 0x%x\n", ozhcd->conn_port,
1404 (u8)le16_to_cpu(setup->wValue));
1405 spin_lock_bh(&ozhcd->hcd_lock);
1406 if (ozhcd->conn_port >= 0) {
1407 ozhcd->ports[ozhcd->conn_port].bus_addr =
1408 (u8)le16_to_cpu(setup->wValue);
1409 oz_trace("Clearing conn_port\n");
1410 ozhcd->conn_port = -1;
1411 }
1412 spin_unlock_bh(&ozhcd->hcd_lock);
1413 complete = 1;
1414 break;
1415 case USB_REQ_SET_CONFIGURATION:
1416 oz_trace("USB_REQ_SET_CONFIGURATION - req\n");
1417 break;
1418 case USB_REQ_GET_CONFIGURATION:
1419			/* We short-circuit this case and reply directly since
1420 * we have the selected configuration number cached.
1421 */
1422 oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0, 0,
1423 setup->bRequestType);
1424 oz_trace("USB_REQ_GET_CONFIGURATION - reply now\n");
1425 if (urb->transfer_buffer_length >= 1) {
1426 urb->actual_length = 1;
1427 *((u8 *)urb->transfer_buffer) =
1428 port->config_num;
1429 complete = 1;
1430 } else {
1431 rc = -EPIPE;
1432 }
1433 break;
1434 case USB_REQ_GET_INTERFACE:
1435			/* We short-circuit this case and reply directly since
1436 * we have the selected interface alternative cached.
1437 */
1438 oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0, 0,
1439 setup->bRequestType);
1440 oz_trace("USB_REQ_GET_INTERFACE - reply now\n");
1441 if (urb->transfer_buffer_length >= 1) {
1442 urb->actual_length = 1;
1443 *((u8 *)urb->transfer_buffer) =
1444 port->iface[(u8)windex].alt;
1445 oz_trace("interface = %d alt = %d\n",
1446 windex, port->iface[(u8)windex].alt);
1447 complete = 1;
1448 } else {
1449 rc = -EPIPE;
1450 }
1451 break;
1452 case USB_REQ_SET_INTERFACE:
1453 oz_trace("USB_REQ_SET_INTERFACE - req\n");
1454 break;
1455 }
1456 }
1457 if (!rc && !complete) {
1458 int data_len = 0;
1459 if ((setup->bRequestType & USB_DIR_IN) == 0)
1460 data_len = wlength;
1461 if (oz_usb_control_req(port->hpd, req_id, setup,
1462 urb->transfer_buffer, data_len)) {
1463 rc = -ENOMEM;
1464 } else {
1465 /* Note: we are queuing the request after we have
1466			 * submitted it to be transmitted. If the request were
1467 * to complete before we queued it then it would not
1468 * be found in the queue. It seems impossible for
1469 * this to happen but if it did the request would
1470 * be resubmitted so the problem would hopefully
1471 * resolve itself. Putting the request into the
1472 * queue before it has been sent is worse since the
1473 * urb could be cancelled while we are using it
1474 * to build the request.
1475 */
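			/* Illustrative worst case of the ordering above: the
			 * response arrives before the urb is queued and the
			 * lookup misses; the request then times out after HZ/2
			 * and is resent by the ep0 timeout handling earlier in
			 * this file, so the window is self-healing.
			 */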
1476 if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
1477 rc = -ENOMEM;
1478 }
1479 }
1480 oz_usb_put(hpd);
1481out:
1482 if (rc || complete) {
1483 oz_trace("Completing request locally\n");
1484 oz_complete_urb(ozhcd->hcd, urb, rc, 0);
1485 } else {
1486 oz_usb_request_heartbeat(port->hpd);
1487 }
1488}
1489/*------------------------------------------------------------------------------
1490 * Context: tasklet
1491 */
1492static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
1493{
1494 int rc = 0;
1495 struct oz_port *port = urb->hcpriv;
1496 u8 ep_addr;
1497 /* When we are paranoid we keep a list of urbs which we check against
1498 * before handing one back. This is just for debugging during
1499 * development and should be turned off in the released driver.
1500 */
1501 oz_remember_urb(urb);
1502 /* Check buffer is valid.
1503 */
1504 if (!urb->transfer_buffer && urb->transfer_buffer_length)
1505 return -EINVAL;
1506 /* Check if there is a device at the port - refuse if not.
1507 */
1508 if ((port->flags & OZ_PORT_F_PRESENT) == 0)
1509 return -EPIPE;
1510 ep_addr = usb_pipeendpoint(urb->pipe);
1511 if (ep_addr) {
1512 /* If the request is not for EP0 then queue it.
1513 */
1514 if (oz_enqueue_ep_urb(port, ep_addr, usb_pipein(urb->pipe),
1515 urb, 0))
1516 rc = -EPIPE;
1517 } else {
1518 oz_process_ep0_urb(ozhcd, urb, GFP_ATOMIC);
1519 }
1520 return rc;
1521}
1522/*------------------------------------------------------------------------------
1523 * Context: tasklet
1524 */
1525static void oz_urb_process_tasklet(unsigned long unused)
1526{
1527 unsigned long irq_state;
1528 struct urb *urb;
1529 struct oz_hcd *ozhcd = oz_hcd_claim();
1530 int rc = 0;
1531 if (ozhcd == 0)
1532 return;
1533 /* This is called from a tasklet so is in softirq context but the urb
1534 * list is filled from any context so we need to lock
1535 * appropriately while removing urbs.
1536 */
1537 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1538 while (!list_empty(&ozhcd->urb_pending_list)) {
1539 struct oz_urb_link *urbl =
1540 list_first_entry(&ozhcd->urb_pending_list,
1541 struct oz_urb_link, link);
1542 list_del_init(&urbl->link);
1543 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1544 urb = urbl->urb;
1545 oz_free_urb_link(urbl);
1546 rc = oz_urb_process(ozhcd, urb);
1547 if (rc)
1548 oz_complete_urb(ozhcd->hcd, urb, rc, 0);
1549 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1550 }
1551 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1552 oz_hcd_put(ozhcd);
1553}
1554/*------------------------------------------------------------------------------
1555 * This function searches for the urb in any of the lists it could be in.
1556 * If it is found it is removed from the list and completed. If the urb is
1557 * being processed then it won't be in a list so won't be found. However, the
1558 * call to usb_hcd_check_unlink_urb() will set the value of the unlinked field
1559 * to a non-zero value. When an attempt is made to put the urb back in a list
1560 * the unlinked field will be checked and the urb will then be completed.
1561 * Context: tasklet
1562 */
1563static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
1564{
1565 struct oz_urb_link *urbl = 0;
1566 struct list_head *e;
1567 struct oz_hcd *ozhcd;
1568 unsigned long irq_state;
1569 u8 ix;
1570 if (port == 0) {
1571 oz_trace("ERRORERROR: oz_urb_cancel(%p) port is null\n", urb);
1572 return;
1573 }
1574 ozhcd = port->ozhcd;
1575 if (ozhcd == 0) {
1576 oz_trace("ERRORERROR: oz_urb_cancel(%p) ozhcd is null\n", urb);
1577 return;
1578 }
1579
1580 /* Look in the tasklet queue.
1581 */
1582 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1583 list_for_each(e, &ozhcd->urb_cancel_list) {
1584 urbl = container_of(e, struct oz_urb_link, link);
1585 if (urb == urbl->urb) {
1586 list_del_init(e);
1587 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1588 goto out2;
1589 }
1590 }
1591 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1592 urbl = 0;
1593
1594 /* Look in the orphanage.
1595 */
1596 spin_lock_irqsave(&ozhcd->hcd_lock, irq_state);
1597 list_for_each(e, &ozhcd->orphanage) {
1598 urbl = container_of(e, struct oz_urb_link, link);
1599 if (urbl->urb == urb) {
1600 list_del(e);
1601 oz_trace("Found urb in orphanage\n");
1602 goto out;
1603 }
1604 }
1605 ix = (ep_num & 0xf);
1606 urbl = 0;
1607 if ((ep_num & USB_DIR_IN) && ix)
1608 urbl = oz_remove_urb(port->in_ep[ix], urb);
1609 else
1610 urbl = oz_remove_urb(port->out_ep[ix], urb);
1611out:
1612 spin_unlock_irqrestore(&ozhcd->hcd_lock, irq_state);
1613out2:
1614 if (urbl) {
1615 urb->actual_length = 0;
1616 oz_free_urb_link(urbl);
1617 oz_complete_urb(ozhcd->hcd, urb, -EPIPE, 0);
1618 }
1619}
1620/*------------------------------------------------------------------------------
1621 * Context: tasklet
1622 */
1623static void oz_urb_cancel_tasklet(unsigned long unused)
1624{
1625 unsigned long irq_state;
1626 struct urb *urb;
1627 struct oz_hcd *ozhcd = oz_hcd_claim();
1628 if (ozhcd == 0)
1629 return;
1630 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1631 while (!list_empty(&ozhcd->urb_cancel_list)) {
1632 struct oz_urb_link *urbl =
1633 list_first_entry(&ozhcd->urb_cancel_list,
1634 struct oz_urb_link, link);
1635 list_del_init(&urbl->link);
1636 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1637 urb = urbl->urb;
1638 if (urb->unlinked)
1639 oz_urb_cancel(urbl->port, urbl->ep_num, urb);
1640 oz_free_urb_link(urbl);
1641 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1642 }
1643 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1644 oz_hcd_put(ozhcd);
1645}
1646/*------------------------------------------------------------------------------
1647 * Context: unknown
1648 */
1649static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
1650{
1651 if (ozhcd) {
1652 struct oz_urb_link *urbl;
1653 while (!list_empty(&ozhcd->orphanage)) {
1654 urbl = list_first_entry(&ozhcd->orphanage,
1655 struct oz_urb_link, link);
1656 list_del(&urbl->link);
1657 oz_complete_urb(ozhcd->hcd, urbl->urb, status, 0);
1658 oz_free_urb_link(urbl);
1659 }
1660 }
1661}
1662/*------------------------------------------------------------------------------
1663 * Context: unknown
1664 */
1665static int oz_hcd_start(struct usb_hcd *hcd)
1666{
1667 oz_trace("oz_hcd_start()\n");
1668 hcd->power_budget = 200;
1669 hcd->state = HC_STATE_RUNNING;
1670 hcd->uses_new_polling = 1;
1671 return 0;
1672}
1673/*------------------------------------------------------------------------------
1674 * Context: unknown
1675 */
1676static void oz_hcd_stop(struct usb_hcd *hcd)
1677{
1678 oz_trace("oz_hcd_stop()\n");
1679}
1680/*------------------------------------------------------------------------------
1681 * Context: unknown
1682 */
1683static void oz_hcd_shutdown(struct usb_hcd *hcd)
1684{
1685 oz_trace("oz_hcd_shutdown()\n");
1686}
1687/*------------------------------------------------------------------------------
1688 * Context: any
1689 */
1690#ifdef WANT_EVENT_TRACE
1691static u8 oz_get_irq_ctx(void)
1692{
1693 u8 irq_info = 0;
1694 if (in_interrupt())
1695 irq_info |= 1;
1696 if (in_irq())
1697 irq_info |= 2;
1698 return irq_info;
1699}
1700#endif /* WANT_EVENT_TRACE */
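/* Decoding the two bits for readers of the event log: 0 = process context,
 * 1 = softirq (in_interrupt() only), 3 = hard interrupt (both tests true);
 * 2 cannot occur because in_irq() implies in_interrupt().
 */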
1701/*------------------------------------------------------------------------------
1702 * Called to queue an urb for the device.
1703 * This function should return a non-zero error code if it fails the urb but
1704 * should not call usb_hcd_giveback_urb().
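 * (If enqueue fails, the USB core unwinds the submission and returns the
 * error to the submitter, so giving the urb back here as well would report
 * it twice.)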
1705 * Context: any
1706 */
1707static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1708 gfp_t mem_flags)
1709{
1710 struct oz_hcd *ozhcd = oz_hcd_private(hcd);
1711 int rc = 0;
1712 int port_ix;
1713 struct oz_port *port;
1714 unsigned long irq_state;
1715 struct oz_urb_link *urbl;
1716 oz_trace2(OZ_TRACE_URB, "%lu: oz_hcd_urb_enqueue(%p)\n",
1717 jiffies, urb);
1718 oz_event_log(OZ_EVT_URB_SUBMIT, oz_get_irq_ctx(),
1719 (u16)urb->number_of_packets, urb, urb->pipe);
1720 if (unlikely(ozhcd == 0)) {
1721 oz_trace2(OZ_TRACE_URB, "%lu: Refused urb(%p) not ozhcd.\n",
1722 jiffies, urb);
1723 return -EPIPE;
1724 }
1725 if (unlikely(hcd->state != HC_STATE_RUNNING)) {
1726 oz_trace2(OZ_TRACE_URB, "%lu: Refused urb(%p) not running.\n",
1727 jiffies, urb);
1728 return -EPIPE;
1729 }
1730 port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
1731 if (port_ix < 0)
1732 return -EPIPE;
1733 port = &ozhcd->ports[port_ix];
1734 if (port == 0)
1735 return -EPIPE;
1736 if ((port->flags & OZ_PORT_F_PRESENT) == 0) {
1737 oz_trace("Refusing URB port_ix = %d devnum = %d\n",
1738 port_ix, urb->dev->devnum);
1739 return -EPIPE;
1740 }
1741 urb->hcpriv = port;
1742 /* Put request in queue for processing by tasklet.
1743 */
1744 urbl = oz_alloc_urb_link();
1745 if (unlikely(urbl == 0))
1746 return -ENOMEM;
1747 urbl->urb = urb;
1748 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1749 rc = usb_hcd_link_urb_to_ep(hcd, urb);
1750 if (unlikely(rc)) {
1751 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1752 oz_free_urb_link(urbl);
1753 return rc;
1754 }
1755 list_add_tail(&urbl->link, &ozhcd->urb_pending_list);
1756 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1757 tasklet_schedule(&g_urb_process_tasklet);
1758 atomic_inc(&g_pending_urbs);
1759 return 0;
1760}
1761/*------------------------------------------------------------------------------
1762 * Context: tasklet
1763 */
1764static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
1765 struct urb *urb)
1766{
1767 struct oz_urb_link *urbl = 0;
1768 struct list_head *e;
1769 if (unlikely(ep == 0))
1770 return 0;
1771 list_for_each(e, &ep->urb_list) {
1772 urbl = container_of(e, struct oz_urb_link, link);
1773 if (urbl->urb == urb) {
1774 list_del_init(e);
1775 if (usb_pipeisoc(urb->pipe)) {
1776 ep->credit -= urb->number_of_packets;
1777 if (ep->credit < 0)
1778 ep->credit = 0;
1779 oz_event_log(OZ_EVT_EP_CREDIT,
1780 usb_pipein(urb->pipe) ?
1781 (ep->ep_num | USB_DIR_IN) : ep->ep_num,
1782 0, 0, ep->credit);
1783 }
1784 return urbl;
1785 }
1786 }
1787 return 0;
1788}
1789/*------------------------------------------------------------------------------
1790 * Called to dequeue a previously submitted urb for the device.
1791 * Context: any
1792 */
1793static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1794{
1795 struct oz_hcd *ozhcd = oz_hcd_private(hcd);
1796 struct oz_urb_link *urbl = 0;
1797 int rc;
1798 unsigned long irq_state;
1799 oz_trace2(OZ_TRACE_URB, "%lu: oz_hcd_urb_dequeue(%p)\n", jiffies, urb);
1800 urbl = oz_alloc_urb_link();
1801 if (unlikely(urbl == 0))
1802 return -ENOMEM;
1803 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1804	/* The following function checks that the urb is still in the queue
1805 * maintained by the core and that the unlinked field is zero.
1806 * If both are true the function sets the unlinked field and returns
1807 * zero. Otherwise it returns an error.
1808 */
1809 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
1810	/* We have to check that we haven't already completed the urb or are
1811	 * about to complete it. When we complete an urb we set hcpriv to 0, so
1812	 * if that has already happened we don't put the urb in the cancel queue.
1813 */
1814 if ((rc == 0) && urb->hcpriv) {
1815 urbl->urb = urb;
1816 urbl->port = (struct oz_port *)urb->hcpriv;
1817 urbl->ep_num = usb_pipeendpoint(urb->pipe);
1818 if (usb_pipein(urb->pipe))
1819 urbl->ep_num |= USB_DIR_IN;
1820 list_add_tail(&urbl->link, &ozhcd->urb_cancel_list);
1821 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1822 tasklet_schedule(&g_urb_cancel_tasklet);
1823 } else {
1824 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1825 oz_free_urb_link(urbl);
1826 }
1827 return rc;
1828}
1829/*------------------------------------------------------------------------------
1830 * Context: unknown
1831 */
1832static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
1833 struct usb_host_endpoint *ep)
1834{
1835 oz_trace("oz_hcd_endpoint_disable\n");
1836}
1837/*------------------------------------------------------------------------------
1838 * Context: unknown
1839 */
1840static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
1841 struct usb_host_endpoint *ep)
1842{
1843 oz_trace("oz_hcd_endpoint_reset\n");
1844}
1845/*------------------------------------------------------------------------------
1846 * Context: unknown
1847 */
1848static int oz_hcd_get_frame_number(struct usb_hcd *hcd)
1849{
1850 oz_trace("oz_hcd_get_frame_number\n");
1851 return oz_usb_get_frame_number();
1852}
1853/*------------------------------------------------------------------------------
1854 * Context: softirq
1855 * This is called as a consequence of us calling usb_hcd_poll_rh_status() and we
1856 * always do that in softirq context.
1857 */
1858static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
1859{
1860 struct oz_hcd *ozhcd = oz_hcd_private(hcd);
1861 int i;
1862
1863 oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_status_data()\n");
1864 buf[0] = 0;
1865
1866 spin_lock_bh(&ozhcd->hcd_lock);
1867 for (i = 0; i < OZ_NB_PORTS; i++) {
1868 if (ozhcd->ports[i].flags & OZ_PORT_F_CHANGED) {
1869 oz_trace2(OZ_TRACE_HUB, "Port %d changed\n", i);
1870 ozhcd->ports[i].flags &= ~OZ_PORT_F_CHANGED;
1871 buf[0] |= 1<<(i+1);
1872 }
1873 }
1874 spin_unlock_bh(&ozhcd->hcd_lock);
1875 return buf[0] ? 1 : 0;
1876}
1877/*------------------------------------------------------------------------------
1878 * Context: process
1879 */
1880static void oz_get_hub_descriptor(struct usb_hcd *hcd,
1881 struct usb_hub_descriptor *desc)
1882{
1883 oz_trace2(OZ_TRACE_HUB, "GetHubDescriptor\n");
1884 memset(desc, 0, sizeof(*desc));
1885 desc->bDescriptorType = 0x29;
1886 desc->bDescLength = 9;
1887 desc->wHubCharacteristics = (__force __u16)
1888 __constant_cpu_to_le16(0x0001);
1889 desc->bNbrPorts = OZ_NB_PORTS;
1890}
1891/*------------------------------------------------------------------------------
1892 * Context: process
1893 */
1894static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
1895{
1896 struct oz_port *port;
1897 int err = 0;
1898 u8 port_id = (u8)windex;
1899 struct oz_hcd *ozhcd = oz_hcd_private(hcd);
1900 unsigned set_bits = 0;
1901 unsigned clear_bits = 0;
1902 oz_trace2(OZ_TRACE_HUB, "SetPortFeature\n");
1903 if ((port_id < 1) || (port_id > OZ_NB_PORTS))
1904 return -EPIPE;
1905 port = &ozhcd->ports[port_id-1];
1906 switch (wvalue) {
1907 case USB_PORT_FEAT_CONNECTION:
1908 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_CONNECTION\n");
1909 break;
1910 case USB_PORT_FEAT_ENABLE:
1911 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_ENABLE\n");
1912 break;
1913 case USB_PORT_FEAT_SUSPEND:
1914 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_SUSPEND\n");
1915 break;
1916 case USB_PORT_FEAT_OVER_CURRENT:
1917 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
1918 break;
1919 case USB_PORT_FEAT_RESET:
1920 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_RESET\n");
1921 set_bits = USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET<<16);
1922 clear_bits = USB_PORT_STAT_RESET;
1923 ozhcd->ports[port_id-1].bus_addr = 0;
1924 break;
1925 case USB_PORT_FEAT_POWER:
1926 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_POWER\n");
1927 set_bits |= USB_PORT_STAT_POWER;
1928 break;
1929 case USB_PORT_FEAT_LOWSPEED:
1930 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_LOWSPEED\n");
1931 break;
1932 case USB_PORT_FEAT_C_CONNECTION:
1933 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_CONNECTION\n");
1934 break;
1935 case USB_PORT_FEAT_C_ENABLE:
1936 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_ENABLE\n");
1937 break;
1938 case USB_PORT_FEAT_C_SUSPEND:
1939 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_SUSPEND\n");
1940 break;
1941 case USB_PORT_FEAT_C_OVER_CURRENT:
1942 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1943 break;
1944 case USB_PORT_FEAT_C_RESET:
1945 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_RESET\n");
1946 break;
1947 case USB_PORT_FEAT_TEST:
1948 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_TEST\n");
1949 break;
1950 case USB_PORT_FEAT_INDICATOR:
1951 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_INDICATOR\n");
1952 break;
1953 default:
1954 oz_trace2(OZ_TRACE_HUB, "Other %d\n", wvalue);
1955 break;
1956 }
1957 if (set_bits || clear_bits) {
1958 spin_lock_bh(&port->port_lock);
1959 port->status &= ~clear_bits;
1960 port->status |= set_bits;
1961 spin_unlock_bh(&port->port_lock);
1962 }
1963 oz_trace2(OZ_TRACE_HUB, "Port[%d] status = 0x%x\n", port_id,
1964 port->status);
1965 return err;
1966}
1967/*------------------------------------------------------------------------------
1968 * Context: process
1969 */
1970static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
1971{
1972 struct oz_port *port;
1973 int err = 0;
1974 u8 port_id = (u8)windex;
1975 struct oz_hcd *ozhcd = oz_hcd_private(hcd);
1976 unsigned clear_bits = 0;
1977 oz_trace2(OZ_TRACE_HUB, "ClearPortFeature\n");
1978 if ((port_id < 1) || (port_id > OZ_NB_PORTS))
1979 return -EPIPE;
1980 port = &ozhcd->ports[port_id-1];
1981 switch (wvalue) {
1982 case USB_PORT_FEAT_CONNECTION:
1983 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_CONNECTION\n");
1984 break;
1985 case USB_PORT_FEAT_ENABLE:
1986 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_ENABLE\n");
1987 clear_bits = USB_PORT_STAT_ENABLE;
1988 break;
1989 case USB_PORT_FEAT_SUSPEND:
1990 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_SUSPEND\n");
1991 break;
1992 case USB_PORT_FEAT_OVER_CURRENT:
1993 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
1994 break;
1995 case USB_PORT_FEAT_RESET:
1996 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_RESET\n");
1997 break;
1998 case USB_PORT_FEAT_POWER:
1999 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_POWER\n");
2000 clear_bits |= USB_PORT_STAT_POWER;
2001 break;
2002 case USB_PORT_FEAT_LOWSPEED:
2003 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_LOWSPEED\n");
2004 break;
2005 case USB_PORT_FEAT_C_CONNECTION:
2006 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_CONNECTION\n");
2007 clear_bits = (USB_PORT_STAT_C_CONNECTION << 16);
2008 break;
2009 case USB_PORT_FEAT_C_ENABLE:
2010 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_ENABLE\n");
2011 clear_bits = (USB_PORT_STAT_C_ENABLE << 16);
2012 break;
2013 case USB_PORT_FEAT_C_SUSPEND:
2014 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_SUSPEND\n");
2015 break;
2016 case USB_PORT_FEAT_C_OVER_CURRENT:
2017 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
2018 break;
2019 case USB_PORT_FEAT_C_RESET:
2020 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_RESET\n");
2021		clear_bits = (USB_PORT_STAT_C_RESET << 16);
2022 break;
2023 case USB_PORT_FEAT_TEST:
2024 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_TEST\n");
2025 break;
2026 case USB_PORT_FEAT_INDICATOR:
2027 oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_INDICATOR\n");
2028 break;
2029 default:
2030 oz_trace2(OZ_TRACE_HUB, "Other %d\n", wvalue);
2031 break;
2032 }
2033 if (clear_bits) {
2034 spin_lock_bh(&port->port_lock);
2035 port->status &= ~clear_bits;
2036 spin_unlock_bh(&port->port_lock);
2037 }
2038 oz_trace2(OZ_TRACE_HUB, "Port[%d] status = 0x%x\n", port_id,
2039 ozhcd->ports[port_id-1].status);
2040 return err;
2041}
2042/*------------------------------------------------------------------------------
2043 * Context: process
2044 */
2045static int oz_get_port_status(struct usb_hcd *hcd, u16 windex, char *buf)
2046{
2047 struct oz_hcd *ozhcd;
2048 u32 status = 0;
2049 if ((windex < 1) || (windex > OZ_NB_PORTS))
2050 return -EPIPE;
2051 ozhcd = oz_hcd_private(hcd);
2052 oz_trace2(OZ_TRACE_HUB, "GetPortStatus windex = %d\n", windex);
2053 status = ozhcd->ports[windex-1].status;
2054 put_unaligned(cpu_to_le32(status), (__le32 *)buf);
2055 oz_trace2(OZ_TRACE_HUB, "Port[%d] status = %x\n", windex, status);
2056 return 0;
2057}
2058/*------------------------------------------------------------------------------
2059 * Context: process
2060 */
2061static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
2062 u16 windex, char *buf, u16 wlength)
2063{
2064 int err = 0;
2065 oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_control()\n");
2066 switch (req_type) {
2067 case ClearHubFeature:
2068 oz_trace2(OZ_TRACE_HUB, "ClearHubFeature: %d\n", req_type);
2069 break;
2070 case ClearPortFeature:
2071 err = oz_clear_port_feature(hcd, wvalue, windex);
2072 break;
2073 case GetHubDescriptor:
2074 oz_get_hub_descriptor(hcd, (struct usb_hub_descriptor *)buf);
2075 break;
2076 case GetHubStatus:
2077 oz_trace2(OZ_TRACE_HUB, "GetHubStatus: req_type = 0x%x\n",
2078 req_type);
2079 put_unaligned(__constant_cpu_to_le32(0), (__le32 *)buf);
2080 break;
2081 case GetPortStatus:
2082 err = oz_get_port_status(hcd, windex, buf);
2083 break;
2084 case SetHubFeature:
2085 oz_trace2(OZ_TRACE_HUB, "SetHubFeature: %d\n", req_type);
2086 break;
2087 case SetPortFeature:
2088 err = oz_set_port_feature(hcd, wvalue, windex);
2089 break;
2090 default:
2091 oz_trace2(OZ_TRACE_HUB, "Other: %d\n", req_type);
2092 break;
2093 }
2094 return err;
2095}
2096/*------------------------------------------------------------------------------
2097 * Context: process
2098 */
2099static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
2100{
2101 struct oz_hcd *ozhcd;
2102	oz_trace2(OZ_TRACE_HUB, "oz_hcd_bus_suspend()\n");
2103 ozhcd = oz_hcd_private(hcd);
2104 spin_lock_bh(&ozhcd->hcd_lock);
2105 hcd->state = HC_STATE_SUSPENDED;
2106 ozhcd->flags |= OZ_HDC_F_SUSPENDED;
2107 spin_unlock_bh(&ozhcd->hcd_lock);
2108 return 0;
2109}
2110/*------------------------------------------------------------------------------
2111 * Context: process
2112 */
2113static int oz_hcd_bus_resume(struct usb_hcd *hcd)
2114{
2115 struct oz_hcd *ozhcd;
2116	oz_trace2(OZ_TRACE_HUB, "oz_hcd_bus_resume()\n");
2117 ozhcd = oz_hcd_private(hcd);
2118 spin_lock_bh(&ozhcd->hcd_lock);
2119 ozhcd->flags &= ~OZ_HDC_F_SUSPENDED;
2120 hcd->state = HC_STATE_RUNNING;
2121 spin_unlock_bh(&ozhcd->hcd_lock);
2122 return 0;
2123}
2124/*------------------------------------------------------------------------------
2125 */
2126static void oz_plat_shutdown(struct platform_device *dev)
2127{
2128 oz_trace("oz_plat_shutdown()\n");
2129}
2130/*------------------------------------------------------------------------------
2131 * Context: process
2132 */
2133static int oz_plat_probe(struct platform_device *dev)
2134{
2135 int i;
2136 int err;
2137 struct usb_hcd *hcd;
2138 struct oz_hcd *ozhcd;
2139 oz_trace("oz_plat_probe()\n");
2140 hcd = usb_create_hcd(&g_oz_hc_drv, &dev->dev, dev_name(&dev->dev));
2141 if (hcd == 0) {
2142		oz_trace("Failed to create hcd object\n");
2143 return -ENOMEM;
2144 }
2145 ozhcd = oz_hcd_private(hcd);
2146 memset(ozhcd, 0, sizeof(*ozhcd));
2147 INIT_LIST_HEAD(&ozhcd->urb_pending_list);
2148 INIT_LIST_HEAD(&ozhcd->urb_cancel_list);
2149 INIT_LIST_HEAD(&ozhcd->orphanage);
2150 ozhcd->hcd = hcd;
2151 ozhcd->conn_port = -1;
2152 spin_lock_init(&ozhcd->hcd_lock);
2153 for (i = 0; i < OZ_NB_PORTS; i++) {
2154 struct oz_port *port = &ozhcd->ports[i];
2155 port->ozhcd = ozhcd;
2156 port->flags = 0;
2157 port->status = 0;
2158 port->bus_addr = 0xff;
2159 spin_lock_init(&port->port_lock);
2160 }
2161 err = usb_add_hcd(hcd, 0, 0);
2162 if (err) {
2163		oz_trace("Failed to add hcd object\n");
2164 usb_put_hcd(hcd);
2165 return -1;
2166 }
2167 spin_lock_bh(&g_hcdlock);
2168 g_ozhcd = ozhcd;
2169 spin_unlock_bh(&g_hcdlock);
2170 return 0;
2171}
2172/*------------------------------------------------------------------------------
2173 * Context: unknown
2174 */
2175static int oz_plat_remove(struct platform_device *dev)
2176{
2177 struct usb_hcd *hcd = platform_get_drvdata(dev);
2178 struct oz_hcd *ozhcd;
2179 oz_trace("oz_plat_remove()\n");
2180 if (hcd == 0)
2181 return -1;
2182 ozhcd = oz_hcd_private(hcd);
2183 spin_lock_bh(&g_hcdlock);
2184 if (ozhcd == g_ozhcd)
2185 g_ozhcd = 0;
2186 spin_unlock_bh(&g_hcdlock);
2187 oz_trace("Clearing orphanage\n");
2188 oz_hcd_clear_orphanage(ozhcd, -EPIPE);
2189 oz_trace("Removing hcd\n");
2190 usb_remove_hcd(hcd);
2191 usb_put_hcd(hcd);
2192 oz_empty_link_pool();
2193 return 0;
2194}
2195/*------------------------------------------------------------------------------
2196 * Context: unknown
2197 */
2198static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg)
2199{
2200 oz_trace("oz_plat_suspend()\n");
2201 return 0;
2202}
2203/*------------------------------------------------------------------------------
2204 * Context: unknown
2205 */
2206static int oz_plat_resume(struct platform_device *dev)
2207{
2208 oz_trace("oz_plat_resume()\n");
2209 return 0;
2210}
2211/*------------------------------------------------------------------------------
2212 * Context: process
2213 */
2214int oz_hcd_init(void)
2215{
2216 int err;
2217 if (usb_disabled())
2218 return -ENODEV;
2219 tasklet_init(&g_urb_process_tasklet, oz_urb_process_tasklet, 0);
2220 tasklet_init(&g_urb_cancel_tasklet, oz_urb_cancel_tasklet, 0);
2221 err = platform_driver_register(&g_oz_plat_drv);
2222 oz_trace("platform_driver_register() returned %d\n", err);
2223 if (err)
2224 goto error;
2225 g_plat_dev = platform_device_alloc(OZ_PLAT_DEV_NAME, -1);
2226 if (g_plat_dev == 0) {
2227 err = -ENOMEM;
2228 goto error1;
2229 }
2230 oz_trace("platform_device_alloc() succeeded\n");
2231 err = platform_device_add(g_plat_dev);
2232 if (err)
2233 goto error2;
2234 oz_trace("platform_device_add() succeeded\n");
2235 return 0;
2236error2:
2237 platform_device_put(g_plat_dev);
2238error1:
2239 platform_driver_unregister(&g_oz_plat_drv);
2240error:
2241 tasklet_disable(&g_urb_process_tasklet);
2242 tasklet_disable(&g_urb_cancel_tasklet);
2243 oz_trace("oz_hcd_init() failed %d\n", err);
2244 return err;
2245}
2246/*------------------------------------------------------------------------------
2247 * Context: process
2248 */
2249void oz_hcd_term(void)
2250{
2251 tasklet_disable(&g_urb_process_tasklet);
2252 tasklet_disable(&g_urb_cancel_tasklet);
2253 platform_device_unregister(g_plat_dev);
2254 platform_driver_unregister(&g_oz_plat_drv);
2255 oz_trace("Pending urbs:%d\n", atomic_read(&g_pending_urbs));
2256}
diff --git a/drivers/staging/ozwpan/ozhcd.h b/drivers/staging/ozwpan/ozhcd.h
new file mode 100644
index 000000000000..9b30dfd09973
--- /dev/null
+++ b/drivers/staging/ozwpan/ozhcd.h
@@ -0,0 +1,15 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * ---------------------------------------------------------------------------*/
5#ifndef _OZHCD_H
6#define _OZHCD_H
7
8int oz_hcd_init(void);
9void oz_hcd_term(void);
10void *oz_hcd_pd_arrived(void *ctx);
11void oz_hcd_pd_departed(void *ctx);
12void oz_hcd_pd_reset(void *hpd, void *hport);
13
14#endif /* _OZHCD_H */
15
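A minimal usage sketch of the interface above (illustrative only, not part of
the patch; the caller name and the pairing of the handles are assumptions read
off the declarations):

/* Hypothetical protocol-side caller: oz_hcd_pd_arrived() is assumed to
 * return an opaque host-port handle that is handed back to
 * oz_hcd_pd_departed() when the peripheral device (PD) goes away.
 */
static void *example_hport;

static void example_pd_event(void *pd_ctx, int arrived)
{
	if (arrived)
		example_hport = oz_hcd_pd_arrived(pd_ctx);
	else if (example_hport)
		oz_hcd_pd_departed(example_hport);
}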
diff --git a/drivers/staging/ozwpan/ozmain.c b/drivers/staging/ozwpan/ozmain.c
new file mode 100644
index 000000000000..aaf2ccc0bcfb
--- /dev/null
+++ b/drivers/staging/ozwpan/ozmain.c
@@ -0,0 +1,58 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#include <linux/init.h>
7#include <linux/module.h>
8#include <linux/timer.h>
9#include <linux/sched.h>
10#include <linux/netdevice.h>
11#include <linux/errno.h>
12#include <linux/ieee80211.h>
13#include "ozconfig.h"
14#include "ozpd.h"
15#include "ozproto.h"
16#include "ozcdev.h"
17#include "oztrace.h"
18#include "ozevent.h"
19/*------------------------------------------------------------------------------
20 * The name of the 802.11 MAC device to bind to. The empty string is the
21 * default, but a value can be supplied as a parameter to the module. An empty
22 * string means bind to nothing; '*' means bind to all netcards, including
23 * non-802.11 netcards. Bindings can be added later using an IOCTL.
24 */
25char *g_net_dev = "";
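/* Assumed usage example: loading the module with
 *	insmod ozwpan.ko g_net_dev=wlan0
 * binds the protocol only to wlan0, while g_net_dev='*' binds it to every
 * netcard (see the module_param() declaration at the bottom of this file).
 */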
26/*------------------------------------------------------------------------------
27 * Context: process
28 */
29static int __init ozwpan_init(void)
30{
31 oz_event_init();
32 oz_cdev_register();
33 oz_protocol_init(g_net_dev);
34 oz_app_enable(OZ_APPID_USB, 1);
35 oz_apps_init();
36 return 0;
37}
38/*------------------------------------------------------------------------------
39 * Context: process
40 */
41static void __exit ozwpan_exit(void)
42{
43 oz_protocol_term();
44 oz_apps_term();
45 oz_cdev_deregister();
46 oz_event_term();
47}
48/*------------------------------------------------------------------------------
49 */
50module_param(g_net_dev, charp, S_IRUGO);
51module_init(ozwpan_init);
52module_exit(ozwpan_exit);
53
54MODULE_AUTHOR("Chris Kelly");
55MODULE_DESCRIPTION("Ozmo Devices USB over WiFi hcd driver");
56MODULE_VERSION("1.0.8");
57MODULE_LICENSE("GPL");
58
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
new file mode 100644
index 000000000000..2b45d3d1800c
--- /dev/null
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -0,0 +1,832 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#include <linux/init.h>
7#include <linux/module.h>
8#include <linux/timer.h>
9#include <linux/sched.h>
10#include <linux/netdevice.h>
11#include <linux/errno.h>
12#include "ozconfig.h"
13#include "ozprotocol.h"
14#include "ozeltbuf.h"
15#include "ozpd.h"
16#include "ozproto.h"
17#include "oztrace.h"
18#include "ozevent.h"
19#include "ozcdev.h"
20#include "ozusbsvc.h"
21#include <asm/unaligned.h>
22#include <linux/uaccess.h>
23#include <net/psnap.h>
24/*------------------------------------------------------------------------------
25 */
26#define OZ_MAX_TX_POOL_SIZE 6
27/* Maximum number of uncompleted isoc frames that can be pending.
28 */
29#define OZ_MAX_SUBMITTED_ISOC 16
30/*------------------------------------------------------------------------------
31 */
32static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
33static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
34static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
35static int oz_send_isoc_frame(struct oz_pd *pd);
36static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
37static void oz_isoc_stream_free(struct oz_isoc_stream *st);
38static int oz_send_next_queued_frame(struct oz_pd *pd, int *more_data);
39static void oz_isoc_destructor(struct sk_buff *skb);
40static int oz_def_app_init(void);
41static void oz_def_app_term(void);
42static int oz_def_app_start(struct oz_pd *pd, int resume);
43static void oz_def_app_stop(struct oz_pd *pd, int pause);
44static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
45/*------------------------------------------------------------------------------
46 * Counts the uncompleted isoc frames submitted to the netcard.
47 */
48static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
49/* Application handler functions.
50 */
51static struct oz_app_if g_app_if[OZ_APPID_MAX] = {
52 {oz_usb_init,
53 oz_usb_term,
54 oz_usb_start,
55 oz_usb_stop,
56 oz_usb_rx,
57 oz_usb_heartbeat,
58 oz_usb_farewell,
59 OZ_APPID_USB},
60
61 {oz_def_app_init,
62 oz_def_app_term,
63 oz_def_app_start,
64 oz_def_app_stop,
65 oz_def_app_rx,
66 0,
67 0,
68 OZ_APPID_UNUSED1},
69
70 {oz_def_app_init,
71 oz_def_app_term,
72 oz_def_app_start,
73 oz_def_app_stop,
74 oz_def_app_rx,
75 0,
76 0,
77 OZ_APPID_UNUSED2},
78
79 {oz_cdev_init,
80 oz_cdev_term,
81 oz_cdev_start,
82 oz_cdev_stop,
83 oz_cdev_rx,
84 0,
85 0,
86 OZ_APPID_SERIAL},
87};
88/*------------------------------------------------------------------------------
89 * Context: process
90 */
91static int oz_def_app_init(void)
92{
93 return 0;
94}
95/*------------------------------------------------------------------------------
96 * Context: process
97 */
98static void oz_def_app_term(void)
99{
100}
101/*------------------------------------------------------------------------------
102 * Context: softirq
103 */
104static int oz_def_app_start(struct oz_pd *pd, int resume)
105{
106 return 0;
107}
108/*------------------------------------------------------------------------------
109 * Context: softirq
110 */
111static void oz_def_app_stop(struct oz_pd *pd, int pause)
112{
113}
114/*------------------------------------------------------------------------------
115 * Context: softirq
116 */
117static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
118{
119}
120/*------------------------------------------------------------------------------
121 * Context: softirq or process
122 */
123void oz_pd_set_state(struct oz_pd *pd, unsigned state)
124{
125 pd->state = state;
126 oz_event_log(OZ_EVT_PD_STATE, 0, 0, 0, state);
127#ifdef WANT_TRACE
128 switch (state) {
129 case OZ_PD_S_IDLE:
130 oz_trace("PD State: OZ_PD_S_IDLE\n");
131 break;
132 case OZ_PD_S_CONNECTED:
133 oz_trace("PD State: OZ_PD_S_CONNECTED\n");
134 break;
135 case OZ_PD_S_STOPPED:
136 oz_trace("PD State: OZ_PD_S_STOPPED\n");
137 break;
138 case OZ_PD_S_SLEEP:
139 oz_trace("PD State: OZ_PD_S_SLEEP\n");
140 break;
141 }
142#endif /* WANT_TRACE */
143}
144/*------------------------------------------------------------------------------
145 * Context: softirq or process
146 */
147void oz_pd_get(struct oz_pd *pd)
148{
149 atomic_inc(&pd->ref_count);
150}
151/*------------------------------------------------------------------------------
152 * Context: softirq or process
153 */
154void oz_pd_put(struct oz_pd *pd)
155{
156 if (atomic_dec_and_test(&pd->ref_count))
157 oz_pd_destroy(pd);
158}
159/*------------------------------------------------------------------------------
160 * Context: softirq-serialized
161 */
162struct oz_pd *oz_pd_alloc(u8 *mac_addr)
163{
164 struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
165 if (pd) {
166 int i;
167 atomic_set(&pd->ref_count, 2);
168 for (i = 0; i < OZ_APPID_MAX; i++)
169 spin_lock_init(&pd->app_lock[i]);
170 pd->last_rx_pkt_num = 0xffffffff;
171 oz_pd_set_state(pd, OZ_PD_S_IDLE);
172 pd->max_tx_size = OZ_MAX_TX_SIZE;
173 memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
174		if (0 != oz_elt_buf_init(&pd->elt_buff)) {
175			kfree(pd);
176			return 0;
177		}
178 spin_lock_init(&pd->tx_frame_lock);
179 INIT_LIST_HEAD(&pd->tx_queue);
180 INIT_LIST_HEAD(&pd->farewell_list);
181 pd->last_sent_frame = &pd->tx_queue;
182 spin_lock_init(&pd->stream_lock);
183 INIT_LIST_HEAD(&pd->stream_list);
184 }
185 return pd;
186}
187/*------------------------------------------------------------------------------
188 * Context: softirq or process
189 */
190void oz_pd_destroy(struct oz_pd *pd)
191{
192 struct list_head *e;
193 struct oz_tx_frame *f;
194 struct oz_isoc_stream *st;
195 struct oz_farewell *fwell;
196 oz_trace("Destroying PD\n");
197 /* Delete any streams.
198 */
199 e = pd->stream_list.next;
200 while (e != &pd->stream_list) {
201 st = container_of(e, struct oz_isoc_stream, link);
202 e = e->next;
203 oz_isoc_stream_free(st);
204 }
205 /* Free any queued tx frames.
206 */
207 e = pd->tx_queue.next;
208 while (e != &pd->tx_queue) {
209 f = container_of(e, struct oz_tx_frame, link);
210 e = e->next;
211 oz_retire_frame(pd, f);
212 }
213 oz_elt_buf_term(&pd->elt_buff);
214 /* Free any farewells.
215 */
216 e = pd->farewell_list.next;
217 while (e != &pd->farewell_list) {
218 fwell = container_of(e, struct oz_farewell, link);
219 e = e->next;
220 kfree(fwell);
221 }
222 /* Deallocate all frames in tx pool.
223 */
224 while (pd->tx_pool) {
225 e = pd->tx_pool;
226 pd->tx_pool = e->next;
227 kfree(container_of(e, struct oz_tx_frame, link));
228 }
229 if (pd->net_dev)
230 dev_put(pd->net_dev);
231 kfree(pd);
232}
233/*------------------------------------------------------------------------------
234 * Context: softirq-serialized
235 */
236int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
237{
238 struct oz_app_if *ai;
239 int rc = 0;
240 oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
241 for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
242 if (apps & (1<<ai->app_id)) {
243 if (ai->start(pd, resume)) {
244 rc = -1;
245				oz_trace("Unable to start service %d\n",
246 ai->app_id);
247 break;
248 }
249 oz_polling_lock_bh();
250 pd->total_apps |= (1<<ai->app_id);
251 if (resume)
252 pd->paused_apps &= ~(1<<ai->app_id);
253 oz_polling_unlock_bh();
254 }
255 }
256 return rc;
257}
258/*------------------------------------------------------------------------------
259 * Context: softirq or process
260 */
261void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
262{
263 struct oz_app_if *ai;
264	oz_trace("oz_services_stop(0x%x) pause(%d)\n", apps, pause);
265 for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
266 if (apps & (1<<ai->app_id)) {
267 oz_polling_lock_bh();
268 if (pause) {
269 pd->paused_apps |= (1<<ai->app_id);
270 } else {
271 pd->total_apps &= ~(1<<ai->app_id);
272 pd->paused_apps &= ~(1<<ai->app_id);
273 }
274 oz_polling_unlock_bh();
275 ai->stop(pd, pause);
276 }
277 }
278}
279/*------------------------------------------------------------------------------
280 * Context: softirq
281 */
282void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
283{
284 struct oz_app_if *ai;
285 int more = 0;
286 for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
287 if (ai->heartbeat && (apps & (1<<ai->app_id))) {
288 if (ai->heartbeat(pd))
289 more = 1;
290 }
291 }
292 if (more)
293 oz_pd_request_heartbeat(pd);
294 if (pd->mode & OZ_F_ISOC_ANYTIME) {
295 int count = 8;
296 while (count-- && (oz_send_isoc_frame(pd) >= 0))
297 ;
298 }
299}
300/*------------------------------------------------------------------------------
301 * Context: softirq or process
302 */
303void oz_pd_stop(struct oz_pd *pd)
304{
305 u16 stop_apps = 0;
306 oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
307 oz_pd_indicate_farewells(pd);
308 oz_polling_lock_bh();
309 stop_apps = pd->total_apps;
310 pd->total_apps = 0;
311 pd->paused_apps = 0;
312 oz_polling_unlock_bh();
313 oz_services_stop(pd, stop_apps, 0);
314 oz_polling_lock_bh();
315 oz_pd_set_state(pd, OZ_PD_S_STOPPED);
316 /* Remove from PD list.*/
317 list_del(&pd->link);
318 oz_polling_unlock_bh();
319 oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
320 oz_timer_delete(pd, 0);
321 oz_pd_put(pd);
322}
323/*------------------------------------------------------------------------------
324 * Context: softirq
325 */
326int oz_pd_sleep(struct oz_pd *pd)
327{
328 int do_stop = 0;
329 u16 stop_apps = 0;
330 oz_polling_lock_bh();
331 if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
332 oz_polling_unlock_bh();
333 return 0;
334 }
335 if (pd->keep_alive_j && pd->session_id) {
336 oz_pd_set_state(pd, OZ_PD_S_SLEEP);
337 pd->pulse_time_j = jiffies + pd->keep_alive_j;
338 oz_trace("Sleep Now %lu until %lu\n",
339 jiffies, pd->pulse_time_j);
340 } else {
341 do_stop = 1;
342 }
343 stop_apps = pd->total_apps;
344 oz_polling_unlock_bh();
345 if (do_stop) {
346 oz_pd_stop(pd);
347 } else {
348 oz_services_stop(pd, stop_apps, 1);
349 oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
350 }
351 return do_stop;
352}
353/*------------------------------------------------------------------------------
354 * Context: softirq
355 */
356static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
357{
358 struct oz_tx_frame *f = 0;
359 spin_lock_bh(&pd->tx_frame_lock);
360 if (pd->tx_pool) {
361 f = container_of(pd->tx_pool, struct oz_tx_frame, link);
362 pd->tx_pool = pd->tx_pool->next;
363 pd->tx_pool_count--;
364 }
365 spin_unlock_bh(&pd->tx_frame_lock);
366 if (f == 0)
367 f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
368 if (f) {
369 f->total_size = sizeof(struct oz_hdr);
370 INIT_LIST_HEAD(&f->link);
371 INIT_LIST_HEAD(&f->elt_list);
372 }
373 return f;
374}
375/*------------------------------------------------------------------------------
376 * Context: softirq or process
377 */
378static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
379{
380 spin_lock_bh(&pd->tx_frame_lock);
381 if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
382 f->link.next = pd->tx_pool;
383 pd->tx_pool = &f->link;
384 pd->tx_pool_count++;
385 f = 0;
386	}
387	/* If the pool is full, f remains set and is freed once the lock is
388	 * dropped below. */
389 spin_unlock_bh(&pd->tx_frame_lock);
390 if (f)
391 kfree(f);
392}
393/*------------------------------------------------------------------------------
394 * Context: softirq
395 */
396int oz_prepare_frame(struct oz_pd *pd, int empty)
397{
398 struct oz_tx_frame *f;
399 if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
400 return -1;
401 if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
402 return -1;
403 if (!empty && !oz_are_elts_available(&pd->elt_buff))
404 return -1;
405 f = oz_tx_frame_alloc(pd);
406 if (f == 0)
407 return -1;
408 f->hdr.control =
409 (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
410 ++pd->last_tx_pkt_num;
411 put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
412 if (empty == 0) {
413 oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
414 pd->max_tx_size, &f->elt_list);
415 }
416 spin_lock(&pd->tx_frame_lock);
417 list_add_tail(&f->link, &pd->tx_queue);
418 pd->nb_queued_frames++;
419 spin_unlock(&pd->tx_frame_lock);
420 return 0;
421}
422/*------------------------------------------------------------------------------
423 * Context: softirq-serialized
424 */
425static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
426{
427 struct sk_buff *skb = 0;
428 struct net_device *dev = pd->net_dev;
429 struct oz_hdr *oz_hdr;
430 struct oz_elt *elt;
431 struct list_head *e;
432 /* Allocate skb with enough space for the lower layers as well
433 * as the space we need.
434 */
435 skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
436 if (skb == 0)
437 return 0;
438 /* Reserve the head room for lower layers.
439 */
440 skb_reserve(skb, LL_RESERVED_SPACE(dev));
441 skb_reset_network_header(skb);
442 skb->dev = dev;
443 skb->protocol = htons(OZ_ETHERTYPE);
444 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
445 dev->dev_addr, skb->len) < 0)
446 goto fail;
447 /* Push the tail to the end of the area we are going to copy to.
448 */
449 oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
450 f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
451 memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
452 /* Copy the elements into the frame body.
453 */
454 elt = (struct oz_elt *)(oz_hdr+1);
455 for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
456 struct oz_elt_info *ei;
457 ei = container_of(e, struct oz_elt_info, link);
458 memcpy(elt, ei->data, ei->length);
459 elt = oz_next_elt(elt);
460 }
461 return skb;
462fail:
463 kfree_skb(skb);
464 return 0;
465}
466/*------------------------------------------------------------------------------
467 * Context: softirq or process
468 */
469static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
470{
471 struct list_head *e;
472 struct oz_elt_info *ei;
473 e = f->elt_list.next;
474 while (e != &f->elt_list) {
475 ei = container_of(e, struct oz_elt_info, link);
476 e = e->next;
477 list_del_init(&ei->link);
478 if (ei->callback)
479 ei->callback(pd, ei->context);
480 spin_lock_bh(&pd->elt_buff.lock);
481 oz_elt_info_free(&pd->elt_buff, ei);
482 spin_unlock_bh(&pd->elt_buff.lock);
483 }
484 oz_tx_frame_free(pd, f);
485 if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
486 oz_trim_elt_pool(&pd->elt_buff);
487}
488/*------------------------------------------------------------------------------
489 * Context: softirq-serialized
490 */
491static int oz_send_next_queued_frame(struct oz_pd *pd, int *more_data)
492{
493 struct sk_buff *skb;
494 struct oz_tx_frame *f;
495 struct list_head *e;
496 *more_data = 0;
497 spin_lock(&pd->tx_frame_lock);
498 e = pd->last_sent_frame->next;
499 if (e == &pd->tx_queue) {
500 spin_unlock(&pd->tx_frame_lock);
501 return -1;
502 }
503 pd->last_sent_frame = e;
504 if (e->next != &pd->tx_queue)
505 *more_data = 1;
506 f = container_of(e, struct oz_tx_frame, link);
507 skb = oz_build_frame(pd, f);
508 spin_unlock(&pd->tx_frame_lock);
509 oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
510 if (skb) {
511 oz_event_log(OZ_EVT_TX_FRAME,
512 0,
513 (((u16)f->hdr.control)<<8)|f->hdr.last_pkt_num,
514 0, f->hdr.pkt_num);
515 if (dev_queue_xmit(skb) < 0)
516 return -1;
517 }
518 return 0;
519}
520/*------------------------------------------------------------------------------
521 * Context: softirq-serialized
522 */
523void oz_send_queued_frames(struct oz_pd *pd, int backlog)
524{
525 int more;
526 if (backlog < OZ_MAX_QUEUED_FRAMES) {
527 if (oz_send_next_queued_frame(pd, &more) >= 0) {
528 while (more && oz_send_next_queued_frame(pd, &more))
529 ;
530 } else {
531 if (((pd->mode & OZ_F_ISOC_ANYTIME) == 0)
532 || (pd->isoc_sent == 0)) {
533 if (oz_prepare_frame(pd, 1) >= 0)
534 oz_send_next_queued_frame(pd, &more);
535 }
536 }
537 } else {
538 oz_send_next_queued_frame(pd, &more);
539 }
540}
541/*------------------------------------------------------------------------------
542 * Context: softirq
543 */
544static int oz_send_isoc_frame(struct oz_pd *pd)
545{
546 struct sk_buff *skb = 0;
547 struct net_device *dev = pd->net_dev;
548 struct oz_hdr *oz_hdr;
549 struct oz_elt *elt;
550 struct list_head *e;
551 struct list_head list;
552 int total_size = sizeof(struct oz_hdr);
553 INIT_LIST_HEAD(&list);
554
555 oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
556 pd->max_tx_size, &list);
557 if (list.next == &list)
558 return 0;
559 skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
560 if (skb == 0) {
561 oz_trace("Cannot alloc skb\n");
562 oz_elt_info_free_chain(&pd->elt_buff, &list);
563 return -1;
564 }
565 skb_reserve(skb, LL_RESERVED_SPACE(dev));
566 skb_reset_network_header(skb);
567 skb->dev = dev;
568 skb->protocol = htons(OZ_ETHERTYPE);
569 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
570 dev->dev_addr, skb->len) < 0) {
571 kfree_skb(skb);
572 return -1;
573 }
574 oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
575 oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
576 oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
577 elt = (struct oz_elt *)(oz_hdr+1);
578
579 for (e = list.next; e != &list; e = e->next) {
580 struct oz_elt_info *ei;
581 ei = container_of(e, struct oz_elt_info, link);
582 memcpy(elt, ei->data, ei->length);
583 elt = oz_next_elt(elt);
584 }
585 oz_event_log(OZ_EVT_TX_ISOC, 0, 0, 0, 0);
586 dev_queue_xmit(skb);
587 oz_elt_info_free_chain(&pd->elt_buff, &list);
588 return 0;
589}
590/*------------------------------------------------------------------------------
591 * Context: softirq-serialized
592 */
593void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
594{
595 struct list_head *e;
596 struct oz_tx_frame *f;
597 struct list_head *first = 0;
598 struct list_head *last = 0;
599 u8 diff;
600 u32 pkt_num;
601
602 spin_lock(&pd->tx_frame_lock);
603 e = pd->tx_queue.next;
604 while (e != &pd->tx_queue) {
605 f = container_of(e, struct oz_tx_frame, link);
606 pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
607 diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
608 if (diff > OZ_LAST_PN_HALF_CYCLE)
609 break;
610 if (first == 0)
611 first = e;
612 last = e;
613 e = e->next;
614 pd->nb_queued_frames--;
615 }
616 if (first) {
617 last->next->prev = &pd->tx_queue;
618 pd->tx_queue.next = last->next;
619 last->next = 0;
620 }
621 pd->last_sent_frame = &pd->tx_queue;
622 spin_unlock(&pd->tx_frame_lock);
623 while (first) {
624 f = container_of(first, struct oz_tx_frame, link);
625 first = first->next;
626 oz_retire_frame(pd, f);
627 }
628}
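/* Worked example of the window test above (constants assumed:
 * OZ_LAST_PN_MASK = 0x1f, OZ_LAST_PN_HALF_CYCLE = 16): with lpn = 2, a
 * queued frame with pkt_num 0x21 gives diff = (2 - 1) & 0x1f = 1, which is
 * within the half cycle and is retired, while pkt_num 3 gives
 * diff = (2 - 3) & 0x1f = 31 > 16 and stops the scan, so frames the PD has
 * not yet acknowledged survive across pkt_num wraparound.
 */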
629/*------------------------------------------------------------------------------
630 * Precondition: stream_lock must be held.
631 * Context: softirq
632 */
633static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
634{
635 struct list_head *e;
636 struct oz_isoc_stream *st;
637 list_for_each(e, &pd->stream_list) {
638 st = container_of(e, struct oz_isoc_stream, link);
639 if (st->ep_num == ep_num)
640 return st;
641 }
642 return 0;
643}
644/*------------------------------------------------------------------------------
645 * Context: softirq
646 */
647int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
648{
649 struct oz_isoc_stream *st =
650 kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
651 if (!st)
652 return -ENOMEM;
653 st->ep_num = ep_num;
654 spin_lock_bh(&pd->stream_lock);
655 if (!pd_stream_find(pd, ep_num)) {
656 list_add(&st->link, &pd->stream_list);
657 st = 0;
658 }
659 spin_unlock_bh(&pd->stream_lock);
660	kfree(st);
662 return 0;
663}
664/*------------------------------------------------------------------------------
665 * Context: softirq or process
666 */
667static void oz_isoc_stream_free(struct oz_isoc_stream *st)
668{
669 if (st->skb)
670 kfree_skb(st->skb);
671 kfree(st);
672}
673/*------------------------------------------------------------------------------
674 * Context: softirq
675 */
676int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
677{
678 struct oz_isoc_stream *st;
679 spin_lock_bh(&pd->stream_lock);
680 st = pd_stream_find(pd, ep_num);
681 if (st)
682 list_del(&st->link);
683 spin_unlock_bh(&pd->stream_lock);
684 if (st)
685 oz_isoc_stream_free(st);
686 return 0;
687}
688/*------------------------------------------------------------------------------
689 * Context: any
690 */
691static void oz_isoc_destructor(struct sk_buff *skb)
692{
693 atomic_dec(&g_submitted_isoc);
694 oz_event_log(OZ_EVT_TX_ISOC_DONE, atomic_read(&g_submitted_isoc),
695 0, skb, 0);
696}
697/*------------------------------------------------------------------------------
698 * Context: softirq
699 */
700int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len)
701{
702 struct net_device *dev = pd->net_dev;
703 struct oz_isoc_stream *st;
704 u8 nb_units = 0;
705 struct sk_buff *skb = 0;
706 struct oz_hdr *oz_hdr = 0;
707 int size = 0;
708 spin_lock_bh(&pd->stream_lock);
709 st = pd_stream_find(pd, ep_num);
710 if (st) {
711 skb = st->skb;
712 st->skb = 0;
713 nb_units = st->nb_units;
714 st->nb_units = 0;
715 oz_hdr = st->oz_hdr;
716 size = st->size;
717 }
718 spin_unlock_bh(&pd->stream_lock);
719 if (!st)
720 return 0;
721 if (!skb) {
722 /* Allocate enough space for max size frame. */
723 skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
724 GFP_ATOMIC);
725 if (skb == 0)
726 return 0;
727 /* Reserve the head room for lower layers. */
728 skb_reserve(skb, LL_RESERVED_SPACE(dev));
729 skb_reset_network_header(skb);
730 skb->dev = dev;
731 skb->protocol = htons(OZ_ETHERTYPE);
732 size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
733 oz_hdr = (struct oz_hdr *)skb_put(skb, size);
734 }
735 memcpy(skb_put(skb, len), data, len);
736 size += len;
737 if (++nb_units < pd->ms_per_isoc) {
738 spin_lock_bh(&pd->stream_lock);
739 st->skb = skb;
740 st->nb_units = nb_units;
741 st->oz_hdr = oz_hdr;
742 st->size = size;
743 spin_unlock_bh(&pd->stream_lock);
744 } else {
745 struct oz_hdr oz;
746 struct oz_isoc_large iso;
747 spin_lock_bh(&pd->stream_lock);
748 iso.frame_number = st->frame_num;
749 st->frame_num += nb_units;
750 spin_unlock_bh(&pd->stream_lock);
751 oz.control =
752 (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
753 oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
754 oz.pkt_num = 0;
755 iso.endpoint = ep_num;
756 iso.format = OZ_DATA_F_ISOC_LARGE;
757 iso.ms_data = nb_units;
758 memcpy(oz_hdr, &oz, sizeof(oz));
759 memcpy(oz_hdr+1, &iso, sizeof(iso));
760 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
761 dev->dev_addr, skb->len) < 0) {
762 kfree_skb(skb);
763 return -1;
764 }
765 if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
766 skb->destructor = oz_isoc_destructor;
767 atomic_inc(&g_submitted_isoc);
768 oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
769 skb, atomic_read(&g_submitted_isoc));
770 if (dev_queue_xmit(skb) < 0)
771 return -1;
772 } else {
773 oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
774 kfree_skb(skb);
775 }
776 }
777 return 0;
778}
779/*------------------------------------------------------------------------------
780 * Context: process
781 */
782void oz_apps_init(void)
783{
784 int i;
785 for (i = 0; i < OZ_APPID_MAX; i++)
786 if (g_app_if[i].init)
787 g_app_if[i].init();
788}
789/*------------------------------------------------------------------------------
790 * Context: process
791 */
792void oz_apps_term(void)
793{
794 int i;
795 /* Terminate all the apps. */
796 for (i = 0; i < OZ_APPID_MAX; i++)
797 if (g_app_if[i].term)
798 g_app_if[i].term();
799}
800/*------------------------------------------------------------------------------
801 * Context: softirq-serialized
802 */
803void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
804{
805 struct oz_app_if *ai;
806 if (app_id == 0 || app_id > OZ_APPID_MAX)
807 return;
808 ai = &g_app_if[app_id-1];
809 ai->rx(pd, elt);
810}
811/*------------------------------------------------------------------------------
812 * Context: softirq or process
813 */
814void oz_pd_indicate_farewells(struct oz_pd *pd)
815{
816 struct oz_farewell *f;
817 struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
818 while (1) {
819 oz_polling_lock_bh();
820 if (list_empty(&pd->farewell_list)) {
821 oz_polling_unlock_bh();
822 break;
823 }
824 f = list_first_entry(&pd->farewell_list,
825 struct oz_farewell, link);
826 list_del(&f->link);
827 oz_polling_unlock_bh();
828 if (ai->farewell)
829 ai->farewell(pd, f->ep_num, f->report, f->len);
830 kfree(f);
831 }
832}
diff --git a/drivers/staging/ozwpan/ozpd.h b/drivers/staging/ozwpan/ozpd.h
new file mode 100644
index 000000000000..afc77f0260f0
--- /dev/null
+++ b/drivers/staging/ozwpan/ozpd.h
@@ -0,0 +1,121 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#ifndef _OZPD_H_
7#define _OZPD_H_
8
9#include "ozeltbuf.h"
10
11/* PD state
12 */
13#define OZ_PD_S_IDLE 0x1
14#define OZ_PD_S_CONNECTED 0x2
15#define OZ_PD_S_SLEEP 0x4
16#define OZ_PD_S_STOPPED 0x8
17
18/* Timer event types.
19 */
20#define OZ_TIMER_TOUT 1
21#define OZ_TIMER_HEARTBEAT 2
22#define OZ_TIMER_STOP 3
23
24/* Data structure that holds information on a frame for transmission. This is
25 * built when the frame is first transmitted and is used to rebuild the frame
26 * if a re-transmission is required.
27 */
28struct oz_tx_frame {
29 struct list_head link;
30 struct list_head elt_list;
31 struct oz_hdr hdr;
32 int total_size;
33};
34
35struct oz_isoc_stream {
36 struct list_head link;
37 u8 ep_num;
38 u8 frame_num;
39 u8 nb_units;
40 int size;
41 struct sk_buff *skb;
42 struct oz_hdr *oz_hdr;
43};
44
45struct oz_farewell {
46 struct list_head link;
47 u8 ep_num;
48 u8 index;
49	u8 len;
50	u8 report[1];
51};
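/* Note: report is the variable-length trailing member; oz_add_farewell in
 * ozproto.c allocates sizeof(struct oz_farewell) + len - 1 bytes so that len
 * bytes of report data occupy the single declared element plus the extra
 * space after the struct.
 */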
52
53/* Data structure that holds information on a specific peripheral device (PD).
54 */
55struct oz_pd {
56 struct list_head link;
57 atomic_t ref_count;
58 u8 mac_addr[ETH_ALEN];
59 unsigned state;
60 unsigned state_flags;
61 unsigned send_flags;
62 u16 total_apps;
63 u16 paused_apps;
64 u8 session_id;
65 u8 param_rsp_status;
66 u8 pd_info;
67 u8 isoc_sent;
68 u32 last_rx_pkt_num;
69 u32 last_tx_pkt_num;
70 u32 trigger_pkt_num;
71 unsigned long pulse_time_j;
72 unsigned long timeout_time_j;
73 unsigned long pulse_period_j;
74 unsigned long presleep_j;
75 unsigned long keep_alive_j;
76 unsigned long last_rx_time_j;
77 struct oz_elt_buf elt_buff;
78 void *app_ctx[OZ_APPID_MAX];
79 spinlock_t app_lock[OZ_APPID_MAX];
80 int max_tx_size;
81 u8 heartbeat_requested;
82 u8 mode;
83 u8 ms_per_isoc;
84 unsigned max_stream_buffering;
85 int nb_queued_frames;
86 struct list_head *tx_pool;
87 int tx_pool_count;
88 spinlock_t tx_frame_lock;
89 struct list_head *last_sent_frame;
90 struct list_head tx_queue;
91 struct list_head farewell_list;
92 spinlock_t stream_lock;
93 struct list_head stream_list;
94 struct net_device *net_dev;
95};
96
97#define OZ_MAX_QUEUED_FRAMES 4
98
99struct oz_pd *oz_pd_alloc(u8 *mac_addr);
100void oz_pd_destroy(struct oz_pd *pd);
101void oz_pd_get(struct oz_pd *pd);
102void oz_pd_put(struct oz_pd *pd);
103void oz_pd_set_state(struct oz_pd *pd, unsigned state);
104void oz_pd_indicate_farewells(struct oz_pd *pd);
105int oz_pd_sleep(struct oz_pd *pd);
106void oz_pd_stop(struct oz_pd *pd);
107void oz_pd_heartbeat(struct oz_pd *pd, u16 apps);
108int oz_services_start(struct oz_pd *pd, u16 apps, int resume);
109void oz_services_stop(struct oz_pd *pd, u16 apps, int pause);
110int oz_prepare_frame(struct oz_pd *pd, int empty);
111void oz_send_queued_frames(struct oz_pd *pd, int backlog);
112void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn);
113int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num);
114int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num);
115int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len);
116void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt);
117void oz_apps_init(void);
118void oz_apps_term(void);
119
120#endif /* _OZPD_H_ */
121
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
new file mode 100644
index 000000000000..ad857eeabbb7
--- /dev/null
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -0,0 +1,957 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#include <linux/init.h>
7#include <linux/module.h>
8#include <linux/timer.h>
9#include <linux/sched.h>
10#include <linux/netdevice.h>
11#include <linux/errno.h>
12#include <linux/ieee80211.h>
13#include "ozconfig.h"
14#include "ozprotocol.h"
15#include "ozeltbuf.h"
16#include "ozpd.h"
17#include "ozproto.h"
18#include "ozusbsvc.h"
19#include "oztrace.h"
20#include "ozappif.h"
21#include "ozevent.h"
22#include <asm/unaligned.h>
23#include <linux/uaccess.h>
24#include <net/psnap.h>
25/*------------------------------------------------------------------------------
26 */
27#define OZ_CF_CONN_SUCCESS 1
28#define OZ_CF_CONN_FAILURE 2
29
30#define OZ_DO_STOP 1
31#define OZ_DO_SLEEP 2
32
33/* States of the timer.
34 */
35#define OZ_TIMER_IDLE 0
36#define OZ_TIMER_SET 1
37#define OZ_TIMER_IN_HANDLER 2
38
39#define OZ_MAX_TIMER_POOL_SIZE 16
40
41/*------------------------------------------------------------------------------
42 */
43struct oz_binding {
44 struct packet_type ptype;
45 char name[OZ_MAX_BINDING_LEN];
46 struct oz_binding *next;
47};
48
49struct oz_timer {
50 struct list_head link;
51 struct oz_pd *pd;
52 unsigned long due_time;
53 int type;
54};
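/* Note (derived from oz_timer_add, oz_timer_delete and
 * oz_protocol_timer_start below): g_timer_list is kept sorted by due_time,
 * g_cur_timer is the entry the kernel timer g_timer is currently armed for,
 * and expired entries are recycled through g_timer_pool (bounded by
 * OZ_MAX_TIMER_POOL_SIZE) so the common case avoids kmalloc/kfree.
 */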
55/*------------------------------------------------------------------------------
56 * File-scope (static) variables.
57 */
58static DEFINE_SPINLOCK(g_polling_lock);
59static LIST_HEAD(g_pd_list);
60static struct oz_binding *g_binding;
61static DEFINE_SPINLOCK(g_binding_lock);
62static struct sk_buff_head g_rx_queue;
63static u8 g_session_id;
64static u16 g_apps = 0x1;
65static int g_processing_rx;
66static struct timer_list g_timer;
67static struct oz_timer *g_cur_timer;
68static struct list_head *g_timer_pool;
69static int g_timer_pool_count;
70static int g_timer_state = OZ_TIMER_IDLE;
71static LIST_HEAD(g_timer_list);
72/*------------------------------------------------------------------------------
73 */
74static void oz_protocol_timer_start(void);
75/*------------------------------------------------------------------------------
76 * Context: softirq-serialized
77 */
78static u8 oz_get_new_session_id(u8 exclude)
79{
80 if (++g_session_id == 0)
81 g_session_id = 1;
82 if (g_session_id == exclude) {
83 if (++g_session_id == 0)
84 g_session_id = 1;
85 }
86 return g_session_id;
87}
88/*------------------------------------------------------------------------------
89 * Context: softirq-serialized
90 */
91static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
92{
93 struct sk_buff *skb;
94 struct net_device *dev = pd->net_dev;
95 struct oz_hdr *oz_hdr;
96 struct oz_elt *elt;
97 struct oz_elt_connect_rsp *body;
98 int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
99 sizeof(struct oz_elt_connect_rsp);
100 skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
101 if (skb == 0)
102 return;
103 skb_reserve(skb, LL_RESERVED_SPACE(dev));
104 skb_reset_network_header(skb);
105 oz_hdr = (struct oz_hdr *)skb_put(skb, sz);
106 elt = (struct oz_elt *)(oz_hdr+1);
107 body = (struct oz_elt_connect_rsp *)(elt+1);
108 skb->dev = dev;
109 skb->protocol = htons(OZ_ETHERTYPE);
110 /* Fill in device header */
111 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
112 dev->dev_addr, skb->len) < 0) {
113 kfree_skb(skb);
114 return;
115 }
116 oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT);
117 oz_hdr->last_pkt_num = 0;
118 put_unaligned(0, &oz_hdr->pkt_num);
119 oz_event_log(OZ_EVT_CONNECT_RSP, 0, 0, 0, 0);
120 elt->type = OZ_ELT_CONNECT_RSP;
121 elt->length = sizeof(struct oz_elt_connect_rsp);
122 memset(body, 0, sizeof(struct oz_elt_connect_rsp));
123 body->status = status;
124 if (status == 0) {
125 body->mode = pd->mode;
126 body->session_id = pd->session_id;
127 put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
128 }
129	oz_trace("TX: OZ_ELT_CONNECT_RSP %d\n", status);
130	dev_queue_xmit(skb);
132}
133/*------------------------------------------------------------------------------
134 * Context: softirq-serialized
135 */
136static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
137{
138 unsigned long keep_alive = kalive & OZ_KALIVE_VALUE_MASK;
139
140 switch (kalive & OZ_KALIVE_TYPE_MASK) {
141 case OZ_KALIVE_SPECIAL:
142 pd->keep_alive_j =
143 oz_ms_to_jiffies(keep_alive * 1000*60*60*24*20);
144 break;
145 case OZ_KALIVE_SECS:
146 pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000);
147 break;
148 case OZ_KALIVE_MINS:
149 pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60);
150 break;
151 case OZ_KALIVE_HOURS:
152 pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60*60);
153 break;
154 default:
155 pd->keep_alive_j = 0;
156 }
157 oz_trace("Keepalive = %lu jiffies\n", pd->keep_alive_j);
158}
159/*------------------------------------------------------------------------------
160 * Context: softirq-serialized
161 */
162static void pd_set_presleep(struct oz_pd *pd, u8 presleep)
163{
164 if (presleep)
165 pd->presleep_j = oz_ms_to_jiffies(presleep*100);
166 else
167 pd->presleep_j = OZ_PRESLEEP_TOUT_J;
168 oz_trace("Presleep time = %lu jiffies\n", pd->presleep_j);
169}
170/*------------------------------------------------------------------------------
171 * Context: softirq-serialized
172 */
173static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
174 u8 *pd_addr, struct net_device *net_dev)
175{
176 struct oz_pd *pd;
177 struct oz_elt_connect_req *body =
178 (struct oz_elt_connect_req *)(elt+1);
179 u8 rsp_status = OZ_STATUS_SUCCESS;
180 u8 stop_needed = 0;
181 u16 new_apps = g_apps;
182 struct net_device *old_net_dev = 0;
183 struct oz_pd *free_pd = 0;
184 if (cur_pd) {
185 pd = cur_pd;
186 spin_lock_bh(&g_polling_lock);
187 } else {
188 struct oz_pd *pd2 = 0;
189 struct list_head *e;
190 pd = oz_pd_alloc(pd_addr);
191 if (pd == 0)
192 return 0;
193 pd->last_rx_time_j = jiffies;
194 spin_lock_bh(&g_polling_lock);
195 list_for_each(e, &g_pd_list) {
196 pd2 = container_of(e, struct oz_pd, link);
197 if (memcmp(pd2->mac_addr, pd_addr, ETH_ALEN) == 0) {
198 free_pd = pd;
199 pd = pd2;
200 break;
201 }
202 }
203 if (pd != pd2)
204 list_add_tail(&pd->link, &g_pd_list);
205 }
206 if (pd == 0) {
207 spin_unlock_bh(&g_polling_lock);
208 return 0;
209 }
210 if (pd->net_dev != net_dev) {
211 old_net_dev = pd->net_dev;
212 dev_hold(net_dev);
213 pd->net_dev = net_dev;
214 }
215 oz_trace("Host vendor: %d\n", body->host_vendor);
216 pd->max_tx_size = OZ_MAX_TX_SIZE;
217 pd->mode = body->mode;
218 pd->pd_info = body->pd_info;
219 if (pd->mode & OZ_F_ISOC_NO_ELTS) {
220 pd->mode |= OZ_F_ISOC_ANYTIME;
221 pd->ms_per_isoc = body->ms_per_isoc;
222 if (!pd->ms_per_isoc)
223 pd->ms_per_isoc = 4;
224 }
225 if (body->max_len_div16)
226 pd->max_tx_size = ((u16)body->max_len_div16)<<4;
227 oz_trace("Max frame:%u Ms per isoc:%u\n",
228 pd->max_tx_size, pd->ms_per_isoc);
229 pd->max_stream_buffering = 3*1024;
230 pd->timeout_time_j = jiffies + OZ_CONNECTION_TOUT_J;
231 pd->pulse_period_j = OZ_QUANTUM_J;
232 pd_set_presleep(pd, body->presleep);
233 pd_set_keepalive(pd, body->keep_alive);
234
235 new_apps &= le16_to_cpu(get_unaligned(&body->apps));
236 if ((new_apps & 0x1) && (body->session_id)) {
237 if (pd->session_id) {
238 if (pd->session_id != body->session_id) {
239 rsp_status = OZ_STATUS_SESSION_MISMATCH;
240 goto done;
241 }
242 } else {
243 new_apps &= ~0x1; /* Resume not permitted */
244 pd->session_id =
245 oz_get_new_session_id(body->session_id);
246 }
247 } else {
248 if (pd->session_id && !body->session_id) {
249 rsp_status = OZ_STATUS_SESSION_TEARDOWN;
250 stop_needed = 1;
251 } else {
252 new_apps &= ~0x1; /* Resume not permitted */
253 pd->session_id =
254 oz_get_new_session_id(body->session_id);
255 }
256 }
257done:
258 if (rsp_status == OZ_STATUS_SUCCESS) {
259 u16 start_apps = new_apps & ~pd->total_apps & ~0x1;
260 u16 stop_apps = pd->total_apps & ~new_apps & ~0x1;
261 u16 resume_apps = new_apps & pd->paused_apps & ~0x1;
262 spin_unlock_bh(&g_polling_lock);
263 oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
264 oz_timer_delete(pd, OZ_TIMER_STOP);
265 oz_trace("new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
266 new_apps, pd->total_apps, pd->paused_apps);
267 if (start_apps) {
268 if (oz_services_start(pd, start_apps, 0))
269 rsp_status = OZ_STATUS_TOO_MANY_PDS;
270 }
271 if (resume_apps)
272 if (oz_services_start(pd, resume_apps, 1))
273 rsp_status = OZ_STATUS_TOO_MANY_PDS;
274 if (stop_apps)
275 oz_services_stop(pd, stop_apps, 0);
276 oz_pd_request_heartbeat(pd);
277 } else {
278 spin_unlock_bh(&g_polling_lock);
279 }
280 oz_send_conn_rsp(pd, rsp_status);
281 if (rsp_status != OZ_STATUS_SUCCESS) {
282 if (stop_needed)
283 oz_pd_stop(pd);
284 oz_pd_put(pd);
285 pd = 0;
286 }
287 if (old_net_dev)
288 dev_put(old_net_dev);
289 if (free_pd)
290 oz_pd_destroy(free_pd);
291 return pd;
292}
293/*------------------------------------------------------------------------------
294 * Context: softirq-serialized
295 */
296static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
297 u8 *report, u8 len)
298{
299 struct oz_farewell *f;
300 struct oz_farewell *f2;
301 int found = 0;
302 f = kmalloc(sizeof(struct oz_farewell) + len - 1, GFP_ATOMIC);
303 if (!f)
304 return;
305 f->ep_num = ep_num;
306 f->index = index;
307	memcpy(f->report, report, len);
	f->len = len;
308 oz_trace("RX: Adding farewell report\n");
309 spin_lock(&g_polling_lock);
310 list_for_each_entry(f2, &pd->farewell_list, link) {
311 if ((f2->ep_num == ep_num) && (f2->index == index)) {
312 found = 1;
313 list_del(&f2->link);
314 break;
315 }
316 }
317 list_add_tail(&f->link, &pd->farewell_list);
318 spin_unlock(&g_polling_lock);
319 if (found)
320 kfree(f2);
321}
322/*------------------------------------------------------------------------------
323 * Context: softirq-serialized
324 */
325static void oz_rx_frame(struct sk_buff *skb)
326{
327 u8 *mac_hdr;
328 u8 *src_addr;
329 struct oz_elt *elt;
330 int length;
331 struct oz_pd *pd = 0;
332 struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
333 int dup = 0;
334 u32 pkt_num;
335
336 oz_event_log(OZ_EVT_RX_PROCESS, 0,
337 (((u16)oz_hdr->control)<<8)|oz_hdr->last_pkt_num,
338 0, oz_hdr->pkt_num);
339 oz_trace2(OZ_TRACE_RX_FRAMES,
340 "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
341 oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
342 mac_hdr = skb_mac_header(skb);
343	src_addr = &mac_hdr[ETH_ALEN];
344 length = skb->len;
345
346 /* Check the version field */
347 if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
348 oz_trace("Incorrect protocol version: %d\n",
349 oz_get_prot_ver(oz_hdr->control));
350 goto done;
351 }
352
353 pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num));
354
355 pd = oz_pd_find(src_addr);
356 if (pd) {
357 pd->last_rx_time_j = jiffies;
358 oz_timer_add(pd, OZ_TIMER_TOUT,
359 pd->last_rx_time_j + pd->presleep_j, 1);
360 if (pkt_num != pd->last_rx_pkt_num) {
361 pd->last_rx_pkt_num = pkt_num;
362 } else {
363 dup = 1;
364 oz_trace("Duplicate frame\n");
365 }
366 }
367
368 if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
369 pd->last_sent_frame = &pd->tx_queue;
370 if (oz_hdr->control & OZ_F_ACK) {
371 /* Retire completed frames */
372 oz_retire_tx_frames(pd, oz_hdr->last_pkt_num);
373 }
374 if ((oz_hdr->control & OZ_F_ACK_REQUESTED) &&
375 (pd->state == OZ_PD_S_CONNECTED)) {
376 int backlog = pd->nb_queued_frames;
377 pd->trigger_pkt_num = pkt_num;
378 /* Send queued frames */
379 while (oz_prepare_frame(pd, 0) >= 0)
380 ;
381 oz_send_queued_frames(pd, backlog);
382 }
383 }
384
385 length -= sizeof(struct oz_hdr);
386 elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr));
387
388 while (length >= sizeof(struct oz_elt)) {
389 length -= sizeof(struct oz_elt) + elt->length;
390 if (length < 0)
391 break;
392 switch (elt->type) {
393 case OZ_ELT_CONNECT_REQ:
394 oz_event_log(OZ_EVT_CONNECT_REQ, 0, 0, 0, 0);
395 oz_trace("RX: OZ_ELT_CONNECT_REQ\n");
396 pd = oz_connect_req(pd, elt, src_addr, skb->dev);
397 break;
398 case OZ_ELT_DISCONNECT:
399 oz_trace("RX: OZ_ELT_DISCONNECT\n");
400 if (pd)
401 oz_pd_sleep(pd);
402 break;
403 case OZ_ELT_UPDATE_PARAM_REQ: {
404 struct oz_elt_update_param *body =
405 (struct oz_elt_update_param *)(elt + 1);
406 oz_trace("RX: OZ_ELT_UPDATE_PARAM_REQ\n");
407 if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
408 spin_lock(&g_polling_lock);
409 pd_set_keepalive(pd, body->keepalive);
410 pd_set_presleep(pd, body->presleep);
411 spin_unlock(&g_polling_lock);
412 }
413 }
414 break;
415 case OZ_ELT_FAREWELL_REQ: {
416 struct oz_elt_farewell *body =
417 (struct oz_elt_farewell *)(elt + 1);
418 oz_trace("RX: OZ_ELT_FAREWELL_REQ\n");
419			if (pd)
420				oz_add_farewell(pd, body->ep_num,
					body->index, body->report,
421					elt->length + 1 - sizeof(*body));
422 }
423 break;
424 case OZ_ELT_APP_DATA:
425 if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
426 struct oz_app_hdr *app_hdr =
427 (struct oz_app_hdr *)(elt+1);
428 if (dup)
429 break;
430 oz_handle_app_elt(pd, app_hdr->app_id, elt);
431 }
432 break;
433 default:
434 oz_trace("RX: Unknown elt %02x\n", elt->type);
435 }
436 elt = oz_next_elt(elt);
437 }
438done:
439 if (pd)
440 oz_pd_put(pd);
441 consume_skb(skb);
442}
443/*------------------------------------------------------------------------------
444 * Context: process
445 */
446void oz_protocol_term(void)
447{
448 struct list_head *chain = 0;
449 del_timer_sync(&g_timer);
450 /* Walk the list of bindings and remove each one.
451 */
452 spin_lock_bh(&g_binding_lock);
453 while (g_binding) {
454 struct oz_binding *b = g_binding;
455 g_binding = b->next;
456 spin_unlock_bh(&g_binding_lock);
457 dev_remove_pack(&b->ptype);
458 if (b->ptype.dev)
459 dev_put(b->ptype.dev);
460 kfree(b);
461 spin_lock_bh(&g_binding_lock);
462 }
463 spin_unlock_bh(&g_binding_lock);
464 /* Walk the list of PDs and stop each one. This causes the PD to be
465 * removed from the list so we can just pull each one from the head
466 * of the list.
467 */
468 spin_lock_bh(&g_polling_lock);
469 while (!list_empty(&g_pd_list)) {
470 struct oz_pd *pd =
471 list_first_entry(&g_pd_list, struct oz_pd, link);
472 oz_pd_get(pd);
473 spin_unlock_bh(&g_polling_lock);
474 oz_pd_stop(pd);
475 oz_pd_put(pd);
476 spin_lock_bh(&g_polling_lock);
477 }
478 chain = g_timer_pool;
479 g_timer_pool = 0;
480 spin_unlock_bh(&g_polling_lock);
481 while (chain) {
482 struct oz_timer *t = container_of(chain, struct oz_timer, link);
483 chain = chain->next;
484 kfree(t);
485 }
486 oz_trace("Protocol stopped\n");
487}
488/*------------------------------------------------------------------------------
489 * Context: softirq
490 */
491static void oz_pd_handle_timer(struct oz_pd *pd, int type)
492{
493 switch (type) {
494 case OZ_TIMER_TOUT:
495 oz_pd_sleep(pd);
496 break;
497 case OZ_TIMER_STOP:
498 oz_pd_stop(pd);
499 break;
500 case OZ_TIMER_HEARTBEAT: {
501 u16 apps = 0;
502 spin_lock_bh(&g_polling_lock);
503 pd->heartbeat_requested = 0;
504 if (pd->state & OZ_PD_S_CONNECTED)
505 apps = pd->total_apps;
506 spin_unlock_bh(&g_polling_lock);
507 if (apps)
508 oz_pd_heartbeat(pd, apps);
509 }
510 break;
511 }
512}
513/*------------------------------------------------------------------------------
514 * Context: softirq
515 */
516static void oz_protocol_timer(unsigned long arg)
517{
518 struct oz_timer *t;
519 struct oz_timer *t2;
520 struct oz_pd *pd;
521 spin_lock_bh(&g_polling_lock);
522 if (!g_cur_timer) {
523 /* This happens if we remove the current timer but can't stop
524 * the timer from firing. In this case just get out.
525 */
526 oz_event_log(OZ_EVT_TIMER, 0, 0, 0, 0);
527 spin_unlock_bh(&g_polling_lock);
528 return;
529 }
530 g_timer_state = OZ_TIMER_IN_HANDLER;
531 t = g_cur_timer;
532 g_cur_timer = 0;
533 list_del(&t->link);
534 spin_unlock_bh(&g_polling_lock);
535 do {
536 pd = t->pd;
537 oz_event_log(OZ_EVT_TIMER, 0, t->type, 0, 0);
538 oz_pd_handle_timer(pd, t->type);
539 spin_lock_bh(&g_polling_lock);
540 if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
541 t->link.next = g_timer_pool;
542 g_timer_pool = &t->link;
543 g_timer_pool_count++;
544 t = 0;
545 }
546 if (!list_empty(&g_timer_list)) {
547 t2 = container_of(g_timer_list.next,
548 struct oz_timer, link);
549 if (time_before_eq(t2->due_time, jiffies))
550 list_del(&t2->link);
551 else
552 t2 = 0;
553 } else {
554 t2 = 0;
555 }
556 spin_unlock_bh(&g_polling_lock);
557 oz_pd_put(pd);
558		kfree(t);
560 t = t2;
561 } while (t);
562 g_timer_state = OZ_TIMER_IDLE;
563 oz_protocol_timer_start();
564}
565/*------------------------------------------------------------------------------
566 * Context: softirq
567 */
568static void oz_protocol_timer_start(void)
569{
570 spin_lock_bh(&g_polling_lock);
571 if (!list_empty(&g_timer_list)) {
572 g_cur_timer =
573 container_of(g_timer_list.next, struct oz_timer, link);
574 if (g_timer_state == OZ_TIMER_SET) {
575 oz_event_log(OZ_EVT_TIMER_CTRL, 3,
576 (u16)g_cur_timer->type, 0,
577 (unsigned)g_cur_timer->due_time);
578 mod_timer(&g_timer, g_cur_timer->due_time);
579 } else {
580 oz_event_log(OZ_EVT_TIMER_CTRL, 4,
581 (u16)g_cur_timer->type, 0,
582 (unsigned)g_cur_timer->due_time);
583 g_timer.expires = g_cur_timer->due_time;
584 g_timer.function = oz_protocol_timer;
585 g_timer.data = 0;
586 add_timer(&g_timer);
587 }
588 g_timer_state = OZ_TIMER_SET;
589 } else {
590 oz_trace("No queued timers\n");
591 }
592 spin_unlock_bh(&g_polling_lock);
593}
594/*------------------------------------------------------------------------------
595 * Context: softirq or process
596 */
597void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
598 int remove)
599{
600 struct list_head *e;
601 struct oz_timer *t = 0;
602 int restart_needed = 0;
603 oz_event_log(OZ_EVT_TIMER_CTRL, 1, (u16)type, 0, (unsigned)due_time);
604 spin_lock(&g_polling_lock);
605 if (remove) {
606 list_for_each(e, &g_timer_list) {
607 t = container_of(e, struct oz_timer, link);
608 if ((t->pd == pd) && (t->type == type)) {
609 if (g_cur_timer == t) {
610 restart_needed = 1;
611 g_cur_timer = 0;
612 }
613 list_del(e);
614 break;
615 }
616 t = 0;
617 }
618 }
619 if (!t) {
620 if (g_timer_pool) {
621 t = container_of(g_timer_pool, struct oz_timer, link);
622 g_timer_pool = g_timer_pool->next;
623 g_timer_pool_count--;
624 } else {
625 t = kmalloc(sizeof(struct oz_timer), GFP_ATOMIC);
626 }
627 if (t) {
628 t->pd = pd;
629 t->type = type;
630 oz_pd_get(pd);
631 }
632 }
633 if (t) {
634 struct oz_timer *t2;
635 t->due_time = due_time;
636 list_for_each(e, &g_timer_list) {
637 t2 = container_of(e, struct oz_timer, link);
638 if (time_before(due_time, t2->due_time)) {
639 if (t2 == g_cur_timer) {
640 g_cur_timer = 0;
641 restart_needed = 1;
642 }
643 break;
644 }
645 }
646 list_add_tail(&t->link, e);
647 }
648 if (g_timer_state == OZ_TIMER_IDLE)
649 restart_needed = 1;
650 else if (g_timer_state == OZ_TIMER_IN_HANDLER)
651 restart_needed = 0;
652 spin_unlock(&g_polling_lock);
653 if (restart_needed)
654 oz_protocol_timer_start();
655}
656/*------------------------------------------------------------------------------
657 * Context: softirq or process
658 */
659void oz_timer_delete(struct oz_pd *pd, int type)
660{
661 struct list_head *chain = 0;
662 struct oz_timer *t;
663 struct oz_timer *n;
664 int restart_needed = 0;
665 int release = 0;
666 oz_event_log(OZ_EVT_TIMER_CTRL, 2, (u16)type, 0, 0);
667 spin_lock(&g_polling_lock);
668 list_for_each_entry_safe(t, n, &g_timer_list, link) {
669 if ((t->pd == pd) && ((type == 0) || (t->type == type))) {
670 if (g_cur_timer == t) {
671 restart_needed = 1;
672 g_cur_timer = 0;
673 del_timer(&g_timer);
674 }
675 list_del(&t->link);
676 release++;
677 if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
678 t->link.next = g_timer_pool;
679 g_timer_pool = &t->link;
680 g_timer_pool_count++;
681 } else {
682 t->link.next = chain;
683 chain = &t->link;
684 }
685 if (type)
686 break;
687 }
688 }
689 if (g_timer_state == OZ_TIMER_IN_HANDLER)
690 restart_needed = 0;
691 else if (restart_needed)
692 g_timer_state = OZ_TIMER_IDLE;
693 spin_unlock(&g_polling_lock);
694 if (restart_needed)
695 oz_protocol_timer_start();
696 while (release--)
697 oz_pd_put(pd);
698 while (chain) {
699 t = container_of(chain, struct oz_timer, link);
700 chain = chain->next;
701 kfree(t);
702 }
703}
704/*------------------------------------------------------------------------------
705 * Context: softirq or process
706 */
707void oz_pd_request_heartbeat(struct oz_pd *pd)
708{
709 unsigned long now = jiffies;
710 unsigned long t;
711 spin_lock(&g_polling_lock);
712 if (pd->heartbeat_requested) {
713 spin_unlock(&g_polling_lock);
714 return;
715 }
716 if (pd->pulse_period_j)
717 t = ((now / pd->pulse_period_j) + 1) * pd->pulse_period_j;
718 else
719 t = now + 1;
720 pd->heartbeat_requested = 1;
721 spin_unlock(&g_polling_lock);
722 oz_timer_add(pd, OZ_TIMER_HEARTBEAT, t, 0);
723}
724/*------------------------------------------------------------------------------
725 * Context: softirq or process
726 */
727struct oz_pd *oz_pd_find(u8 *mac_addr)
728{
729 struct oz_pd *pd;
730 struct list_head *e;
731 spin_lock_bh(&g_polling_lock);
732 list_for_each(e, &g_pd_list) {
733 pd = container_of(e, struct oz_pd, link);
734 if (memcmp(pd->mac_addr, mac_addr, ETH_ALEN) == 0) {
735 atomic_inc(&pd->ref_count);
736 spin_unlock_bh(&g_polling_lock);
737 return pd;
738 }
739 }
740 spin_unlock_bh(&g_polling_lock);
741 return 0;
742}
743/*------------------------------------------------------------------------------
744 * Context: process
745 */
746void oz_app_enable(int app_id, int enable)
747{
748 if (app_id <= OZ_APPID_MAX) {
749 spin_lock_bh(&g_polling_lock);
750 if (enable)
751 g_apps |= (1<<app_id);
752 else
753 g_apps &= ~(1<<app_id);
754 spin_unlock_bh(&g_polling_lock);
755 }
756}
757/*------------------------------------------------------------------------------
758 * Context: softirq
759 */
760static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
761 struct packet_type *pt, struct net_device *orig_dev)
762{
763 oz_event_log(OZ_EVT_RX_FRAME, 0, 0, 0, 0);
764 skb = skb_share_check(skb, GFP_ATOMIC);
765 if (skb == 0)
766 return 0;
767 spin_lock_bh(&g_rx_queue.lock);
768 if (g_processing_rx) {
769 /* We already hold the lock so use __ variant.
770 */
771 __skb_queue_head(&g_rx_queue, skb);
772 spin_unlock_bh(&g_rx_queue.lock);
773 } else {
774 g_processing_rx = 1;
775 do {
777 spin_unlock_bh(&g_rx_queue.lock);
778 oz_rx_frame(skb);
779 spin_lock_bh(&g_rx_queue.lock);
780 if (skb_queue_empty(&g_rx_queue)) {
781 g_processing_rx = 0;
782 spin_unlock_bh(&g_rx_queue.lock);
783 break;
784 }
785 /* We already hold the lock so use __ variant.
786 */
787 skb = __skb_dequeue(&g_rx_queue);
788 } while (1);
789 }
790 return 0;
791}
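/* Note on the scheme above: g_processing_rx, protected by g_rx_queue.lock,
 * ensures only one CPU runs oz_rx_frame at a time; frames arriving while
 * another CPU is inside the loop are queued on g_rx_queue and drained by
 * that CPU. This is what makes the rx-side handlers "softirq-serialized"
 * rather than just "softirq".
 */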
792/*------------------------------------------------------------------------------
793 * Context: process
794 */
795void oz_binding_add(char *net_dev)
796{
797 struct oz_binding *binding;
798
799 binding = kmalloc(sizeof(struct oz_binding), GFP_ATOMIC);
800 if (binding) {
801 binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
802 binding->ptype.func = oz_pkt_recv;
803		if (net_dev && *net_dev) {
804			memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
805 oz_trace("Adding binding: %s\n", net_dev);
806 binding->ptype.dev =
807 dev_get_by_name(&init_net, net_dev);
808 if (binding->ptype.dev == 0) {
809 oz_trace("Netdev %s not found\n", net_dev);
810 kfree(binding);
811 binding = 0;
812 }
813		} else {
			binding->name[0] = '\0';
814			oz_trace("Binding to all netcards\n");
815			binding->ptype.dev = 0;
816 }
817 if (binding) {
818 dev_add_pack(&binding->ptype);
819 spin_lock_bh(&g_binding_lock);
820 binding->next = g_binding;
821 g_binding = binding;
822 spin_unlock_bh(&g_binding_lock);
823 }
824 }
825}
826/*------------------------------------------------------------------------------
827 * Context: process
828 */
829static int compare_binding_name(char *s1, char *s2)
830{
831 int i;
832 for (i = 0; i < OZ_MAX_BINDING_LEN; i++) {
833 if (*s1 != *s2)
834 return 0;
835 if (!*s1++)
836 return 1;
837 s2++;
838 }
839 return 1;
840}
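/* Illustrative note: this is a bounded string comparison that returns 1 on
 * equality, roughly strncmp(s1, s2, OZ_MAX_BINDING_LEN) == 0.
 */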
841/*------------------------------------------------------------------------------
842 * Context: process
843 */
844static void pd_stop_all_for_device(struct net_device *net_dev)
845{
846 struct list_head h;
847 struct oz_pd *pd;
848 struct oz_pd *n;
849 INIT_LIST_HEAD(&h);
850 spin_lock_bh(&g_polling_lock);
851 list_for_each_entry_safe(pd, n, &g_pd_list, link) {
852 if (pd->net_dev == net_dev) {
853 list_move(&pd->link, &h);
854 oz_pd_get(pd);
855 }
856 }
857 spin_unlock_bh(&g_polling_lock);
858 while (!list_empty(&h)) {
859 pd = list_first_entry(&h, struct oz_pd, link);
860 oz_pd_stop(pd);
861 oz_pd_put(pd);
862 }
863}
864/*------------------------------------------------------------------------------
865 * Context: process
866 */
867void oz_binding_remove(char *net_dev)
868{
869 struct oz_binding *binding = 0;
870 struct oz_binding **link;
871 oz_trace("Removing binding: %s\n", net_dev);
872 spin_lock_bh(&g_binding_lock);
873 binding = g_binding;
874 link = &g_binding;
875 while (binding) {
876 if (compare_binding_name(binding->name, net_dev)) {
877 oz_trace("Binding '%s' found\n", net_dev);
878 *link = binding->next;
879 break;
880 } else {
881			link = &binding->next;
882 binding = binding->next;
883 }
884 }
885 spin_unlock_bh(&g_binding_lock);
886 if (binding) {
887 dev_remove_pack(&binding->ptype);
888 if (binding->ptype.dev) {
889 dev_put(binding->ptype.dev);
890 pd_stop_all_for_device(binding->ptype.dev);
891 }
892 kfree(binding);
893 }
894}
895/*------------------------------------------------------------------------------
896 * Context: process
897 */
898static char *oz_get_next_device_name(char *s, char *dname, int max_size)
899{
900 while (*s == ',')
901 s++;
902 while (*s && (*s != ',') && max_size > 1) {
903 *dname++ = *s++;
904 max_size--;
905 }
906 *dname = 0;
907 return s;
908}
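/* Illustrative example: with s = "eth0,wlan0", successive calls copy "eth0"
 * and then "wlan0" into dname, returning a pointer just past each name;
 * oz_protocol_init below relies on this to bind each named device in turn.
 */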
909/*------------------------------------------------------------------------------
910 * Context: process
911 */
912int oz_protocol_init(char *devs)
913{
914 skb_queue_head_init(&g_rx_queue);
915 if (devs && (devs[0] == '*')) {
916 oz_binding_add(0);
917 } else {
918 char d[32];
919		while (devs && *devs) {
920 devs = oz_get_next_device_name(devs, d, sizeof(d));
921 if (d[0])
922 oz_binding_add(d);
923 }
924 }
925 init_timer(&g_timer);
926 return 0;
927}
928/*------------------------------------------------------------------------------
929 * Context: process
930 */
931int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
932{
933 struct oz_pd *pd;
934 struct list_head *e;
935 int count = 0;
936 spin_lock_bh(&g_polling_lock);
937 list_for_each(e, &g_pd_list) {
938 if (count >= max_count)
939 break;
940 pd = container_of(e, struct oz_pd, link);
941 memcpy(&addr[count++], pd->mac_addr, ETH_ALEN);
942 }
943 spin_unlock_bh(&g_polling_lock);
944 return count;
945}
946/*------------------------------------------------------------------------------
947*/
948void oz_polling_lock_bh(void)
949{
950 spin_lock_bh(&g_polling_lock);
951}
952/*------------------------------------------------------------------------------
953*/
954void oz_polling_unlock_bh(void)
955{
956 spin_unlock_bh(&g_polling_lock);
957}
diff --git a/drivers/staging/ozwpan/ozproto.h b/drivers/staging/ozwpan/ozproto.h
new file mode 100644
index 000000000000..89aea28bd8d5
--- /dev/null
+++ b/drivers/staging/ozwpan/ozproto.h
@@ -0,0 +1,69 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#ifndef _OZPROTO_H
7#define _OZPROTO_H
8
9#include <asm/byteorder.h>
10#include "ozconfig.h"
11#include "ozappif.h"
12
13#define OZ_ALLOCATED_SPACE(__x) (LL_RESERVED_SPACE(__x)+(__x)->needed_tailroom)
14
15/* Converts millisecs to jiffies.
16 */
17#define oz_ms_to_jiffies(__x)	msecs_to_jiffies(__x)
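/* Worked example: with HZ=1000, oz_ms_to_jiffies(8) == 8; with HZ=100,
 * msecs_to_jiffies rounds up, so oz_ms_to_jiffies(8) == 1.
 */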
18
19/* Quantum milliseconds.
20 */
21#define OZ_QUANTUM_MS 8
22/* Quantum jiffies
23 */
24#define OZ_QUANTUM_J (oz_ms_to_jiffies(OZ_QUANTUM_MS))
25/* Default timeouts.
26 */
27#define OZ_CONNECTION_TOUT_J (2*HZ)
28#define OZ_PRESLEEP_TOUT_J (11*HZ)
29
30/* Maximum size of a tx frame. */
31#define OZ_MAX_TX_SIZE 1514
32
33/* Application handler functions.
34 */
35typedef int (*oz_app_init_fn_t)(void);
36typedef void (*oz_app_term_fn_t)(void);
37typedef int (*oz_app_start_fn_t)(struct oz_pd *pd, int resume);
38typedef void (*oz_app_stop_fn_t)(struct oz_pd *pd, int pause);
39typedef void (*oz_app_rx_fn_t)(struct oz_pd *pd, struct oz_elt *elt);
40typedef int (*oz_app_heartbeat_fn_t)(struct oz_pd *pd);
41typedef void (*oz_app_farewell_fn_t)(struct oz_pd *pd, u8 ep_num,
42 u8 *data, u8 len);
43
44struct oz_app_if {
45 oz_app_init_fn_t init;
46 oz_app_term_fn_t term;
47 oz_app_start_fn_t start;
48 oz_app_stop_fn_t stop;
49 oz_app_rx_fn_t rx;
50	oz_app_heartbeat_fn_t heartbeat;
51 oz_app_farewell_fn_t farewell;
52 int app_id;
53};
54
55int oz_protocol_init(char *devs);
56void oz_protocol_term(void);
57int oz_get_pd_list(struct oz_mac_addr *addr, int max_count);
58void oz_app_enable(int app_id, int enable);
59struct oz_pd *oz_pd_find(u8 *mac_addr);
60void oz_binding_add(char *net_dev);
61void oz_binding_remove(char *net_dev);
62void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
63 int remove);
64void oz_timer_delete(struct oz_pd *pd, int type);
65void oz_pd_request_heartbeat(struct oz_pd *pd);
66void oz_polling_lock_bh(void);
67void oz_polling_unlock_bh(void);
68
69#endif /* _OZPROTO_H */
diff --git a/drivers/staging/ozwpan/ozprotocol.h b/drivers/staging/ozwpan/ozprotocol.h
new file mode 100644
index 000000000000..b3e7d77f3fff
--- /dev/null
+++ b/drivers/staging/ozwpan/ozprotocol.h
@@ -0,0 +1,372 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#ifndef _OZPROTOCOL_H
7#define _OZPROTOCOL_H
8
9#define PACKED __packed
10
11#define OZ_ETHERTYPE 0x892e
12
13/* Status codes
14 */
15#define OZ_STATUS_SUCCESS 0
16#define OZ_STATUS_INVALID_PARAM 1
17#define OZ_STATUS_TOO_MANY_PDS 2
18#define OZ_STATUS_NOT_ALLOWED 4
19#define OZ_STATUS_SESSION_MISMATCH 5
20#define OZ_STATUS_SESSION_TEARDOWN 6
21
22/* This is the generic element header.
23 Every element starts with this.
24 */
25struct oz_elt {
26 u8 type;
27 u8 length;
28} PACKED;
29
30#define oz_next_elt(__elt) \
31 (struct oz_elt *)((u8 *)((__elt) + 1) + (__elt)->length)
32
33/* Protocol element IDs.
34 */
35#define OZ_ELT_CONNECT_REQ 0x06
36#define OZ_ELT_CONNECT_RSP 0x07
37#define OZ_ELT_DISCONNECT 0x08
38#define OZ_ELT_UPDATE_PARAM_REQ 0x11
39#define OZ_ELT_FAREWELL_REQ 0x12
40#define OZ_ELT_APP_DATA 0x31
41
42/* This is the Ozmo header which is the first Ozmo specific part
43 * of a frame and comes after the MAC header.
44 */
45struct oz_hdr {
46 u8 control;
47 u8 last_pkt_num;
48 u32 pkt_num;
49} PACKED;
50
51#define OZ_PROTOCOL_VERSION 0x1
52/* Bits in the control field. */
53#define OZ_VERSION_MASK 0xc
54#define OZ_VERSION_SHIFT 2
55#define OZ_F_ACK 0x10
56#define OZ_F_ISOC 0x20
57#define OZ_F_MORE_DATA 0x40
58#define OZ_F_ACK_REQUESTED 0x80
59
60#define oz_get_prot_ver(__x) (((__x) & OZ_VERSION_MASK) >> OZ_VERSION_SHIFT)
61
62/* Used to select the bits of packet number to put in the last_pkt_num.
63 */
64#define OZ_LAST_PN_MASK 0x00ff
65
66#define OZ_LAST_PN_HALF_CYCLE 127
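/* Worked example (see oz_retire_tx_frames in ozpd.c): for an acknowledged
 * last_pkt_num lpn = 0x05 and a queued frame whose pkt_num & OZ_LAST_PN_MASK
 * is 0xfe, diff = (0x05 - 0xfe) & 0xff = 0x07, which is <= 127, so the frame
 * is considered acknowledged across the 8-bit wrap and is retired.
 */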
67
68/* Connect request data structure.
69 */
70struct oz_elt_connect_req {
71 u8 mode;
72 u8 resv1[16];
73 u8 pd_info;
74 u8 session_id;
75 u8 presleep;
76 u8 resv2;
77 u8 host_vendor;
78 u8 keep_alive;
79 u16 apps;
80 u8 max_len_div16;
81 u8 ms_per_isoc;
82 u8 resv3[2];
83} PACKED;
84
85/* mode field bits.
86 */
87#define OZ_MODE_POLLED 0x0
88#define OZ_MODE_TRIGGERED 0x1
89#define OZ_MODE_MASK 0xf
90#define OZ_F_ISOC_NO_ELTS 0x40
91#define OZ_F_ISOC_ANYTIME 0x80
92
93/* Keep alive field.
94 */
95#define OZ_KALIVE_TYPE_MASK 0xc0
96#define OZ_KALIVE_VALUE_MASK 0x3f
97#define OZ_KALIVE_SPECIAL 0x00
98#define OZ_KALIVE_SECS 0x40
99#define OZ_KALIVE_MINS 0x80
100#define OZ_KALIVE_HOURS 0xc0
101
102/* Connect response data structure.
103 */
104struct oz_elt_connect_rsp {
105 u8 mode;
106 u8 status;
107 u8 resv1[3];
108 u8 session_id;
109 u16 apps;
110 u32 resv2;
111} PACKED;
112
113struct oz_elt_farewell {
114 u8 ep_num;
115 u8 index;
116 u8 report[1];
117} PACKED;
118
119struct oz_elt_update_param {
120 u8 resv1[16];
121 u8 presleep;
122 u8 resv2;
123 u8 host_vendor;
124 u8 keepalive;
125} PACKED;
126
127/* Header common to all application elements.
128 */
129struct oz_app_hdr {
130 u8 app_id;
131 u8 elt_seq_num;
132} PACKED;
133
134/* Values for app_id.
135 */
136#define OZ_APPID_USB 0x1
137#define OZ_APPID_UNUSED1 0x2
138#define OZ_APPID_UNUSED2 0x3
139#define OZ_APPID_SERIAL 0x4
140#define OZ_APPID_MAX OZ_APPID_SERIAL
141#define OZ_NB_APPS (OZ_APPID_MAX+1)
142
143/* USB header common to all elements for the USB application.
144 * This header extends the oz_app_hdr and comes directly after
145 * the element header in a USB application.
146 */
147struct oz_usb_hdr {
148 u8 app_id;
149 u8 elt_seq_num;
150 u8 type;
151} PACKED;
152
153
154
155/* USB request element subtypes (type field of oz_usb_hdr).
156 */
157#define OZ_GET_DESC_REQ 1
158#define OZ_GET_DESC_RSP 2
159#define OZ_SET_CONFIG_REQ 3
160#define OZ_SET_CONFIG_RSP 4
161#define OZ_SET_INTERFACE_REQ 5
162#define OZ_SET_INTERFACE_RSP 6
163#define OZ_VENDOR_CLASS_REQ 7
164#define OZ_VENDOR_CLASS_RSP 8
165#define OZ_GET_STATUS_REQ 9
166#define OZ_GET_STATUS_RSP 10
167#define OZ_CLEAR_FEATURE_REQ 11
168#define OZ_CLEAR_FEATURE_RSP 12
169#define OZ_SET_FEATURE_REQ 13
170#define OZ_SET_FEATURE_RSP 14
171#define OZ_GET_CONFIGURATION_REQ 15
172#define OZ_GET_CONFIGURATION_RSP 16
173#define OZ_GET_INTERFACE_REQ 17
174#define OZ_GET_INTERFACE_RSP 18
175#define OZ_SYNCH_FRAME_REQ 19
176#define OZ_SYNCH_FRAME_RSP 20
177#define OZ_USB_ENDPOINT_DATA 23
178
179#define OZ_REQD_D2H 0x80
180
181struct oz_get_desc_req {
182 u8 app_id;
183 u8 elt_seq_num;
184 u8 type;
185 u8 req_id;
186 u16 offset;
187 u16 size;
188 u8 req_type;
189 u8 desc_type;
190 u16 w_index;
191 u8 index;
192} PACKED;
193
194/* Values for desc_type field.
195*/
196#define OZ_DESC_DEVICE 0x01
197#define OZ_DESC_CONFIG 0x02
198#define OZ_DESC_STRING 0x03
199
200/* Values for req_type field.
201 */
202#define OZ_RECP_MASK 0x1F
203#define OZ_RECP_DEVICE 0x00
204#define OZ_RECP_INTERFACE 0x01
205#define OZ_RECP_ENDPOINT 0x02
206
207#define OZ_REQT_MASK 0x60
208#define OZ_REQT_STD 0x00
209#define OZ_REQT_CLASS 0x20
210#define OZ_REQT_VENDOR 0x40
211
212struct oz_get_desc_rsp {
213 u8 app_id;
214 u8 elt_seq_num;
215 u8 type;
216 u8 req_id;
217 u16 offset;
218 u16 total_size;
219 u8 rcode;
220 u8 data[1];
221} PACKED;
222
223struct oz_feature_req {
224 u8 app_id;
225 u8 elt_seq_num;
226 u8 type;
227 u8 req_id;
228 u8 recipient;
229 u8 index;
230 u16 feature;
231} PACKED;
232
233struct oz_feature_rsp {
234 u8 app_id;
235 u8 elt_seq_num;
236 u8 type;
237 u8 req_id;
238 u8 rcode;
239} PACKED;
240
241struct oz_set_config_req {
242 u8 app_id;
243 u8 elt_seq_num;
244 u8 type;
245 u8 req_id;
246 u8 index;
247} PACKED;
248
249struct oz_set_config_rsp {
250 u8 app_id;
251 u8 elt_seq_num;
252 u8 type;
253 u8 req_id;
254 u8 rcode;
255} PACKED;
256
257struct oz_set_interface_req {
258 u8 app_id;
259 u8 elt_seq_num;
260 u8 type;
261 u8 req_id;
262 u8 index;
263 u8 alternative;
264} PACKED;
265
266struct oz_set_interface_rsp {
267 u8 app_id;
268 u8 elt_seq_num;
269 u8 type;
270 u8 req_id;
271 u8 rcode;
272} PACKED;
273
274struct oz_get_interface_req {
275 u8 app_id;
276 u8 elt_seq_num;
277 u8 type;
278 u8 req_id;
279 u8 index;
280} PACKED;
281
282struct oz_get_interface_rsp {
283 u8 app_id;
284 u8 elt_seq_num;
285 u8 type;
286 u8 req_id;
287 u8 rcode;
288 u8 alternative;
289} PACKED;
290
291struct oz_vendor_class_req {
292 u8 app_id;
293 u8 elt_seq_num;
294 u8 type;
295 u8 req_id;
296 u8 req_type;
297 u8 request;
298 u16 value;
299 u16 index;
300 u8 data[1];
301} PACKED;
302
303struct oz_vendor_class_rsp {
304 u8 app_id;
305 u8 elt_seq_num;
306 u8 type;
307 u8 req_id;
308 u8 rcode;
309 u8 data[1];
310} PACKED;
311
312struct oz_data {
313 u8 app_id;
314 u8 elt_seq_num;
315 u8 type;
316 u8 endpoint;
317 u8 format;
318} PACKED;
319
320struct oz_isoc_fixed {
321 u8 app_id;
322 u8 elt_seq_num;
323 u8 type;
324 u8 endpoint;
325 u8 format;
326 u8 unit_size;
327 u8 frame_number;
328 u8 data[1];
329} PACKED;
330
331struct oz_multiple_fixed {
332 u8 app_id;
333 u8 elt_seq_num;
334 u8 type;
335 u8 endpoint;
336 u8 format;
337 u8 unit_size;
338 u8 data[1];
339} PACKED;
340
341struct oz_fragmented {
342 u8 app_id;
343 u8 elt_seq_num;
344 u8 type;
345 u8 endpoint;
346 u8 format;
347 u16 total_size;
348 u16 offset;
349 u8 data[1];
350} PACKED;
351
352/* Note: the following does not get packaged in an element in the same way
353 * that other data formats are packaged. Instead the data is put in a frame
354 * directly after the oz_hdr and is the only permitted data in such a
355 * frame. The length of the data is directly determined from the frame size.
356 */
357struct oz_isoc_large {
358 u8 endpoint;
359 u8 format;
360 u8 ms_data;
361 u8 frame_number;
362} PACKED;
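/* Resulting frame layout for this case, as built by oz_send_isoc_unit in
 * ozpd.c:
 *
 *   [MAC hdr][struct oz_hdr][struct oz_isoc_large][ms_data units of data]
 *
 * ms_data counts the per-millisecond units appended; the payload length is
 * implied by the frame size rather than by an element header.
 */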
363
364#define OZ_DATA_F_TYPE_MASK 0xF
365#define OZ_DATA_F_MULTIPLE_FIXED 0x1
366#define OZ_DATA_F_MULTIPLE_VAR 0x2
367#define OZ_DATA_F_ISOC_FIXED 0x3
368#define OZ_DATA_F_ISOC_VAR 0x4
369#define OZ_DATA_F_FRAGMENTED 0x5
370#define OZ_DATA_F_ISOC_LARGE 0x7
371
372#endif /* _OZPROTOCOL_H */
diff --git a/drivers/staging/ozwpan/oztrace.c b/drivers/staging/ozwpan/oztrace.c
new file mode 100644
index 000000000000..353ead24fd7d
--- /dev/null
+++ b/drivers/staging/ozwpan/oztrace.c
@@ -0,0 +1,36 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#include "ozconfig.h"
7#include "oztrace.h"
8
9#ifdef WANT_VERBOSE_TRACE
10unsigned long trace_flags =
11 0
12#ifdef WANT_TRACE_STREAM
13 | OZ_TRACE_STREAM
14#endif /* WANT_TRACE_STREAM */
15#ifdef WANT_TRACE_URB
16 | OZ_TRACE_URB
17#endif /* WANT_TRACE_URB */
18
19#ifdef WANT_TRACE_CTRL_DETAIL
20 | OZ_TRACE_CTRL_DETAIL
21#endif /* WANT_TRACE_CTRL_DETAIL */
22
23#ifdef WANT_TRACE_HUB
24 | OZ_TRACE_HUB
25#endif /* WANT_TRACE_HUB */
26
27#ifdef WANT_TRACE_RX_FRAMES
28 | OZ_TRACE_RX_FRAMES
29#endif /* WANT_TRACE_RX_FRAMES */
30
31#ifdef WANT_TRACE_TX_FRAMES
32 | OZ_TRACE_TX_FRAMES
33#endif /* WANT_TRACE_TX_FRAMES */
34 ;
35#endif /* WANT_VERBOSE_TRACE */
36
diff --git a/drivers/staging/ozwpan/oztrace.h b/drivers/staging/ozwpan/oztrace.h
new file mode 100644
index 000000000000..8293b24c5a77
--- /dev/null
+++ b/drivers/staging/ozwpan/oztrace.h
@@ -0,0 +1,35 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#ifndef _OZTRACE_H_
7#define _OZTRACE_H_
8#include "ozconfig.h"
9
10#define TRACE_PREFIX KERN_ALERT "OZWPAN: "
11
12#ifdef WANT_TRACE
13#define oz_trace(...) printk(TRACE_PREFIX __VA_ARGS__)
14#ifdef WANT_VERBOSE_TRACE
15extern unsigned long trace_flags;
16#define oz_trace2(_flag, ...) \
17 do { if (trace_flags & _flag) printk(TRACE_PREFIX __VA_ARGS__); \
18 } while (0)
19#else
20#define oz_trace2(...)
21#endif /* #ifdef WANT_VERBOSE_TRACE */
22#else
23#define oz_trace(...)
24#define oz_trace2(...)
25#endif /* #ifdef WANT_TRACE */
26
27#define OZ_TRACE_STREAM 0x1
28#define OZ_TRACE_URB 0x2
29#define OZ_TRACE_CTRL_DETAIL 0x4
30#define OZ_TRACE_HUB 0x8
31#define OZ_TRACE_RX_FRAMES 0x10
32#define OZ_TRACE_TX_FRAMES 0x20
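/* Usage example (as in ozproto.c): tracing is compile-time gated by
 * WANT_TRACE/WANT_VERBOSE_TRACE and run-time gated by trace_flags, e.g.
 *
 *   oz_trace2(OZ_TRACE_RX_FRAMES, "RX frame PN=0x%x\n", pkt_num);
 */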
33
34#endif /* _OZTRACE_H_ */
35
diff --git a/drivers/staging/ozwpan/ozurbparanoia.c b/drivers/staging/ozwpan/ozurbparanoia.c
new file mode 100644
index 000000000000..55b9afbbe47b
--- /dev/null
+++ b/drivers/staging/ozwpan/ozurbparanoia.c
@@ -0,0 +1,53 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#include <linux/usb.h>
7#include "ozconfig.h"
8#ifdef WANT_URB_PARANOIA
9#include "ozurbparanoia.h"
10#include "oztrace.h"
11/*-----------------------------------------------------------------------------
12 */
13#define OZ_MAX_URBS 1000
14struct urb *g_urb_memory[OZ_MAX_URBS];
15int g_nb_urbs;
16DEFINE_SPINLOCK(g_urb_mem_lock);
17/*-----------------------------------------------------------------------------
18 */
19void oz_remember_urb(struct urb *urb)
20{
21 unsigned long irq_state;
22 spin_lock_irqsave(&g_urb_mem_lock, irq_state);
23 if (g_nb_urbs < OZ_MAX_URBS) {
24 g_urb_memory[g_nb_urbs++] = urb;
25 oz_trace("%lu: urb up = %d %p\n", jiffies, g_nb_urbs, urb);
26 } else {
27 oz_trace("ERROR urb buffer full\n");
28 }
29 spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
30}
31/*------------------------------------------------------------------------------
32 */
33int oz_forget_urb(struct urb *urb)
34{
35 unsigned long irq_state;
36 int i;
37 int rc = -1;
38 spin_lock_irqsave(&g_urb_mem_lock, irq_state);
39 for (i = 0; i < g_nb_urbs; i++) {
40 if (g_urb_memory[i] == urb) {
41 rc = 0;
42 if (--g_nb_urbs > i)
43				memmove(&g_urb_memory[i], &g_urb_memory[i+1],
44					(g_nb_urbs - i) * sizeof(struct urb *));
45			oz_trace("%lu: urb down = %d %p\n",
46				jiffies, g_nb_urbs, urb);
			break;
47		}
48 }
49 spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
50 return rc;
51}
52#endif /* #ifdef WANT_URB_PARANOIA */
53
diff --git a/drivers/staging/ozwpan/ozurbparanoia.h b/drivers/staging/ozwpan/ozurbparanoia.h
new file mode 100644
index 000000000000..00f5a3a81bc8
--- /dev/null
+++ b/drivers/staging/ozwpan/ozurbparanoia.h
@@ -0,0 +1,19 @@
1#ifndef _OZURBPARANOIA_H
2#define _OZURBPARANOIA_H
3/* -----------------------------------------------------------------------------
4 * Released under the GNU General Public License Version 2 (GPLv2).
5 * Copyright (c) 2011 Ozmo Inc
6 * -----------------------------------------------------------------------------
7 */
8
9#ifdef WANT_URB_PARANOIA
10void oz_remember_urb(struct urb *urb);
11int oz_forget_urb(struct urb *urb);
12#else
13#define oz_remember_urb(__x)
14#define oz_forget_urb(__x) 0
15#endif /* WANT_URB_PARANOIA */
16
17
18#endif /* _OZURBPARANOIA_H */
19
diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
new file mode 100644
index 000000000000..3acf5980d7cc
--- /dev/null
+++ b/drivers/staging/ozwpan/ozusbif.h
@@ -0,0 +1,43 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#ifndef _OZUSBIF_H
7#define _OZUSBIF_H
8
9#include <linux/usb.h>
10
11/* Reference counting functions.
12 */
13void oz_usb_get(void *hpd);
14void oz_usb_put(void *hpd);
15
16/* Stream functions.
17 */
18int oz_usb_stream_create(void *hpd, u8 ep_num);
19int oz_usb_stream_delete(void *hpd, u8 ep_num);
20
21/* Request functions.
22 */
23int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
24 u8 *data, int data_len);
25int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
26 u8 index, u16 windex, int offset, int len);
27int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb);
28void oz_usb_request_heartbeat(void *hpd);
29
30/* Confirmation functions.
31 */
32void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status,
33 u8 *desc, int length, int offset, int total_size);
34void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
35 u8 *data, int data_len);
36
37/* Indication functions.
38 */
39void oz_hcd_data_ind(void *hport, u8 endpoint, u8 *data, int data_len);
40
41int oz_hcd_heartbeat(void *hport);
42
43#endif /* _OZUSBIF_H */
diff --git a/drivers/staging/ozwpan/ozusbsvc.c b/drivers/staging/ozwpan/ozusbsvc.c
new file mode 100644
index 000000000000..9e74f9602384
--- /dev/null
+++ b/drivers/staging/ozwpan/ozusbsvc.c
@@ -0,0 +1,245 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 *
5 * This file provides the protocol independent part of the implementation of the
6 * USB service for a PD.
7 * The implementation of this service is split into two parts, the first of which
8 * is protocol independent and the second contains the protocol specific details.
9 * This split is to allow alternative protocols to be defined.
10 * The implementation of this service uses ozhcd.c to implement a USB HCD.
11 * -----------------------------------------------------------------------------
12 */
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/timer.h>
16#include <linux/sched.h>
17#include <linux/netdevice.h>
18#include <linux/errno.h>
19#include <linux/input.h>
20#include <asm/unaligned.h>
21#include "ozconfig.h"
22#include "ozprotocol.h"
23#include "ozeltbuf.h"
24#include "ozpd.h"
25#include "ozproto.h"
26#include "ozusbif.h"
27#include "ozhcd.h"
28#include "oztrace.h"
29#include "ozusbsvc.h"
30#include "ozevent.h"
31/*------------------------------------------------------------------------------
32 * This is called once when the driver is loaded to initialise the USB service.
33 * Context: process
34 */
35int oz_usb_init(void)
36{
37 oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_USB, 0, 0);
38 return oz_hcd_init();
39}
40/*------------------------------------------------------------------------------
41 * This is called once when the driver is unloaded to terminate the USB service.
42 * Context: process
43 */
44void oz_usb_term(void)
45{
46 oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_USB, 0, 0);
47 oz_hcd_term();
48}
49/*------------------------------------------------------------------------------
50 * This is called when the USB service is started or resumed for a PD.
51 * Context: softirq
52 */
53int oz_usb_start(struct oz_pd *pd, int resume)
54{
55 int rc = 0;
56 struct oz_usb_ctx *usb_ctx;
57 struct oz_usb_ctx *old_ctx = 0;
58 oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_USB, 0, resume);
59 if (resume) {
60 oz_trace("USB service resumed.\n");
61 return 0;
62 }
63 oz_trace("USB service started.\n");
64 /* Create a USB context in case we need one. If we find the PD already
65 * has a USB context then we will destroy it.
66 */
67 usb_ctx = kzalloc(sizeof(struct oz_usb_ctx), GFP_ATOMIC);
68 if (usb_ctx == 0)
69 return -ENOMEM;
70 atomic_set(&usb_ctx->ref_count, 1);
71 usb_ctx->pd = pd;
72 usb_ctx->stopped = 0;
73 /* Install the USB context if the PD doesn't already have one.
74 * If it does already have one then destroy the one we have just
75 * created.
76 */
77 spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
78 old_ctx = pd->app_ctx[OZ_APPID_USB-1];
79 if (old_ctx == 0)
80 pd->app_ctx[OZ_APPID_USB-1] = usb_ctx;
81 oz_usb_get(pd->app_ctx[OZ_APPID_USB-1]);
82 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
83 if (old_ctx) {
84 oz_trace("Already have USB context.\n");
85 kfree(usb_ctx);
86 usb_ctx = old_ctx;
87	} else {
88 /* Take a reference to the PD. This will be released when
89 * the USB context is destroyed.
90 */
91 oz_pd_get(pd);
92 }
93 /* If we already had a USB context and had obtained a port from
94 * the USB HCD then just reset the port. If we didn't have a port
95 * then report the arrival to the USB HCD so we get one.
96 */
97 if (usb_ctx->hport) {
98 oz_hcd_pd_reset(usb_ctx, usb_ctx->hport);
99 } else {
100 usb_ctx->hport = oz_hcd_pd_arrived(usb_ctx);
101 if (usb_ctx->hport == 0) {
102 oz_trace("USB hub returned null port.\n");
103 spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
104 pd->app_ctx[OZ_APPID_USB-1] = 0;
105 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
106 oz_usb_put(usb_ctx);
107 rc = -1;
108 }
109 }
110 oz_usb_put(usb_ctx);
111 return rc;
112}
113/*------------------------------------------------------------------------------
114 * This is called when the USB service is stopped or paused for a PD.
115 * Context: softirq or process
116 */
117void oz_usb_stop(struct oz_pd *pd, int pause)
118{
119 struct oz_usb_ctx *usb_ctx;
120 oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_USB, 0, pause);
121 if (pause) {
122 oz_trace("USB service paused.\n");
123 return;
124 }
125 spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
126 usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
127 pd->app_ctx[OZ_APPID_USB-1] = 0;
128 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
129 if (usb_ctx) {
130 unsigned long tout = jiffies + HZ;
131 oz_trace("USB service stopping...\n");
132 usb_ctx->stopped = 1;
133		/* At this point the reference count on the usb context should
134		 * be 2 - one from when we created it and one from the hcd
135		 * which claims a reference. Since stopped = 1 no one else can
136		 * take a new reference, but someone may already hold one, so
137		 * busy-wait until it is released, timing out after 1 second.
138 */
139 while ((atomic_read(&usb_ctx->ref_count) > 2) &&
140 time_before(jiffies, tout))
141 ;
142 oz_trace("USB service stopped.\n");
143 oz_hcd_pd_departed(usb_ctx->hport);
144 /* Release the reference taken in oz_usb_start.
145 */
146 oz_usb_put(usb_ctx);
147 }
148}
149/*------------------------------------------------------------------------------
150 * This increments the reference count of the context area for a specific PD.
151 * This ensures this context area does not disappear while still in use.
152 * Context: softirq
153 */
154void oz_usb_get(void *hpd)
155{
156 struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
157 atomic_inc(&usb_ctx->ref_count);
158}
159/*------------------------------------------------------------------------------
160 * This decrements the reference count of the context area for a specific PD
161 * and destroys the context area if the reference count becomes zero.
162 * Context: softirq or process
163 */
164void oz_usb_put(void *hpd)
165{
166 struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
167 if (atomic_dec_and_test(&usb_ctx->ref_count)) {
168 oz_trace("Dealloc USB context.\n");
169 oz_pd_put(usb_ctx->pd);
170 kfree(usb_ctx);
171 }
172}
173/*------------------------------------------------------------------------------
174 * Context: softirq
175 */
176int oz_usb_heartbeat(struct oz_pd *pd)
177{
178 struct oz_usb_ctx *usb_ctx;
179 int rc = 0;
180 spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
181 usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
182 if (usb_ctx)
183 oz_usb_get(usb_ctx);
184 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
185 if (usb_ctx == 0)
186 return rc;
187 if (usb_ctx->stopped)
188 goto done;
189 if (usb_ctx->hport)
190 if (oz_hcd_heartbeat(usb_ctx->hport))
191 rc = 1;
192done:
193 oz_usb_put(usb_ctx);
194 return rc;
195}
196/*------------------------------------------------------------------------------
197 * Context: softirq
198 */
199int oz_usb_stream_create(void *hpd, u8 ep_num)
200{
201 struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
202 struct oz_pd *pd = usb_ctx->pd;
203 oz_trace("oz_usb_stream_create(0x%x)\n", ep_num);
204 if (pd->mode & OZ_F_ISOC_NO_ELTS) {
205 oz_isoc_stream_create(pd, ep_num);
206 } else {
207 oz_pd_get(pd);
208 if (oz_elt_stream_create(&pd->elt_buff, ep_num,
209 4*pd->max_tx_size)) {
210 oz_pd_put(pd);
211 return -1;
212 }
213 }
214 return 0;
215}
216/*------------------------------------------------------------------------------
217 * Context: softirq
218 */
219int oz_usb_stream_delete(void *hpd, u8 ep_num)
220{
221 struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
222 if (usb_ctx) {
223 struct oz_pd *pd = usb_ctx->pd;
224 if (pd) {
225 oz_trace("oz_usb_stream_delete(0x%x)\n", ep_num);
226 if (pd->mode & OZ_F_ISOC_NO_ELTS) {
227 oz_isoc_stream_delete(pd, ep_num);
228 } else {
229 if (oz_elt_stream_delete(&pd->elt_buff, ep_num))
230 return -1;
231 oz_pd_put(pd);
232 }
233 }
234 }
235 return 0;
236}
237/*------------------------------------------------------------------------------
238 * Context: softirq or process
239 */
240void oz_usb_request_heartbeat(void *hpd)
241{
242 struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
243 if (usb_ctx && usb_ctx->pd)
244 oz_pd_request_heartbeat(usb_ctx->pd);
245}
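
The pattern in oz_usb_start() above deserves a note: the context is
allocated speculatively, installed under the per-application spinlock
only if the slot is still empty, and the loser of any race frees its
copy and adopts the existing context. A self-contained user-space
sketch of the same idiom, with a pthread mutex standing in for the
kernel spinlock (the names are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	int refs;
};

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ctx *slot;	/* stands in for pd->app_ctx[] */

static struct ctx *install_ctx(void)
{
	/* Allocate speculatively, outside the lock. */
	struct ctx *fresh = calloc(1, sizeof(*fresh));
	struct ctx *old;

	if (!fresh)
		return NULL;
	fresh->refs = 1;

	pthread_mutex_lock(&slot_lock);
	old = slot;
	if (!old)
		slot = fresh;	/* won the race: install ours */
	pthread_mutex_unlock(&slot_lock);

	if (old) {
		/* Lost the race: discard our copy, adopt the winner. */
		free(fresh);
		return old;
	}
	return fresh;
}

int main(void)
{
	printf("installed ctx %p\n", (void *)install_ctx());
	return 0;
}
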
diff --git a/drivers/staging/ozwpan/ozusbsvc.h b/drivers/staging/ozwpan/ozusbsvc.h
new file mode 100644
index 000000000000..58e05a59be31
--- /dev/null
+++ b/drivers/staging/ozwpan/ozusbsvc.h
@@ -0,0 +1,32 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#ifndef _OZUSBSVC_H
7#define _OZUSBSVC_H
8
9/*------------------------------------------------------------------------------
10 * Per PD context info stored in application context area of PD.
11 * This object is reference counted to ensure it doesn't disappear while
12 * still in use.
13 */
14struct oz_usb_ctx {
15 atomic_t ref_count;
16 u8 tx_seq_num;
17 u8 rx_seq_num;
18 struct oz_pd *pd;
19 void *hport;
20 int stopped;
21};
22
23int oz_usb_init(void);
24void oz_usb_term(void);
25int oz_usb_start(struct oz_pd *pd, int resume);
26void oz_usb_stop(struct oz_pd *pd, int pause);
27void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt);
28int oz_usb_heartbeat(struct oz_pd *pd);
29void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len);
30
31#endif /* _OZUSBSVC_H */
32
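
The ref_count field above follows the usual get/put discipline: the
creator holds one reference, every other user takes one before touching
the object, and the final put frees it, which is what lets the context
outlive the PD slot it was removed from. A minimal user-space sketch of
that discipline, with C11 atomics standing in for the kernel's atomic_t
(illustrative only):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct usb_ctx {
	atomic_int ref_count;
};

static void ctx_get(struct usb_ctx *c)
{
	atomic_fetch_add(&c->ref_count, 1);
}

static void ctx_put(struct usb_ctx *c)
{
	/* fetch_sub returns the old value: 1 means last reference. */
	if (atomic_fetch_sub(&c->ref_count, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(c);
	}
}

int main(void)
{
	struct usb_ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	atomic_init(&c->ref_count, 1);	/* creator's reference */
	ctx_get(c);			/* a second user appears */
	ctx_put(c);			/* second user is done */
	ctx_put(c);			/* creator is done: frees */
	return 0;
}
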
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
new file mode 100644
index 000000000000..66bd576bb5e9
--- /dev/null
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -0,0 +1,437 @@
1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 *
5 * This file implements the protocol specific parts of the USB service for a PD.
6 * -----------------------------------------------------------------------------
7 */
8#include <linux/init.h>
9#include <linux/module.h>
10#include <linux/timer.h>
11#include <linux/sched.h>
12#include <linux/netdevice.h>
13#include <linux/errno.h>
14#include <linux/input.h>
15#include <asm/unaligned.h>
16#include "ozconfig.h"
17#include "ozprotocol.h"
18#include "ozeltbuf.h"
19#include "ozpd.h"
20#include "ozproto.h"
21#include "ozusbif.h"
22#include "ozhcd.h"
23#include "oztrace.h"
24#include "ozusbsvc.h"
25#include "ozevent.h"
26/*------------------------------------------------------------------------------
27 */
28#define MAX_ISOC_FIXED_DATA (253-sizeof(struct oz_isoc_fixed))
29/*------------------------------------------------------------------------------
30 * Context: softirq
31 */
32static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
33 struct oz_usb_ctx *usb_ctx, u8 strid, u8 isoc)
34{
35 int ret;
36 struct oz_elt *elt = (struct oz_elt *)ei->data;
37 struct oz_app_hdr *app_hdr = (struct oz_app_hdr *)(elt+1);
38 elt->type = OZ_ELT_APP_DATA;
39 ei->app_id = OZ_APPID_USB;
40 ei->length = elt->length + sizeof(struct oz_elt);
41 app_hdr->app_id = OZ_APPID_USB;
42 spin_lock_bh(&eb->lock);
43 if (isoc == 0) {
44 app_hdr->elt_seq_num = usb_ctx->tx_seq_num++;
45 if (usb_ctx->tx_seq_num == 0)
46 usb_ctx->tx_seq_num = 1;
47 }
48 ret = oz_queue_elt_info(eb, isoc, strid, ei);
49 if (ret)
50 oz_elt_info_free(eb, ei);
51 spin_unlock_bh(&eb->lock);
52 return ret;
53}
54/*------------------------------------------------------------------------------
55 * Context: softirq
56 */
57int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
58 u8 index, u16 windex, int offset, int len)
59{
60 struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
61 struct oz_pd *pd = usb_ctx->pd;
62 struct oz_elt *elt;
63 struct oz_get_desc_req *body;
64 struct oz_elt_buf *eb = &pd->elt_buff;
65 struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
66 oz_trace(" req_type = 0x%x\n", req_type);
67 oz_trace(" desc_type = 0x%x\n", desc_type);
68 oz_trace(" index = 0x%x\n", index);
69 oz_trace(" windex = 0x%x\n", windex);
70 oz_trace(" offset = 0x%x\n", offset);
71 oz_trace(" len = 0x%x\n", len);
72 if (len > 200)
73 len = 200;
74 if (ei == 0)
75 return -1;
76 elt = (struct oz_elt *)ei->data;
77 elt->length = sizeof(struct oz_get_desc_req);
78 body = (struct oz_get_desc_req *)(elt+1);
79 body->type = OZ_GET_DESC_REQ;
80 body->req_id = req_id;
81 put_unaligned(cpu_to_le16(offset), &body->offset);
82 put_unaligned(cpu_to_le16(len), &body->size);
83 body->req_type = req_type;
84 body->desc_type = desc_type;
85 body->w_index = windex;
86 body->index = index;
87 return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
88}
89/*------------------------------------------------------------------------------
90 * Context: tasklet
91 */
92static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
93{
94 struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
95 struct oz_pd *pd = usb_ctx->pd;
96 struct oz_elt *elt;
97 struct oz_elt_buf *eb = &pd->elt_buff;
98 struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
99 struct oz_set_config_req *body;
100 if (ei == 0)
101 return -1;
102 elt = (struct oz_elt *)ei->data;
103 elt->length = sizeof(struct oz_set_config_req);
104 body = (struct oz_set_config_req *)(elt+1);
105 body->type = OZ_SET_CONFIG_REQ;
106 body->req_id = req_id;
107 body->index = index;
108 return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
109}
110/*------------------------------------------------------------------------------
111 * Context: tasklet
112 */
113static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
114{
115 struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
116 struct oz_pd *pd = usb_ctx->pd;
117 struct oz_elt *elt;
118 struct oz_elt_buf *eb = &pd->elt_buff;
119 struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
120 struct oz_set_interface_req *body;
121 if (ei == 0)
122 return -1;
123 elt = (struct oz_elt *)ei->data;
124 elt->length = sizeof(struct oz_set_interface_req);
125 body = (struct oz_set_interface_req *)(elt+1);
126 body->type = OZ_SET_INTERFACE_REQ;
127 body->req_id = req_id;
128 body->index = index;
129 body->alternative = alt;
130 return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
131}
132/*------------------------------------------------------------------------------
133 * Context: tasklet
134 */
135static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
136 u8 recipient, u8 index, __le16 feature)
137{
138 struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
139 struct oz_pd *pd = usb_ctx->pd;
140 struct oz_elt *elt;
141 struct oz_elt_buf *eb = &pd->elt_buff;
142 struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
143 struct oz_feature_req *body;
144 if (ei == 0)
145 return -1;
146 elt = (struct oz_elt *)ei->data;
147 elt->length = sizeof(struct oz_feature_req);
148 body = (struct oz_feature_req *)(elt+1);
149 body->type = type;
150 body->req_id = req_id;
151 body->recipient = recipient;
152 body->index = index;
153 put_unaligned(feature, &body->feature);
154 return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
155}
156/*------------------------------------------------------------------------------
157 * Context: tasklet
158 */
159static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
160 u8 request, __le16 value, __le16 index, u8 *data, int data_len)
161{
162 struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
163 struct oz_pd *pd = usb_ctx->pd;
164 struct oz_elt *elt;
165 struct oz_elt_buf *eb = &pd->elt_buff;
166 struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
167 struct oz_vendor_class_req *body;
168 if (ei == 0)
169 return -1;
170 elt = (struct oz_elt *)ei->data;
171 elt->length = sizeof(struct oz_vendor_class_req) - 1 + data_len;
172 body = (struct oz_vendor_class_req *)(elt+1);
173 body->type = OZ_VENDOR_CLASS_REQ;
174 body->req_id = req_id;
175 body->req_type = req_type;
176 body->request = request;
177 put_unaligned(value, &body->value);
178 put_unaligned(index, &body->index);
179 if (data_len)
180 memcpy(body->data, data, data_len);
181 return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
182}
183/*------------------------------------------------------------------------------
184 * Context: tasklet
185 */
186int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
187 u8 *data, int data_len)
188{
189 unsigned wvalue = le16_to_cpu(setup->wValue);
190 unsigned windex = le16_to_cpu(setup->wIndex);
191 unsigned wlength = le16_to_cpu(setup->wLength);
192 int rc = 0;
193 oz_event_log(OZ_EVT_CTRL_REQ, setup->bRequest, req_id,
194 (void *)(((unsigned long)(setup->wValue))<<16 |
195 ((unsigned long)setup->wIndex)),
196 setup->bRequestType);
197 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
198 switch (setup->bRequest) {
199 case USB_REQ_GET_DESCRIPTOR:
200 rc = oz_usb_get_desc_req(hpd, req_id,
201 setup->bRequestType, (u8)(wvalue>>8),
202 (u8)wvalue, setup->wIndex, 0, wlength);
203 break;
204 case USB_REQ_SET_CONFIGURATION:
205 rc = oz_usb_set_config_req(hpd, req_id, (u8)wvalue);
206 break;
207 case USB_REQ_SET_INTERFACE: {
208 u8 if_num = (u8)windex;
209 u8 alt = (u8)wvalue;
210 rc = oz_usb_set_interface_req(hpd, req_id,
211 if_num, alt);
212 }
213 break;
214 case USB_REQ_SET_FEATURE:
215 rc = oz_usb_set_clear_feature_req(hpd, req_id,
216 OZ_SET_FEATURE_REQ,
217 setup->bRequestType & 0xf, (u8)windex,
218 setup->wValue);
219 break;
220 case USB_REQ_CLEAR_FEATURE:
221 rc = oz_usb_set_clear_feature_req(hpd, req_id,
222 OZ_CLEAR_FEATURE_REQ,
223 setup->bRequestType & 0xf,
224 (u8)windex, setup->wValue);
225 break;
226 }
227 } else {
228 rc = oz_usb_vendor_class_req(hpd, req_id, setup->bRequestType,
229 setup->bRequest, setup->wValue, setup->wIndex,
230 data, data_len);
231 }
232 return rc;
233}
234/*------------------------------------------------------------------------------
235 * Context: softirq
236 */
237int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
238{
239 struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
240 struct oz_pd *pd = usb_ctx->pd;
241 struct oz_elt_buf *eb;
242 int i;
243 int hdr_size;
244 u8 *data;
245 struct usb_iso_packet_descriptor *desc;
246
247 if (pd->mode & OZ_F_ISOC_NO_ELTS) {
248 for (i = 0; i < urb->number_of_packets; i++) {
249 u8 *data;
250 desc = &urb->iso_frame_desc[i];
251 data = ((u8 *)urb->transfer_buffer)+desc->offset;
252 oz_send_isoc_unit(pd, ep_num, data, desc->length);
253 }
254 return 0;
255 }
256
257 hdr_size = sizeof(struct oz_isoc_fixed) - 1;
258 eb = &pd->elt_buff;
259 i = 0;
260 while (i < urb->number_of_packets) {
261 struct oz_elt_info *ei = oz_elt_info_alloc(eb);
262 struct oz_elt *elt;
263 struct oz_isoc_fixed *body;
264 int unit_count;
265 int unit_size;
266 int rem;
267 if (ei == 0)
268 return -1;
269 rem = MAX_ISOC_FIXED_DATA;
270 elt = (struct oz_elt *)ei->data;
271 body = (struct oz_isoc_fixed *)(elt + 1);
272 body->type = OZ_USB_ENDPOINT_DATA;
273 body->endpoint = ep_num;
274 body->format = OZ_DATA_F_ISOC_FIXED;
275 unit_size = urb->iso_frame_desc[i].length;
276 body->unit_size = (u8)unit_size;
277 data = ((u8 *)(elt+1)) + hdr_size;
278 unit_count = 0;
279 while (i < urb->number_of_packets) {
280 desc = &urb->iso_frame_desc[i];
281 if ((unit_size == desc->length) &&
282 (desc->length <= rem)) {
283 memcpy(data, ((u8 *)urb->transfer_buffer) +
284 desc->offset, unit_size);
285 data += unit_size;
286 rem -= unit_size;
287 unit_count++;
288 desc->status = 0;
289 desc->actual_length = desc->length;
290 i++;
291 } else {
292 break;
293 }
294 }
295 elt->length = hdr_size + MAX_ISOC_FIXED_DATA - rem;
296		/* Store the number of units in body->frame_number for the
297		 * moment. The field is overwritten with the real frame
298		 * number before the element is sent. */
299 body->frame_number = (u8)unit_count;
300 oz_usb_submit_elt(eb, ei, usb_ctx, ep_num,
301 pd->mode & OZ_F_ISOC_ANYTIME);
302 }
303 return 0;
304}
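
The element-building loop above greedily coalesces consecutive ISO
packets of a single unit size into one element until the fixed payload
budget is spent, then starts a new element. The same grouping logic as
a standalone sketch (simplified types and hypothetical packet sizes):

#include <stdio.h>

#define MAX_PAYLOAD 253		/* stands in for MAX_ISOC_FIXED_DATA */

/* Consume consecutive packets that share one unit size and still fit
 * in the remaining budget; returns how many packets were grouped. */
static int pack_units(const int *lens, int n, int *unit_size)
{
	int i = 0, rem = MAX_PAYLOAD;

	*unit_size = lens[0];
	while (i < n && lens[i] == *unit_size && lens[i] <= rem) {
		rem -= lens[i];
		i++;
	}
	return i;
}

int main(void)
{
	int lens[] = { 40, 40, 40, 64, 64 };
	int n = 5, done = 0;

	while (done < n) {
		int usz;
		int took = pack_units(lens + done, n - done, &usz);

		if (took == 0)	/* packet larger than the budget */
			break;
		printf("element: %d units of %d bytes\n", took, usz);
		done += took;
	}
	return 0;
}
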
305/*------------------------------------------------------------------------------
306 * Context: softirq-serialized
307 */
308void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
309 struct oz_usb_hdr *usb_hdr, int len)
310{
311 struct oz_data *data_hdr = (struct oz_data *)usb_hdr;
312 switch (data_hdr->format) {
313 case OZ_DATA_F_MULTIPLE_FIXED: {
314 struct oz_multiple_fixed *body =
315 (struct oz_multiple_fixed *)data_hdr;
316 u8 *data = body->data;
317 int n = (len - sizeof(struct oz_multiple_fixed)+1)
318 / body->unit_size;
319 while (n--) {
320 oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
321 data, body->unit_size);
322 data += body->unit_size;
323 }
324 }
325 break;
326 case OZ_DATA_F_ISOC_FIXED: {
327 struct oz_isoc_fixed *body =
328 (struct oz_isoc_fixed *)data_hdr;
329 int data_len = len-sizeof(struct oz_isoc_fixed)+1;
330 int unit_size = body->unit_size;
331 u8 *data = body->data;
332 int count;
333 int i;
334 if (!unit_size)
335 break;
336 count = data_len/unit_size;
337 for (i = 0; i < count; i++) {
338 oz_hcd_data_ind(usb_ctx->hport,
339 body->endpoint, data, unit_size);
340 data += unit_size;
341 }
342 }
343 break;
344 }
345
346}
347/*------------------------------------------------------------------------------
348 * This is called when the PD has received a USB element. The type of element
349 * is determined and is then passed to an appropriate handler function.
350 * Context: softirq-serialized
351 */
352void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
353{
354 struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1);
355 struct oz_usb_ctx *usb_ctx;
356
357 spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
358 usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
359 if (usb_ctx)
360 oz_usb_get(usb_ctx);
361 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
362 if (usb_ctx == 0)
363 return; /* Context has gone so nothing to do. */
364 if (usb_ctx->stopped)
365 goto done;
366	/* If the sequence number is non-zero then check that it is not a
367	 * duplicate; zero sequence numbers are always accepted.
368 */
369 if (usb_hdr->elt_seq_num != 0) {
370 if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0)
371 /* Reject duplicate element. */
372 goto done;
373 }
374 usb_ctx->rx_seq_num = usb_hdr->elt_seq_num;
375 switch (usb_hdr->type) {
376 case OZ_GET_DESC_RSP: {
377 struct oz_get_desc_rsp *body =
378 (struct oz_get_desc_rsp *)usb_hdr;
379 int data_len = elt->length -
380 sizeof(struct oz_get_desc_rsp) + 1;
381 u16 offs = le16_to_cpu(get_unaligned(&body->offset));
382 u16 total_size =
383 le16_to_cpu(get_unaligned(&body->total_size));
384 oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
385 oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
386 body->rcode, body->data,
387 data_len, offs, total_size);
388 }
389 break;
390 case OZ_SET_CONFIG_RSP: {
391 struct oz_set_config_rsp *body =
392 (struct oz_set_config_rsp *)usb_hdr;
393 oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
394 body->rcode, 0, 0);
395 }
396 break;
397 case OZ_SET_INTERFACE_RSP: {
398 struct oz_set_interface_rsp *body =
399 (struct oz_set_interface_rsp *)usb_hdr;
400 oz_hcd_control_cnf(usb_ctx->hport,
401 body->req_id, body->rcode, 0, 0);
402 }
403 break;
404 case OZ_VENDOR_CLASS_RSP: {
405 struct oz_vendor_class_rsp *body =
406 (struct oz_vendor_class_rsp *)usb_hdr;
407 oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
408 body->rcode, body->data, elt->length-
409 sizeof(struct oz_vendor_class_rsp)+1);
410 }
411 break;
412 case OZ_USB_ENDPOINT_DATA:
413 oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length);
414 break;
415 }
416done:
417 oz_usb_put(usb_ctx);
418}
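
The duplicate check above relies on modulo-256 window arithmetic: an
element is accepted only when its sequence number is ahead of the last
one seen, i.e. when the unsigned difference (last - incoming) has bit 7
set. A standalone illustration of that test (editor's sketch, not
driver code):

#include <stdio.h>

static int is_duplicate(unsigned char last_rx, unsigned char incoming)
{
	/* Bit 7 clear means incoming is at or behind last_rx within
	 * the 128-element window, so it has been seen already. */
	return ((unsigned char)(last_rx - incoming) & 0x80) == 0;
}

int main(void)
{
	printf("%d\n", is_duplicate(10, 10));	/* 1: same number  */
	printf("%d\n", is_duplicate(10, 11));	/* 0: next in line */
	printf("%d\n", is_duplicate(255, 1));	/* 0: new, wrapped */
	printf("%d\n", is_duplicate(10, 5));	/* 1: already seen */
	return 0;
}
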
419/*------------------------------------------------------------------------------
420 * Context: softirq, process
421 */
422void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
423{
424 struct oz_usb_ctx *usb_ctx;
425 spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
426 usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
427 if (usb_ctx)
428 oz_usb_get(usb_ctx);
429 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
430 if (usb_ctx == 0)
431 return; /* Context has gone so nothing to do. */
432 if (!usb_ctx->stopped) {
433 oz_trace("Farewell indicated ep = 0x%x\n", ep_num);
434 oz_hcd_data_ind(usb_ctx->hport, ep_num, data, len);
435 }
436 oz_usb_put(usb_ctx);
437}
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
index c60911c6ab3f..cac320738142 100644
--- a/drivers/staging/quickstart/quickstart.c
+++ b/drivers/staging/quickstart/quickstart.c
@@ -4,7 +4,7 @@
4 * 4 *
5 * Copyright (C) 2007-2010 Angelo Arrifano <miknix@gmail.com> 5 * Copyright (C) 2007-2010 Angelo Arrifano <miknix@gmail.com>
6 * 6 *
7 * Information gathered from disassebled dsdt and from here: 7 * Information gathered from disassembled dsdt and from here:
8 * <http://www.microsoft.com/whdc/system/platform/firmware/DirAppLaunch.mspx> 8 * <http://www.microsoft.com/whdc/system/platform/firmware/DirAppLaunch.mspx>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
@@ -23,7 +23,9 @@
23 * 23 *
24 */ 24 */
25 25
26#define QUICKSTART_VERSION "1.03" 26#define QUICKSTART_VERSION "1.04"
27
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27 29
28#include <linux/kernel.h> 30#include <linux/kernel.h>
29#include <linux/module.h> 31#include <linux/module.h>
@@ -37,118 +39,73 @@ MODULE_AUTHOR("Angelo Arrifano");
37MODULE_DESCRIPTION("ACPI Direct App Launch driver"); 39MODULE_DESCRIPTION("ACPI Direct App Launch driver");
38MODULE_LICENSE("GPL"); 40MODULE_LICENSE("GPL");
39 41
40#define QUICKSTART_ACPI_DEVICE_NAME "quickstart" 42#define QUICKSTART_ACPI_DEVICE_NAME "quickstart"
41#define QUICKSTART_ACPI_CLASS "quickstart" 43#define QUICKSTART_ACPI_CLASS "quickstart"
42#define QUICKSTART_ACPI_HID "PNP0C32" 44#define QUICKSTART_ACPI_HID "PNP0C32"
43
44#define QUICKSTART_PF_DRIVER_NAME "quickstart"
45#define QUICKSTART_PF_DEVICE_NAME "quickstart"
46#define QUICKSTART_PF_DEVATTR_NAME "pressed_button"
47 45
48#define QUICKSTART_MAX_BTN_NAME_LEN 16 46#define QUICKSTART_PF_DRIVER_NAME "quickstart"
47#define QUICKSTART_PF_DEVICE_NAME "quickstart"
49 48
50/* There will be two events: 49/*
51 * 0x02 - A hot button was pressed while device was off/sleeping. 50 * There will be two events:
52 * 0x80 - A hot button was pressed while device was up. */ 51 * 0x02 - A hot button was pressed while device was off/sleeping.
53#define QUICKSTART_EVENT_WAKE 0x02 52 * 0x80 - A hot button was pressed while device was up.
54#define QUICKSTART_EVENT_RUNTIME 0x80 53 */
54#define QUICKSTART_EVENT_WAKE 0x02
55#define QUICKSTART_EVENT_RUNTIME 0x80
55 56
56struct quickstart_btn { 57struct quickstart_button {
57 char *name; 58 char *name;
58 unsigned int id; 59 unsigned int id;
59 struct quickstart_btn *next; 60 struct list_head list;
60}; 61};
61 62
62static struct quickstart_driver_data {
63 struct quickstart_btn *btn_lst;
64 struct quickstart_btn *pressed;
65} quickstart_data;
66
67/* ACPI driver Structs */
68struct quickstart_acpi { 63struct quickstart_acpi {
69 struct acpi_device *device; 64 struct acpi_device *device;
70 struct quickstart_btn *btn; 65 struct quickstart_button *button;
71};
72static int quickstart_acpi_add(struct acpi_device *device);
73static int quickstart_acpi_remove(struct acpi_device *device, int type);
74static const struct acpi_device_id quickstart_device_ids[] = {
75 {QUICKSTART_ACPI_HID, 0},
76 {"", 0},
77}; 66};
78 67
79static struct acpi_driver quickstart_acpi_driver = { 68static LIST_HEAD(buttons);
80 .name = "quickstart", 69static struct quickstart_button *pressed;
81 .class = QUICKSTART_ACPI_CLASS,
82 .ids = quickstart_device_ids,
83 .ops = {
84 .add = quickstart_acpi_add,
85 .remove = quickstart_acpi_remove,
86 },
87};
88 70
89/* Input device structs */ 71static struct input_dev *quickstart_input;
90struct input_dev *quickstart_input;
91 72
92/* Platform driver structs */ 73/* Platform driver functions */
93static ssize_t buttons_show(struct device *dev, 74static ssize_t quickstart_buttons_show(struct device *dev,
94 struct device_attribute *attr,
95 char *buf);
96static ssize_t pressed_button_show(struct device *dev,
97 struct device_attribute *attr, 75 struct device_attribute *attr,
98 char *buf); 76 char *buf)
99static ssize_t pressed_button_store(struct device *dev,
100 struct device_attribute *attr,
101 const char *buf,
102 size_t count);
103static DEVICE_ATTR(pressed_button, 0666, pressed_button_show,
104 pressed_button_store);
105static DEVICE_ATTR(buttons, 0444, buttons_show, NULL);
106static struct platform_device *pf_device;
107static struct platform_driver pf_driver = {
108 .driver = {
109 .name = QUICKSTART_PF_DRIVER_NAME,
110 .owner = THIS_MODULE,
111 }
112};
113
114/*
115 * Platform driver functions
116 */
117static ssize_t buttons_show(struct device *dev,
118 struct device_attribute *attr,
119 char *buf)
120{ 77{
121 int count = 0; 78 int count = 0;
122 struct quickstart_btn *ptr = quickstart_data.btn_lst; 79 struct quickstart_button *b;
123 80
124 if (!ptr) 81 if (list_empty(&buttons))
125 return snprintf(buf, PAGE_SIZE, "none"); 82 return snprintf(buf, PAGE_SIZE, "none");
126 83
127 while (ptr && (count < PAGE_SIZE)) { 84 list_for_each_entry(b, &buttons, list) {
128 if (ptr->name) { 85 count += snprintf(buf + count, PAGE_SIZE - count, "%u\t%s\n",
129 count += snprintf(buf + count, 86 b->id, b->name);
130 PAGE_SIZE - count, 87
131 "%d\t%s\n", ptr->id, ptr->name); 88 if (count >= PAGE_SIZE) {
89 count = PAGE_SIZE;
90 break;
132 } 91 }
133 ptr = ptr->next;
134 } 92 }
135 93
136 return count; 94 return count;
137} 95}
138 96
139static ssize_t pressed_button_show(struct device *dev, 97static ssize_t quickstart_pressed_button_show(struct device *dev,
140 struct device_attribute *attr, 98 struct device_attribute *attr,
141 char *buf) 99 char *buf)
142{ 100{
143 return snprintf(buf, PAGE_SIZE, "%s\n", 101 return scnprintf(buf, PAGE_SIZE, "%s\n",
144 (quickstart_data.pressed ? 102 (pressed ? pressed->name : "none"));
145 quickstart_data.pressed->name : "none"));
146} 103}
147 104
148 105
149static ssize_t pressed_button_store(struct device *dev, 106static ssize_t quickstart_pressed_button_store(struct device *dev,
150 struct device_attribute *attr, 107 struct device_attribute *attr,
151 const char *buf, size_t count) 108 const char *buf, size_t count)
152{ 109{
153 if (count < 2) 110 if (count < 2)
154 return -EINVAL; 111 return -EINVAL;
@@ -156,60 +113,40 @@ static ssize_t pressed_button_store(struct device *dev,
156 if (strncasecmp(buf, "none", 4) != 0) 113 if (strncasecmp(buf, "none", 4) != 0)
157 return -EINVAL; 114 return -EINVAL;
158 115
159 quickstart_data.pressed = NULL; 116 pressed = NULL;
160 return count; 117 return count;
161} 118}
162 119
163/* Hotstart Helper functions */ 120/* Helper functions */
164static int quickstart_btnlst_add(struct quickstart_btn **data) 121static struct quickstart_button *quickstart_buttons_add(void)
165{ 122{
166 struct quickstart_btn **ptr = &quickstart_data.btn_lst; 123 struct quickstart_button *b;
167 124
168 while (*ptr) 125 b = kzalloc(sizeof(*b), GFP_KERNEL);
169 ptr = &((*ptr)->next); 126 if (!b)
127 return NULL;
170 128
171 *ptr = kzalloc(sizeof(struct quickstart_btn), GFP_KERNEL); 129 list_add_tail(&b->list, &buttons);
172 if (!*ptr) {
173 *data = NULL;
174 return -ENOMEM;
175 }
176 *data = *ptr;
177 130
178 return 0; 131 return b;
179} 132}
180 133
181static void quickstart_btnlst_del(struct quickstart_btn *data) 134static void quickstart_button_del(struct quickstart_button *data)
182{ 135{
183 struct quickstart_btn **ptr = &quickstart_data.btn_lst;
184
185 if (!data) 136 if (!data)
186 return; 137 return;
187 138
188 while (*ptr) { 139 list_del(&data->list);
189 if (*ptr == data) { 140 kfree(data->name);
190 *ptr = (*ptr)->next; 141 kfree(data);
191 kfree(data);
192 return;
193 }
194 ptr = &((*ptr)->next);
195 }
196
197 return;
198} 142}
199 143
200static void quickstart_btnlst_free(void) 144static void quickstart_buttons_free(void)
201{ 145{
202 struct quickstart_btn *ptr = quickstart_data.btn_lst; 146 struct quickstart_button *b, *n;
203 struct quickstart_btn *lptr = NULL;
204
205 while (ptr) {
206 lptr = ptr;
207 ptr = ptr->next;
208 kfree(lptr->name);
209 kfree(lptr);
210 }
211 147
212 return; 148 list_for_each_entry_safe(b, n, &buttons, list)
149 quickstart_button_del(b);
213} 150}
214 151
215/* ACPI Driver functions */ 152/* ACPI Driver functions */
@@ -220,107 +157,137 @@ static void quickstart_acpi_notify(acpi_handle handle, u32 event, void *data)
220 if (!quickstart) 157 if (!quickstart)
221 return; 158 return;
222 159
223 if (event == QUICKSTART_EVENT_WAKE) 160 switch (event) {
224 quickstart_data.pressed = quickstart->btn; 161 case QUICKSTART_EVENT_WAKE:
225 else if (event == QUICKSTART_EVENT_RUNTIME) { 162 pressed = quickstart->button;
226 input_report_key(quickstart_input, quickstart->btn->id, 1); 163 break;
164 case QUICKSTART_EVENT_RUNTIME:
165 input_report_key(quickstart_input, quickstart->button->id, 1);
227 input_sync(quickstart_input); 166 input_sync(quickstart_input);
228 input_report_key(quickstart_input, quickstart->btn->id, 0); 167 input_report_key(quickstart_input, quickstart->button->id, 0);
229 input_sync(quickstart_input); 168 input_sync(quickstart_input);
169 break;
170 default:
171 pr_err("Unexpected ACPI event notify (%u)\n", event);
172 break;
230 } 173 }
231 return;
232} 174}
233 175
234static void quickstart_acpi_ghid(struct quickstart_acpi *quickstart) 176static int quickstart_acpi_ghid(struct quickstart_acpi *quickstart)
235{ 177{
236 acpi_status status; 178 acpi_status status;
237 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 179 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
238 uint32_t usageid = 0; 180 int ret = 0;
239
240 if (!quickstart)
241 return;
242 181
243 /* This returns a buffer telling the button usage ID, 182 /*
244 * and triggers pending notify events (The ones before booting). */ 183 * This returns a buffer telling the button usage ID,
245 status = acpi_evaluate_object(quickstart->device->handle, 184 * and triggers pending notify events (The ones before booting).
246 "GHID", NULL, &buffer); 185 */
247 if (ACPI_FAILURE(status) || !buffer.pointer) { 186 status = acpi_evaluate_object(quickstart->device->handle, "GHID", NULL,
248 printk(KERN_ERR "quickstart: %s GHID method failed.\n", 187 &buffer);
249 quickstart->btn->name); 188 if (ACPI_FAILURE(status)) {
250 return; 189 pr_err("%s GHID method failed\n", quickstart->button->name);
190 return -EINVAL;
251 } 191 }
252 192
253 if (buffer.length < 8) 193 /*
254 return; 194 * <<The GHID method can return a BYTE, WORD, or DWORD.
255
256 /* <<The GHID method can return a BYTE, WORD, or DWORD.
257 * The value must be encoded in little-endian byte 195 * The value must be encoded in little-endian byte
258 * order (least significant byte first).>> */ 196 * order (least significant byte first).>>
259 usageid = *((uint32_t *)(buffer.pointer + (buffer.length - 8))); 197 */
260 quickstart->btn->id = usageid; 198 switch (buffer.length) {
199 case 1:
200 quickstart->button->id = *(uint8_t *)buffer.pointer;
201 break;
202 case 2:
203 quickstart->button->id = *(uint16_t *)buffer.pointer;
204 break;
205 case 4:
206 quickstart->button->id = *(uint32_t *)buffer.pointer;
207 break;
208 case 8:
209 quickstart->button->id = *(uint64_t *)buffer.pointer;
210 break;
211 default:
212 pr_err("%s GHID method returned buffer of unexpected length %lu\n",
213 quickstart->button->name,
214 (unsigned long)buffer.length);
215 ret = -EINVAL;
216 break;
217 }
261 218
262 kfree(buffer.pointer); 219 kfree(buffer.pointer);
220
221 return ret;
263} 222}
264 223
265static int quickstart_acpi_config(struct quickstart_acpi *quickstart, char *bid) 224static int quickstart_acpi_config(struct quickstart_acpi *quickstart)
266{ 225{
267 int len = strlen(bid); 226 char *bid = acpi_device_bid(quickstart->device);
268 int ret; 227 char *name;
269 228
270 /* Add button to list */ 229 name = kmalloc(strlen(bid) + 1, GFP_KERNEL);
271 ret = quickstart_btnlst_add(&quickstart->btn); 230 if (!name)
272 if (ret) 231 return -ENOMEM;
273 return ret;
274 232
275 quickstart->btn->name = kzalloc(len + 1, GFP_KERNEL); 233 /* Add new button to list */
276 if (!quickstart->btn->name) { 234 quickstart->button = quickstart_buttons_add();
277 quickstart_btnlst_free(); 235 if (!quickstart->button) {
236 kfree(name);
278 return -ENOMEM; 237 return -ENOMEM;
279 } 238 }
280 strcpy(quickstart->btn->name, bid); 239
240 quickstart->button->name = name;
241 strcpy(quickstart->button->name, bid);
281 242
282 return 0; 243 return 0;
283} 244}
284 245
285static int quickstart_acpi_add(struct acpi_device *device) 246static int quickstart_acpi_add(struct acpi_device *device)
286{ 247{
287 int ret = 0; 248 int ret;
288 acpi_status status = AE_OK; 249 acpi_status status;
289 struct quickstart_acpi *quickstart = NULL; 250 struct quickstart_acpi *quickstart;
290 251
291 if (!device) 252 if (!device)
292 return -EINVAL; 253 return -EINVAL;
293 254
294 quickstart = kzalloc(sizeof(struct quickstart_acpi), GFP_KERNEL); 255 quickstart = kzalloc(sizeof(*quickstart), GFP_KERNEL);
295 if (!quickstart) 256 if (!quickstart)
296 return -ENOMEM; 257 return -ENOMEM;
297 258
298 quickstart->device = device; 259 quickstart->device = device;
260
299 strcpy(acpi_device_name(device), QUICKSTART_ACPI_DEVICE_NAME); 261 strcpy(acpi_device_name(device), QUICKSTART_ACPI_DEVICE_NAME);
300 strcpy(acpi_device_class(device), QUICKSTART_ACPI_CLASS); 262 strcpy(acpi_device_class(device), QUICKSTART_ACPI_CLASS);
301 device->driver_data = quickstart; 263 device->driver_data = quickstart;
302 264
303 /* Add button to list and initialize some stuff */ 265 /* Add button to list and initialize some stuff */
304 ret = quickstart_acpi_config(quickstart, acpi_device_bid(device)); 266 ret = quickstart_acpi_config(quickstart);
305 if (ret) 267 if (ret < 0)
306 goto fail_config; 268 goto fail_config;
307 269
308 status = acpi_install_notify_handler(device->handle, 270 status = acpi_install_notify_handler(device->handle, ACPI_ALL_NOTIFY,
309 ACPI_ALL_NOTIFY,
310 quickstart_acpi_notify, 271 quickstart_acpi_notify,
311 quickstart); 272 quickstart);
312 if (ACPI_FAILURE(status)) { 273 if (ACPI_FAILURE(status)) {
313 printk(KERN_ERR "quickstart: Notify handler install error\n"); 274 pr_err("Notify handler install error\n");
314 ret = -ENODEV; 275 ret = -ENODEV;
315 goto fail_installnotify; 276 goto fail_installnotify;
316 } 277 }
317 278
318 quickstart_acpi_ghid(quickstart); 279 ret = quickstart_acpi_ghid(quickstart);
280 if (ret < 0)
281 goto fail_ghid;
319 282
320 return 0; 283 return 0;
321 284
285fail_ghid:
286 acpi_remove_notify_handler(device->handle, ACPI_ALL_NOTIFY,
287 quickstart_acpi_notify);
288
322fail_installnotify: 289fail_installnotify:
323 quickstart_btnlst_del(quickstart->btn); 290 quickstart_button_del(quickstart->button);
324 291
325fail_config: 292fail_config:
326 293
@@ -331,28 +298,54 @@ fail_config:
331 298
332static int quickstart_acpi_remove(struct acpi_device *device, int type) 299static int quickstart_acpi_remove(struct acpi_device *device, int type)
333{ 300{
334 acpi_status status = 0; 301 acpi_status status;
335 struct quickstart_acpi *quickstart = NULL; 302 struct quickstart_acpi *quickstart;
336 303
337 if (!device || !acpi_driver_data(device)) 304 if (!device)
338 return -EINVAL; 305 return -EINVAL;
339 306
340 quickstart = acpi_driver_data(device); 307 quickstart = acpi_driver_data(device);
308 if (!quickstart)
309 return -EINVAL;
341 310
342 status = acpi_remove_notify_handler(device->handle, 311 status = acpi_remove_notify_handler(device->handle, ACPI_ALL_NOTIFY,
343 ACPI_ALL_NOTIFY, 312 quickstart_acpi_notify);
344 quickstart_acpi_notify);
345 if (ACPI_FAILURE(status)) 313 if (ACPI_FAILURE(status))
346 printk(KERN_ERR "quickstart: Error removing notify handler\n"); 314 pr_err("Error removing notify handler\n");
347
348 315
349 kfree(quickstart); 316 kfree(quickstart);
350 317
351 return 0; 318 return 0;
352} 319}
353 320
354/* Module functions */ 321/* Platform driver structs */
322static DEVICE_ATTR(pressed_button, 0666, quickstart_pressed_button_show,
323 quickstart_pressed_button_store);
324static DEVICE_ATTR(buttons, 0444, quickstart_buttons_show, NULL);
325static struct platform_device *pf_device;
326static struct platform_driver pf_driver = {
327 .driver = {
328 .name = QUICKSTART_PF_DRIVER_NAME,
329 .owner = THIS_MODULE,
330 }
331};
332
333static const struct acpi_device_id quickstart_device_ids[] = {
334 {QUICKSTART_ACPI_HID, 0},
335 {"", 0},
336};
337
338static struct acpi_driver quickstart_acpi_driver = {
339 .name = "quickstart",
340 .class = QUICKSTART_ACPI_CLASS,
341 .ids = quickstart_device_ids,
342 .ops = {
343 .add = quickstart_acpi_add,
344 .remove = quickstart_acpi_remove,
345 },
346};
355 347
348/* Module functions */
356static void quickstart_exit(void) 349static void quickstart_exit(void)
357{ 350{
358 input_unregister_device(quickstart_input); 351 input_unregister_device(quickstart_input);
@@ -366,15 +359,12 @@ static void quickstart_exit(void)
366 359
367 acpi_bus_unregister_driver(&quickstart_acpi_driver); 360 acpi_bus_unregister_driver(&quickstart_acpi_driver);
368 361
369 quickstart_btnlst_free(); 362 quickstart_buttons_free();
370
371 return;
372} 363}
373 364
374static int __init quickstart_init_input(void) 365static int __init quickstart_init_input(void)
375{ 366{
376 struct quickstart_btn **ptr = &quickstart_data.btn_lst; 367 struct quickstart_button *b;
377 int count;
378 int ret; 368 int ret;
379 369
380 quickstart_input = input_allocate_device(); 370 quickstart_input = input_allocate_device();
@@ -385,11 +375,9 @@ static int __init quickstart_init_input(void)
385 quickstart_input->name = "Quickstart ACPI Buttons"; 375 quickstart_input->name = "Quickstart ACPI Buttons";
386 quickstart_input->id.bustype = BUS_HOST; 376 quickstart_input->id.bustype = BUS_HOST;
387 377
388 while (*ptr) { 378 list_for_each_entry(b, &buttons, list) {
389 count++;
390 set_bit(EV_KEY, quickstart_input->evbit); 379 set_bit(EV_KEY, quickstart_input->evbit);
391 set_bit((*ptr)->id, quickstart_input->keybit); 380 set_bit(b->id, quickstart_input->keybit);
392 ptr = &((*ptr)->next);
393 } 381 }
394 382
395 ret = input_register_device(quickstart_input); 383 ret = input_register_device(quickstart_input);
@@ -415,7 +403,7 @@ static int __init quickstart_init(void)
415 return ret; 403 return ret;
416 404
417 /* If existing bus with no devices */ 405 /* If existing bus with no devices */
418 if (!quickstart_data.btn_lst) { 406 if (list_empty(&buttons)) {
419 ret = -ENODEV; 407 ret = -ENODEV;
420 goto fail_pfdrv_reg; 408 goto fail_pfdrv_reg;
421 } 409 }
@@ -444,14 +432,12 @@ static int __init quickstart_init(void)
444 if (ret) 432 if (ret)
445 goto fail_dev_file2; 433 goto fail_dev_file2;
446 434
447
448 /* Input device */ 435 /* Input device */
449 ret = quickstart_init_input(); 436 ret = quickstart_init_input();
450 if (ret) 437 if (ret)
451 goto fail_input; 438 goto fail_input;
452 439
453 printk(KERN_INFO "quickstart: ACPI Direct App Launch ver %s\n", 440 pr_info("ACPI Direct App Launch ver %s\n", QUICKSTART_VERSION);
454 QUICKSTART_VERSION);
455 441
456 return 0; 442 return 0;
457fail_input: 443fail_input:
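
The GHID rework above reads the returned usage ID at whatever width the
ACPI method actually produced (1, 2, 4 or 8 little-endian bytes) rather
than assuming a fixed offset into the buffer. The decode reduced to a
self-contained sketch; like the driver it assumes a little-endian host,
and all names are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int decode_ghid(const void *buf, size_t len, uint64_t *id)
{
	switch (len) {
	case 1: { uint8_t v;  memcpy(&v, buf, 1); *id = v; return 0; }
	case 2: { uint16_t v; memcpy(&v, buf, 2); *id = v; return 0; }
	case 4: { uint32_t v; memcpy(&v, buf, 4); *id = v; return 0; }
	case 8: { uint64_t v; memcpy(&v, buf, 8); *id = v; return 0; }
	default:
		return -1;	/* unexpected buffer length */
	}
}

int main(void)
{
	const uint8_t raw[2] = { 0x34, 0x12 };	/* LE 0x1234 */
	uint64_t id;

	if (!decode_ghid(raw, sizeof(raw), &id))
		printf("usage id = 0x%llx\n", (unsigned long long)id);
	return 0;
}
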
diff --git a/drivers/staging/ramster/Kconfig b/drivers/staging/ramster/Kconfig
new file mode 100644
index 000000000000..8b57b87edda4
--- /dev/null
+++ b/drivers/staging/ramster/Kconfig
@@ -0,0 +1,17 @@
1# The dependency on CONFIG_BROKEN exists because there is a commit dependency
2# on a cleancache naming change to be submitted by Konrad Wilk:
3# a39c00ded70339603ffe1b0ffdf3ade85bcf009a ("Merge branch 'stable/cleancache.v13'
4# into linux-next"). Once that commit is present, BROKEN can be removed.
5config RAMSTER
6 bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
7 depends on (CLEANCACHE || FRONTSWAP) && CONFIGFS_FS=y && !ZCACHE && !XVMALLOC && !HIGHMEM && BROKEN
8 select LZO_COMPRESS
9 select LZO_DECOMPRESS
10 default n
11 help
12 RAMster allows RAM on other machines in a cluster to be utilized
13 dynamically and symmetrically instead of swapping to a local swap
14 disk, thus improving performance on memory-constrained workloads
15 while minimizing total RAM across the cluster. RAMster, like
16 zcache, compresses swap pages into local RAM, but then remotifies
17 the compressed pages to another node in the RAMster cluster.
diff --git a/drivers/staging/ramster/Makefile b/drivers/staging/ramster/Makefile
new file mode 100644
index 000000000000..bcc13c87f996
--- /dev/null
+++ b/drivers/staging/ramster/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_RAMSTER) += zcache-main.o tmem.o r2net.o xvmalloc.o cluster/
diff --git a/drivers/staging/ramster/TODO b/drivers/staging/ramster/TODO
new file mode 100644
index 000000000000..46fcf0c58acf
--- /dev/null
+++ b/drivers/staging/ramster/TODO
@@ -0,0 +1,13 @@
1For this staging driver, RAMster duplicates code from drivers/staging/zcache
2then incorporates changes to the local copy of the code. For V5, it also
3directly incorporates the soon-to-be-removed drivers/staging/zram/xvmalloc.[ch]
4as all testing has been done with xvmalloc rather than the new zsmalloc.
5Before RAMster can be promoted from staging, the zcache and RAMster drivers
6should be either merged or reorganized to separate out common code.
7
8Until V4, RAMster duplicated code from fs/ocfs2/cluster, but this made
9RAMster incompatible with ocfs2 running in the same kernel and included
10lots of code that could be removed. As of V5, the ocfs2 code has been
11mined and made RAMster-specific, made to communicate with a userland
12ramster-tools package rather than ocfs2-tools, and can co-exist with ocfs2
13both in the same kernel and in userland on the same machine.
diff --git a/drivers/staging/ramster/cluster/Makefile b/drivers/staging/ramster/cluster/Makefile
new file mode 100644
index 000000000000..9c6943652c01
--- /dev/null
+++ b/drivers/staging/ramster/cluster/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_RAMSTER) += ramster_nodemanager.o
2
3ramster_nodemanager-objs := heartbeat.o masklog.o nodemanager.o tcp.o
diff --git a/drivers/staging/ramster/cluster/heartbeat.c b/drivers/staging/ramster/cluster/heartbeat.c
new file mode 100644
index 000000000000..00209490756e
--- /dev/null
+++ b/drivers/staging/ramster/cluster/heartbeat.c
@@ -0,0 +1,464 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public
17 * License along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/configfs.h>
25
26#include "heartbeat.h"
27#include "tcp.h"
28#include "nodemanager.h"
29
30#include "masklog.h"
31
32/*
33 * The first heartbeat pass had one global thread that would serialize all hb
34 * callback calls. This global serializing sem should only be removed once
35 * we've made sure that all callees can deal with being called concurrently
36 * from multiple hb region threads.
37 */
38static DECLARE_RWSEM(r2hb_callback_sem);
39
40/*
41 * multiple hb threads are watching multiple regions. A node is live
42 * whenever any of the threads sees activity from the node in its region.
43 */
44static DEFINE_SPINLOCK(r2hb_live_lock);
45static unsigned long r2hb_live_node_bitmap[BITS_TO_LONGS(R2NM_MAX_NODES)];
46
47static struct r2hb_callback {
48 struct list_head list;
49} r2hb_callbacks[R2HB_NUM_CB];
50
51enum r2hb_heartbeat_modes {
52 R2HB_HEARTBEAT_LOCAL = 0,
53 R2HB_HEARTBEAT_GLOBAL,
54 R2HB_HEARTBEAT_NUM_MODES,
55};
56
57char *r2hb_heartbeat_mode_desc[R2HB_HEARTBEAT_NUM_MODES] = {
58 "local", /* R2HB_HEARTBEAT_LOCAL */
59 "global", /* R2HB_HEARTBEAT_GLOBAL */
60};
61
62unsigned int r2hb_dead_threshold = R2HB_DEFAULT_DEAD_THRESHOLD;
63unsigned int r2hb_heartbeat_mode = R2HB_HEARTBEAT_LOCAL;
64
65/* Only sets a new threshold if there are no active regions.
66 *
67 * No locking or otherwise interesting code is required for reading
68 * r2hb_dead_threshold as it can't change once regions are active and
69 * it's not interesting to anyone until then anyway. */
70static void r2hb_dead_threshold_set(unsigned int threshold)
71{
72 if (threshold > R2HB_MIN_DEAD_THRESHOLD) {
73 spin_lock(&r2hb_live_lock);
74 r2hb_dead_threshold = threshold;
75 spin_unlock(&r2hb_live_lock);
76 }
77}
78
79static int r2hb_global_heartbeat_mode_set(unsigned int hb_mode)
80{
81 int ret = -1;
82
83 if (hb_mode < R2HB_HEARTBEAT_NUM_MODES) {
84 spin_lock(&r2hb_live_lock);
85 r2hb_heartbeat_mode = hb_mode;
86 ret = 0;
87 spin_unlock(&r2hb_live_lock);
88 }
89
90 return ret;
91}
92
93void r2hb_exit(void)
94{
95}
96
97int r2hb_init(void)
98{
99 int i;
100
101 for (i = 0; i < ARRAY_SIZE(r2hb_callbacks); i++)
102 INIT_LIST_HEAD(&r2hb_callbacks[i].list);
103
104 memset(r2hb_live_node_bitmap, 0, sizeof(r2hb_live_node_bitmap));
105
106 return 0;
107}
108
109/* if we're already in a callback then we're already serialized by the sem */
110static void r2hb_fill_node_map_from_callback(unsigned long *map,
111 unsigned bytes)
112{
113 BUG_ON(bytes < (BITS_TO_LONGS(R2NM_MAX_NODES) * sizeof(unsigned long)));
114
115 memcpy(map, &r2hb_live_node_bitmap, bytes);
116}
117
118/*
119 * get a map of all nodes that are heartbeating in any regions
120 */
121void r2hb_fill_node_map(unsigned long *map, unsigned bytes)
122{
123 /* callers want to serialize this map and callbacks so that they
124 * can trust that they don't miss nodes coming to the party */
125 down_read(&r2hb_callback_sem);
126 spin_lock(&r2hb_live_lock);
127 r2hb_fill_node_map_from_callback(map, bytes);
128 spin_unlock(&r2hb_live_lock);
129 up_read(&r2hb_callback_sem);
130}
131EXPORT_SYMBOL_GPL(r2hb_fill_node_map);
132
133/*
134 * heartbeat configfs bits. The heartbeat set is a default set under
135 * the cluster set in nodemanager.c.
136 */
137
138/* heartbeat set */
139
140struct r2hb_hb_group {
141 struct config_group hs_group;
142 /* some stuff? */
143};
144
145static struct r2hb_hb_group *to_r2hb_hb_group(struct config_group *group)
146{
147 return group ?
148 container_of(group, struct r2hb_hb_group, hs_group)
149 : NULL;
150}
151
152static struct config_item r2hb_config_item;
153
154static struct config_item *r2hb_hb_group_make_item(struct config_group *group,
155 const char *name)
156{
157 int ret;
158
159 if (strlen(name) > R2HB_MAX_REGION_NAME_LEN) {
160 ret = -ENAMETOOLONG;
161		goto out;
162 }
163
164 config_item_put(&r2hb_config_item);
165
166 return &r2hb_config_item;
167out:
168 return ERR_PTR(ret);
169}
170
171static void r2hb_hb_group_drop_item(struct config_group *group,
172 struct config_item *item)
173{
174 if (r2hb_global_heartbeat_active()) {
175 printk(KERN_NOTICE "ramster: Heartbeat %s "
176 "on region %s (%s)\n",
177 "stopped/aborted", config_item_name(item),
178 "no region");
179 }
180
181 config_item_put(item);
182}
183
184struct r2hb_hb_group_attribute {
185 struct configfs_attribute attr;
186 ssize_t (*show)(struct r2hb_hb_group *, char *);
187 ssize_t (*store)(struct r2hb_hb_group *, const char *, size_t);
188};
189
190static ssize_t r2hb_hb_group_show(struct config_item *item,
191 struct configfs_attribute *attr,
192 char *page)
193{
194 struct r2hb_hb_group *reg = to_r2hb_hb_group(to_config_group(item));
195 struct r2hb_hb_group_attribute *r2hb_hb_group_attr =
196 container_of(attr, struct r2hb_hb_group_attribute, attr);
197 ssize_t ret = 0;
198
199 if (r2hb_hb_group_attr->show)
200 ret = r2hb_hb_group_attr->show(reg, page);
201 return ret;
202}
203
204static ssize_t r2hb_hb_group_store(struct config_item *item,
205 struct configfs_attribute *attr,
206 const char *page, size_t count)
207{
208 struct r2hb_hb_group *reg = to_r2hb_hb_group(to_config_group(item));
209 struct r2hb_hb_group_attribute *r2hb_hb_group_attr =
210 container_of(attr, struct r2hb_hb_group_attribute, attr);
211 ssize_t ret = -EINVAL;
212
213 if (r2hb_hb_group_attr->store)
214 ret = r2hb_hb_group_attr->store(reg, page, count);
215 return ret;
216}
217
218static ssize_t r2hb_hb_group_threshold_show(struct r2hb_hb_group *group,
219 char *page)
220{
221 return sprintf(page, "%u\n", r2hb_dead_threshold);
222}
223
224static ssize_t r2hb_hb_group_threshold_store(struct r2hb_hb_group *group,
225 const char *page,
226 size_t count)
227{
228 unsigned long tmp;
229 char *p = (char *)page;
230 int err;
231
232 err = kstrtoul(p, 10, &tmp);
233 if (err)
234 return err;
235
236 /* this will validate ranges for us. */
237 r2hb_dead_threshold_set((unsigned int) tmp);
238
239 return count;
240}
241
242static
243ssize_t r2hb_hb_group_mode_show(struct r2hb_hb_group *group,
244 char *page)
245{
246 return sprintf(page, "%s\n",
247 r2hb_heartbeat_mode_desc[r2hb_heartbeat_mode]);
248}
249
250static
251ssize_t r2hb_hb_group_mode_store(struct r2hb_hb_group *group,
252 const char *page, size_t count)
253{
254 unsigned int i;
255 int ret;
256 size_t len;
257
258 len = (page[count - 1] == '\n') ? count - 1 : count;
259 if (!len)
260 return -EINVAL;
261
262 for (i = 0; i < R2HB_HEARTBEAT_NUM_MODES; ++i) {
263 if (strnicmp(page, r2hb_heartbeat_mode_desc[i], len))
264 continue;
265
266		ret = r2hb_global_heartbeat_mode_set(i);
267 if (!ret)
268 printk(KERN_NOTICE "ramster: Heartbeat mode "
269 "set to %s\n",
270 r2hb_heartbeat_mode_desc[i]);
271 return count;
272 }
273
274 return -EINVAL;
275
276}
277
278static struct r2hb_hb_group_attribute r2hb_hb_group_attr_threshold = {
279 .attr = { .ca_owner = THIS_MODULE,
280 .ca_name = "dead_threshold",
281 .ca_mode = S_IRUGO | S_IWUSR },
282 .show = r2hb_hb_group_threshold_show,
283 .store = r2hb_hb_group_threshold_store,
284};
285
286static struct r2hb_hb_group_attribute r2hb_hb_group_attr_mode = {
287 .attr = { .ca_owner = THIS_MODULE,
288 .ca_name = "mode",
289 .ca_mode = S_IRUGO | S_IWUSR },
290 .show = r2hb_hb_group_mode_show,
291 .store = r2hb_hb_group_mode_store,
292};
293
294static struct configfs_attribute *r2hb_hb_group_attrs[] = {
295 &r2hb_hb_group_attr_threshold.attr,
296 &r2hb_hb_group_attr_mode.attr,
297 NULL,
298};
299
300static struct configfs_item_operations r2hb_heartbeat_group_item_ops = {
301 .show_attribute = r2hb_hb_group_show,
302 .store_attribute = r2hb_hb_group_store,
303};
304
305static struct configfs_group_operations r2hb_hb_group_group_ops = {
306 .make_item = r2hb_hb_group_make_item,
307 .drop_item = r2hb_hb_group_drop_item,
308};
309
310static struct config_item_type r2hb_hb_group_type = {
311 .ct_group_ops = &r2hb_hb_group_group_ops,
312	.ct_item_ops	= &r2hb_heartbeat_group_item_ops,
313 .ct_attrs = r2hb_hb_group_attrs,
314 .ct_owner = THIS_MODULE,
315};
316
317/* this is just here to avoid touching group in heartbeat.h which the
318 * entire damn world #includes */
319struct config_group *r2hb_alloc_hb_set(void)
320{
321 struct r2hb_hb_group *hs = NULL;
322 struct config_group *ret = NULL;
323
324 hs = kzalloc(sizeof(struct r2hb_hb_group), GFP_KERNEL);
325 if (hs == NULL)
326 goto out;
327
328 config_group_init_type_name(&hs->hs_group, "heartbeat",
329 &r2hb_hb_group_type);
330
331 ret = &hs->hs_group;
332out:
333 if (ret == NULL)
334 kfree(hs);
335 return ret;
336}
337
338void r2hb_free_hb_set(struct config_group *group)
339{
340 struct r2hb_hb_group *hs = to_r2hb_hb_group(group);
341 kfree(hs);
342}
343
344/* hb callback registration and issuing */
345
346static struct r2hb_callback *hbcall_from_type(enum r2hb_callback_type type)
347{
348 if (type == R2HB_NUM_CB)
349 return ERR_PTR(-EINVAL);
350
351 return &r2hb_callbacks[type];
352}
353
354void r2hb_setup_callback(struct r2hb_callback_func *hc,
355 enum r2hb_callback_type type,
356 r2hb_cb_func *func,
357 void *data,
358 int priority)
359{
360 INIT_LIST_HEAD(&hc->hc_item);
361 hc->hc_func = func;
362 hc->hc_data = data;
363 hc->hc_priority = priority;
364 hc->hc_type = type;
365 hc->hc_magic = R2HB_CB_MAGIC;
366}
367EXPORT_SYMBOL_GPL(r2hb_setup_callback);
368
369int r2hb_register_callback(const char *region_uuid,
370 struct r2hb_callback_func *hc)
371{
372 struct r2hb_callback_func *tmp;
373 struct list_head *iter;
374 struct r2hb_callback *hbcall;
375 int ret;
376
377 BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);
378 BUG_ON(!list_empty(&hc->hc_item));
379
380 hbcall = hbcall_from_type(hc->hc_type);
381 if (IS_ERR(hbcall)) {
382 ret = PTR_ERR(hbcall);
383 goto out;
384 }
385
386 down_write(&r2hb_callback_sem);
387
388 list_for_each(iter, &hbcall->list) {
389 tmp = list_entry(iter, struct r2hb_callback_func, hc_item);
390 if (hc->hc_priority < tmp->hc_priority) {
391 list_add_tail(&hc->hc_item, iter);
392 break;
393 }
394 }
395 if (list_empty(&hc->hc_item))
396 list_add_tail(&hc->hc_item, &hbcall->list);
397
398 up_write(&r2hb_callback_sem);
399 ret = 0;
400out:
401 mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
402 ret, __builtin_return_address(0), hc);
403 return ret;
404}
405EXPORT_SYMBOL_GPL(r2hb_register_callback);
406
407void r2hb_unregister_callback(const char *region_uuid,
408 struct r2hb_callback_func *hc)
409{
410 BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);
411
412 mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
413 __builtin_return_address(0), hc);
414
415 /* XXX Can this happen _with_ a region reference? */
416 if (list_empty(&hc->hc_item))
417 return;
418
419 down_write(&r2hb_callback_sem);
420
421 list_del_init(&hc->hc_item);
422
423 up_write(&r2hb_callback_sem);
424}
425EXPORT_SYMBOL_GPL(r2hb_unregister_callback);
426
427int r2hb_check_node_heartbeating_from_callback(u8 node_num)
428{
429 unsigned long testing_map[BITS_TO_LONGS(R2NM_MAX_NODES)];
430
431 r2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map));
432 if (!test_bit(node_num, testing_map)) {
433 mlog(ML_HEARTBEAT,
434 "node (%u) does not have heartbeating enabled.\n",
435 node_num);
436 return 0;
437 }
438
439 return 1;
440}
441EXPORT_SYMBOL_GPL(r2hb_check_node_heartbeating_from_callback);
442
443void r2hb_stop_all_regions(void)
444{
445}
446EXPORT_SYMBOL_GPL(r2hb_stop_all_regions);
447
448/*
449 * this is just a hack until we get the plumbing which flips file systems
450 * read only and drops the hb ref instead of killing the node dead.
451 */
452int r2hb_global_heartbeat_active(void)
453{
454 return (r2hb_heartbeat_mode == R2HB_HEARTBEAT_GLOBAL);
455}
456EXPORT_SYMBOL(r2hb_global_heartbeat_active);
457
458/* added for RAMster */
459void r2hb_manual_set_node_heartbeating(int node_num)
460{
461 if (node_num < R2NM_MAX_NODES)
462 set_bit(node_num, r2hb_live_node_bitmap);
463}
464EXPORT_SYMBOL(r2hb_manual_set_node_heartbeating);
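
r2hb_register_callback() above inserts each new callback before the
first existing entry with a strictly higher priority, so callbacks with
equal priority keep their registration order. The same insertion on a
plain singly linked list (illustrative sketch, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct cb {
	int priority;
	struct cb *next;
};

static void insert_by_priority(struct cb **head, struct cb *new_cb)
{
	struct cb **pp = head;

	/* Advance past entries whose priority is <= ours, so equal
	 * priorities preserve registration order, like the loop above. */
	while (*pp && new_cb->priority >= (*pp)->priority)
		pp = &(*pp)->next;
	new_cb->next = *pp;
	*pp = new_cb;
}

int main(void)
{
	struct cb *head = NULL, *c;
	int prios[] = { 3, 1, 2, 1 };
	unsigned int i;

	for (i = 0; i < sizeof(prios) / sizeof(prios[0]); i++) {
		c = calloc(1, sizeof(*c));
		if (!c)
			return 1;
		c->priority = prios[i];
		insert_by_priority(&head, c);
	}
	for (c = head; c; c = c->next)
		printf("%d ", c->priority);	/* prints: 1 1 2 3 */
	printf("\n");
	return 0;
}
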
diff --git a/drivers/staging/ramster/cluster/heartbeat.h b/drivers/staging/ramster/cluster/heartbeat.h
new file mode 100644
index 000000000000..6cbc775bd63b
--- /dev/null
+++ b/drivers/staging/ramster/cluster/heartbeat.h
@@ -0,0 +1,87 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * heartbeat.h
5 *
6 * Function prototypes
7 *
8 * Copyright (C) 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
24 *
25 */
26
27#ifndef R2CLUSTER_HEARTBEAT_H
28#define R2CLUSTER_HEARTBEAT_H
29
30#define R2HB_REGION_TIMEOUT_MS 2000
31
32#define R2HB_MAX_REGION_NAME_LEN 32
33
34/* number of changes to be seen as live */
35#define R2HB_LIVE_THRESHOLD 2
36/* number of equal samples to be seen as dead */
37extern unsigned int r2hb_dead_threshold;
38#define R2HB_DEFAULT_DEAD_THRESHOLD 31
39/* Otherwise MAX_WRITE_TIMEOUT will be zero... */
40#define R2HB_MIN_DEAD_THRESHOLD 2
41#define R2HB_MAX_WRITE_TIMEOUT_MS \
42 (R2HB_REGION_TIMEOUT_MS * (r2hb_dead_threshold - 1))
43
44#define R2HB_CB_MAGIC 0x51d1e4ec
45
46/* callback stuff */
47enum r2hb_callback_type {
48 R2HB_NODE_DOWN_CB = 0,
49 R2HB_NODE_UP_CB,
50 R2HB_NUM_CB
51};
52
53struct r2nm_node;
54typedef void (r2hb_cb_func)(struct r2nm_node *, int, void *);
55
56struct r2hb_callback_func {
57 u32 hc_magic;
58 struct list_head hc_item;
59 r2hb_cb_func *hc_func;
60 void *hc_data;
61 int hc_priority;
62 enum r2hb_callback_type hc_type;
63};
64
65struct config_group *r2hb_alloc_hb_set(void);
66void r2hb_free_hb_set(struct config_group *group);
67
68void r2hb_setup_callback(struct r2hb_callback_func *hc,
69 enum r2hb_callback_type type,
70 r2hb_cb_func *func,
71 void *data,
72 int priority);
73int r2hb_register_callback(const char *region_uuid,
74 struct r2hb_callback_func *hc);
75void r2hb_unregister_callback(const char *region_uuid,
76 struct r2hb_callback_func *hc);
77void r2hb_fill_node_map(unsigned long *map,
78 unsigned bytes);
79void r2hb_exit(void);
80int r2hb_init(void);
81int r2hb_check_node_heartbeating_from_callback(u8 node_num);
82void r2hb_stop_all_regions(void);
83int r2hb_get_all_regions(char *region_uuids, u8 numregions);
84int r2hb_global_heartbeat_active(void);
85void r2hb_manual_set_node_heartbeating(int);
86
87#endif /* R2CLUSTER_HEARTBEAT_H */
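For orientation, the callback API declared above is consumed roughly as follows. This is a minimal sketch, not code from this series: the function names and the 0x10 priority are invented, and error handling is elided. r2hb_register_callback() keeps the list sorted ascending, so lower hc_priority values fire first, and the region_uuid argument is unused by this implementation, so NULL is fine.

	static void my_node_down_cb(struct r2nm_node *node, int node_num,
				    void *data)
	{
		/* invoked when heartbeat declares a node dead */
	}

	static struct r2hb_callback_func my_down_cb;

	static int my_setup(void)
	{
		/* 0x10 is an arbitrary illustrative priority */
		r2hb_setup_callback(&my_down_cb, R2HB_NODE_DOWN_CB,
				    my_node_down_cb, NULL, 0x10);
		return r2hb_register_callback(NULL, &my_down_cb);
	}

	static void my_teardown(void)
	{
		r2hb_unregister_callback(NULL, &my_down_cb);
	}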
diff --git a/drivers/staging/ramster/cluster/masklog.c b/drivers/staging/ramster/cluster/masklog.c
new file mode 100644
index 000000000000..1261d8579aae
--- /dev/null
+++ b/drivers/staging/ramster/cluster/masklog.c
@@ -0,0 +1,155 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public
17 * License along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/proc_fs.h>
25#include <linux/seq_file.h>
26#include <linux/string.h>
27#include <linux/uaccess.h>
28
29#include "masklog.h"
30
31struct mlog_bits r2_mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
32EXPORT_SYMBOL_GPL(r2_mlog_and_bits);
33struct mlog_bits r2_mlog_not_bits = MLOG_BITS_RHS(0);
34EXPORT_SYMBOL_GPL(r2_mlog_not_bits);
35
36static ssize_t mlog_mask_show(u64 mask, char *buf)
37{
38 char *state;
39
40 if (__mlog_test_u64(mask, r2_mlog_and_bits))
41 state = "allow";
42 else if (__mlog_test_u64(mask, r2_mlog_not_bits))
43 state = "deny";
44 else
45 state = "off";
46
47 return snprintf(buf, PAGE_SIZE, "%s\n", state);
48}
49
50static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count)
51{
52 if (!strnicmp(buf, "allow", 5)) {
53 __mlog_set_u64(mask, r2_mlog_and_bits);
54 __mlog_clear_u64(mask, r2_mlog_not_bits);
55 } else if (!strnicmp(buf, "deny", 4)) {
56 __mlog_set_u64(mask, r2_mlog_not_bits);
57 __mlog_clear_u64(mask, r2_mlog_and_bits);
58 } else if (!strnicmp(buf, "off", 3)) {
59 __mlog_clear_u64(mask, r2_mlog_not_bits);
60 __mlog_clear_u64(mask, r2_mlog_and_bits);
61 } else
62 return -EINVAL;
63
64 return count;
65}
66
67struct mlog_attribute {
68 struct attribute attr;
69 u64 mask;
70};
71
72#define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr)
73
74#define define_mask(_name) { \
75 .attr = { \
76 .name = #_name, \
77 .mode = S_IRUGO | S_IWUSR, \
78 }, \
79 .mask = ML_##_name, \
80}
81
82static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
83 define_mask(TCP),
84 define_mask(MSG),
85 define_mask(SOCKET),
86 define_mask(HEARTBEAT),
87 define_mask(HB_BIO),
88 define_mask(DLMFS),
89 define_mask(DLM),
90 define_mask(DLM_DOMAIN),
91 define_mask(DLM_THREAD),
92 define_mask(DLM_MASTER),
93 define_mask(DLM_RECOVERY),
94 define_mask(DLM_GLUE),
95 define_mask(VOTE),
96 define_mask(CONN),
97 define_mask(QUORUM),
98 define_mask(BASTS),
99 define_mask(CLUSTER),
100 define_mask(ERROR),
101 define_mask(NOTICE),
102 define_mask(KTHREAD),
103};
104
105static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
106
107static ssize_t mlog_show(struct kobject *obj, struct attribute *attr,
108 char *buf)
109{
110 struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
111
112 return mlog_mask_show(mlog_attr->mask, buf);
113}
114
115static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
116 const char *buf, size_t count)
117{
118 struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
119
120 return mlog_mask_store(mlog_attr->mask, buf, count);
121}
122
123static const struct sysfs_ops mlog_attr_ops = {
124 .show = mlog_show,
125 .store = mlog_store,
126};
127
128static struct kobj_type mlog_ktype = {
129 .default_attrs = mlog_attr_ptrs,
130 .sysfs_ops = &mlog_attr_ops,
131};
132
133static struct kset mlog_kset = {
134 .kobj = {.ktype = &mlog_ktype},
135};
136
137int r2_mlog_sys_init(struct kset *r2cb_kset)
138{
139 int i = 0;
140
141 while (mlog_attrs[i].attr.mode) {
142 mlog_attr_ptrs[i] = &mlog_attrs[i].attr;
143 i++;
144 }
145 mlog_attr_ptrs[i] = NULL;
146
147 kobject_set_name(&mlog_kset.kobj, "logmask");
148 mlog_kset.kobj.kset = r2cb_kset;
149 return kset_register(&mlog_kset);
150}
151
152void r2_mlog_sys_shutdown(void)
153{
154 kset_unregister(&mlog_kset);
155}
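The and/not bit pairs flipped by these handlers are consumed by the mlog() macro in masklog.h, added next. A file nominates its default mask bit once, before the include, exactly as tcp.c does later in this commit; the function and message below are invented for illustration. Note that with CONFIG_RAMSTER_DEBUG_MASKLOG unset, non-ERROR/NOTICE calls compile away entirely.

	#define MLOG_MASK_PREFIX ML_TCP	/* this file's default bit */
	#include "masklog.h"

	static void example(unsigned int len, int status)
	{
		/* logged only if TCP or MSG is allowed and neither denied */
		mlog(ML_MSG, "queued %u bytes\n", len);

		/* ML_ERROR is allowed by default and goes to KERN_ERR */
		if (status)
			mlog_errno(status);
	}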
diff --git a/drivers/staging/ramster/cluster/masklog.h b/drivers/staging/ramster/cluster/masklog.h
new file mode 100644
index 000000000000..918ae110b699
--- /dev/null
+++ b/drivers/staging/ramster/cluster/masklog.h
@@ -0,0 +1,220 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * Copyright (C) 2005, 2012 Oracle. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public
17 * License along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA.
20 */
21
22#ifndef R2CLUSTER_MASKLOG_H
23#define R2CLUSTER_MASKLOG_H
24
25/*
26 * For now this is a trivial wrapper around printk() that gives the critical
27 * ability to enable sets of debugging output at run-time. In the future this
28 * will almost certainly be redirected to relayfs so that it can pay a
29 * substantially lower Heisenberg tax.
30 *
31 * Callers associate the message with a bitmask and a global bitmask is
32 * maintained with help from sysfs. If any of the bits match, the message is
33 * output.
34 *
35 * We must have efficient bit tests on i386 and it seems gcc still emits crazy
36 * code for the 64bit compare. It emits very good code for the dual unsigned
37 * long tests, though, completely avoiding tests that can never pass if the
38 * caller gives a constant bitmask that fills one of the longs with all 0s. So
39 * the desire is to have almost all of the calls decided on by comparing just
40 * one of the longs. This leads to having infrequently given bits that are
41 * frequently matched in the high bits.
42 *
43 * _ERROR and _NOTICE are used for messages that always go to the console and
44 * have appropriate KERN_ prefixes. We wrap these in our function instead of
45 * just calling printk() so that this can eventually make its way through
46 * relayfs along with the debugging messages. Everything else gets KERN_INFO.
47 * The inline tests and macro dance give GCC the opportunity to quite cleverly
48 * only emit the appropriate printk() when the caller passes in a constant
49 * mask, as is almost always the case.
50 *
51 * All this bitmask nonsense is managed from the files under
52 * /sys/fs/r2cb/logmask/. Reading the files gives a straightforward
53 * indication of which bits are allowed (allow) or denied (off/deny).
54 * HEARTBEAT deny
55 * HB_BIO deny
56 * TCP off
57 * MSG off
58 * SOCKET off
59 * ERROR allow
60 * NOTICE allow
61 *
62 * Writing changes the state of a given bit and requires a strictly formatted
63 * single write() call:
64 *
65 * write(fd, "allow", 5);
66 *
67 * Echoing allow/deny/off string into the logmask files can flip the bits
68 * on or off as expected; here is the bash script for example:
69 *
70 * log_mask="/sys/fs/r2cb/logmask"
71 * for node in TCP MSG SOCKET ERROR NOTICE; do
72 * echo allow >"$log_mask"/"$node"
73 * done
74 *
75 * The debugfs.ramster tool can also flip the bits with the -l option:
76 *
77 * debugfs.ramster -l TCP allow
78 */
79
80/* for task_struct */
81#include <linux/sched.h>
82
83/* bits that are frequently given and infrequently matched in the low word */
84/* NOTE: If you add a flag, you need to also update masklog.c! */
85#define ML_TCP 0x0000000000000001ULL /* net cluster/tcp.c */
86#define ML_MSG 0x0000000000000002ULL /* net network messages */
87#define ML_SOCKET 0x0000000000000004ULL /* net socket lifetime */
88#define ML_HEARTBEAT 0x0000000000000008ULL /* hb all heartbeat tracking */
89#define ML_HB_BIO 0x0000000000000010ULL /* hb io tracing */
90#define ML_DLMFS 0x0000000000000020ULL /* dlm user dlmfs */
91#define ML_DLM 0x0000000000000040ULL /* dlm general debugging */
92#define ML_DLM_DOMAIN 0x0000000000000080ULL /* dlm domain debugging */
93#define ML_DLM_THREAD 0x0000000000000100ULL /* dlm domain thread */
94#define ML_DLM_MASTER 0x0000000000000200ULL /* dlm master functions */
95#define ML_DLM_RECOVERY 0x0000000000000400ULL /* dlm recovery functions */
96#define ML_DLM_GLUE 0x0000000000000800ULL /* ramster dlm glue layer */
97#define ML_VOTE 0x0000000000001000ULL /* ramster node messaging */
98#define ML_CONN 0x0000000000002000ULL /* net connection management */
99#define ML_QUORUM 0x0000000000004000ULL /* net connection quorum */
100#define ML_BASTS 0x0000000000008000ULL /* dlmglue asts and basts */
101#define ML_CLUSTER 0x0000000000010000ULL /* cluster stack */
102
103/* bits that are infrequently given and frequently matched in the high word */
104#define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */
105#define ML_NOTICE 0x2000000000000000ULL /* sent to KERN_NOTICE */
106#define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */
107
108#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
109#ifndef MLOG_MASK_PREFIX
110#define MLOG_MASK_PREFIX 0
111#endif
112
113/*
114 * When logging is disabled, force the bit test to 0 for anything other
115 * than errors and notices, allowing gcc to remove the code completely.
116 * When enabled, allow all masks.
117 */
118#if defined(CONFIG_RAMSTER_DEBUG_MASKLOG)
119#define ML_ALLOWED_BITS (~0)
120#else
121#define ML_ALLOWED_BITS (ML_ERROR|ML_NOTICE)
122#endif
123
124#define MLOG_MAX_BITS 64
125
126struct mlog_bits {
127 unsigned long words[MLOG_MAX_BITS / BITS_PER_LONG];
128};
129
130extern struct mlog_bits r2_mlog_and_bits, r2_mlog_not_bits;
131
132#if BITS_PER_LONG == 32
133
134#define __mlog_test_u64(mask, bits) \
135 ((u32)(mask & 0xffffffff) & bits.words[0] || \
136 ((u64)(mask) >> 32) & bits.words[1])
137#define __mlog_set_u64(mask, bits) do { \
138 bits.words[0] |= (u32)(mask & 0xffffffff); \
139 bits.words[1] |= (u64)(mask) >> 32; \
140} while (0)
141#define __mlog_clear_u64(mask, bits) do { \
142 bits.words[0] &= ~((u32)(mask & 0xffffffff)); \
143 bits.words[1] &= ~((u64)(mask) >> 32); \
144} while (0)
145#define MLOG_BITS_RHS(mask) { \
146 { \
147 [0] = (u32)(mask & 0xffffffff), \
148 [1] = (u64)(mask) >> 32, \
149 } \
150}
151
152#else /* 32bit long above, 64bit long below */
153
154#define __mlog_test_u64(mask, bits) ((mask) & bits.words[0])
155#define __mlog_set_u64(mask, bits) do { \
156 bits.words[0] |= (mask); \
157} while (0)
158#define __mlog_clear_u64(mask, bits) do { \
159 bits.words[0] &= ~(mask); \
160} while (0)
161#define MLOG_BITS_RHS(mask) { { (mask) } }
162
163#endif
164
165/*
166 * smp_processor_id() "helpfully" screams when called outside preemptible
167 * regions in current kernels. sles doesn't have the variants that don't
168 * scream. just do this instead of trying to guess which we're building
169 * against.. *sigh*.
170 */
171#define __mlog_cpu_guess ({ \
172 unsigned long _cpu = get_cpu(); \
173 put_cpu(); \
174 _cpu; \
175})
176
177/* In the following two macros, the whitespace after the ',' just
178 * before ##args is intentional. Otherwise, gcc 2.95 will eat the
179 * previous token if args expands to nothing.
180 */
181#define __mlog_printk(level, fmt, args...) \
182 printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm, \
183 task_pid_nr(current), __mlog_cpu_guess, \
184 __PRETTY_FUNCTION__, __LINE__ , ##args)
185
186#define mlog(mask, fmt, args...) do { \
187 u64 __m = MLOG_MASK_PREFIX | (mask); \
188 if ((__m & ML_ALLOWED_BITS) && \
189 __mlog_test_u64(__m, r2_mlog_and_bits) && \
190 !__mlog_test_u64(__m, r2_mlog_not_bits)) { \
191 if (__m & ML_ERROR) \
192 __mlog_printk(KERN_ERR, "ERROR: "fmt , ##args); \
193 else if (__m & ML_NOTICE) \
194 __mlog_printk(KERN_NOTICE, fmt , ##args); \
195 else \
196 __mlog_printk(KERN_INFO, fmt , ##args); \
197 } \
198} while (0)
199
200#define mlog_errno(st) do { \
201 int _st = (st); \
202 if (_st != -ERESTARTSYS && _st != -EINTR && \
203 _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC) \
204 mlog(ML_ERROR, "status = %lld\n", (long long)_st); \
205} while (0)
206
207#define mlog_bug_on_msg(cond, fmt, args...) do { \
208 if (cond) { \
209 mlog(ML_ERROR, "bug expression: " #cond "\n"); \
210 mlog(ML_ERROR, fmt, ##args); \
211 BUG(); \
212 } \
213} while (0)
214
215#include <linux/kobject.h>
216#include <linux/sysfs.h>
217int r2_mlog_sys_init(struct kset *r2cb_subsys);
218void r2_mlog_sys_shutdown(void);
219
220#endif /* R2CLUSTER_MASKLOG_H */
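As the NOTE near the top of this header warns, a new bit is only reachable at run time if masklog.c's attribute table gains a matching entry. The sketch below wires up a hypothetical ML_FOO; the name and the (currently unused) 0x20000 bit value are invented, and per the comment above, frequently-given bits belong in the low word.

	/* masklog.h: claim an unused low-word bit */
	#define ML_FOO		0x0000000000020000ULL /* foo subsystem */

	/* masklog.c: expose it under /sys/fs/r2cb/logmask/FOO */
	static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
		/* ... existing define_mask() entries ... */
		define_mask(CLUSTER),
		define_mask(FOO),		/* new entry */
		define_mask(ERROR),
		define_mask(NOTICE),
		define_mask(KTHREAD),
	};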
diff --git a/drivers/staging/ramster/cluster/nodemanager.c b/drivers/staging/ramster/cluster/nodemanager.c
new file mode 100644
index 000000000000..de0e5c8da6ea
--- /dev/null
+++ b/drivers/staging/ramster/cluster/nodemanager.c
@@ -0,0 +1,992 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public
17 * License along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/slab.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/configfs.h>
26
27#include "tcp.h"
28#include "nodemanager.h"
29#include "heartbeat.h"
30#include "masklog.h"
31
32/* for now we operate under the assertion that there can be only one
33 * cluster active at a time. Changing this will require trickling
34 * cluster references throughout where nodes are looked up */
35struct r2nm_cluster *r2nm_single_cluster;
36
37char *r2nm_fence_method_desc[R2NM_FENCE_METHODS] = {
38 "reset", /* R2NM_FENCE_RESET */
39 "panic", /* R2NM_FENCE_PANIC */
40};
41
42struct r2nm_node *r2nm_get_node_by_num(u8 node_num)
43{
44 struct r2nm_node *node = NULL;
45
46 if (node_num >= R2NM_MAX_NODES || r2nm_single_cluster == NULL)
47 goto out;
48
49 read_lock(&r2nm_single_cluster->cl_nodes_lock);
50 node = r2nm_single_cluster->cl_nodes[node_num];
51 if (node)
52 config_item_get(&node->nd_item);
53 read_unlock(&r2nm_single_cluster->cl_nodes_lock);
54out:
55 return node;
56}
57EXPORT_SYMBOL_GPL(r2nm_get_node_by_num);
58
59int r2nm_configured_node_map(unsigned long *map, unsigned bytes)
60{
61 struct r2nm_cluster *cluster = r2nm_single_cluster;
62
63 BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));
64
65 if (cluster == NULL)
66 return -EINVAL;
67
68 read_lock(&cluster->cl_nodes_lock);
69 memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
70 read_unlock(&cluster->cl_nodes_lock);
71
72 return 0;
73}
74EXPORT_SYMBOL_GPL(r2nm_configured_node_map);
75
76static struct r2nm_node *r2nm_node_ip_tree_lookup(struct r2nm_cluster *cluster,
77 __be32 ip_needle,
78 struct rb_node ***ret_p,
79 struct rb_node **ret_parent)
80{
81 struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
82 struct rb_node *parent = NULL;
83 struct r2nm_node *node, *ret = NULL;
84
85 while (*p) {
86 int cmp;
87
88 parent = *p;
89 node = rb_entry(parent, struct r2nm_node, nd_ip_node);
90
91 cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
92 sizeof(ip_needle));
93 if (cmp < 0)
94 p = &(*p)->rb_left;
95 else if (cmp > 0)
96 p = &(*p)->rb_right;
97 else {
98 ret = node;
99 break;
100 }
101 }
102
103 if (ret_p != NULL)
104 *ret_p = p;
105 if (ret_parent != NULL)
106 *ret_parent = parent;
107
108 return ret;
109}
110
111struct r2nm_node *r2nm_get_node_by_ip(__be32 addr)
112{
113 struct r2nm_node *node = NULL;
114 struct r2nm_cluster *cluster = r2nm_single_cluster;
115
116 if (cluster == NULL)
117 goto out;
118
119 read_lock(&cluster->cl_nodes_lock);
120 node = r2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
121 if (node)
122 config_item_get(&node->nd_item);
123 read_unlock(&cluster->cl_nodes_lock);
124
125out:
126 return node;
127}
128EXPORT_SYMBOL_GPL(r2nm_get_node_by_ip);
129
130void r2nm_node_put(struct r2nm_node *node)
131{
132 config_item_put(&node->nd_item);
133}
134EXPORT_SYMBOL_GPL(r2nm_node_put);
135
136void r2nm_node_get(struct r2nm_node *node)
137{
138 config_item_get(&node->nd_item);
139}
140EXPORT_SYMBOL_GPL(r2nm_node_get);
141
142u8 r2nm_this_node(void)
143{
144 u8 node_num = R2NM_MAX_NODES;
145
146 if (r2nm_single_cluster && r2nm_single_cluster->cl_has_local)
147 node_num = r2nm_single_cluster->cl_local_node;
148
149 return node_num;
150}
151EXPORT_SYMBOL_GPL(r2nm_this_node);
152
153/* node configfs bits */
154
155static struct r2nm_cluster *to_r2nm_cluster(struct config_item *item)
156{
157 return item ?
158 container_of(to_config_group(item), struct r2nm_cluster,
159 cl_group)
160 : NULL;
161}
162
163static struct r2nm_node *to_r2nm_node(struct config_item *item)
164{
165 return item ? container_of(item, struct r2nm_node, nd_item) : NULL;
166}
167
168static void r2nm_node_release(struct config_item *item)
169{
170 struct r2nm_node *node = to_r2nm_node(item);
171 kfree(node);
172}
173
174static ssize_t r2nm_node_num_read(struct r2nm_node *node, char *page)
175{
176 return sprintf(page, "%d\n", node->nd_num);
177}
178
179static struct r2nm_cluster *to_r2nm_cluster_from_node(struct r2nm_node *node)
180{
181 /* through the first node_set .parent
182 * mycluster/nodes/mynode == r2nm_cluster->r2nm_node_group->r2nm_node */
183 return to_r2nm_cluster(node->nd_item.ci_parent->ci_parent);
184}
185
186enum {
187 R2NM_NODE_ATTR_NUM = 0,
188 R2NM_NODE_ATTR_PORT,
189 R2NM_NODE_ATTR_ADDRESS,
190 R2NM_NODE_ATTR_LOCAL,
191};
192
193static ssize_t r2nm_node_num_write(struct r2nm_node *node, const char *page,
194 size_t count)
195{
196 struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
197 unsigned long tmp;
198 char *p = (char *)page;
199 int err;
200
201 err = kstrtoul(p, 10, &tmp);
202 if (err)
203 return err;
204
205 if (tmp >= R2NM_MAX_NODES)
206 return -ERANGE;
207
208 /* once we're in the cl_nodes tree networking can look us up by
209 * node number and try to use our address and port attributes
210 * to connect to this node.. make sure that they've been set
211 * before writing the node attribute? */
212 if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
213 !test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
214 return -EINVAL; /* XXX */
215
216 write_lock(&cluster->cl_nodes_lock);
217 if (cluster->cl_nodes[tmp])
218 p = NULL;
219 else {
220 cluster->cl_nodes[tmp] = node;
221 node->nd_num = tmp;
222 set_bit(tmp, cluster->cl_nodes_bitmap);
223 }
224 write_unlock(&cluster->cl_nodes_lock);
225 if (p == NULL)
226 return -EEXIST;
227
228 return count;
229}
230static ssize_t r2nm_node_ipv4_port_read(struct r2nm_node *node, char *page)
231{
232 return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
233}
234
235static ssize_t r2nm_node_ipv4_port_write(struct r2nm_node *node,
236 const char *page, size_t count)
237{
238 unsigned long tmp;
239 char *p = (char *)page;
240 int err;
241
242 err = kstrtoul(p, 10, &tmp);
243 if (err)
244 return err;
245
246 if (tmp == 0)
247 return -EINVAL;
248 if (tmp >= (u16)-1)
249 return -ERANGE;
250
251 node->nd_ipv4_port = htons(tmp);
252
253 return count;
254}
255
256static ssize_t r2nm_node_ipv4_address_read(struct r2nm_node *node, char *page)
257{
258 return sprintf(page, "%pI4\n", &node->nd_ipv4_address);
259}
260
261static ssize_t r2nm_node_ipv4_address_write(struct r2nm_node *node,
262 const char *page,
263 size_t count)
264{
265 struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
266 int ret, i;
267 struct rb_node **p, *parent;
268 unsigned int octets[4];
269 __be32 ipv4_addr = 0;
270
271 ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
272 &octets[1], &octets[0]);
273 if (ret != 4)
274 return -EINVAL;
275
276 for (i = 0; i < ARRAY_SIZE(octets); i++) {
277 if (octets[i] > 255)
278 return -ERANGE;
279 be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
280 }
281
282 ret = 0;
283 write_lock(&cluster->cl_nodes_lock);
284 if (r2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
285 ret = -EEXIST;
286 else {
287 rb_link_node(&node->nd_ip_node, parent, p);
288 rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
289 }
290 write_unlock(&cluster->cl_nodes_lock);
291 if (ret)
292 return ret;
293
294 memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));
295
296 return count;
297}
298
299static ssize_t r2nm_node_local_read(struct r2nm_node *node, char *page)
300{
301 return sprintf(page, "%d\n", node->nd_local);
302}
303
304static ssize_t r2nm_node_local_write(struct r2nm_node *node, const char *page,
305 size_t count)
306{
307 struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
308 unsigned long tmp;
309 char *p = (char *)page;
310 ssize_t ret;
311 int err;
312
313 err = kstrtoul(p, 10, &tmp);
314 if (err)
315 return err;
316
317 tmp = !!tmp; /* boolean of whether this node wants to be local */
318
319 /* setting local turns on networking rx for now so we require having
320 * set everything else first */
321 if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
322 !test_bit(R2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
323 !test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
324 return -EINVAL; /* XXX */
325
326 /* the only failure case is trying to set a new local node
327 * when a different one is already set */
328 if (tmp && tmp == cluster->cl_has_local &&
329 cluster->cl_local_node != node->nd_num)
330 return -EBUSY;
331
332 /* bring up the rx thread if we're setting the new local node. */
333 if (tmp && !cluster->cl_has_local) {
334 ret = r2net_start_listening(node);
335 if (ret)
336 return ret;
337 }
338
339 if (!tmp && cluster->cl_has_local &&
340 cluster->cl_local_node == node->nd_num) {
341 r2net_stop_listening(node);
342 cluster->cl_local_node = R2NM_INVALID_NODE_NUM;
343 }
344
345 node->nd_local = tmp;
346 if (node->nd_local) {
347 cluster->cl_has_local = tmp;
348 cluster->cl_local_node = node->nd_num;
349 }
350
351 return count;
352}
353
354struct r2nm_node_attribute {
355 struct configfs_attribute attr;
356 ssize_t (*show)(struct r2nm_node *, char *);
357 ssize_t (*store)(struct r2nm_node *, const char *, size_t);
358};
359
360static struct r2nm_node_attribute r2nm_node_attr_num = {
361 .attr = { .ca_owner = THIS_MODULE,
362 .ca_name = "num",
363 .ca_mode = S_IRUGO | S_IWUSR },
364 .show = r2nm_node_num_read,
365 .store = r2nm_node_num_write,
366};
367
368static struct r2nm_node_attribute r2nm_node_attr_ipv4_port = {
369 .attr = { .ca_owner = THIS_MODULE,
370 .ca_name = "ipv4_port",
371 .ca_mode = S_IRUGO | S_IWUSR },
372 .show = r2nm_node_ipv4_port_read,
373 .store = r2nm_node_ipv4_port_write,
374};
375
376static struct r2nm_node_attribute r2nm_node_attr_ipv4_address = {
377 .attr = { .ca_owner = THIS_MODULE,
378 .ca_name = "ipv4_address",
379 .ca_mode = S_IRUGO | S_IWUSR },
380 .show = r2nm_node_ipv4_address_read,
381 .store = r2nm_node_ipv4_address_write,
382};
383
384static struct r2nm_node_attribute r2nm_node_attr_local = {
385 .attr = { .ca_owner = THIS_MODULE,
386 .ca_name = "local",
387 .ca_mode = S_IRUGO | S_IWUSR },
388 .show = r2nm_node_local_read,
389 .store = r2nm_node_local_write,
390};
391
392static struct configfs_attribute *r2nm_node_attrs[] = {
393 [R2NM_NODE_ATTR_NUM] = &r2nm_node_attr_num.attr,
394 [R2NM_NODE_ATTR_PORT] = &r2nm_node_attr_ipv4_port.attr,
395 [R2NM_NODE_ATTR_ADDRESS] = &r2nm_node_attr_ipv4_address.attr,
396 [R2NM_NODE_ATTR_LOCAL] = &r2nm_node_attr_local.attr,
397 NULL,
398};
399
400static int r2nm_attr_index(struct configfs_attribute *attr)
401{
402 int i;
403 for (i = 0; i < ARRAY_SIZE(r2nm_node_attrs); i++) {
404 if (attr == r2nm_node_attrs[i])
405 return i;
406 }
407 BUG();
408 return 0;
409}
410
411static ssize_t r2nm_node_show(struct config_item *item,
412 struct configfs_attribute *attr,
413 char *page)
414{
415 struct r2nm_node *node = to_r2nm_node(item);
416 struct r2nm_node_attribute *r2nm_node_attr =
417 container_of(attr, struct r2nm_node_attribute, attr);
418 ssize_t ret = 0;
419
420 if (r2nm_node_attr->show)
421 ret = r2nm_node_attr->show(node, page);
422 return ret;
423}
424
425static ssize_t r2nm_node_store(struct config_item *item,
426 struct configfs_attribute *attr,
427 const char *page, size_t count)
428{
429 struct r2nm_node *node = to_r2nm_node(item);
430 struct r2nm_node_attribute *r2nm_node_attr =
431 container_of(attr, struct r2nm_node_attribute, attr);
432 ssize_t ret;
433 int attr_index = r2nm_attr_index(attr);
434
435 if (r2nm_node_attr->store == NULL) {
436 ret = -EINVAL;
437 goto out;
438 }
439
440 if (test_bit(attr_index, &node->nd_set_attributes))
441 return -EBUSY;
442
443 ret = r2nm_node_attr->store(node, page, count);
444 if (ret < count)
445 goto out;
446
447 set_bit(attr_index, &node->nd_set_attributes);
448out:
449 return ret;
450}
451
452static struct configfs_item_operations r2nm_node_item_ops = {
453 .release = r2nm_node_release,
454 .show_attribute = r2nm_node_show,
455 .store_attribute = r2nm_node_store,
456};
457
458static struct config_item_type r2nm_node_type = {
459 .ct_item_ops = &r2nm_node_item_ops,
460 .ct_attrs = r2nm_node_attrs,
461 .ct_owner = THIS_MODULE,
462};
463
464/* node set */
465
466struct r2nm_node_group {
467 struct config_group ns_group;
468 /* some stuff? */
469};
470
471#if 0
472static struct r2nm_node_group *to_r2nm_node_group(struct config_group *group)
473{
474 return group ?
475 container_of(group, struct r2nm_node_group, ns_group)
476 : NULL;
477}
478#endif
479
480struct r2nm_cluster_attribute {
481 struct configfs_attribute attr;
482 ssize_t (*show)(struct r2nm_cluster *, char *);
483 ssize_t (*store)(struct r2nm_cluster *, const char *, size_t);
484};
485
486static ssize_t r2nm_cluster_attr_write(const char *page, ssize_t count,
487 unsigned int *val)
488{
489 unsigned long tmp;
490 char *p = (char *)page;
491 int err;
492
493 err = kstrtoul(p, 10, &tmp);
494 if (err)
495 return err;
496
497 if (tmp == 0)
498 return -EINVAL;
499 if (tmp >= (u32)-1)
500 return -ERANGE;
501
502 *val = tmp;
503
504 return count;
505}
506
507static ssize_t r2nm_cluster_attr_idle_timeout_ms_read(
508 struct r2nm_cluster *cluster, char *page)
509{
510 return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms);
511}
512
513static ssize_t r2nm_cluster_attr_idle_timeout_ms_write(
514 struct r2nm_cluster *cluster, const char *page, size_t count)
515{
516 ssize_t ret;
517 unsigned int val = 0;
518
519 ret = r2nm_cluster_attr_write(page, count, &val);
520
521 if (ret > 0) {
522 if (cluster->cl_idle_timeout_ms != val
523 && r2net_num_connected_peers()) {
524 mlog(ML_NOTICE,
525 "r2net: cannot change idle timeout after "
526 "the first peer has agreed to it."
527 " %d connected peers\n",
528 r2net_num_connected_peers());
529 ret = -EINVAL;
530 } else if (val <= cluster->cl_keepalive_delay_ms) {
531 mlog(ML_NOTICE, "r2net: idle timeout must be larger "
532 "than keepalive delay\n");
533 ret = -EINVAL;
534 } else {
535 cluster->cl_idle_timeout_ms = val;
536 }
537 }
538
539 return ret;
540}
541
542static ssize_t r2nm_cluster_attr_keepalive_delay_ms_read(
543 struct r2nm_cluster *cluster, char *page)
544{
545 return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms);
546}
547
548static ssize_t r2nm_cluster_attr_keepalive_delay_ms_write(
549 struct r2nm_cluster *cluster, const char *page, size_t count)
550{
551 ssize_t ret;
552 unsigned int val = 0;
553
554 ret = r2nm_cluster_attr_write(page, count, &val);
555
556 if (ret > 0) {
557 if (cluster->cl_keepalive_delay_ms != val
558 && r2net_num_connected_peers()) {
559 mlog(ML_NOTICE,
560 "r2net: cannot change keepalive delay after"
561 " the first peer has agreed to it."
562 " %d connected peers\n",
563 r2net_num_connected_peers());
564 ret = -EINVAL;
565 } else if (val >= cluster->cl_idle_timeout_ms) {
566 mlog(ML_NOTICE, "r2net: keepalive delay must be "
567 "smaller than idle timeout\n");
568 ret = -EINVAL;
569 } else {
570 cluster->cl_keepalive_delay_ms = val;
571 }
572 }
573
574 return ret;
575}
576
577static ssize_t r2nm_cluster_attr_reconnect_delay_ms_read(
578 struct r2nm_cluster *cluster, char *page)
579{
580 return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms);
581}
582
583static ssize_t r2nm_cluster_attr_reconnect_delay_ms_write(
584 struct r2nm_cluster *cluster, const char *page, size_t count)
585{
586 return r2nm_cluster_attr_write(page, count,
587 &cluster->cl_reconnect_delay_ms);
588}
589
590static ssize_t r2nm_cluster_attr_fence_method_read(
591 struct r2nm_cluster *cluster, char *page)
592{
593 ssize_t ret = 0;
594
595 if (cluster)
596 ret = sprintf(page, "%s\n",
597 r2nm_fence_method_desc[cluster->cl_fence_method]);
598 return ret;
599}
600
601static ssize_t r2nm_cluster_attr_fence_method_write(
602 struct r2nm_cluster *cluster, const char *page, size_t count)
603{
604 unsigned int i;
605
606 if (page[count - 1] != '\n')
607 goto bail;
608
609 for (i = 0; i < R2NM_FENCE_METHODS; ++i) {
610 if (count != strlen(r2nm_fence_method_desc[i]) + 1)
611 continue;
612 if (strncasecmp(page, r2nm_fence_method_desc[i], count - 1))
613 continue;
614 if (cluster->cl_fence_method != i) {
615 printk(KERN_INFO "ramster: Changing fence method to %s\n",
616 r2nm_fence_method_desc[i]);
617 cluster->cl_fence_method = i;
618 }
619 return count;
620 }
621
622bail:
623 return -EINVAL;
624}
625
626static struct r2nm_cluster_attribute r2nm_cluster_attr_idle_timeout_ms = {
627 .attr = { .ca_owner = THIS_MODULE,
628 .ca_name = "idle_timeout_ms",
629 .ca_mode = S_IRUGO | S_IWUSR },
630 .show = r2nm_cluster_attr_idle_timeout_ms_read,
631 .store = r2nm_cluster_attr_idle_timeout_ms_write,
632};
633
634static struct r2nm_cluster_attribute r2nm_cluster_attr_keepalive_delay_ms = {
635 .attr = { .ca_owner = THIS_MODULE,
636 .ca_name = "keepalive_delay_ms",
637 .ca_mode = S_IRUGO | S_IWUSR },
638 .show = r2nm_cluster_attr_keepalive_delay_ms_read,
639 .store = r2nm_cluster_attr_keepalive_delay_ms_write,
640};
641
642static struct r2nm_cluster_attribute r2nm_cluster_attr_reconnect_delay_ms = {
643 .attr = { .ca_owner = THIS_MODULE,
644 .ca_name = "reconnect_delay_ms",
645 .ca_mode = S_IRUGO | S_IWUSR },
646 .show = r2nm_cluster_attr_reconnect_delay_ms_read,
647 .store = r2nm_cluster_attr_reconnect_delay_ms_write,
648};
649
650static struct r2nm_cluster_attribute r2nm_cluster_attr_fence_method = {
651 .attr = { .ca_owner = THIS_MODULE,
652 .ca_name = "fence_method",
653 .ca_mode = S_IRUGO | S_IWUSR },
654 .show = r2nm_cluster_attr_fence_method_read,
655 .store = r2nm_cluster_attr_fence_method_write,
656};
657
658static struct configfs_attribute *r2nm_cluster_attrs[] = {
659 &r2nm_cluster_attr_idle_timeout_ms.attr,
660 &r2nm_cluster_attr_keepalive_delay_ms.attr,
661 &r2nm_cluster_attr_reconnect_delay_ms.attr,
662 &r2nm_cluster_attr_fence_method.attr,
663 NULL,
664};
665static ssize_t r2nm_cluster_show(struct config_item *item,
666 struct configfs_attribute *attr,
667 char *page)
668{
669 struct r2nm_cluster *cluster = to_r2nm_cluster(item);
670 struct r2nm_cluster_attribute *r2nm_cluster_attr =
671 container_of(attr, struct r2nm_cluster_attribute, attr);
672 ssize_t ret = 0;
673
674 if (r2nm_cluster_attr->show)
675 ret = r2nm_cluster_attr->show(cluster, page);
676 return ret;
677}
678
679static ssize_t r2nm_cluster_store(struct config_item *item,
680 struct configfs_attribute *attr,
681 const char *page, size_t count)
682{
683 struct r2nm_cluster *cluster = to_r2nm_cluster(item);
684 struct r2nm_cluster_attribute *r2nm_cluster_attr =
685 container_of(attr, struct r2nm_cluster_attribute, attr);
686 ssize_t ret;
687
688 if (r2nm_cluster_attr->store == NULL) {
689 ret = -EINVAL;
690 goto out;
691 }
692
693 ret = r2nm_cluster_attr->store(cluster, page, count);
694 if (ret < count)
695 goto out;
696out:
697 return ret;
698}
699
700static struct config_item *r2nm_node_group_make_item(struct config_group *group,
701 const char *name)
702{
703 struct r2nm_node *node = NULL;
704
705 if (strlen(name) > R2NM_MAX_NAME_LEN)
706 return ERR_PTR(-ENAMETOOLONG);
707
708 node = kzalloc(sizeof(struct r2nm_node), GFP_KERNEL);
709 if (node == NULL)
710 return ERR_PTR(-ENOMEM);
711
712 strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
713 config_item_init_type_name(&node->nd_item, name, &r2nm_node_type);
714 spin_lock_init(&node->nd_lock);
715
716 mlog(ML_CLUSTER, "r2nm: Registering node %s\n", name);
717
718 return &node->nd_item;
719}
720
721static void r2nm_node_group_drop_item(struct config_group *group,
722 struct config_item *item)
723{
724 struct r2nm_node *node = to_r2nm_node(item);
725 struct r2nm_cluster *cluster =
726 to_r2nm_cluster(group->cg_item.ci_parent);
727
728 r2net_disconnect_node(node);
729
730 if (cluster->cl_has_local &&
731 (cluster->cl_local_node == node->nd_num)) {
732 cluster->cl_has_local = 0;
733 cluster->cl_local_node = R2NM_INVALID_NODE_NUM;
734 r2net_stop_listening(node);
735 }
736
737 /* XXX call into net to stop this node from trading messages */
738
739 write_lock(&cluster->cl_nodes_lock);
740
741 /* XXX sloppy */
742 if (node->nd_ipv4_address)
743 rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);
744
745 /* nd_num might be 0 if the node number hasn't been set.. */
746 if (cluster->cl_nodes[node->nd_num] == node) {
747 cluster->cl_nodes[node->nd_num] = NULL;
748 clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
749 }
750 write_unlock(&cluster->cl_nodes_lock);
751
752 mlog(ML_CLUSTER, "r2nm: Unregistered node %s\n",
753 config_item_name(&node->nd_item));
754
755 config_item_put(item);
756}
757
758static struct configfs_group_operations r2nm_node_group_group_ops = {
759 .make_item = r2nm_node_group_make_item,
760 .drop_item = r2nm_node_group_drop_item,
761};
762
763static struct config_item_type r2nm_node_group_type = {
764 .ct_group_ops = &r2nm_node_group_group_ops,
765 .ct_owner = THIS_MODULE,
766};
767
768/* cluster */
769
770static void r2nm_cluster_release(struct config_item *item)
771{
772 struct r2nm_cluster *cluster = to_r2nm_cluster(item);
773
774 kfree(cluster->cl_group.default_groups);
775 kfree(cluster);
776}
777
778static struct configfs_item_operations r2nm_cluster_item_ops = {
779 .release = r2nm_cluster_release,
780 .show_attribute = r2nm_cluster_show,
781 .store_attribute = r2nm_cluster_store,
782};
783
784static struct config_item_type r2nm_cluster_type = {
785 .ct_item_ops = &r2nm_cluster_item_ops,
786 .ct_attrs = r2nm_cluster_attrs,
787 .ct_owner = THIS_MODULE,
788};
789
790/* cluster set */
791
792struct r2nm_cluster_group {
793 struct configfs_subsystem cs_subsys;
794 /* some stuff? */
795};
796
797#if 0
798static struct r2nm_cluster_group *
799to_r2nm_cluster_group(struct config_group *group)
800{
801 return group ?
802 container_of(to_configfs_subsystem(group),
803 struct r2nm_cluster_group, cs_subsys)
804 : NULL;
805}
806#endif
807
808static struct config_group *
809r2nm_cluster_group_make_group(struct config_group *group,
810 const char *name)
811{
812 struct r2nm_cluster *cluster = NULL;
813 struct r2nm_node_group *ns = NULL;
814 struct config_group *r2hb_group = NULL, *ret = NULL;
815 void *defs = NULL;
816
817 /* this runs under the parent dir's i_mutex; there can be only
818 * one caller in here at a time */
819 if (r2nm_single_cluster)
820 return ERR_PTR(-ENOSPC);
821
822 cluster = kzalloc(sizeof(struct r2nm_cluster), GFP_KERNEL);
823 ns = kzalloc(sizeof(struct r2nm_node_group), GFP_KERNEL);
824 defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
825 r2hb_group = r2hb_alloc_hb_set();
826 if (cluster == NULL || ns == NULL || r2hb_group == NULL || defs == NULL)
827 goto out;
828
829 config_group_init_type_name(&cluster->cl_group, name,
830 &r2nm_cluster_type);
831 config_group_init_type_name(&ns->ns_group, "node",
832 &r2nm_node_group_type);
833
834 cluster->cl_group.default_groups = defs;
835 cluster->cl_group.default_groups[0] = &ns->ns_group;
836 cluster->cl_group.default_groups[1] = r2hb_group;
837 cluster->cl_group.default_groups[2] = NULL;
838 rwlock_init(&cluster->cl_nodes_lock);
839 cluster->cl_node_ip_tree = RB_ROOT;
840 cluster->cl_reconnect_delay_ms = R2NET_RECONNECT_DELAY_MS_DEFAULT;
841 cluster->cl_idle_timeout_ms = R2NET_IDLE_TIMEOUT_MS_DEFAULT;
842 cluster->cl_keepalive_delay_ms = R2NET_KEEPALIVE_DELAY_MS_DEFAULT;
843 cluster->cl_fence_method = R2NM_FENCE_RESET;
844
845 ret = &cluster->cl_group;
846 r2nm_single_cluster = cluster;
847
848out:
849 if (ret == NULL) {
850 kfree(cluster);
851 kfree(ns);
852 r2hb_free_hb_set(r2hb_group);
853 kfree(defs);
854 ret = ERR_PTR(-ENOMEM);
855 }
856
857 return ret;
858}
859
860static void r2nm_cluster_group_drop_item(struct config_group *group,
861 struct config_item *item)
862{
863 struct r2nm_cluster *cluster = to_r2nm_cluster(item);
864 int i;
865 struct config_item *killme;
866
867 BUG_ON(r2nm_single_cluster != cluster);
868 r2nm_single_cluster = NULL;
869
870 for (i = 0; cluster->cl_group.default_groups[i]; i++) {
871 killme = &cluster->cl_group.default_groups[i]->cg_item;
872 cluster->cl_group.default_groups[i] = NULL;
873 config_item_put(killme);
874 }
875
876 config_item_put(item);
877}
878
879static struct configfs_group_operations r2nm_cluster_group_group_ops = {
880 .make_group = r2nm_cluster_group_make_group,
881 .drop_item = r2nm_cluster_group_drop_item,
882};
883
884static struct config_item_type r2nm_cluster_group_type = {
885 .ct_group_ops = &r2nm_cluster_group_group_ops,
886 .ct_owner = THIS_MODULE,
887};
888
889static struct r2nm_cluster_group r2nm_cluster_group = {
890 .cs_subsys = {
891 .su_group = {
892 .cg_item = {
893 .ci_namebuf = "cluster",
894 .ci_type = &r2nm_cluster_group_type,
895 },
896 },
897 },
898};
899
900int r2nm_depend_item(struct config_item *item)
901{
902 return configfs_depend_item(&r2nm_cluster_group.cs_subsys, item);
903}
904
905void r2nm_undepend_item(struct config_item *item)
906{
907 configfs_undepend_item(&r2nm_cluster_group.cs_subsys, item);
908}
909
910int r2nm_depend_this_node(void)
911{
912 int ret = 0;
913 struct r2nm_node *local_node;
914
915 local_node = r2nm_get_node_by_num(r2nm_this_node());
916 if (!local_node) {
917 ret = -EINVAL;
918 goto out;
919 }
920
921 ret = r2nm_depend_item(&local_node->nd_item);
922 r2nm_node_put(local_node);
923
924out:
925 return ret;
926}
927
928void r2nm_undepend_this_node(void)
929{
930 struct r2nm_node *local_node;
931
932 local_node = r2nm_get_node_by_num(r2nm_this_node());
933 BUG_ON(!local_node);
934
935 r2nm_undepend_item(&local_node->nd_item);
936 r2nm_node_put(local_node);
937}
938
939
940static void __exit exit_r2nm(void)
941{
942 /* XXX sync with hb callbacks and shut down hb? */
943 r2net_unregister_hb_callbacks();
944 configfs_unregister_subsystem(&r2nm_cluster_group.cs_subsys);
945
946 r2net_exit();
947 r2hb_exit();
948}
949
950static int __init init_r2nm(void)
951{
952 int ret = -1;
953
954 ret = r2hb_init();
955 if (ret)
956 goto out;
957
958 ret = r2net_init();
959 if (ret)
960 goto out_r2hb;
961
962 ret = r2net_register_hb_callbacks();
963 if (ret)
964 goto out_r2net;
965
966 config_group_init(&r2nm_cluster_group.cs_subsys.su_group);
967 mutex_init(&r2nm_cluster_group.cs_subsys.su_mutex);
968 ret = configfs_register_subsystem(&r2nm_cluster_group.cs_subsys);
969 if (ret) {
970 printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
971 goto out_callbacks;
972 }
973
974 if (!ret)
975 goto out;
976
977 configfs_unregister_subsystem(&r2nm_cluster_group.cs_subsys);
978out_callbacks:
979 r2net_unregister_hb_callbacks();
980out_r2net:
981 r2net_exit();
982out_r2hb:
983 r2hb_exit();
984out:
985 return ret;
986}
987
988MODULE_AUTHOR("Oracle");
989MODULE_LICENSE("GPL");
990
991module_init(init_r2nm)
992module_exit(exit_r2nm)
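Everything above is driven from userspace through configfs. The sketch below brings up a one-node cluster; it assumes configfs is mounted at /sys/kernel/config, the names "rc" and "node0" are invented, and error handling is elided. The write order follows the checks in the store handlers — ipv4_address and ipv4_port before num, local last (writing 1 to local is what starts the listening thread) — and each attribute is write-once, so a repeated write gets -EBUSY from r2nm_node_store().

	#include <stdio.h>
	#include <sys/stat.h>

	#define NODE "/sys/kernel/config/cluster/rc/node/node0"

	static void set_attr(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (f) {
			fprintf(f, "%s\n", val);
			fclose(f);
		}
	}

	int main(void)
	{
		mkdir("/sys/kernel/config/cluster/rc", 0755);	/* make_group */
		mkdir(NODE, 0755);				/* make_item  */

		set_attr(NODE "/ipv4_address", "192.168.1.10");
		set_attr(NODE "/ipv4_port", "7777");
		set_attr(NODE "/num", "0");
		set_attr(NODE "/local", "1");
		return 0;
	}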
diff --git a/drivers/staging/ramster/cluster/nodemanager.h b/drivers/staging/ramster/cluster/nodemanager.h
new file mode 100644
index 000000000000..41a04df5842c
--- /dev/null
+++ b/drivers/staging/ramster/cluster/nodemanager.h
@@ -0,0 +1,88 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * nodemanager.h
5 *
6 * Function prototypes
7 *
8 * Copyright (C) 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
24 *
25 */
26
27#ifndef R2CLUSTER_NODEMANAGER_H
28#define R2CLUSTER_NODEMANAGER_H
29
30#include "ramster_nodemanager.h"
31
32/* This totally doesn't belong here. */
33#include <linux/configfs.h>
34#include <linux/rbtree.h>
35
36enum r2nm_fence_method {
37 R2NM_FENCE_RESET = 0,
38 R2NM_FENCE_PANIC,
39 R2NM_FENCE_METHODS, /* Number of fence methods */
40};
41
42struct r2nm_node {
43 spinlock_t nd_lock;
44 struct config_item nd_item;
45 char nd_name[R2NM_MAX_NAME_LEN+1]; /* replace? */
46 __u8 nd_num;
47 /* only one address per node, as attributes, for now. */
48 __be32 nd_ipv4_address;
49 __be16 nd_ipv4_port;
50 struct rb_node nd_ip_node;
51 /* there can be only one local node for now */
52 int nd_local;
53
54 unsigned long nd_set_attributes;
55};
56
57struct r2nm_cluster {
58 struct config_group cl_group;
59 unsigned cl_has_local:1;
60 u8 cl_local_node;
61 rwlock_t cl_nodes_lock;
62 struct r2nm_node *cl_nodes[R2NM_MAX_NODES];
63 struct rb_root cl_node_ip_tree;
64 unsigned int cl_idle_timeout_ms;
65 unsigned int cl_keepalive_delay_ms;
66 unsigned int cl_reconnect_delay_ms;
67 enum r2nm_fence_method cl_fence_method;
68
69 /* part of a hack for disk bitmap.. will go eventually. - zab */
70 unsigned long cl_nodes_bitmap[BITS_TO_LONGS(R2NM_MAX_NODES)];
71};
72
73extern struct r2nm_cluster *r2nm_single_cluster;
74
75u8 r2nm_this_node(void);
76
77int r2nm_configured_node_map(unsigned long *map, unsigned bytes);
78struct r2nm_node *r2nm_get_node_by_num(u8 node_num);
79struct r2nm_node *r2nm_get_node_by_ip(__be32 addr);
80void r2nm_node_get(struct r2nm_node *node);
81void r2nm_node_put(struct r2nm_node *node);
82
83int r2nm_depend_item(struct config_item *item);
84void r2nm_undepend_item(struct config_item *item);
85int r2nm_depend_this_node(void);
86void r2nm_undepend_this_node(void);
87
88#endif /* R2CLUSTER_NODEMANAGER_H */
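Both lookup helpers return with a configfs reference held (config_item_get() in nodemanager.c), so every hit must be paired with r2nm_node_put(). A small sketch of the pattern follows; the use of the address fields is illustrative and use_endpoint() is hypothetical.

	struct r2nm_node *node = r2nm_get_node_by_num(node_num);

	if (!node)
		return -EINVAL;		/* unknown node, or no cluster yet */

	/* nd_ipv4_address/nd_ipv4_port are stable while the ref is held */
	use_endpoint(node->nd_ipv4_address, node->nd_ipv4_port);

	r2nm_node_put(node);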
diff --git a/drivers/staging/ramster/cluster/ramster_nodemanager.h b/drivers/staging/ramster/cluster/ramster_nodemanager.h
new file mode 100644
index 000000000000..49f879d943ab
--- /dev/null
+++ b/drivers/staging/ramster/cluster/ramster_nodemanager.h
@@ -0,0 +1,39 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * ramster_nodemanager.h
5 *
6 * Header describing the interface between userspace and the kernel
7 * for the ramster_nodemanager module.
8 *
9 * Copyright (C) 2002, 2004, 2012 Oracle. All rights reserved.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public
13 * License as published by the Free Software Foundation; either
14 * version 2 of the License, or (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public
22 * License along with this program; if not, write to the
23 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
24 * Boston, MA 02111-1307, USA.
25 *
26 */
27
28#ifndef _RAMSTER_NODEMANAGER_H
29#define _RAMSTER_NODEMANAGER_H
30
31#define R2NM_API_VERSION 5
32
33#define R2NM_MAX_NODES 255
34#define R2NM_INVALID_NODE_NUM 255
35
36/* host name, group name, cluster name all 64 bytes */
37#define R2NM_MAX_NAME_LEN 64 /* __NEW_UTS_LEN */
38
39#endif /* _RAMSTER_NODEMANAGER_H */
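R2NM_MAX_NODES and R2NM_INVALID_NODE_NUM share the value 255, so valid node numbers are 0-254 and a single bounds check rejects both out-of-range and "invalid" values — exactly what r2nm_get_node_by_num() above and r2net_nn_from_num() below rely on:

	if (node_num >= R2NM_MAX_NODES)	/* also catches R2NM_INVALID_NODE_NUM */
		return NULL;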
diff --git a/drivers/staging/ramster/cluster/tcp.c b/drivers/staging/ramster/cluster/tcp.c
new file mode 100644
index 000000000000..3af1b2c51b78
--- /dev/null
+++ b/drivers/staging/ramster/cluster/tcp.c
@@ -0,0 +1,2256 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 *
3 * vim: noexpandtab sw=8 ts=8 sts=0:
4 *
5 * Copyright (C) 2004 Oracle. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public
18 * License along with this program; if not, write to the
19 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 * Boston, MA 02111-1307, USA.
21 *
22 * ----
23 *
24 * Callers for this were originally written against a very simple synchronous
25 * API. This implementation reflects those simple callers. Some day I'm sure
26 * we'll need to move to a more robust posting/callback mechanism.
27 *
28 * Transmit calls pass in kernel virtual addresses and block copying this into
29 * the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
30 * for a failed socket to time out. TX callers can also pass in a pointer to an
31 * 'int' which gets filled with an errno off the wire in response to the
32 * message they send.
33 *
34 * Handlers for unsolicited messages are registered. Each socket has a page
35 * that incoming data is copied into. First the header, then the data.
36 * Handlers are called from only one thread with a reference to this per-socket
37 * page. This page is destroyed after the handler call, so it can't be
38 * referenced beyond the call. Handlers may block but are discouraged from
39 * doing so.
40 *
41 * Any framing errors (bad magic, large payload lengths) close a connection.
42 *
43 * Our sock_container holds the state we associate with a socket. Its current
44 * framing state is held there as well as the refcounting we do around when it
45 * is safe to tear down the socket. The socket is only finally torn down from
46 * the container when the container loses all of its references -- so as long
47 * as you hold a ref on the container you can trust that the socket is valid
48 * for use with kernel socket APIs.
49 *
50 * Connections are initiated between a pair of nodes when the node with the
51 * higher node number gets a heartbeat callback which indicates that the lower
52 * numbered node has started heartbeating. The lower numbered node is passive
53 * and only accepts the connection if the higher numbered node is heartbeating.
54 */
55
56#include <linux/kernel.h>
57#include <linux/jiffies.h>
58#include <linux/slab.h>
59#include <linux/idr.h>
60#include <linux/kref.h>
61#include <linux/net.h>
62#include <linux/export.h>
63#include <linux/uaccess.h>
64#include <net/tcp.h>
65
66
67#include "heartbeat.h"
68#include "tcp.h"
69#include "nodemanager.h"
70#define MLOG_MASK_PREFIX ML_TCP
71#include "masklog.h"
72
73#include "tcp_internal.h"
74
75#define SC_NODEF_FMT "node %s (num %u) at %pI4:%u"
76
77/*
78 * In the following two log macros, the whitespace after the ',' just
79 * before ##args is intentional. Otherwise, gcc 2.95 will eat the
80 * previous token if args expands to nothing.
81 */
82#define msglog(hdr, fmt, args...) do { \
83 typeof(hdr) __hdr = (hdr); \
84 mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \
85 "key %08x num %u] " fmt, \
86 be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), \
87 be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status), \
88 be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key), \
89 be32_to_cpu(__hdr->msg_num) , ##args); \
90} while (0)
91
92#define sclog(sc, fmt, args...) do { \
93 typeof(sc) __sc = (sc); \
94 mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \
95 "pg_off %zu] " fmt, __sc, \
96 atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock, \
97 __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \
98 ##args); \
99} while (0)
100
101static DEFINE_RWLOCK(r2net_handler_lock);
102static struct rb_root r2net_handler_tree = RB_ROOT;
103
104static struct r2net_node r2net_nodes[R2NM_MAX_NODES];
105
106/* XXX someday we'll need better accounting */
107static struct socket *r2net_listen_sock;
108
109/*
110 * listen work is only queued by the listening socket callbacks on the
111 * r2net_wq. teardown detaches the callbacks before destroying the workqueue.
112 * quorum work is queued as sock containers are shut down. stop_listening
113 * tears down all the node's sock containers, preventing future shutdowns
114 * and queued quorum work, before canceling delayed quorum work and
115 * destroying the work queue.
116 */
117static struct workqueue_struct *r2net_wq;
118static struct work_struct r2net_listen_work;
119
120static struct r2hb_callback_func r2net_hb_up, r2net_hb_down;
121#define R2NET_HB_PRI 0x1
122
123static struct r2net_handshake *r2net_hand;
124static struct r2net_msg *r2net_keep_req, *r2net_keep_resp;
125
126static int r2net_sys_err_translations[R2NET_ERR_MAX] = {
127 [R2NET_ERR_NONE] = 0,
128 [R2NET_ERR_NO_HNDLR] = -ENOPROTOOPT,
129 [R2NET_ERR_OVERFLOW] = -EOVERFLOW,
130 [R2NET_ERR_DIED] = -EHOSTDOWN,};
131
132/* can't quite avoid *all* internal declarations :/ */
133static void r2net_sc_connect_completed(struct work_struct *work);
134static void r2net_rx_until_empty(struct work_struct *work);
135static void r2net_shutdown_sc(struct work_struct *work);
136static void r2net_listen_data_ready(struct sock *sk, int bytes);
137static void r2net_sc_send_keep_req(struct work_struct *work);
138static void r2net_idle_timer(unsigned long data);
139static void r2net_sc_postpone_idle(struct r2net_sock_container *sc);
140static void r2net_sc_reset_idle_timer(struct r2net_sock_container *sc);
141
142#ifdef CONFIG_DEBUG_FS
143static void r2net_init_nst(struct r2net_send_tracking *nst, u32 msgtype,
144 u32 msgkey, struct task_struct *task, u8 node)
145{
146 INIT_LIST_HEAD(&nst->st_net_debug_item);
147 nst->st_task = task;
148 nst->st_msg_type = msgtype;
149 nst->st_msg_key = msgkey;
150 nst->st_node = node;
151}
152
153static inline void r2net_set_nst_sock_time(struct r2net_send_tracking *nst)
154{
155 nst->st_sock_time = ktime_get();
156}
157
158static inline void r2net_set_nst_send_time(struct r2net_send_tracking *nst)
159{
160 nst->st_send_time = ktime_get();
161}
162
163static inline void r2net_set_nst_status_time(struct r2net_send_tracking *nst)
164{
165 nst->st_status_time = ktime_get();
166}
167
168static inline void r2net_set_nst_sock_container(struct r2net_send_tracking *nst,
169 struct r2net_sock_container *sc)
170{
171 nst->st_sc = sc;
172}
173
174static inline void r2net_set_nst_msg_id(struct r2net_send_tracking *nst,
175 u32 msg_id)
176{
177 nst->st_id = msg_id;
178}
179
180static inline void r2net_set_sock_timer(struct r2net_sock_container *sc)
181{
182 sc->sc_tv_timer = ktime_get();
183}
184
185static inline void r2net_set_data_ready_time(struct r2net_sock_container *sc)
186{
187 sc->sc_tv_data_ready = ktime_get();
188}
189
190static inline void r2net_set_advance_start_time(struct r2net_sock_container *sc)
191{
192 sc->sc_tv_advance_start = ktime_get();
193}
194
195static inline void r2net_set_advance_stop_time(struct r2net_sock_container *sc)
196{
197 sc->sc_tv_advance_stop = ktime_get();
198}
199
200static inline void r2net_set_func_start_time(struct r2net_sock_container *sc)
201{
202 sc->sc_tv_func_start = ktime_get();
203}
204
205static inline void r2net_set_func_stop_time(struct r2net_sock_container *sc)
206{
207 sc->sc_tv_func_stop = ktime_get();
208}
209
210#else /* CONFIG_DEBUG_FS */
211# define r2net_init_nst(a, b, c, d, e)
212# define r2net_set_nst_sock_time(a)
213# define r2net_set_nst_send_time(a)
214# define r2net_set_nst_status_time(a)
215# define r2net_set_nst_sock_container(a, b)
216# define r2net_set_nst_msg_id(a, b)
217# define r2net_set_sock_timer(a)
218# define r2net_set_data_ready_time(a)
219# define r2net_set_advance_start_time(a)
220# define r2net_set_advance_stop_time(a)
221# define r2net_set_func_start_time(a)
222# define r2net_set_func_stop_time(a)
223#endif /* CONFIG_DEBUG_FS */
224
225#ifdef CONFIG_RAMSTER_FS_STATS
226static ktime_t r2net_get_func_run_time(struct r2net_sock_container *sc)
227{
228 return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
229}
230
231static void r2net_update_send_stats(struct r2net_send_tracking *nst,
232 struct r2net_sock_container *sc)
233{
234 sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total,
235 ktime_sub(ktime_get(),
236 nst->st_status_time));
237 sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total,
238 ktime_sub(nst->st_status_time,
239 nst->st_send_time));
240 sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total,
241 ktime_sub(nst->st_send_time,
242 nst->st_sock_time));
243 sc->sc_send_count++;
244}
245
246static void r2net_update_recv_stats(struct r2net_sock_container *sc)
247{
248 sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total,
249 r2net_get_func_run_time(sc));
250 sc->sc_recv_count++;
251}
252
253#else
254
255# define r2net_update_send_stats(a, b)
256
257# define r2net_update_recv_stats(sc)
258
259#endif /* CONFIG_RAMSTER_FS_STATS */
260
261static inline int r2net_reconnect_delay(void)
262{
263 return r2nm_single_cluster->cl_reconnect_delay_ms;
264}
265
266static inline int r2net_keepalive_delay(void)
267{
268 return r2nm_single_cluster->cl_keepalive_delay_ms;
269}
270
271static inline int r2net_idle_timeout(void)
272{
273 return r2nm_single_cluster->cl_idle_timeout_ms;
274}
275
276static inline int r2net_sys_err_to_errno(enum r2net_system_error err)
277{
278 int trans;
279 BUG_ON(err >= R2NET_ERR_MAX);
280 trans = r2net_sys_err_translations[err];
281
282 /* Just in case we mess up the translation table above */
283 BUG_ON(err != R2NET_ERR_NONE && trans == 0);
284 return trans;
285}
286
287struct r2net_node *r2net_nn_from_num(u8 node_num)
288{
289 BUG_ON(node_num >= ARRAY_SIZE(r2net_nodes));
290 return &r2net_nodes[node_num];
291}
292
293static u8 r2net_num_from_nn(struct r2net_node *nn)
294{
295 BUG_ON(nn == NULL);
296 return nn - r2net_nodes;
297}
298
299/* ------------------------------------------------------------ */
300
301static int r2net_prep_nsw(struct r2net_node *nn, struct r2net_status_wait *nsw)
302{
303 int ret = 0;
304
305 do {
306 if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {
307 ret = -EAGAIN;
308 break;
309 }
310 spin_lock(&nn->nn_lock);
311 ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id);
312 if (ret == 0)
313 list_add_tail(&nsw->ns_node_item,
314 &nn->nn_status_list);
315 spin_unlock(&nn->nn_lock);
316 } while (ret == -EAGAIN);
317
318 if (ret == 0) {
319 init_waitqueue_head(&nsw->ns_wq);
320 nsw->ns_sys_status = R2NET_ERR_NONE;
321 nsw->ns_status = 0;
322 }
323
324 return ret;
325}
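/*
 * A minimal sketch of the same pre-3.9 idr idiom used above, with
 * placeholder names (my_idr, my_lock, my_ptr are hypothetical):
 * idr_pre_get() preallocates outside the lock, and idr_get_new() can
 * still return -EAGAIN if another CPU consumed the preallocation,
 * hence the retry loop.
 *
 *	int id, err;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_ATOMIC))
 *			return -EAGAIN;
 *		spin_lock(&my_lock);
 *		err = idr_get_new(&my_idr, my_ptr, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 */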
326
327static void r2net_complete_nsw_locked(struct r2net_node *nn,
328 struct r2net_status_wait *nsw,
329 enum r2net_system_error sys_status,
330 s32 status)
331{
332 assert_spin_locked(&nn->nn_lock);
333
334 if (!list_empty(&nsw->ns_node_item)) {
335 list_del_init(&nsw->ns_node_item);
336 nsw->ns_sys_status = sys_status;
337 nsw->ns_status = status;
338 idr_remove(&nn->nn_status_idr, nsw->ns_id);
339 wake_up(&nsw->ns_wq);
340 }
341}
342
343static void r2net_complete_nsw(struct r2net_node *nn,
344 struct r2net_status_wait *nsw,
345 u64 id, enum r2net_system_error sys_status,
346 s32 status)
347{
348 spin_lock(&nn->nn_lock);
349 if (nsw == NULL) {
350 if (id > INT_MAX)
351 goto out;
352
353 nsw = idr_find(&nn->nn_status_idr, id);
354 if (nsw == NULL)
355 goto out;
356 }
357
358 r2net_complete_nsw_locked(nn, nsw, sys_status, status);
359
360out:
361 spin_unlock(&nn->nn_lock);
362 return;
363}
364
365static void r2net_complete_nodes_nsw(struct r2net_node *nn)
366{
367 struct r2net_status_wait *nsw, *tmp;
368 unsigned int num_kills = 0;
369
370 assert_spin_locked(&nn->nn_lock);
371
372 list_for_each_entry_safe(nsw, tmp, &nn->nn_status_list, ns_node_item) {
373 r2net_complete_nsw_locked(nn, nsw, R2NET_ERR_DIED, 0);
374 num_kills++;
375 }
376
377 mlog(0, "completed %d messages for node %u\n", num_kills,
378 r2net_num_from_nn(nn));
379}
380
381static int r2net_nsw_completed(struct r2net_node *nn,
382 struct r2net_status_wait *nsw)
383{
384 int completed;
385 spin_lock(&nn->nn_lock);
386 completed = list_empty(&nsw->ns_node_item);
387 spin_unlock(&nn->nn_lock);
388 return completed;
389}
390
391/* ------------------------------------------------------------ */
392
393static void sc_kref_release(struct kref *kref)
394{
395 struct r2net_sock_container *sc = container_of(kref,
396 struct r2net_sock_container, sc_kref);
397 BUG_ON(timer_pending(&sc->sc_idle_timeout));
398
399 sclog(sc, "releasing\n");
400
401 if (sc->sc_sock) {
402 sock_release(sc->sc_sock);
403 sc->sc_sock = NULL;
404 }
405
406 r2nm_undepend_item(&sc->sc_node->nd_item);
407 r2nm_node_put(sc->sc_node);
408 sc->sc_node = NULL;
409
410 r2net_debug_del_sc(sc);
411 kfree(sc);
412}
413
414static void sc_put(struct r2net_sock_container *sc)
415{
416 sclog(sc, "put\n");
417 kref_put(&sc->sc_kref, sc_kref_release);
418}
419static void sc_get(struct r2net_sock_container *sc)
420{
421 sclog(sc, "get\n");
422 kref_get(&sc->sc_kref);
423}
424static struct r2net_sock_container *sc_alloc(struct r2nm_node *node)
425{
426 struct r2net_sock_container *sc, *ret = NULL;
427 struct page *page = NULL;
428 int status = 0;
429
430 page = alloc_page(GFP_NOFS);
431 sc = kzalloc(sizeof(*sc), GFP_NOFS);
432 if (sc == NULL || page == NULL)
433 goto out;
434
435 kref_init(&sc->sc_kref);
436 r2nm_node_get(node);
437 sc->sc_node = node;
438
439 /* pin the node item of the remote node */
440 status = r2nm_depend_item(&node->nd_item);
441 if (status) {
442 mlog_errno(status);
443 r2nm_node_put(node);
444 goto out;
445 }
446 INIT_WORK(&sc->sc_connect_work, r2net_sc_connect_completed);
447 INIT_WORK(&sc->sc_rx_work, r2net_rx_until_empty);
448 INIT_WORK(&sc->sc_shutdown_work, r2net_shutdown_sc);
449 INIT_DELAYED_WORK(&sc->sc_keepalive_work, r2net_sc_send_keep_req);
450
451 init_timer(&sc->sc_idle_timeout);
452 sc->sc_idle_timeout.function = r2net_idle_timer;
453 sc->sc_idle_timeout.data = (unsigned long)sc;
454
455 sclog(sc, "alloced\n");
456
457 ret = sc;
458 sc->sc_page = page;
459 r2net_debug_add_sc(sc);
460 sc = NULL;
461 page = NULL;
462
463out:
464 if (page)
465 __free_page(page);
466 kfree(sc);
467
468 return ret;
469}
470
471/* ------------------------------------------------------------ */
472
473static void r2net_sc_queue_work(struct r2net_sock_container *sc,
474 struct work_struct *work)
475{
476 sc_get(sc);
477 if (!queue_work(r2net_wq, work))
478 sc_put(sc);
479}
480static void r2net_sc_queue_delayed_work(struct r2net_sock_container *sc,
481 struct delayed_work *work,
482 int delay)
483{
484 sc_get(sc);
485 if (!queue_delayed_work(r2net_wq, work, delay))
486 sc_put(sc);
487}
488static void r2net_sc_cancel_delayed_work(struct r2net_sock_container *sc,
489 struct delayed_work *work)
490{
491 if (cancel_delayed_work(work))
492 sc_put(sc);
493}
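/*
 * The three wrappers above tie one sc reference to each queued work
 * item: sc_get() is taken before queueing and dropped immediately only
 * when the item was already pending (or when a cancel actually removed
 * a pending item), so a work function always runs holding a reference
 * it must release itself.  A hedged sketch of a conforming work
 * function (my_rx_work and do_receive are hypothetical):
 *
 *	static void my_rx_work(struct work_struct *work)
 *	{
 *		struct r2net_sock_container *sc =
 *			container_of(work, struct r2net_sock_container,
 *				     sc_rx_work);
 *
 *		do_receive(sc);
 *		sc_put(sc);	(drops the ref taken at queue time)
 *	}
 */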
494
495static atomic_t r2net_connected_peers = ATOMIC_INIT(0);
496
497int r2net_num_connected_peers(void)
498{
499 return atomic_read(&r2net_connected_peers);
500}
501
502static void r2net_set_nn_state(struct r2net_node *nn,
503 struct r2net_sock_container *sc,
504 unsigned valid, int err)
505{
506 int was_valid = nn->nn_sc_valid;
507 int was_err = nn->nn_persistent_error;
508 struct r2net_sock_container *old_sc = nn->nn_sc;
509
510 assert_spin_locked(&nn->nn_lock);
511
512 if (old_sc && !sc)
513 atomic_dec(&r2net_connected_peers);
514 else if (!old_sc && sc)
515 atomic_inc(&r2net_connected_peers);
516
517 /* the node num comparison and single connect/accept path should stop
518	 * a non-null sc from being overwritten with another */
519 BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc);
520 mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid);
521 mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc);
522
523 if (was_valid && !valid && err == 0)
524 err = -ENOTCONN;
525
526 mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n",
527 r2net_num_from_nn(nn), nn->nn_sc, sc, nn->nn_sc_valid, valid,
528 nn->nn_persistent_error, err);
529
530 nn->nn_sc = sc;
531 nn->nn_sc_valid = valid ? 1 : 0;
532 nn->nn_persistent_error = err;
533
534 /* mirrors r2net_tx_can_proceed() */
535 if (nn->nn_persistent_error || nn->nn_sc_valid)
536 wake_up(&nn->nn_sc_wq);
537
538 if (!was_err && nn->nn_persistent_error) {
539 queue_delayed_work(r2net_wq, &nn->nn_still_up,
540 msecs_to_jiffies(R2NET_QUORUM_DELAY_MS));
541 }
542
543 if (was_valid && !valid) {
544 printk(KERN_NOTICE "ramster: No longer connected to "
545 SC_NODEF_FMT "\n",
546 old_sc->sc_node->nd_name, old_sc->sc_node->nd_num,
547 &old_sc->sc_node->nd_ipv4_address,
548 ntohs(old_sc->sc_node->nd_ipv4_port));
549 r2net_complete_nodes_nsw(nn);
550 }
551
552 if (!was_valid && valid) {
553 cancel_delayed_work(&nn->nn_connect_expired);
554 printk(KERN_NOTICE "ramster: %s " SC_NODEF_FMT "\n",
555 r2nm_this_node() > sc->sc_node->nd_num ?
556 "Connected to" : "Accepted connection from",
557 sc->sc_node->nd_name, sc->sc_node->nd_num,
558 &sc->sc_node->nd_ipv4_address,
559 ntohs(sc->sc_node->nd_ipv4_port));
560 }
561
562	/* trigger the connecting worker func as long as we're not valid;
563 * it will back off if it shouldn't connect. This can be called
564 * from node config teardown and so needs to be careful about
565 * the work queue actually being up. */
566 if (!valid && r2net_wq) {
567 unsigned long delay;
568 /* delay if we're within a RECONNECT_DELAY of the
569 * last attempt */
570 delay = (nn->nn_last_connect_attempt +
571 msecs_to_jiffies(r2net_reconnect_delay()))
572 - jiffies;
573 if (delay > msecs_to_jiffies(r2net_reconnect_delay()))
574 delay = 0;
575 mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
576 queue_delayed_work(r2net_wq, &nn->nn_connect_work, delay);
577
578 /*
579 * Delay the expired work after idle timeout.
580 *
581 * We might have lots of failed connection attempts that run
582 * through here but we only cancel the connect_expired work when
583 * a connection attempt succeeds. So only the first enqueue of
584 * the connect_expired work will do anything. The rest will see
585 * that it's already queued and do nothing.
586 */
587 delay += msecs_to_jiffies(r2net_idle_timeout());
588 queue_delayed_work(r2net_wq, &nn->nn_connect_expired, delay);
589 }
590
591 /* keep track of the nn's sc ref for the caller */
592 if ((old_sc == NULL) && sc)
593 sc_get(sc);
594 if (old_sc && (old_sc != sc)) {
595 r2net_sc_queue_work(old_sc, &old_sc->sc_shutdown_work);
596 sc_put(old_sc);
597 }
598}
599
600/* see r2net_register_callbacks() */
601static void r2net_data_ready(struct sock *sk, int bytes)
602{
603 void (*ready)(struct sock *sk, int bytes);
604
605 read_lock(&sk->sk_callback_lock);
606 if (sk->sk_user_data) {
607 struct r2net_sock_container *sc = sk->sk_user_data;
608 sclog(sc, "data_ready hit\n");
609 r2net_set_data_ready_time(sc);
610 r2net_sc_queue_work(sc, &sc->sc_rx_work);
611 ready = sc->sc_data_ready;
612 } else {
613 ready = sk->sk_data_ready;
614 }
615 read_unlock(&sk->sk_callback_lock);
616
617 ready(sk, bytes);
618}
619
620/* see r2net_register_callbacks() */
621static void r2net_state_change(struct sock *sk)
622{
623 void (*state_change)(struct sock *sk);
624 struct r2net_sock_container *sc;
625
626 read_lock(&sk->sk_callback_lock);
627 sc = sk->sk_user_data;
628 if (sc == NULL) {
629 state_change = sk->sk_state_change;
630 goto out;
631 }
632
633 sclog(sc, "state_change to %d\n", sk->sk_state);
634
635 state_change = sc->sc_state_change;
636
637 switch (sk->sk_state) {
638
639 /* ignore connecting sockets as they make progress */
640 case TCP_SYN_SENT:
641 case TCP_SYN_RECV:
642 break;
643 case TCP_ESTABLISHED:
644 r2net_sc_queue_work(sc, &sc->sc_connect_work);
645 break;
646 default:
647 printk(KERN_INFO "ramster: Connection to "
648 SC_NODEF_FMT " shutdown, state %d\n",
649 sc->sc_node->nd_name, sc->sc_node->nd_num,
650 &sc->sc_node->nd_ipv4_address,
651 ntohs(sc->sc_node->nd_ipv4_port), sk->sk_state);
652 r2net_sc_queue_work(sc, &sc->sc_shutdown_work);
653 break;
654
655 }
656out:
657 read_unlock(&sk->sk_callback_lock);
658 state_change(sk);
659}
660
661/*
662 * we register callbacks so we can queue work on events before calling
663 * the original callbacks. our callbacks are careful to test user_data
664 * to discover when they've raced with r2net_unregister_callbacks().
665 */
666static void r2net_register_callbacks(struct sock *sk,
667 struct r2net_sock_container *sc)
668{
669 write_lock_bh(&sk->sk_callback_lock);
670
671 /* accepted sockets inherit the old listen socket data ready */
672 if (sk->sk_data_ready == r2net_listen_data_ready) {
673 sk->sk_data_ready = sk->sk_user_data;
674 sk->sk_user_data = NULL;
675 }
676
677 BUG_ON(sk->sk_user_data != NULL);
678 sk->sk_user_data = sc;
679 sc_get(sc);
680
681 sc->sc_data_ready = sk->sk_data_ready;
682 sc->sc_state_change = sk->sk_state_change;
683 sk->sk_data_ready = r2net_data_ready;
684 sk->sk_state_change = r2net_state_change;
685
686 mutex_init(&sc->sc_send_lock);
687
688 write_unlock_bh(&sk->sk_callback_lock);
689}
690
691static int r2net_unregister_callbacks(struct sock *sk,
692 struct r2net_sock_container *sc)
693{
694 int ret = 0;
695
696 write_lock_bh(&sk->sk_callback_lock);
697 if (sk->sk_user_data == sc) {
698 ret = 1;
699 sk->sk_user_data = NULL;
700 sk->sk_data_ready = sc->sc_data_ready;
701 sk->sk_state_change = sc->sc_state_change;
702 }
703 write_unlock_bh(&sk->sk_callback_lock);
704
705 return ret;
706}
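/*
 * The two functions above pair around sk_user_data: registration
 * stashes the socket's original callbacks in the sc and points
 * sk_user_data at the sc (taking a reference); unregistration restores
 * them and reports, via its return value, whether this caller is the
 * one that actually tore the callbacks down.  A hedged sketch of the
 * teardown pairing, as the shutdown path below uses it:
 *
 *	if (r2net_unregister_callbacks(sc->sc_sock->sk, sc))
 *		sc_put(sc);	(drops the ref taken by register)
 */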
707
708/*
709 * this is a little helper that is called by callers who have seen a problem
710 * with an sc and want to detach it from the nn if someone hasn't already
711 * beaten them to it. if an error is given then the shutdown will be persistent
712 * and pending transmits will be canceled.
713 */
714static void r2net_ensure_shutdown(struct r2net_node *nn,
715 struct r2net_sock_container *sc,
716 int err)
717{
718 spin_lock(&nn->nn_lock);
719 if (nn->nn_sc == sc)
720 r2net_set_nn_state(nn, NULL, 0, err);
721 spin_unlock(&nn->nn_lock);
722}
723
724/*
725 * This work queue function performs the blocking parts of socket shutdown. A
726 * few paths lead here. set_nn_state will trigger this callback if it sees an
727 * sc detached from the nn. state_change will also trigger this callback
728 * directly when it sees errors. In that case we need to call set_nn_state
729 * ourselves as state_change couldn't get the nn_lock and call set_nn_state
730 * itself.
731 */
732static void r2net_shutdown_sc(struct work_struct *work)
733{
734 struct r2net_sock_container *sc =
735 container_of(work, struct r2net_sock_container,
736 sc_shutdown_work);
737 struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
738
739 sclog(sc, "shutting down\n");
740
741 /* drop the callbacks ref and call shutdown only once */
742 if (r2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
743 /* we shouldn't flush as we're in the thread, the
744 * races with pending sc work structs are harmless */
745 del_timer_sync(&sc->sc_idle_timeout);
746 r2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
747 sc_put(sc);
748 kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
749 }
750
751 /* not fatal so failed connects before the other guy has our
752 * heartbeat can be retried */
753 r2net_ensure_shutdown(nn, sc, 0);
754 sc_put(sc);
755}
756
757/* ------------------------------------------------------------ */
758
759static int r2net_handler_cmp(struct r2net_msg_handler *nmh, u32 msg_type,
760 u32 key)
761{
762 int ret = memcmp(&nmh->nh_key, &key, sizeof(key));
763
764 if (ret == 0)
765 ret = memcmp(&nmh->nh_msg_type, &msg_type, sizeof(msg_type));
766
767 return ret;
768}
769
770static struct r2net_msg_handler *
771r2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p,
772 struct rb_node **ret_parent)
773{
774 struct rb_node **p = &r2net_handler_tree.rb_node;
775 struct rb_node *parent = NULL;
776 struct r2net_msg_handler *nmh, *ret = NULL;
777 int cmp;
778
779 while (*p) {
780 parent = *p;
781 nmh = rb_entry(parent, struct r2net_msg_handler, nh_node);
782 cmp = r2net_handler_cmp(nmh, msg_type, key);
783
784 if (cmp < 0)
785 p = &(*p)->rb_left;
786 else if (cmp > 0)
787 p = &(*p)->rb_right;
788 else {
789 ret = nmh;
790 break;
791 }
792 }
793
794 if (ret_p != NULL)
795 *ret_p = p;
796 if (ret_parent != NULL)
797 *ret_parent = parent;
798
799 return ret;
800}
801
802static void r2net_handler_kref_release(struct kref *kref)
803{
804 struct r2net_msg_handler *nmh;
805 nmh = container_of(kref, struct r2net_msg_handler, nh_kref);
806
807 kfree(nmh);
808}
809
810static void r2net_handler_put(struct r2net_msg_handler *nmh)
811{
812 kref_put(&nmh->nh_kref, r2net_handler_kref_release);
813}
814
815/* max_len is protection for the handler func. incoming messages won't
816 * be given to the handler if their payload is longer than the max. */
817int r2net_register_handler(u32 msg_type, u32 key, u32 max_len,
818 r2net_msg_handler_func *func, void *data,
819 r2net_post_msg_handler_func *post_func,
820 struct list_head *unreg_list)
821{
822 struct r2net_msg_handler *nmh = NULL;
823 struct rb_node **p, *parent;
824 int ret = 0;
825
826 if (max_len > R2NET_MAX_PAYLOAD_BYTES) {
827 mlog(0, "max_len for message handler out of range: %u\n",
828 max_len);
829 ret = -EINVAL;
830 goto out;
831 }
832
833 if (!msg_type) {
834 mlog(0, "no message type provided: %u, %p\n", msg_type, func);
835 ret = -EINVAL;
836 goto out;
837
838 }
839 if (!func) {
840 mlog(0, "no message handler provided: %u, %p\n",
841 msg_type, func);
842 ret = -EINVAL;
843 goto out;
844 }
845
846 nmh = kzalloc(sizeof(struct r2net_msg_handler), GFP_NOFS);
847 if (nmh == NULL) {
848 ret = -ENOMEM;
849 goto out;
850 }
851
852 nmh->nh_func = func;
853 nmh->nh_func_data = data;
854 nmh->nh_post_func = post_func;
855 nmh->nh_msg_type = msg_type;
856 nmh->nh_max_len = max_len;
857 nmh->nh_key = key;
858 /* the tree and list get this ref.. they're both removed in
859 * unregister when this ref is dropped */
860 kref_init(&nmh->nh_kref);
861 INIT_LIST_HEAD(&nmh->nh_unregister_item);
862
863 write_lock(&r2net_handler_lock);
864 if (r2net_handler_tree_lookup(msg_type, key, &p, &parent))
865 ret = -EEXIST;
866 else {
867 rb_link_node(&nmh->nh_node, parent, p);
868 rb_insert_color(&nmh->nh_node, &r2net_handler_tree);
869 list_add_tail(&nmh->nh_unregister_item, unreg_list);
870
871 mlog(ML_TCP, "registered handler func %p type %u key %08x\n",
872 func, msg_type, key);
873 /* we've had some trouble with handlers seemingly vanishing. */
874 mlog_bug_on_msg(r2net_handler_tree_lookup(msg_type, key, &p,
875 &parent) == NULL,
876 "couldn't find handler we *just* registered "
877 "for type %u key %08x\n", msg_type, key);
878 }
879 write_unlock(&r2net_handler_lock);
880
883out:
884 if (ret)
885 kfree(nmh);
886
887 return ret;
888}
889EXPORT_SYMBOL_GPL(r2net_register_handler);
890
891void r2net_unregister_handler_list(struct list_head *list)
892{
893 struct r2net_msg_handler *nmh, *n;
894
895 write_lock(&r2net_handler_lock);
896 list_for_each_entry_safe(nmh, n, list, nh_unregister_item) {
897 mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n",
898 nmh->nh_func, nmh->nh_msg_type, nmh->nh_key);
899 rb_erase(&nmh->nh_node, &r2net_handler_tree);
900 list_del_init(&nmh->nh_unregister_item);
901 kref_put(&nmh->nh_kref, r2net_handler_kref_release);
902 }
903 write_unlock(&r2net_handler_lock);
904}
905EXPORT_SYMBOL_GPL(r2net_unregister_handler_list);
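/*
 * A hedged usage sketch for the handler API above (MY_MSG_TYPE, MY_KEY,
 * MY_MAX_LEN and my_handler are hypothetical, not defined here):
 *
 *	static LIST_HEAD(my_unreg_list);
 *
 *	static int my_handler(struct r2net_msg *msg, u32 len, void *data,
 *			      void **ret_data)
 *	{
 *		return 0;	(sent back to the sender as its status)
 *	}
 *
 *	err = r2net_register_handler(MY_MSG_TYPE, MY_KEY, MY_MAX_LEN,
 *				     my_handler, NULL, NULL,
 *				     &my_unreg_list);
 *	...
 *	r2net_unregister_handler_list(&my_unreg_list);
 */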
906
907static struct r2net_msg_handler *r2net_handler_get(u32 msg_type, u32 key)
908{
909 struct r2net_msg_handler *nmh;
910
911 read_lock(&r2net_handler_lock);
912 nmh = r2net_handler_tree_lookup(msg_type, key, NULL, NULL);
913 if (nmh)
914 kref_get(&nmh->nh_kref);
915 read_unlock(&r2net_handler_lock);
916
917 return nmh;
918}
919
920/* ------------------------------------------------------------ */
921
922static int r2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
923{
924 int ret;
925 mm_segment_t oldfs;
926 struct kvec vec = {
927 .iov_len = len,
928 .iov_base = data,
929 };
930 struct msghdr msg = {
931 .msg_iovlen = 1,
932 .msg_iov = (struct iovec *)&vec,
933 .msg_flags = MSG_DONTWAIT,
934 };
935
936 oldfs = get_fs();
937 set_fs(get_ds());
938 ret = sock_recvmsg(sock, &msg, len, msg.msg_flags);
939 set_fs(oldfs);
940
941 return ret;
942}
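/*
 * The get_fs()/set_fs() switch above exists only so sock_recvmsg()
 * accepts kernel-space buffers.  kernel_recvmsg() wraps the same dance;
 * a hedged equivalent sketch of this helper:
 *
 *	struct msghdr msg = { };
 *	struct kvec vec = { .iov_base = data, .iov_len = len };
 *
 *	return kernel_recvmsg(sock, &msg, &vec, 1, len, MSG_DONTWAIT);
 */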
943
944static int r2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
945 size_t veclen, size_t total)
946{
947 int ret;
948 mm_segment_t oldfs;
949 struct msghdr msg = {
950 .msg_iov = (struct iovec *)vec,
951 .msg_iovlen = veclen,
952 };
953
954 if (sock == NULL) {
955 ret = -EINVAL;
956 goto out;
957 }
958
959 oldfs = get_fs();
960 set_fs(get_ds());
961 ret = sock_sendmsg(sock, &msg, total);
962 set_fs(oldfs);
963 if (ret != total) {
964 mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret,
965 total);
966 if (ret >= 0)
967 ret = -EPIPE; /* should be smarter, I bet */
968 goto out;
969 }
970
971 ret = 0;
972out:
973 if (ret < 0)
974 mlog(0, "returning error: %d\n", ret);
975 return ret;
976}
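/*
 * Likewise for the send side: kernel_sendmsg() performs the
 * address-limit switch internally, so a hedged equivalent of the core
 * of the helper above is:
 *
 *	struct msghdr msg = { };
 *
 *	ret = kernel_sendmsg(sock, &msg, vec, veclen, total);
 */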
977
978static void r2net_sendpage(struct r2net_sock_container *sc,
979 void *kmalloced_virt,
980 size_t size)
981{
982 struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
983 ssize_t ret;
984
985 while (1) {
986 mutex_lock(&sc->sc_send_lock);
987 ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
988 virt_to_page(kmalloced_virt),
989 (long)kmalloced_virt & ~PAGE_MASK,
990 size, MSG_DONTWAIT);
991 mutex_unlock(&sc->sc_send_lock);
992 if (ret == size)
993 break;
994 if (ret == (ssize_t)-EAGAIN) {
995 mlog(0, "sendpage of size %zu to " SC_NODEF_FMT
996 " returned EAGAIN\n", size, sc->sc_node->nd_name,
997 sc->sc_node->nd_num,
998 &sc->sc_node->nd_ipv4_address,
999 ntohs(sc->sc_node->nd_ipv4_port));
1000 cond_resched();
1001 continue;
1002 }
1003 mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
1004 " failed with %zd\n", size, sc->sc_node->nd_name,
1005 sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
1006 ntohs(sc->sc_node->nd_ipv4_port), ret);
1007 r2net_ensure_shutdown(nn, sc, 0);
1008 break;
1009 }
1010}
1011
1012static void r2net_init_msg(struct r2net_msg *msg, u16 data_len,
1013 u16 msg_type, u32 key)
1014{
1015 memset(msg, 0, sizeof(struct r2net_msg));
1016 msg->magic = cpu_to_be16(R2NET_MSG_MAGIC);
1017 msg->data_len = cpu_to_be16(data_len);
1018 msg->msg_type = cpu_to_be16(msg_type);
1019 msg->sys_status = cpu_to_be32(R2NET_ERR_NONE);
1020 msg->status = 0;
1021 msg->key = cpu_to_be32(key);
1022}
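/*
 * Every multi-byte header field is stored big-endian above, so the
 * message parses identically on any host.  A receiver undoes it with
 * the matching be*_to_cpu() helpers, as the rx path below does:
 *
 *	u16 type = be16_to_cpu(msg->msg_type);
 *	u16 dlen = be16_to_cpu(msg->data_len);
 *	u32 key  = be32_to_cpu(msg->key);
 */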
1023
1024static int r2net_tx_can_proceed(struct r2net_node *nn,
1025 struct r2net_sock_container **sc_ret,
1026 int *error)
1027{
1028 int ret = 0;
1029
1030 spin_lock(&nn->nn_lock);
1031 if (nn->nn_persistent_error) {
1032 ret = 1;
1033 *sc_ret = NULL;
1034 *error = nn->nn_persistent_error;
1035 } else if (nn->nn_sc_valid) {
1036 kref_get(&nn->nn_sc->sc_kref);
1037
1038 ret = 1;
1039 *sc_ret = nn->nn_sc;
1040 *error = 0;
1041 }
1042 spin_unlock(&nn->nn_lock);
1043
1044 return ret;
1045}
1046
1047/* Get a map of all nodes to which this node is currently connected */
1048void r2net_fill_node_map(unsigned long *map, unsigned bytes)
1049{
1050 struct r2net_sock_container *sc;
1051 int node, ret;
1052
1053 BUG_ON(bytes < (BITS_TO_LONGS(R2NM_MAX_NODES) * sizeof(unsigned long)));
1054
1055 memset(map, 0, bytes);
1056 for (node = 0; node < R2NM_MAX_NODES; ++node) {
1057		if (r2net_tx_can_proceed(r2net_nn_from_num(node), &sc, &ret)
1058		    && !ret) { /* sc and ret are set only on nonzero return */
1059			set_bit(node, map);
1060			sc_put(sc);
1061		}
1062 }
1063}
1064EXPORT_SYMBOL_GPL(r2net_fill_node_map);
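/*
 * Hedged usage sketch: callers are expected to pass a bitmap sized for
 * R2NM_MAX_NODES and test per-node bits afterwards (peer is a
 * placeholder node number):
 *
 *	unsigned long map[BITS_TO_LONGS(R2NM_MAX_NODES)];
 *	int connected;
 *
 *	r2net_fill_node_map(map, sizeof(map));
 *	connected = test_bit(peer, map);
 */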
1065
1066int r2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
1067 size_t caller_veclen, u8 target_node, int *status)
1068{
1069 int ret = 0;
1070 struct r2net_msg *msg = NULL;
1071 size_t veclen, caller_bytes = 0;
1072 struct kvec *vec = NULL;
1073 struct r2net_sock_container *sc = NULL;
1074 struct r2net_node *nn = r2net_nn_from_num(target_node);
1075 struct r2net_status_wait nsw = {
1076 .ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item),
1077 };
1078 struct r2net_send_tracking nst;
1079
1080 /* this may be a general bug fix */
1081 init_waitqueue_head(&nsw.ns_wq);
1082
1083 r2net_init_nst(&nst, msg_type, key, current, target_node);
1084
1085 if (r2net_wq == NULL) {
1086 mlog(0, "attempt to tx without r2netd running\n");
1087 ret = -ESRCH;
1088 goto out;
1089 }
1090
1091 if (caller_veclen == 0) {
1092 mlog(0, "bad kvec array length\n");
1093 ret = -EINVAL;
1094 goto out;
1095 }
1096
1097 caller_bytes = iov_length((struct iovec *)caller_vec, caller_veclen);
1098 if (caller_bytes > R2NET_MAX_PAYLOAD_BYTES) {
1099 mlog(0, "total payload len %zu too large\n", caller_bytes);
1100 ret = -EINVAL;
1101 goto out;
1102 }
1103
1104 if (target_node == r2nm_this_node()) {
1105 ret = -ELOOP;
1106 goto out;
1107 }
1108
1109 r2net_debug_add_nst(&nst);
1110
1111 r2net_set_nst_sock_time(&nst);
1112
1113 wait_event(nn->nn_sc_wq, r2net_tx_can_proceed(nn, &sc, &ret));
1114 if (ret)
1115 goto out;
1116
1117 r2net_set_nst_sock_container(&nst, sc);
1118
1119 veclen = caller_veclen + 1;
1120 vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC);
1121 if (vec == NULL) {
1122		mlog(0, "failed to allocate a %zu element kvec!\n", veclen);
1123 ret = -ENOMEM;
1124 goto out;
1125 }
1126
1127 msg = kmalloc(sizeof(struct r2net_msg), GFP_ATOMIC);
1128 if (!msg) {
1129 mlog(0, "failed to allocate a r2net_msg!\n");
1130 ret = -ENOMEM;
1131 goto out;
1132 }
1133
1134 r2net_init_msg(msg, caller_bytes, msg_type, key);
1135
1136 vec[0].iov_len = sizeof(struct r2net_msg);
1137 vec[0].iov_base = msg;
1138 memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec));
1139
1140 ret = r2net_prep_nsw(nn, &nsw);
1141 if (ret)
1142 goto out;
1143
1144 msg->msg_num = cpu_to_be32(nsw.ns_id);
1145 r2net_set_nst_msg_id(&nst, nsw.ns_id);
1146
1147 r2net_set_nst_send_time(&nst);
1148
1149 /* finally, convert the message header to network byte-order
1150 * and send */
1151 mutex_lock(&sc->sc_send_lock);
1152 ret = r2net_send_tcp_msg(sc->sc_sock, vec, veclen,
1153 sizeof(struct r2net_msg) + caller_bytes);
1154 mutex_unlock(&sc->sc_send_lock);
1155 msglog(msg, "sending returned %d\n", ret);
1156 if (ret < 0) {
1157 mlog(0, "error returned from r2net_send_tcp_msg=%d\n", ret);
1158 goto out;
1159 }
1160
1161 /* wait on other node's handler */
1162 r2net_set_nst_status_time(&nst);
1163 wait_event(nsw.ns_wq, r2net_nsw_completed(nn, &nsw));
1164
1165 r2net_update_send_stats(&nst, sc);
1166
1167	/* Note that we avoid overwriting the caller's status return
1168 * variable if a system error was reported on the other
1169 * side. Callers beware. */
1170 ret = r2net_sys_err_to_errno(nsw.ns_sys_status);
1171 if (status && !ret)
1172 *status = nsw.ns_status;
1173
1174 mlog(0, "woken, returning system status %d, user status %d\n",
1175 ret, nsw.ns_status);
1176out:
1177 r2net_debug_del_nst(&nst); /* must be before dropping sc and node */
1178 if (sc)
1179 sc_put(sc);
1180 kfree(vec);
1181 kfree(msg);
1182 r2net_complete_nsw(nn, &nsw, 0, 0, 0);
1183 return ret;
1184}
1185EXPORT_SYMBOL_GPL(r2net_send_message_vec);
1186
1187int r2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
1188 u8 target_node, int *status)
1189{
1190 struct kvec vec = {
1191 .iov_base = data,
1192 .iov_len = len,
1193 };
1194 return r2net_send_message_vec(msg_type, key, &vec, 1,
1195 target_node, status);
1196}
1197EXPORT_SYMBOL_GPL(r2net_send_message);
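/*
 * Hedged usage sketch (MY_MSG_TYPE and MY_KEY are hypothetical).  Note
 * the two distinct results: the return value carries local and system
 * errors, while *status carries the remote handler's result and is only
 * meaningful when the return value is 0:
 *
 *	int status = 0;
 *	int err = r2net_send_message(MY_MSG_TYPE, MY_KEY, buf, buflen,
 *				     target_node, &status);
 *
 *	if (err == 0 && status < 0)
 *		handle_remote_failure(status);	(hypothetical helper)
 */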
1198
1199static int r2net_send_status_magic(struct socket *sock, struct r2net_msg *hdr,
1200 enum r2net_system_error syserr, int err)
1201{
1202 struct kvec vec = {
1203 .iov_base = hdr,
1204 .iov_len = sizeof(struct r2net_msg),
1205 };
1206
1207 BUG_ON(syserr >= R2NET_ERR_MAX);
1208
1209 /* leave other fields intact from the incoming message, msg_num
1210 * in particular */
1211 hdr->sys_status = cpu_to_be32(syserr);
1212 hdr->status = cpu_to_be32(err);
1213 /* twiddle the magic */
1214 hdr->magic = cpu_to_be16(R2NET_MSG_STATUS_MAGIC);
1215 hdr->data_len = 0;
1216
1217 msglog(hdr, "about to send status magic %d\n", err);
1218 /* hdr has been in host byteorder this whole time */
1219 return r2net_send_tcp_msg(sock, &vec, 1, sizeof(struct r2net_msg));
1220}
1221
1222/*
1223 * "data magic" is a long version of "status magic" where the message
1224 * payload actually contains data to be passed in reply to certain messages
1225 */
1226static int r2net_send_data_magic(struct r2net_sock_container *sc,
1227 struct r2net_msg *hdr,
1228 void *data, size_t data_len,
1229 enum r2net_system_error syserr, int err)
1230{
1231 struct kvec vec[2];
1232 int ret;
1233
1234 vec[0].iov_base = hdr;
1235 vec[0].iov_len = sizeof(struct r2net_msg);
1236 vec[1].iov_base = data;
1237 vec[1].iov_len = data_len;
1238
1239 BUG_ON(syserr >= R2NET_ERR_MAX);
1240
1241 /* leave other fields intact from the incoming message, msg_num
1242 * in particular */
1243 hdr->sys_status = cpu_to_be32(syserr);
1244 hdr->status = cpu_to_be32(err);
1245 hdr->magic = cpu_to_be16(R2NET_MSG_DATA_MAGIC); /* twiddle magic */
1246 hdr->data_len = cpu_to_be16(data_len);
1247
1248 msglog(hdr, "about to send data magic %d\n", err);
1249 /* hdr has been in host byteorder this whole time */
1250 ret = r2net_send_tcp_msg(sc->sc_sock, vec, 2,
1251 sizeof(struct r2net_msg) + data_len);
1252 return ret;
1253}
1254
1255/*
1256 * called by a message handler to convert an otherwise normal reply
1257 * message into a "data magic" message
1258 */
1259void r2net_force_data_magic(struct r2net_msg *hdr, u16 msgtype, u32 msgkey)
1260{
1261 hdr->magic = cpu_to_be16(R2NET_MSG_DATA_MAGIC);
1262 hdr->msg_type = cpu_to_be16(msgtype);
1263 hdr->key = cpu_to_be32(msgkey);
1264}
1265
1266/* this returns -errno if the header was unknown or too large, etc.
1267 * after this is called the buffer is reused for the next message */
1268static int r2net_process_message(struct r2net_sock_container *sc,
1269 struct r2net_msg *hdr)
1270{
1271 struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
1272 int ret = 0, handler_status;
1273 enum r2net_system_error syserr;
1274 struct r2net_msg_handler *nmh = NULL;
1275 void *ret_data = NULL;
1276 int data_magic = 0;
1277
1278 msglog(hdr, "processing message\n");
1279
1280 r2net_sc_postpone_idle(sc);
1281
1282 switch (be16_to_cpu(hdr->magic)) {
1283
1284 case R2NET_MSG_STATUS_MAGIC:
1285 /* special type for returning message status */
1286 r2net_complete_nsw(nn, NULL, be32_to_cpu(hdr->msg_num),
1287 be32_to_cpu(hdr->sys_status),
1288 be32_to_cpu(hdr->status));
1289 goto out;
1290 case R2NET_MSG_KEEP_REQ_MAGIC:
1291 r2net_sendpage(sc, r2net_keep_resp, sizeof(*r2net_keep_resp));
1292 goto out;
1293 case R2NET_MSG_KEEP_RESP_MAGIC:
1294 goto out;
1295 case R2NET_MSG_MAGIC:
1296 break;
1297 case R2NET_MSG_DATA_MAGIC:
1298 /*
1299 * unlike a normal status magic, a data magic DOES
1300 * (MUST) have a handler, so the control flow is
1301 * a little funky here as a result
1302 */
1303 data_magic = 1;
1304 break;
1305 default:
1306 msglog(hdr, "bad magic\n");
1307 ret = -EINVAL;
1308 goto out;
1310 }
1311
1312 /* find a handler for it */
1313 handler_status = 0;
1314 nmh = r2net_handler_get(be16_to_cpu(hdr->msg_type),
1315 be32_to_cpu(hdr->key));
1316 if (!nmh) {
1317 mlog(ML_TCP, "couldn't find handler for type %u key %08x\n",
1318 be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key));
1319 syserr = R2NET_ERR_NO_HNDLR;
1320 goto out_respond;
1321 }
1322
1323 syserr = R2NET_ERR_NONE;
1324
1325 if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len)
1326 syserr = R2NET_ERR_OVERFLOW;
1327
1328 if (syserr != R2NET_ERR_NONE)
1329 goto out_respond;
1330
1331 r2net_set_func_start_time(sc);
1332 sc->sc_msg_key = be32_to_cpu(hdr->key);
1333 sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
1334 handler_status = (nmh->nh_func)(hdr, sizeof(struct r2net_msg) +
1335 be16_to_cpu(hdr->data_len),
1336 nmh->nh_func_data, &ret_data);
1337 if (data_magic) {
1338 /*
1339 * handler handled data sent in reply to request
1340 * so complete the transaction
1341 */
1342 r2net_complete_nsw(nn, NULL, be32_to_cpu(hdr->msg_num),
1343 be32_to_cpu(hdr->sys_status), handler_status);
1344 goto out;
1345 }
1346 /*
1347 * handler changed magic to DATA_MAGIC to reply to request for data,
1348 * implies ret_data points to data to return and handler_status
1349 * is the number of bytes of data
1350 */
1351 if (be16_to_cpu(hdr->magic) == R2NET_MSG_DATA_MAGIC) {
1352 ret = r2net_send_data_magic(sc, hdr,
1353 ret_data, handler_status,
1354 syserr, 0);
1355 hdr = NULL;
1356 mlog(0, "sending data reply %d, syserr %d returned %d\n",
1357 handler_status, syserr, ret);
1358 r2net_set_func_stop_time(sc);
1359
1360 r2net_update_recv_stats(sc);
1361 goto out;
1362 }
1363 r2net_set_func_stop_time(sc);
1364
1365 r2net_update_recv_stats(sc);
1366
1367out_respond:
1368 /* this destroys the hdr, so don't use it after this */
1369 mutex_lock(&sc->sc_send_lock);
1370 ret = r2net_send_status_magic(sc->sc_sock, hdr, syserr,
1371 handler_status);
1372 mutex_unlock(&sc->sc_send_lock);
1373 hdr = NULL;
1374 mlog(0, "sending handler status %d, syserr %d returned %d\n",
1375 handler_status, syserr, ret);
1376
1377 if (nmh) {
1378 BUG_ON(ret_data != NULL && nmh->nh_post_func == NULL);
1379 if (nmh->nh_post_func)
1380 (nmh->nh_post_func)(handler_status, nmh->nh_func_data,
1381 ret_data);
1382 }
1383
1384out:
1385 if (nmh)
1386 r2net_handler_put(nmh);
1387 return ret;
1388}
1389
1390static int r2net_check_handshake(struct r2net_sock_container *sc)
1391{
1392 struct r2net_handshake *hand = page_address(sc->sc_page);
1393 struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
1394
1395 if (hand->protocol_version != cpu_to_be64(R2NET_PROTOCOL_VERSION)) {
1396 printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " Advertised net "
1397 "protocol version %llu but %llu is required. "
1398 "Disconnecting.\n", sc->sc_node->nd_name,
1399 sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
1400 ntohs(sc->sc_node->nd_ipv4_port),
1401 (unsigned long long)be64_to_cpu(hand->protocol_version),
1402 R2NET_PROTOCOL_VERSION);
1403
1404	/* don't bother reconnecting if it's the wrong version. */
1405 r2net_ensure_shutdown(nn, sc, -ENOTCONN);
1406 return -1;
1407 }
1408
1409 /*
1410 * Ensure timeouts are consistent with other nodes, otherwise
1411 * we can end up with one node thinking that the other must be down,
1412 * but isn't. This can ultimately cause corruption.
1413 */
1414 if (be32_to_cpu(hand->r2net_idle_timeout_ms) !=
1415 r2net_idle_timeout()) {
1416 printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " uses a network "
1417 "idle timeout of %u ms, but we use %u ms locally. "
1418 "Disconnecting.\n", sc->sc_node->nd_name,
1419 sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
1420 ntohs(sc->sc_node->nd_ipv4_port),
1421 be32_to_cpu(hand->r2net_idle_timeout_ms),
1422 r2net_idle_timeout());
1423 r2net_ensure_shutdown(nn, sc, -ENOTCONN);
1424 return -1;
1425 }
1426
1427 if (be32_to_cpu(hand->r2net_keepalive_delay_ms) !=
1428 r2net_keepalive_delay()) {
1429 printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " uses a keepalive "
1430 "delay of %u ms, but we use %u ms locally. "
1431 "Disconnecting.\n", sc->sc_node->nd_name,
1432 sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
1433 ntohs(sc->sc_node->nd_ipv4_port),
1434 be32_to_cpu(hand->r2net_keepalive_delay_ms),
1435 r2net_keepalive_delay());
1436 r2net_ensure_shutdown(nn, sc, -ENOTCONN);
1437 return -1;
1438 }
1439
1440 if (be32_to_cpu(hand->r2hb_heartbeat_timeout_ms) !=
1441 R2HB_MAX_WRITE_TIMEOUT_MS) {
1442 printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " uses a heartbeat "
1443 "timeout of %u ms, but we use %u ms locally. "
1444 "Disconnecting.\n", sc->sc_node->nd_name,
1445 sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
1446 ntohs(sc->sc_node->nd_ipv4_port),
1447 be32_to_cpu(hand->r2hb_heartbeat_timeout_ms),
1448 R2HB_MAX_WRITE_TIMEOUT_MS);
1449 r2net_ensure_shutdown(nn, sc, -ENOTCONN);
1450 return -1;
1451 }
1452
1453 sc->sc_handshake_ok = 1;
1454
1455 spin_lock(&nn->nn_lock);
1456 /* set valid and queue the idle timers only if it hasn't been
1457 * shut down already */
1458 if (nn->nn_sc == sc) {
1459 r2net_sc_reset_idle_timer(sc);
1460 atomic_set(&nn->nn_timeout, 0);
1461 r2net_set_nn_state(nn, sc, 1, 0);
1462 }
1463 spin_unlock(&nn->nn_lock);
1464
1465 /* shift everything up as though it wasn't there */
1466 sc->sc_page_off -= sizeof(struct r2net_handshake);
1467 if (sc->sc_page_off)
1468 memmove(hand, hand + 1, sc->sc_page_off);
1469
1470 return 0;
1471}
1472
1473/* this demuxes the queued rx bytes into header or payload bits and calls
1474 * handlers as each full message is read off the socket. it returns -error,
1475 * == 0 eof, or > 0 for progress made. */
1476static int r2net_advance_rx(struct r2net_sock_container *sc)
1477{
1478 struct r2net_msg *hdr;
1479 int ret = 0;
1480 void *data;
1481 size_t datalen;
1482
1483 sclog(sc, "receiving\n");
1484 r2net_set_advance_start_time(sc);
1485
1486 if (unlikely(sc->sc_handshake_ok == 0)) {
1487 if (sc->sc_page_off < sizeof(struct r2net_handshake)) {
1488 data = page_address(sc->sc_page) + sc->sc_page_off;
1489 datalen = sizeof(struct r2net_handshake) -
1490 sc->sc_page_off;
1491 ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
1492 if (ret > 0)
1493 sc->sc_page_off += ret;
1494 }
1495
1496 if (sc->sc_page_off == sizeof(struct r2net_handshake)) {
1497 r2net_check_handshake(sc);
1498 if (unlikely(sc->sc_handshake_ok == 0))
1499 ret = -EPROTO;
1500 }
1501 goto out;
1502 }
1503
1504 /* do we need more header? */
1505 if (sc->sc_page_off < sizeof(struct r2net_msg)) {
1506 data = page_address(sc->sc_page) + sc->sc_page_off;
1507 datalen = sizeof(struct r2net_msg) - sc->sc_page_off;
1508 ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
1509 if (ret > 0) {
1510 sc->sc_page_off += ret;
1511 /* only swab incoming here.. we can
1512 * only get here once as we cross from
1513 * being under to over */
1514 if (sc->sc_page_off == sizeof(struct r2net_msg)) {
1515 hdr = page_address(sc->sc_page);
1516 if (be16_to_cpu(hdr->data_len) >
1517 R2NET_MAX_PAYLOAD_BYTES)
1518 ret = -EOVERFLOW;
1519 }
1520 }
1521 if (ret <= 0)
1522 goto out;
1523 }
1524
1525 if (sc->sc_page_off < sizeof(struct r2net_msg)) {
1526 /* oof, still don't have a header */
1527 goto out;
1528 }
1529
1530 /* this was swabbed above when we first read it */
1531 hdr = page_address(sc->sc_page);
1532
1533 msglog(hdr, "at page_off %zu\n", sc->sc_page_off);
1534
1535 /* do we need more payload? */
1536 if (sc->sc_page_off - sizeof(struct r2net_msg) <
1537 be16_to_cpu(hdr->data_len)) {
1538 /* need more payload */
1539 data = page_address(sc->sc_page) + sc->sc_page_off;
1540 datalen = (sizeof(struct r2net_msg) +
1541 be16_to_cpu(hdr->data_len)) -
1542 sc->sc_page_off;
1543 ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
1544 if (ret > 0)
1545 sc->sc_page_off += ret;
1546 if (ret <= 0)
1547 goto out;
1548 }
1549
1550 if (sc->sc_page_off - sizeof(struct r2net_msg) ==
1551 be16_to_cpu(hdr->data_len)) {
1552 /* we can only get here once, the first time we read
1553 * the payload.. so set ret to progress if the handler
1554 * works out. after calling this the message is toast */
1555 ret = r2net_process_message(sc, hdr);
1556 if (ret == 0)
1557 ret = 1;
1558 sc->sc_page_off = 0;
1559 }
1560
1561out:
1562 sclog(sc, "ret = %d\n", ret);
1563 r2net_set_advance_stop_time(sc);
1564 return ret;
1565}
1566
1567/* this work func is triggered by data ready. it reads until it can read no
1568 * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
1569 * our work the work struct will be marked and we'll be called again. */
1570static void r2net_rx_until_empty(struct work_struct *work)
1571{
1572 struct r2net_sock_container *sc =
1573 container_of(work, struct r2net_sock_container, sc_rx_work);
1574 int ret;
1575
1576 do {
1577 ret = r2net_advance_rx(sc);
1578 } while (ret > 0);
1579
1580 if (ret <= 0 && ret != -EAGAIN) {
1581 struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
1582 sclog(sc, "saw error %d, closing\n", ret);
1583 /* not permanent so read failed handshake can retry */
1584 r2net_ensure_shutdown(nn, sc, 0);
1585 }
1586
1587 sc_put(sc);
1588}
1589
1590static int r2net_set_nodelay(struct socket *sock)
1591{
1592 int ret, val = 1;
1593 mm_segment_t oldfs;
1594
1595 oldfs = get_fs();
1596 set_fs(KERNEL_DS);
1597
1598 /*
1599 * Dear unsuspecting programmer,
1600 *
1601 * Don't use sock_setsockopt() for SOL_TCP. It doesn't check its level
1602 * argument and assumes SOL_SOCKET so, say, your TCP_NODELAY will
1603 * silently turn into SO_DEBUG.
1604 *
1605 * Yours,
1606 * Keeper of hilariously fragile interfaces.
1607 */
1608 ret = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY,
1609 (char __user *)&val, sizeof(val));
1610
1611 set_fs(oldfs);
1612 return ret;
1613}
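/*
 * kernel_setsockopt() performs the same KERNEL_DS switch internally and
 * calls the protocol-level setsockopt, so a hedged equivalent of this
 * helper would be:
 *
 *	int val = 1;
 *
 *	return kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
 *				 (char *)&val, sizeof(val));
 */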
1614
1615static void r2net_initialize_handshake(void)
1616{
1617 r2net_hand->r2hb_heartbeat_timeout_ms = cpu_to_be32(
1618 R2HB_MAX_WRITE_TIMEOUT_MS);
1619 r2net_hand->r2net_idle_timeout_ms = cpu_to_be32(r2net_idle_timeout());
1620 r2net_hand->r2net_keepalive_delay_ms = cpu_to_be32(
1621 r2net_keepalive_delay());
1622 r2net_hand->r2net_reconnect_delay_ms = cpu_to_be32(
1623 r2net_reconnect_delay());
1624}
1625
1626/* ------------------------------------------------------------ */
1627
1628/* called when a connect completes and after a sock is accepted. the
1629 * rx path will see the response and mark the sc valid */
1630static void r2net_sc_connect_completed(struct work_struct *work)
1631{
1632 struct r2net_sock_container *sc =
1633 container_of(work, struct r2net_sock_container,
1634 sc_connect_work);
1635
1636 mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
1637 (unsigned long long)R2NET_PROTOCOL_VERSION,
1638 (unsigned long long)be64_to_cpu(r2net_hand->connector_id));
1639
1640 r2net_initialize_handshake();
1641 r2net_sendpage(sc, r2net_hand, sizeof(*r2net_hand));
1642 sc_put(sc);
1643}
1644
1645/* this is called as a work_struct func. */
1646static void r2net_sc_send_keep_req(struct work_struct *work)
1647{
1648 struct r2net_sock_container *sc =
1649 container_of(work, struct r2net_sock_container,
1650 sc_keepalive_work.work);
1651
1652 r2net_sendpage(sc, r2net_keep_req, sizeof(*r2net_keep_req));
1653 sc_put(sc);
1654}
1655
1656/* socket shutdown does a del_timer_sync against this as it tears down.
1657 * we can't start this timer until we've got to the point in sc buildup
1658 * where shutdown is going to be involved */
1659static void r2net_idle_timer(unsigned long data)
1660{
1661 struct r2net_sock_container *sc = (struct r2net_sock_container *)data;
1662#ifdef CONFIG_DEBUG_FS
1663 unsigned long msecs = ktime_to_ms(ktime_get()) -
1664 ktime_to_ms(sc->sc_tv_timer);
1665#else
1666 unsigned long msecs = r2net_idle_timeout();
1667#endif
1668
1669 printk(KERN_NOTICE "ramster: Connection to " SC_NODEF_FMT " has been "
1670	       "idle for %lu.%03lu secs, shutting it down.\n",
1671 sc->sc_node->nd_name, sc->sc_node->nd_num,
1672 &sc->sc_node->nd_ipv4_address, ntohs(sc->sc_node->nd_ipv4_port),
1673 msecs / 1000, msecs % 1000);
1674
1675 /*
1676 * Initialize the nn_timeout so that the next connection attempt
1677 * will continue in r2net_start_connect.
1678 */
1679 /* Avoid spurious shutdowns... not sure if this is still necessary */
1680 pr_err("ramster_idle_timer, skipping shutdown work\n");
1681#if 0
1682 /* old code used to do these two lines */
1683 atomic_set(&nn->nn_timeout, 1);
1684 r2net_sc_queue_work(sc, &sc->sc_shutdown_work);
1685#endif
1686}
1687
1688static void r2net_sc_reset_idle_timer(struct r2net_sock_container *sc)
1689{
1690 r2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
1691 r2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
1692 msecs_to_jiffies(r2net_keepalive_delay()));
1693 r2net_set_sock_timer(sc);
1694 mod_timer(&sc->sc_idle_timeout,
1695 jiffies + msecs_to_jiffies(r2net_idle_timeout()));
1696}
1697
1698static void r2net_sc_postpone_idle(struct r2net_sock_container *sc)
1699{
1700 /* Only push out an existing timer */
1701 if (timer_pending(&sc->sc_idle_timeout))
1702 r2net_sc_reset_idle_timer(sc);
1703}
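/*
 * This pairing only works if keepalives fire well inside the idle
 * window, since every received message (keepalive responses included)
 * postpones the idle timer through r2net_sc_postpone_idle().  A hedged
 * sanity-check sketch over the configured values:
 *
 *	if (r2net_keepalive_delay() >= r2net_idle_timeout())
 *		pr_warn("keepalive delay >= idle timeout; idle peers "
 *			"may be shut down spuriously\n");
 */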
1704
1705/* this work func is kicked whenever a path sets the nn state which doesn't
1706 * have valid set. This includes seeing hb come up, losing a connection,
1707 * having a connect attempt fail, etc. This centralizes the logic which decides
1708 * if a connect attempt should be made or if we should give up and all future
1709 * transmit attempts should fail */
1710static void r2net_start_connect(struct work_struct *work)
1711{
1712 struct r2net_node *nn =
1713 container_of(work, struct r2net_node, nn_connect_work.work);
1714 struct r2net_sock_container *sc = NULL;
1715 struct r2nm_node *node = NULL, *mynode = NULL;
1716 struct socket *sock = NULL;
1717 struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
1718 int ret = 0, stop;
1719 unsigned int timeout;
1720
1721 /* if we're greater we initiate tx, otherwise we accept */
1722 if (r2nm_this_node() <= r2net_num_from_nn(nn))
1723 goto out;
1724
1725 /* watch for racing with tearing a node down */
1726 node = r2nm_get_node_by_num(r2net_num_from_nn(nn));
1727 if (node == NULL) {
1728 ret = 0;
1729 goto out;
1730 }
1731
1732 mynode = r2nm_get_node_by_num(r2nm_this_node());
1733 if (mynode == NULL) {
1734 ret = 0;
1735 goto out;
1736 }
1737
1738 spin_lock(&nn->nn_lock);
1739 /*
1740 * see if we already have one pending or have given up.
1741 * For nn_timeout, it is set when we close the connection
1742 * because of the idle time out. So it means that we have
1743 * at least connected to that node successfully once,
1744 * now try to connect to it again.
1745 */
1746 timeout = atomic_read(&nn->nn_timeout);
1747 stop = (nn->nn_sc ||
1748 (nn->nn_persistent_error &&
1749 (nn->nn_persistent_error != -ENOTCONN || timeout == 0)));
1750 spin_unlock(&nn->nn_lock);
1751 if (stop)
1752 goto out;
1753
1754 nn->nn_last_connect_attempt = jiffies;
1755
1756 sc = sc_alloc(node);
1757 if (sc == NULL) {
1758 mlog(0, "couldn't allocate sc\n");
1759 ret = -ENOMEM;
1760 goto out;
1761 }
1762
1763 ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
1764 if (ret < 0) {
1765 mlog(0, "can't create socket: %d\n", ret);
1766 goto out;
1767 }
1768 sc->sc_sock = sock; /* freed by sc_kref_release */
1769
1770 sock->sk->sk_allocation = GFP_ATOMIC;
1771
1772 myaddr.sin_family = AF_INET;
1773 myaddr.sin_addr.s_addr = mynode->nd_ipv4_address;
1774 myaddr.sin_port = htons(0); /* any port */
1775
1776 ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
1777 sizeof(myaddr));
1778 if (ret) {
1779 mlog(ML_ERROR, "bind failed with %d at address %pI4\n",
1780 ret, &mynode->nd_ipv4_address);
1781 goto out;
1782 }
1783
1784 ret = r2net_set_nodelay(sc->sc_sock);
1785 if (ret) {
1786 mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
1787 goto out;
1788 }
1789
1790 r2net_register_callbacks(sc->sc_sock->sk, sc);
1791
1792 spin_lock(&nn->nn_lock);
1793 /* handshake completion will set nn->nn_sc_valid */
1794 r2net_set_nn_state(nn, sc, 0, 0);
1795 spin_unlock(&nn->nn_lock);
1796
1797 remoteaddr.sin_family = AF_INET;
1798 remoteaddr.sin_addr.s_addr = node->nd_ipv4_address;
1799 remoteaddr.sin_port = node->nd_ipv4_port;
1800
1801 ret = sc->sc_sock->ops->connect(sc->sc_sock,
1802 (struct sockaddr *)&remoteaddr,
1803 sizeof(remoteaddr),
1804 O_NONBLOCK);
1805 if (ret == -EINPROGRESS)
1806 ret = 0;
1807
1808out:
1809	if (ret && sc) {
1810		/* sc is NULL here when sc_alloc() failed; don't deref it */
1811		printk(KERN_NOTICE "ramster: Connect attempt to " SC_NODEF_FMT
1812		       " failed with errno %d\n", sc->sc_node->nd_name,
1813		       sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
1814		       ntohs(sc->sc_node->nd_ipv4_port), ret);
1815		/* 0 err so that another will be queued and attempted
1816		 * from set_nn_state */
1817		r2net_ensure_shutdown(nn, sc, 0);
1818	}
1819 if (sc)
1820 sc_put(sc);
1821 if (node)
1822 r2nm_node_put(node);
1823 if (mynode)
1824 r2nm_node_put(mynode);
1825
1826 return;
1827}
1828
1829static void r2net_connect_expired(struct work_struct *work)
1830{
1831 struct r2net_node *nn =
1832 container_of(work, struct r2net_node, nn_connect_expired.work);
1833
1834 spin_lock(&nn->nn_lock);
1835 if (!nn->nn_sc_valid) {
1836 printk(KERN_NOTICE "ramster: No connection established with "
1837			"node %u after %u.%03u seconds, giving up.\n",
1838 r2net_num_from_nn(nn),
1839 r2net_idle_timeout() / 1000,
1840 r2net_idle_timeout() % 1000);
1841
1842 r2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
1843 }
1844 spin_unlock(&nn->nn_lock);
1845}
1846
1847static void r2net_still_up(struct work_struct *work)
1848{
1849}
1850
1851/* ------------------------------------------------------------ */
1852
1853void r2net_disconnect_node(struct r2nm_node *node)
1854{
1855 struct r2net_node *nn = r2net_nn_from_num(node->nd_num);
1856
1857 /* don't reconnect until it's heartbeating again */
1858 spin_lock(&nn->nn_lock);
1859 atomic_set(&nn->nn_timeout, 0);
1860 r2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
1861 spin_unlock(&nn->nn_lock);
1862
1863 if (r2net_wq) {
1864 cancel_delayed_work(&nn->nn_connect_expired);
1865 cancel_delayed_work(&nn->nn_connect_work);
1866 cancel_delayed_work(&nn->nn_still_up);
1867 flush_workqueue(r2net_wq);
1868 }
1869}
1870
1871static void r2net_hb_node_down_cb(struct r2nm_node *node, int node_num,
1872 void *data)
1873{
1874 if (!node)
1875 return;
1876
1877 if (node_num != r2nm_this_node())
1878 r2net_disconnect_node(node);
1879
1880 BUG_ON(atomic_read(&r2net_connected_peers) < 0);
1881}
1882
1883static void r2net_hb_node_up_cb(struct r2nm_node *node, int node_num,
1884 void *data)
1885{
1886 struct r2net_node *nn = r2net_nn_from_num(node_num);
1887
1888 BUG_ON(!node);
1889
1890 /* ensure an immediate connect attempt */
1891 nn->nn_last_connect_attempt = jiffies -
1892 (msecs_to_jiffies(r2net_reconnect_delay()) + 1);
1893
1894 if (node_num != r2nm_this_node()) {
1895		/* believe it or not, accept and node heartbeat testing
1896		 * can succeed for this node before we get here.. so
1897 * only use set_nn_state to clear the persistent error
1898 * if that hasn't already happened */
1899 spin_lock(&nn->nn_lock);
1900 atomic_set(&nn->nn_timeout, 0);
1901 if (nn->nn_persistent_error)
1902 r2net_set_nn_state(nn, NULL, 0, 0);
1903 spin_unlock(&nn->nn_lock);
1904 }
1905}
1906
1907void r2net_unregister_hb_callbacks(void)
1908{
1909 r2hb_unregister_callback(NULL, &r2net_hb_up);
1910 r2hb_unregister_callback(NULL, &r2net_hb_down);
1911}
1912
1913int r2net_register_hb_callbacks(void)
1914{
1915 int ret;
1916
1917 r2hb_setup_callback(&r2net_hb_down, R2HB_NODE_DOWN_CB,
1918 r2net_hb_node_down_cb, NULL, R2NET_HB_PRI);
1919 r2hb_setup_callback(&r2net_hb_up, R2HB_NODE_UP_CB,
1920 r2net_hb_node_up_cb, NULL, R2NET_HB_PRI);
1921
1922 ret = r2hb_register_callback(NULL, &r2net_hb_up);
1923 if (ret == 0)
1924 ret = r2hb_register_callback(NULL, &r2net_hb_down);
1925
1926 if (ret)
1927 r2net_unregister_hb_callbacks();
1928
1929 return ret;
1930}
1931
1932/* ------------------------------------------------------------ */
1933
1934static int r2net_accept_one(struct socket *sock)
1935{
1936 int ret, slen;
1937 struct sockaddr_in sin;
1938 struct socket *new_sock = NULL;
1939 struct r2nm_node *node = NULL;
1940 struct r2nm_node *local_node = NULL;
1941 struct r2net_sock_container *sc = NULL;
1942 struct r2net_node *nn;
1943
1944 BUG_ON(sock == NULL);
1945 ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
1946 sock->sk->sk_protocol, &new_sock);
1947 if (ret)
1948 goto out;
1949
1950 new_sock->type = sock->type;
1951 new_sock->ops = sock->ops;
1952 ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
1953 if (ret < 0)
1954 goto out;
1955
1956 new_sock->sk->sk_allocation = GFP_ATOMIC;
1957
1958 ret = r2net_set_nodelay(new_sock);
1959 if (ret) {
1960 mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
1961 goto out;
1962 }
1963
1964 slen = sizeof(sin);
1965 ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin,
1966 &slen, 1);
1967 if (ret < 0)
1968 goto out;
1969
1970 node = r2nm_get_node_by_ip(sin.sin_addr.s_addr);
1971 if (node == NULL) {
1972 printk(KERN_NOTICE "ramster: Attempt to connect from unknown "
1973 "node at %pI4:%d\n", &sin.sin_addr.s_addr,
1974 ntohs(sin.sin_port));
1975 ret = -EINVAL;
1976 goto out;
1977 }
1978
1979 if (r2nm_this_node() >= node->nd_num) {
1980 local_node = r2nm_get_node_by_num(r2nm_this_node());
1981 printk(KERN_NOTICE "ramster: Unexpected connect attempt seen "
1982 "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
1983 "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
1984 &(local_node->nd_ipv4_address),
1985 ntohs(local_node->nd_ipv4_port), node->nd_name,
1986 node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
1987 ret = -EINVAL;
1988 goto out;
1989 }
1990
1991 /* this happens all the time when the other node sees our heartbeat
1992 * and tries to connect before we see their heartbeat */
1993 if (!r2hb_check_node_heartbeating_from_callback(node->nd_num)) {
1994 mlog(ML_CONN, "attempt to connect from node '%s' at "
1995 "%pI4:%d but it isn't heartbeating\n",
1996 node->nd_name, &sin.sin_addr.s_addr,
1997 ntohs(sin.sin_port));
1998 ret = -EINVAL;
1999 goto out;
2000 }
2001
2002 nn = r2net_nn_from_num(node->nd_num);
2003
2004 spin_lock(&nn->nn_lock);
2005 if (nn->nn_sc)
2006 ret = -EBUSY;
2007 else
2008 ret = 0;
2009 spin_unlock(&nn->nn_lock);
2010 if (ret) {
2011 printk(KERN_NOTICE "ramster: Attempt to connect from node '%s' "
2012 "at %pI4:%d but it already has an open connection\n",
2013 node->nd_name, &sin.sin_addr.s_addr,
2014 ntohs(sin.sin_port));
2015 goto out;
2016 }
2017
2018 sc = sc_alloc(node);
2019 if (sc == NULL) {
2020 ret = -ENOMEM;
2021 goto out;
2022 }
2023
2024 sc->sc_sock = new_sock;
2025 new_sock = NULL;
2026
2027 spin_lock(&nn->nn_lock);
2028 atomic_set(&nn->nn_timeout, 0);
2029 r2net_set_nn_state(nn, sc, 0, 0);
2030 spin_unlock(&nn->nn_lock);
2031
2032 r2net_register_callbacks(sc->sc_sock->sk, sc);
2033 r2net_sc_queue_work(sc, &sc->sc_rx_work);
2034
2035 r2net_initialize_handshake();
2036 r2net_sendpage(sc, r2net_hand, sizeof(*r2net_hand));
2037
2038out:
2039 if (new_sock)
2040 sock_release(new_sock);
2041 if (node)
2042 r2nm_node_put(node);
2043 if (local_node)
2044 r2nm_node_put(local_node);
2045 if (sc)
2046 sc_put(sc);
2047 return ret;
2048}
2049
2050static void r2net_accept_many(struct work_struct *work)
2051{
2052 struct socket *sock = r2net_listen_sock;
2053 while (r2net_accept_one(sock) == 0)
2054 cond_resched();
2055}
2056
2057static void r2net_listen_data_ready(struct sock *sk, int bytes)
2058{
2059 void (*ready)(struct sock *sk, int bytes);
2060
2061 read_lock(&sk->sk_callback_lock);
2062 ready = sk->sk_user_data;
2063 if (ready == NULL) { /* check for teardown race */
2064 ready = sk->sk_data_ready;
2065 goto out;
2066 }
2067
2068 /* ->sk_data_ready is also called for a newly established child socket
2069 * before it has been accepted and the acceptor has set up their
2070 * data_ready.. we only want to queue listen work for our listening
2071 * socket */
2072 if (sk->sk_state == TCP_LISTEN) {
2073 mlog(ML_TCP, "bytes: %d\n", bytes);
2074 queue_work(r2net_wq, &r2net_listen_work);
2075 }
2076
2077out:
2078 read_unlock(&sk->sk_callback_lock);
2079 ready(sk, bytes);
2080}
2081
2082static int r2net_open_listening_sock(__be32 addr, __be16 port)
2083{
2084 struct socket *sock = NULL;
2085 int ret;
2086 struct sockaddr_in sin = {
2087 .sin_family = PF_INET,
2088 .sin_addr = { .s_addr = addr },
2089 .sin_port = port,
2090 };
2091
2092 ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
2093 if (ret < 0) {
2094 printk(KERN_ERR "ramster: Error %d while creating socket\n",
2095 ret);
2096 goto out;
2097 }
2098
2099 sock->sk->sk_allocation = GFP_ATOMIC;
2100
2101 write_lock_bh(&sock->sk->sk_callback_lock);
2102 sock->sk->sk_user_data = sock->sk->sk_data_ready;
2103 sock->sk->sk_data_ready = r2net_listen_data_ready;
2104 write_unlock_bh(&sock->sk->sk_callback_lock);
2105
2106 r2net_listen_sock = sock;
2107 INIT_WORK(&r2net_listen_work, r2net_accept_many);
2108
2109 sock->sk->sk_reuse = 1;
2110 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
2111 if (ret < 0) {
2112 printk(KERN_ERR "ramster: Error %d while binding socket at "
2113 "%pI4:%u\n", ret, &addr, ntohs(port));
2114 goto out;
2115 }
2116
2117 ret = sock->ops->listen(sock, 64);
2118 if (ret < 0)
2119 printk(KERN_ERR "ramster: Error %d while listening on %pI4:%u\n",
2120 ret, &addr, ntohs(port));
2121
2122out:
2123 if (ret) {
2124 r2net_listen_sock = NULL;
2125 if (sock)
2126 sock_release(sock);
2127 }
2128 return ret;
2129}
2130
2131/*
2132 * called from node manager when we should bring up our network listening
2133 * socket. node manager handles all the serialization to only call this
2134 * once and to match it with r2net_stop_listening(). note,
2135 * r2nm_this_node() doesn't work yet as we're being called while it
2136 * is being set up.
2137 */
2138int r2net_start_listening(struct r2nm_node *node)
2139{
2140 int ret = 0;
2141
2142 BUG_ON(r2net_wq != NULL);
2143 BUG_ON(r2net_listen_sock != NULL);
2144
2145 mlog(ML_KTHREAD, "starting r2net thread...\n");
2146 r2net_wq = create_singlethread_workqueue("r2net");
2147 if (r2net_wq == NULL) {
2148 mlog(ML_ERROR, "unable to launch r2net thread\n");
2149 return -ENOMEM; /* ? */
2150 }
2151
2152 ret = r2net_open_listening_sock(node->nd_ipv4_address,
2153 node->nd_ipv4_port);
2154 if (ret) {
2155 destroy_workqueue(r2net_wq);
2156 r2net_wq = NULL;
2157 }
2158
2159 return ret;
2160}
2161
2162/* again, r2nm_this_node() doesn't work here as we're involved in
2163 * tearing it down */
2164void r2net_stop_listening(struct r2nm_node *node)
2165{
2166 struct socket *sock = r2net_listen_sock;
2167 size_t i;
2168
2169 BUG_ON(r2net_wq == NULL);
2170 BUG_ON(r2net_listen_sock == NULL);
2171
2172 /* stop the listening socket from generating work */
2173 write_lock_bh(&sock->sk->sk_callback_lock);
2174 sock->sk->sk_data_ready = sock->sk->sk_user_data;
2175 sock->sk->sk_user_data = NULL;
2176 write_unlock_bh(&sock->sk->sk_callback_lock);
2177
2178 for (i = 0; i < ARRAY_SIZE(r2net_nodes); i++) {
2179 struct r2nm_node *node = r2nm_get_node_by_num(i);
2180 if (node) {
2181 r2net_disconnect_node(node);
2182 r2nm_node_put(node);
2183 }
2184 }
2185
2186 /* finish all work and tear down the work queue */
2187 mlog(ML_KTHREAD, "waiting for r2net thread to exit....\n");
2188 destroy_workqueue(r2net_wq);
2189 r2net_wq = NULL;
2190
2191 sock_release(r2net_listen_sock);
2192 r2net_listen_sock = NULL;
2193}
2194
2195void r2net_hb_node_up_manual(int node_num)
2196{
2197 struct r2nm_node dummy;
2198 if (r2nm_single_cluster == NULL) {
2199 pr_err("ramster: cluster not alive, node_up_manual ignored\n");
2200 } else {
2201 r2hb_manual_set_node_heartbeating(node_num);
2202 r2net_hb_node_up_cb(&dummy, node_num, NULL);
2203 }
2204}
2205
2206/* ------------------------------------------------------------ */
2207
2208int r2net_init(void)
2209{
2210 unsigned long i;
2211
2212 if (r2net_debugfs_init())
2213 return -ENOMEM;
2214
2215 r2net_hand = kzalloc(sizeof(struct r2net_handshake), GFP_KERNEL);
2216 r2net_keep_req = kzalloc(sizeof(struct r2net_msg), GFP_KERNEL);
2217 r2net_keep_resp = kzalloc(sizeof(struct r2net_msg), GFP_KERNEL);
2218 if (!r2net_hand || !r2net_keep_req || !r2net_keep_resp) {
2219 kfree(r2net_hand);
2220 kfree(r2net_keep_req);
2221 kfree(r2net_keep_resp);
2222 return -ENOMEM;
2223 }
2224
2225 r2net_hand->protocol_version = cpu_to_be64(R2NET_PROTOCOL_VERSION);
2226 r2net_hand->connector_id = cpu_to_be64(1);
2227
2228 r2net_keep_req->magic = cpu_to_be16(R2NET_MSG_KEEP_REQ_MAGIC);
2229 r2net_keep_resp->magic = cpu_to_be16(R2NET_MSG_KEEP_RESP_MAGIC);
2230
2231 for (i = 0; i < ARRAY_SIZE(r2net_nodes); i++) {
2232 struct r2net_node *nn = r2net_nn_from_num(i);
2233
2234 atomic_set(&nn->nn_timeout, 0);
2235 spin_lock_init(&nn->nn_lock);
2236 INIT_DELAYED_WORK(&nn->nn_connect_work, r2net_start_connect);
2237 INIT_DELAYED_WORK(&nn->nn_connect_expired,
2238 r2net_connect_expired);
2239 INIT_DELAYED_WORK(&nn->nn_still_up, r2net_still_up);
2240 /* until we see hb from a node we'll return -ENOTCONN */
2241 nn->nn_persistent_error = -ENOTCONN;
2242 init_waitqueue_head(&nn->nn_sc_wq);
2243 idr_init(&nn->nn_status_idr);
2244 INIT_LIST_HEAD(&nn->nn_status_list);
2245 }
2246
2247 return 0;
2248}
2249
2250void r2net_exit(void)
2251{
2252 kfree(r2net_hand);
2253 kfree(r2net_keep_req);
2254 kfree(r2net_keep_resp);
2255 r2net_debugfs_exit();
2256}
diff --git a/drivers/staging/ramster/cluster/tcp.h b/drivers/staging/ramster/cluster/tcp.h
new file mode 100644
index 000000000000..9d05833452b5
--- /dev/null
+++ b/drivers/staging/ramster/cluster/tcp.h
@@ -0,0 +1,159 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * tcp.h
5 *
6 * Function prototypes
7 *
8 * Copyright (C) 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
24 *
25 */
26
27#ifndef R2CLUSTER_TCP_H
28#define R2CLUSTER_TCP_H
29
30#include <linux/socket.h>
31#ifdef __KERNEL__
32#include <net/sock.h>
33#include <linux/tcp.h>
34#else
35#include <sys/socket.h>
36#endif
37#include <linux/inet.h>
38#include <linux/in.h>
39
40struct r2net_msg {
41 __be16 magic;
42 __be16 data_len;
43 __be16 msg_type;
44 __be16 pad1;
45 __be32 sys_status;
46 __be32 status;
47 __be32 key;
48 __be32 msg_num;
49 __u8 buf[0];
50};
51
52typedef int (r2net_msg_handler_func)(struct r2net_msg *msg, u32 len, void *data,
53 void **ret_data);
54typedef void (r2net_post_msg_handler_func)(int status, void *data,
55 void *ret_data);
56
57#define R2NET_MAX_PAYLOAD_BYTES (4096 - sizeof(struct r2net_msg))
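/*
 * Worked out: the header fields above total 24 bytes (four __be16 plus
 * four __be32; buf[] adds nothing), so R2NET_MAX_PAYLOAD_BYTES
 * evaluates to 4096 - 24 = 4072 bytes of payload per message.
 */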
58
59/* same as hb delay, we're waiting for another node to recognize our hb */
60#define R2NET_RECONNECT_DELAY_MS_DEFAULT 2000
61
62#define R2NET_KEEPALIVE_DELAY_MS_DEFAULT 2000
63#define R2NET_IDLE_TIMEOUT_MS_DEFAULT 30000
64
65
66/* TODO: figure this out.... */
67static inline int r2net_link_down(int err, struct socket *sock)
68{
69 if (sock) {
70 if (sock->sk->sk_state != TCP_ESTABLISHED &&
71 sock->sk->sk_state != TCP_CLOSE_WAIT)
72 return 1;
73 }
74
75 if (err >= 0)
76 return 0;
77 switch (err) {
78
79 /* ????????????????????????? */
80 case -ERESTARTSYS:
81 case -EBADF:
82 /* When the server has died, an ICMP port unreachable
83 * message prompts ECONNREFUSED. */
84 case -ECONNREFUSED:
85 case -ENOTCONN:
86 case -ECONNRESET:
87 case -EPIPE:
88 return 1;
89
90 }
91 return 0;
92}
93
94enum {
95 R2NET_DRIVER_UNINITED,
96 R2NET_DRIVER_READY,
97};
98
99int r2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
100 u8 target_node, int *status);
101int r2net_send_message_vec(u32 msg_type, u32 key, struct kvec *vec,
102 size_t veclen, u8 target_node, int *status);
103
104int r2net_register_handler(u32 msg_type, u32 key, u32 max_len,
105 r2net_msg_handler_func *func, void *data,
106 r2net_post_msg_handler_func *post_func,
107 struct list_head *unreg_list);
108void r2net_unregister_handler_list(struct list_head *list);
109
110void r2net_fill_node_map(unsigned long *map, unsigned bytes);
111
112void r2net_force_data_magic(struct r2net_msg *, u16, u32);
113void r2net_hb_node_up_manual(int);
114struct r2net_node *r2net_nn_from_num(u8);
115
116struct r2nm_node;
117int r2net_register_hb_callbacks(void);
118void r2net_unregister_hb_callbacks(void);
119int r2net_start_listening(struct r2nm_node *node);
120void r2net_stop_listening(struct r2nm_node *node);
121void r2net_disconnect_node(struct r2nm_node *node);
122int r2net_num_connected_peers(void);
123
124int r2net_init(void);
125void r2net_exit(void);
126
127struct r2net_send_tracking;
128struct r2net_sock_container;
129
130#if 0
131int r2net_debugfs_init(void);
132void r2net_debugfs_exit(void);
133void r2net_debug_add_nst(struct r2net_send_tracking *nst);
134void r2net_debug_del_nst(struct r2net_send_tracking *nst);
135void r2net_debug_add_sc(struct r2net_sock_container *sc);
136void r2net_debug_del_sc(struct r2net_sock_container *sc);
137#else
138static inline int r2net_debugfs_init(void)
139{
140 return 0;
141}
142static inline void r2net_debugfs_exit(void)
143{
144}
145static inline void r2net_debug_add_nst(struct r2net_send_tracking *nst)
146{
147}
148static inline void r2net_debug_del_nst(struct r2net_send_tracking *nst)
149{
150}
151static inline void r2net_debug_add_sc(struct r2net_sock_container *sc)
152{
153}
154static inline void r2net_debug_del_sc(struct r2net_sock_container *sc)
155{
156}
157#endif /* 0 */
158
159#endif /* R2CLUSTER_TCP_H */
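A minimal usage sketch of the messaging API declared above; the message
type, key, payload struct, and my_* names are hypothetical, not part of
ramster:

	#define MY_MSG_TYPE	1		/* hypothetical */
	#define MY_MSG_KEY	0x12345678	/* hypothetical */

	struct my_payload {
		__be32 value;
	};

	/* runs on the receiving node; the return value travels back to
	 * the sender as its "status", as the handlers in r2net.c rely on */
	static int my_handler(struct r2net_msg *msg, u32 len, void *data,
			      void **ret_data)
	{
		struct my_payload *p = (struct my_payload *)msg->buf;

		pr_info("got value %u\n", be32_to_cpu(p->value));
		return 0;
	}

	static LIST_HEAD(my_unreg_list);

	static int my_send_to(u8 target_node)
	{
		struct my_payload p = { .value = cpu_to_be32(42) };
		int ret, status;

		ret = r2net_register_handler(MY_MSG_TYPE, MY_MSG_KEY,
					     sizeof(p), my_handler, NULL,
					     NULL, &my_unreg_list);
		if (ret)
			return ret;
		ret = r2net_send_message(MY_MSG_TYPE, MY_MSG_KEY, &p,
					 sizeof(p), target_node, &status);
		return ret ? ret : status;
	}

Handlers registered this way are torn down in one call with
r2net_unregister_handler_list(&my_unreg_list).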
diff --git a/drivers/staging/ramster/cluster/tcp_internal.h b/drivers/staging/ramster/cluster/tcp_internal.h
new file mode 100644
index 000000000000..4d8cc9f96fd2
--- /dev/null
+++ b/drivers/staging/ramster/cluster/tcp_internal.h
@@ -0,0 +1,248 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * Copyright (C) 2005 Oracle. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public
17 * License along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA.
20 */
21
22#ifndef R2CLUSTER_TCP_INTERNAL_H
23#define R2CLUSTER_TCP_INTERNAL_H
24
25#define R2NET_MSG_MAGIC ((u16)0xfa55)
26#define R2NET_MSG_STATUS_MAGIC ((u16)0xfa56)
27#define R2NET_MSG_KEEP_REQ_MAGIC ((u16)0xfa57)
28#define R2NET_MSG_KEEP_RESP_MAGIC ((u16)0xfa58)
29/*
30 * "data magic" is a long version of "status magic" where the message
31 * payload actually contains data to be passed in reply to certain messages
32 */
33#define R2NET_MSG_DATA_MAGIC ((u16)0xfa59)
34
35/* we're delaying our quorum decision so that heartbeat will have timed
36 * out truly dead nodes by the time we come around to making decisions
37 * on their number */
38#define R2NET_QUORUM_DELAY_MS \
39 ((r2hb_dead_threshold + 2) * R2HB_REGION_TIMEOUT_MS)
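/*
 * For example, with the o2hb-derived defaults this code inherits (a
 * dead threshold of 31 heartbeats and a 2000 ms region timeout), the
 * quorum delay works out to (31 + 2) * 2000 ms = 66 seconds.
 */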
40
41/*
42 * This version number represents quite a lot, unfortunately. It not
43 * only represents the raw network message protocol on the wire but also
44 * locking semantics of the file system using the protocol. It should
45 * be somewhere else, I'm sure, but right now it isn't.
46 *
47 * With version 11, we separate out the filesystem locking portion. The
48 * filesystem now has a major.minor version it negotiates. Version 11
49 * introduces this negotiation to the r2dlm protocol, and as such the
50 * version here in tcp_internal.h should not need to be bumped for
51 * filesystem locking changes.
52 *
53 * New in version 11:
54 * - Negotiation of filesystem locking in the dlm join.
55 *
56 * New in version 10:
57 * - Meta/data locks combined
58 *
59 * New in version 9:
60 * - All votes removed
61 *
62 * New in version 8:
63 * - Replace delete inode votes with a cluster lock
64 *
65 * New in version 7:
66 * - DLM join domain includes the live nodemap
67 *
68 * New in version 6:
69 * - DLM lockres remote refcount fixes.
70 *
71 * New in version 5:
72 * - Network timeout checking protocol
73 *
74 * New in version 4:
75 * - Remove i_generation from lock names for better stat performance.
76 *
77 * New in version 3:
78 * - Replace dentry votes with a cluster lock
79 *
80 * New in version 2:
81 * - full 64 bit i_size in the metadata lock lvbs
82 * - introduction of "rw" lock and pushing meta/data locking down
83 */
84#define R2NET_PROTOCOL_VERSION 11ULL
85struct r2net_handshake {
86 __be64 protocol_version;
87 __be64 connector_id;
88 __be32 r2hb_heartbeat_timeout_ms;
89 __be32 r2net_idle_timeout_ms;
90 __be32 r2net_keepalive_delay_ms;
91 __be32 r2net_reconnect_delay_ms;
92};
93
94struct r2net_node {
95 /* this is never called from int/bh */
96 spinlock_t nn_lock;
97
98 /* set the moment an sc is allocated and a connect is started */
99 struct r2net_sock_container *nn_sc;
100 /* _valid is only set after the handshake passes and tx can happen */
101 unsigned nn_sc_valid:1;
102 /* if this is set tx just returns it */
103 int nn_persistent_error;
104 /* It is only set to 1 after the idle timeout. */
105 atomic_t nn_timeout;
106
107 /* threads waiting for an sc to arrive wait on the wq for generation
108 * to increase. it is increased when a connecting socket succeeds
109 * or fails or when an accepted socket is attached. */
110 wait_queue_head_t nn_sc_wq;
111
112 struct idr nn_status_idr;
113 struct list_head nn_status_list;
114
115 /* connects are attempted from when heartbeat comes up until either hb
116 * goes down, the node is unconfigured, no connect attempts succeed
117 * before R2NET_CONN_IDLE_DELAY, or a connect succeeds. connect_work
118 * is queued from set_nn_state both from hb up and from itself if a
119 * connect attempt fails and so can be self-arming. shutdown is
120 * careful to first mark the nn such that no connects will be attempted
121 * before canceling delayed connect work and flushing the queue. */
122 struct delayed_work nn_connect_work;
123 unsigned long nn_last_connect_attempt;
124
125 /* this is queued as nodes come up and is canceled when a connection is
126 * established. this expiring gives up on the node and errors out
127 * transmits */
128 struct delayed_work nn_connect_expired;
129
130 /* after we give up on a socket we wait a while before deciding
131 * that it is still heartbeating and that we should do some
132 * quorum work */
133 struct delayed_work nn_still_up;
134};
135
136struct r2net_sock_container {
137 struct kref sc_kref;
138 /* the next two are valid for the life time of the sc */
139 struct socket *sc_sock;
140 struct r2nm_node *sc_node;
141
142 /* all of these sc work structs hold refs on the sc while they are
143 * queued. they should not be able to ref a freed sc. the teardown
144 * race is with r2net_wq destruction in r2net_stop_listening() */
145
146 /* rx and connect work are generated from socket callbacks. sc
147 * shutdown removes the callbacks and then flushes the work queue */
148 struct work_struct sc_rx_work;
149 struct work_struct sc_connect_work;
150 /* shutdown work is triggered in two ways. the simple way is
151 * that a code path calls ensure_shutdown, which gets a lock, removes
152 * the sc from the nn, and queues the work. in this case the
153 * work is single-shot. the work is also queued from a sock
154 * callback, though, and in this case the work will find the sc
155 * still on the nn and will call ensure_shutdown itself. this
156 * ends up triggering the shutdown work again, though nothing
157 * will be done in that second iteration. so work queue teardown
158 * has to be careful to remove the sc from the nn before waiting
159 * on the work queue so that the shutdown work doesn't remove the
160 * sc and rearm itself.
161 */
162 struct work_struct sc_shutdown_work;
163
164 struct timer_list sc_idle_timeout;
165 struct delayed_work sc_keepalive_work;
166
167 unsigned sc_handshake_ok:1;
168
169 struct page *sc_page;
170 size_t sc_page_off;
171
172 /* original handlers for the sockets */
173 void (*sc_state_change)(struct sock *sk);
174 void (*sc_data_ready)(struct sock *sk, int bytes);
175
176 u32 sc_msg_key;
177 u16 sc_msg_type;
178
179#ifdef CONFIG_DEBUG_FS
180 struct list_head sc_net_debug_item;
181 ktime_t sc_tv_timer;
182 ktime_t sc_tv_data_ready;
183 ktime_t sc_tv_advance_start;
184 ktime_t sc_tv_advance_stop;
185 ktime_t sc_tv_func_start;
186 ktime_t sc_tv_func_stop;
187#endif
188#ifdef CONFIG_RAMSTER_FS_STATS
189 ktime_t sc_tv_acquiry_total;
190 ktime_t sc_tv_send_total;
191 ktime_t sc_tv_status_total;
192 u32 sc_send_count;
193 u32 sc_recv_count;
194 ktime_t sc_tv_process_total;
195#endif
196 struct mutex sc_send_lock;
197};
198
199struct r2net_msg_handler {
200 struct rb_node nh_node;
201 u32 nh_max_len;
202 u32 nh_msg_type;
203 u32 nh_key;
204 r2net_msg_handler_func *nh_func;
205 r2net_msg_handler_func *nh_func_data;
206 r2net_post_msg_handler_func
207 *nh_post_func;
208 struct kref nh_kref;
209 struct list_head nh_unregister_item;
210};
211
212enum r2net_system_error {
213 R2NET_ERR_NONE = 0,
214 R2NET_ERR_NO_HNDLR,
215 R2NET_ERR_OVERFLOW,
216 R2NET_ERR_DIED,
217 R2NET_ERR_MAX
218};
219
220struct r2net_status_wait {
221 enum r2net_system_error ns_sys_status;
222 s32 ns_status;
223 int ns_id;
224 wait_queue_head_t ns_wq;
225 struct list_head ns_node_item;
226};
227
228#ifdef CONFIG_DEBUG_FS
229/* just for state dumps */
230struct r2net_send_tracking {
231 struct list_head st_net_debug_item;
232 struct task_struct *st_task;
233 struct r2net_sock_container *st_sc;
234 u32 st_id;
235 u32 st_msg_type;
236 u32 st_msg_key;
237 u8 st_node;
238 ktime_t st_sock_time;
239 ktime_t st_send_time;
240 ktime_t st_status_time;
241};
242#else
243struct r2net_send_tracking {
244 u32 dummy;
245};
246#endif /* CONFIG_DEBUG_FS */
247
248#endif /* R2CLUSTER_TCP_INTERNAL_H */
diff --git a/drivers/staging/ramster/r2net.c b/drivers/staging/ramster/r2net.c
new file mode 100644
index 000000000000..2ee02204c43d
--- /dev/null
+++ b/drivers/staging/ramster/r2net.c
@@ -0,0 +1,401 @@
1/*
2 * r2net.c
3 *
4 * Copyright (c) 2011, Dan Magenheimer, Oracle Corp.
5 *
6 * Ramster_r2net provides an interface between zcache and r2net.
7 *
8 * FIXME: support more than two nodes
9 */
10
11#include <linux/list.h>
12#include "cluster/tcp.h"
13#include "cluster/nodemanager.h"
14#include "tmem.h"
15#include "zcache.h"
16#include "ramster.h"
17
18#define RAMSTER_TESTING
19
20#define RMSTR_KEY 0x77347734
21
22enum {
23 RMSTR_TMEM_PUT_EPH = 100,
24 RMSTR_TMEM_PUT_PERS,
25 RMSTR_TMEM_ASYNC_GET_REQUEST,
26 RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
27 RMSTR_TMEM_ASYNC_GET_REPLY,
28 RMSTR_TMEM_FLUSH,
29 RMSTR_TMEM_FLOBJ,
30 RMSTR_TMEM_DESTROY_POOL,
31};
32
33#define RMSTR_R2NET_MAX_LEN \
34 (R2NET_MAX_PAYLOAD_BYTES - sizeof(struct tmem_xhandle))
35
36#include "cluster/tcp_internal.h"
37
38static struct r2nm_node *r2net_target_node;
39static int r2net_target_nodenum;
40
41int r2net_remote_target_node_set(int node_num)
42{
43 int ret = -1;
44
45 r2net_target_node = r2nm_get_node_by_num(node_num);
46 if (r2net_target_node != NULL) {
47 r2net_target_nodenum = node_num;
48 r2nm_node_put(r2net_target_node);
49 ret = 0;
50 }
51 return ret;
52}
53
54/* FIXME following buffer should be per-cpu, protected by preempt_disable */
55static char ramster_async_get_buf[R2NET_MAX_PAYLOAD_BYTES];
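/*
 * A minimal sketch of the per-cpu variant the FIXME above suggests
 * (hypothetical, not part of this driver):
 *
 *	static DEFINE_PER_CPU(char, async_get_buf[R2NET_MAX_PAYLOAD_BYTES]);
 *
 *	char *buf = get_cpu_var(async_get_buf);	-- disables preemption
 *	...fill buf and build the reply...
 *	put_cpu_var(async_get_buf);		-- re-enables preemption
 */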
56
57static int ramster_remote_async_get_request_handler(struct r2net_msg *msg,
58 u32 len, void *data, void **ret_data)
59{
60 char *pdata;
61 struct tmem_xhandle xh;
62 int found;
63 size_t size = RMSTR_R2NET_MAX_LEN;
64 u16 msgtype = be16_to_cpu(msg->msg_type);
65 bool get_and_free = (msgtype == RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST);
66 unsigned long flags;
67
68 xh = *(struct tmem_xhandle *)msg->buf;
69 if (xh.xh_data_size > RMSTR_R2NET_MAX_LEN)
70 BUG();
71 pdata = ramster_async_get_buf;
72 *(struct tmem_xhandle *)pdata = xh;
73 pdata += sizeof(struct tmem_xhandle);
74 local_irq_save(flags);
75 found = zcache_get(xh.client_id, xh.pool_id, &xh.oid, xh.index,
76 pdata, &size, 1, get_and_free ? 1 : -1);
77 local_irq_restore(flags);
78 if (found < 0) {
79 /* a zero size indicates the get failed */
80 size = 0;
81 }
82 if (size > RMSTR_R2NET_MAX_LEN)
83 BUG();
84 *ret_data = pdata - sizeof(struct tmem_xhandle);
85 /* now make caller (r2net_process_message) handle specially */
86 r2net_force_data_magic(msg, RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY);
87 return size + sizeof(struct tmem_xhandle);
88}
89
90static int ramster_remote_async_get_reply_handler(struct r2net_msg *msg,
91 u32 len, void *data, void **ret_data)
92{
93 char *in = (char *)msg->buf;
94 int datalen = len - sizeof(struct r2net_msg);
95 int ret = -1;
96 struct tmem_xhandle *xh = (struct tmem_xhandle *)in;
97
98 in += sizeof(struct tmem_xhandle);
99 datalen -= sizeof(struct tmem_xhandle);
100 BUG_ON(datalen < 0 || datalen > PAGE_SIZE);
101 ret = zcache_localify(xh->pool_id, &xh->oid, xh->index,
102 in, datalen, xh->extra);
103#ifdef RAMSTER_TESTING
104 if (ret == -EEXIST)
105 pr_err("TESTING ArrgREP, aborted overwrite on racy put\n");
106#endif
107 return ret;
108}
109
110int ramster_remote_put_handler(struct r2net_msg *msg,
111 u32 len, void *data, void **ret_data)
112{
113 struct tmem_xhandle *xh;
114 char *p = (char *)msg->buf;
115 int datalen = len - sizeof(struct r2net_msg) -
116 sizeof(struct tmem_xhandle);
117 u16 msgtype = be16_to_cpu(msg->msg_type);
118 bool ephemeral = (msgtype == RMSTR_TMEM_PUT_EPH);
119 unsigned long flags;
120 int ret;
121
122 xh = (struct tmem_xhandle *)p;
123 p += sizeof(struct tmem_xhandle);
124 zcache_autocreate_pool(xh->client_id, xh->pool_id, ephemeral);
125 local_irq_save(flags);
126 ret = zcache_put(xh->client_id, xh->pool_id, &xh->oid, xh->index,
127 p, datalen, 1, ephemeral ? 1 : -1);
128 local_irq_restore(flags);
129 return ret;
130}
131
132int ramster_remote_flush_handler(struct r2net_msg *msg,
133 u32 len, void *data, void **ret_data)
134{
135 struct tmem_xhandle *xh;
136 char *p = (char *)msg->buf;
137
138 xh = (struct tmem_xhandle *)p;
139 p += sizeof(struct tmem_xhandle);
140 (void)zcache_flush(xh->client_id, xh->pool_id, &xh->oid, xh->index);
141 return 0;
142}
143
144int ramster_remote_flobj_handler(struct r2net_msg *msg,
145 u32 len, void *data, void **ret_data)
146{
147 struct tmem_xhandle *xh;
148 char *p = (char *)msg->buf;
149
150 xh = (struct tmem_xhandle *)p;
151 p += sizeof(struct tmem_xhandle);
152 (void)zcache_flush_object(xh->client_id, xh->pool_id, &xh->oid);
153 return 0;
154}
155
156int ramster_remote_async_get(struct tmem_xhandle *xh, bool free, int remotenode,
157 size_t expect_size, uint8_t expect_cksum,
158 void *extra)
159{
160 int ret = -1, status;
161 struct r2nm_node *node = NULL;
162 struct kvec vec[1];
163 size_t veclen = 1;
164 u32 msg_type;
165
166 node = r2nm_get_node_by_num(remotenode);
167 if (node == NULL)
168 goto out;
169 xh->client_id = r2nm_this_node(); /* which node is getting */
170 xh->xh_data_cksum = expect_cksum;
171 xh->xh_data_size = expect_size;
172 xh->extra = extra;
173 vec[0].iov_len = sizeof(*xh);
174 vec[0].iov_base = xh;
175 if (free)
176 msg_type = RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST;
177 else
178 msg_type = RMSTR_TMEM_ASYNC_GET_REQUEST;
179 ret = r2net_send_message_vec(msg_type, RMSTR_KEY,
180 vec, veclen, remotenode, &status);
181 r2nm_node_put(node);
182 if (ret < 0) {
183 /* FIXME handle bad message possibilities here? */
184 pr_err("UNTESTED ret<0 in ramster_remote_async_get\n");
185 }
186 ret = status;
187out:
188 return ret;
189}
190
191#ifdef RAMSTER_TESTING
192/* leave me here to see if it catches a weird crash */
193static void ramster_check_irq_counts(void)
194{
195 static int last_hardirq_cnt, last_softirq_cnt, last_preempt_cnt;
196 int cur_hardirq_cnt, cur_softirq_cnt, cur_preempt_cnt;
197
198 cur_hardirq_cnt = hardirq_count() >> HARDIRQ_SHIFT;
199 if (cur_hardirq_cnt > last_hardirq_cnt) {
200 last_hardirq_cnt = cur_hardirq_cnt;
201 if (!(last_hardirq_cnt&(last_hardirq_cnt-1)))
202 pr_err("RAMSTER TESTING RRP hardirq_count=%d\n",
203 last_hardirq_cnt);
204 }
205 cur_softirq_cnt = softirq_count() >> SOFTIRQ_SHIFT;
206 if (cur_softirq_cnt > last_softirq_cnt) {
207 last_softirq_cnt = cur_softirq_cnt;
208 if (!(last_softirq_cnt&(last_softirq_cnt-1)))
209 pr_err("RAMSTER TESTING RRP softirq_count=%d\n",
210 last_softirq_cnt);
211 }
212 cur_preempt_cnt = preempt_count() & PREEMPT_MASK;
213 if (cur_preempt_cnt > last_preempt_cnt) {
214 last_preempt_cnt = cur_preempt_cnt;
215 if (!(last_preempt_cnt&(last_preempt_cnt-1)))
216 pr_err("RAMSTER TESTING RRP preempt_count=%d\n",
217 last_preempt_cnt);
218 }
219}
220#endif
221
222int ramster_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
223 bool ephemeral, int *remotenode)
224{
225 int nodenum, ret = -1, status;
226 struct r2nm_node *node = NULL;
227 struct kvec vec[2];
228 size_t veclen = 2;
229 u32 msg_type;
230#ifdef RAMSTER_TESTING
231 struct r2net_node *nn;
232#endif
233
234 BUG_ON(size > RMSTR_R2NET_MAX_LEN);
235 xh->client_id = r2nm_this_node(); /* which node is putting */
236 vec[0].iov_len = sizeof(*xh);
237 vec[0].iov_base = xh;
238 vec[1].iov_len = size;
239 vec[1].iov_base = data;
240 node = r2net_target_node;
241 if (!node)
242 goto out;
243
244 nodenum = r2net_target_nodenum;
245
246 r2nm_node_get(node);
247
248#ifdef RAMSTER_TESTING
249 nn = r2net_nn_from_num(nodenum);
250 WARN_ON_ONCE(nn->nn_persistent_error || !nn->nn_sc_valid);
251#endif
252
253 if (ephemeral)
254 msg_type = RMSTR_TMEM_PUT_EPH;
255 else
256 msg_type = RMSTR_TMEM_PUT_PERS;
257#ifdef RAMSTER_TESTING
258 /* leave me here to see if it catches a weird crash */
259 ramster_check_irq_counts();
260#endif
261
262 ret = r2net_send_message_vec(msg_type, RMSTR_KEY, vec, veclen,
263 nodenum, &status);
264#ifdef RAMSTER_TESTING
265 if (ret != 0) {
266 static unsigned long cnt;
267 cnt++;
268 if (!(cnt&(cnt-1)))
269 pr_err("ramster_remote_put: message failed, "
270 "ret=%d, cnt=%lu\n", ret, cnt);
271 ret = -1;
272 }
273#endif
274 if (ret < 0)
275 ret = -1;
276 else {
277 ret = status;
278 *remotenode = nodenum;
279 }
280
281 r2nm_node_put(node);
282out:
283 return ret;
284}
285
286int ramster_remote_flush(struct tmem_xhandle *xh, int remotenode)
287{
288 int ret = -1, status;
289 struct r2nm_node *node = NULL;
290 struct kvec vec[1];
291 size_t veclen = 1;
292
293 node = r2nm_get_node_by_num(remotenode);
294 BUG_ON(node == NULL);
295 xh->client_id = r2nm_this_node(); /* which node is flushing */
296 vec[0].iov_len = sizeof(*xh);
297 vec[0].iov_base = xh;
298 BUG_ON(irqs_disabled());
299 BUG_ON(in_softirq());
300 ret = r2net_send_message_vec(RMSTR_TMEM_FLUSH, RMSTR_KEY,
301 vec, veclen, remotenode, &status);
302 r2nm_node_put(node);
303 return ret;
304}
305
306int ramster_remote_flush_object(struct tmem_xhandle *xh, int remotenode)
307{
308 int ret = -1, status;
309 struct r2nm_node *node = NULL;
310 struct kvec vec[1];
311 size_t veclen = 1;
312
313 node = r2nm_get_node_by_num(remotenode);
314 BUG_ON(node == NULL);
315 xh->client_id = r2nm_this_node(); /* which node is flobjing */
316 vec[0].iov_len = sizeof(*xh);
317 vec[0].iov_base = xh;
318 ret = r2net_send_message_vec(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
319 vec, veclen, remotenode, &status);
320 r2nm_node_put(node);
321 return ret;
322}
323
324/*
325 * Handler registration
326 */
327
328static LIST_HEAD(r2net_unreg_list);
329
330static void r2net_unregister_handlers(void)
331{
332 r2net_unregister_handler_list(&r2net_unreg_list);
333}
334
335int r2net_register_handlers(void)
336{
337 int status;
338
339 status = r2net_register_handler(RMSTR_TMEM_PUT_EPH, RMSTR_KEY,
340 RMSTR_R2NET_MAX_LEN,
341 ramster_remote_put_handler,
342 NULL, NULL, &r2net_unreg_list);
343 if (status)
344 goto bail;
345
346 status = r2net_register_handler(RMSTR_TMEM_PUT_PERS, RMSTR_KEY,
347 RMSTR_R2NET_MAX_LEN,
348 ramster_remote_put_handler,
349 NULL, NULL, &r2net_unreg_list);
350 if (status)
351 goto bail;
352
353 status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REQUEST, RMSTR_KEY,
354 RMSTR_R2NET_MAX_LEN,
355 ramster_remote_async_get_request_handler,
356 NULL, NULL,
357 &r2net_unreg_list);
358 if (status)
359 goto bail;
360
361 status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
362 RMSTR_KEY, RMSTR_R2NET_MAX_LEN,
363 ramster_remote_async_get_request_handler,
364 NULL, NULL,
365 &r2net_unreg_list);
366 if (status)
367 goto bail;
368
369 status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY,
370 RMSTR_R2NET_MAX_LEN,
371 ramster_remote_async_get_reply_handler,
372 NULL, NULL,
373 &r2net_unreg_list);
374 if (status)
375 goto bail;
376
377 status = r2net_register_handler(RMSTR_TMEM_FLUSH, RMSTR_KEY,
378 RMSTR_R2NET_MAX_LEN,
379 ramster_remote_flush_handler,
380 NULL, NULL,
381 &r2net_unreg_list);
382 if (status)
383 goto bail;
384
385 status = r2net_register_handler(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
386 RMSTR_R2NET_MAX_LEN,
387 ramster_remote_flobj_handler,
388 NULL, NULL,
389 &r2net_unreg_list);
390 if (status)
391 goto bail;
392
393 pr_info("ramster: r2net handlers registered\n");
394
395bail:
396 if (status) {
397 r2net_unregister_handlers();
398 pr_err("ramster: couldn't register r2net handlers\n");
399 }
400 return status;
401}
diff --git a/drivers/staging/ramster/ramster.h b/drivers/staging/ramster/ramster.h
new file mode 100644
index 000000000000..0c9455e8dcd8
--- /dev/null
+++ b/drivers/staging/ramster/ramster.h
@@ -0,0 +1,118 @@
1/*
2 * ramster.h
3 *
4 * Peer-to-peer transcendent memory
5 *
6 * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
7 */
8
9#ifndef _RAMSTER_H_
10#define _RAMSTER_H_
11
12/*
13 * format of remote pampd:
14 * bit 0 == intransit
15 * bit 1 == is_remote... if this bit is set, then
16 * bit 2-9 == remotenode
17 * bit 10-22 == size
18 * bit 23-30 == cksum
19 */
20#define FAKE_PAMPD_INTRANSIT_BITS 1
21#define FAKE_PAMPD_ISREMOTE_BITS 1
22#define FAKE_PAMPD_REMOTENODE_BITS 8
23#define FAKE_PAMPD_REMOTESIZE_BITS 13
24#define FAKE_PAMPD_CHECKSUM_BITS 8
25
26#define FAKE_PAMPD_INTRANSIT_SHIFT 0
27#define FAKE_PAMPD_ISREMOTE_SHIFT (FAKE_PAMPD_INTRANSIT_SHIFT + \
28 FAKE_PAMPD_INTRANSIT_BITS)
29#define FAKE_PAMPD_REMOTENODE_SHIFT (FAKE_PAMPD_ISREMOTE_SHIFT + \
30 FAKE_PAMPD_ISREMOTE_BITS)
31#define FAKE_PAMPD_REMOTESIZE_SHIFT (FAKE_PAMPD_REMOTENODE_SHIFT + \
32 FAKE_PAMPD_REMOTENODE_BITS)
33#define FAKE_PAMPD_CHECKSUM_SHIFT (FAKE_PAMPD_REMOTESIZE_SHIFT + \
34 FAKE_PAMPD_REMOTESIZE_BITS)
35
36#define FAKE_PAMPD_MASK(x) ((1UL << (x)) - 1)
37
38static inline void *pampd_make_remote(int remotenode, size_t size,
39 unsigned char cksum)
40{
41 unsigned long fake_pampd = 0;
42 fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
43 fake_pampd |= ((unsigned long)remotenode &
44 FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS)) <<
45 FAKE_PAMPD_REMOTENODE_SHIFT;
46 fake_pampd |= ((unsigned long)size &
47 FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS)) <<
48 FAKE_PAMPD_REMOTESIZE_SHIFT;
49 fake_pampd |= ((unsigned long)cksum &
50 FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS)) <<
51 FAKE_PAMPD_CHECKSUM_SHIFT;
52 return (void *)fake_pampd;
53}
54
55static inline unsigned int pampd_remote_node(void *pampd)
56{
57 unsigned long fake_pampd = (unsigned long)pampd;
58 return (fake_pampd >> FAKE_PAMPD_REMOTENODE_SHIFT) &
59 FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS);
60}
61
62static inline unsigned int pampd_remote_size(void *pampd)
63{
64 unsigned long fake_pampd = (unsigned long)pampd;
65 return (fake_pampd >> FAKE_PAMPD_REMOTESIZE_SHIFT) &
66 FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS);
67}
68
69static inline unsigned char pampd_remote_cksum(void *pampd)
70{
71 unsigned long fake_pampd = (unsigned long)pampd;
72 return (fake_pampd >> FAKE_PAMPD_CHECKSUM_SHIFT) &
73 FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS);
74}
75
76static inline bool pampd_is_remote(void *pampd)
77{
78 unsigned long fake_pampd = (unsigned long)pampd;
79 return (fake_pampd >> FAKE_PAMPD_ISREMOTE_SHIFT) &
80 FAKE_PAMPD_MASK(FAKE_PAMPD_ISREMOTE_BITS);
81}
82
83static inline bool pampd_is_intransit(void *pampd)
84{
85 unsigned long fake_pampd = (unsigned long)pampd;
86 return (fake_pampd >> FAKE_PAMPD_INTRANSIT_SHIFT) &
87 FAKE_PAMPD_MASK(FAKE_PAMPD_INTRANSIT_BITS);
88}
89
90/* note that it is a BUG for intransit to be set without isremote also set */
91static inline void *pampd_mark_intransit(void *pampd)
92{
93 unsigned long fake_pampd = (unsigned long)pampd;
94
95 fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
96 fake_pampd |= 1UL << FAKE_PAMPD_INTRANSIT_SHIFT;
97 return (void *)fake_pampd;
98}
99
100static inline void *pampd_mask_intransit_and_remote(void *marked_pampd)
101{
102 unsigned long pampd = (unsigned long)marked_pampd;
103
104 pampd &= ~(1UL << FAKE_PAMPD_INTRANSIT_SHIFT);
105 pampd &= ~(1UL << FAKE_PAMPD_ISREMOTE_SHIFT);
106 return (void *)pampd;
107}
108
109extern int ramster_remote_async_get(struct tmem_xhandle *,
110 bool, int, size_t, uint8_t, void *extra);
111extern int ramster_remote_put(struct tmem_xhandle *, char *, size_t,
112 bool, int *);
113extern int ramster_remote_flush(struct tmem_xhandle *, int);
114extern int ramster_remote_flush_object(struct tmem_xhandle *, int);
115extern int r2net_register_handlers(void);
116extern int r2net_remote_target_node_set(int);
117
118#endif /* _RAMSTER_H_ */
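A quick round-trip sanity sketch of the fake-pampd encoding above; the
bit budget is 1 + 1 + 8 + 13 + 8 = 31 bits, so a fake pampd fits an
unsigned long even on 32-bit builds. check_pampd_roundtrip() is a
hypothetical helper, not part of the driver:

	static void check_pampd_roundtrip(void)
	{
		/* encode: node 3, a 4096-byte page, checksum 0xab */
		void *pampd = pampd_make_remote(3, 4096, 0xab);

		BUG_ON(!pampd_is_remote(pampd));
		BUG_ON(pampd_is_intransit(pampd));
		BUG_ON(pampd_remote_node(pampd) != 3);
		BUG_ON(pampd_remote_size(pampd) != 4096);
		BUG_ON(pampd_remote_cksum(pampd) != 0xab);

		/* marking in-transit sets both flag bits... */
		pampd = pampd_mark_intransit(pampd);
		BUG_ON(!pampd_is_intransit(pampd));

		/* ...and masking strips them, leaving the other fields */
		pampd = pampd_mask_intransit_and_remote(pampd);
		BUG_ON(pampd_is_remote(pampd));
		BUG_ON(pampd_remote_size(pampd) != 4096);
	}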
diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
new file mode 100644
index 000000000000..8f2f6892d8d3
--- /dev/null
+++ b/drivers/staging/ramster/tmem.c
@@ -0,0 +1,851 @@
1/*
2 * In-kernel transcendent memory (generic implementation)
3 *
4 * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
5 *
6 * The primary purpose of Transcendent Memory ("tmem") is to map object-oriented
7 * "handles" (triples containing a pool id, an object id, and an index) to
8 * pages in a page-accessible memory (PAM). Tmem references the PAM pages via
9 * an abstract "pampd" (PAM page-descriptor), which can be operated on by a
10 * set of functions (pamops). Each pampd contains some representation of
11 * PAGE_SIZE bytes worth of data. Tmem must support potentially millions of
12 * pages and must be able to insert, find, and delete these pages at a
13 * potential frequency of thousands per second concurrently across many CPUs,
14 * (and, if used with KVM, across many vcpus across many guests).
15 * Tmem is tracked with a hierarchy of data structures, organized by
16 * the elements in a handle-tuple: pool_id, object_id, and page index.
17 * One or more "clients" (e.g. guests) each provide one or more tmem_pools.
18 * Each pool contains a hash table of rb_trees of tmem_objs. Each
19 * tmem_obj contains a radix-tree-like tree of pointers, with intermediate
20 * nodes called tmem_objnodes. Each leaf pointer in this tree points to
21 * a pampd, which is accessible only through a small set of callbacks
22 * registered by the PAM implementation (see tmem_register_pamops). Tmem
23 * does all memory allocation via a set of callbacks registered by the tmem
24 * host implementation (e.g. see tmem_register_hostops).
25 */
26
27#include <linux/list.h>
28#include <linux/spinlock.h>
29#include <linux/atomic.h>
30#include <linux/delay.h>
31
32#include "tmem.h"
33
34/* data structure sentinels used for debugging... see tmem.h */
35#define POOL_SENTINEL 0x87658765
36#define OBJ_SENTINEL 0x12345678
37#define OBJNODE_SENTINEL 0xfedcba09
38
39/*
40 * A tmem host implementation must use this function to register callbacks
41 * for memory allocation.
42 */
43static struct tmem_hostops tmem_hostops;
44
45static void tmem_objnode_tree_init(void);
46
47void tmem_register_hostops(struct tmem_hostops *m)
48{
49 tmem_objnode_tree_init();
50 tmem_hostops = *m;
51}
52
53/*
54 * A tmem host implementation must use this function to register
55 * callbacks for a page-accessible memory (PAM) implementation.
56 */
57static struct tmem_pamops tmem_pamops;
58
59void tmem_register_pamops(struct tmem_pamops *m)
60{
61 tmem_pamops = *m;
62}
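/*
 * A minimal host-side registration sketch, inferred from how these
 * callbacks are invoked below; the my_* names are hypothetical and the
 * real field lists live in tmem.h:
 *
 *	static struct tmem_hostops my_hostops = {
 *		.obj_alloc = my_obj_alloc,
 *		.obj_free = my_obj_free,
 *		.objnode_alloc = my_objnode_alloc,
 *		.objnode_free = my_objnode_free,
 *	};
 *
 *	tmem_register_hostops(&my_hostops);
 */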
63
64/*
65 * Oids are potentially very sparse and tmem_objs may have an indeterminately
66 * short life, being added and deleted at a relatively high frequency.
67 * So an rb_tree is an ideal data structure to manage tmem_objs. But because
68 * of the potentially huge number of tmem_objs, each pool manages a hashtable
69 * of rb_trees to reduce search, insert, delete, and rebalancing time.
70 * Each hashbucket also has a lock to manage concurrent access.
71 *
72 * The following routines manage tmem_objs. When any tmem_obj is accessed,
73 * the hashbucket lock must be held.
74 */
75
76/* searches for object==oid in pool, returns locked object if found */
77static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
78 struct tmem_oid *oidp)
79{
80 struct rb_node *rbnode;
81 struct tmem_obj *obj;
82
83 rbnode = hb->obj_rb_root.rb_node;
84 while (rbnode) {
85 BUG_ON(RB_EMPTY_NODE(rbnode));
86 obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
87 switch (tmem_oid_compare(oidp, &obj->oid)) {
88 case 0: /* equal */
89 goto out;
90 case -1:
91 rbnode = rbnode->rb_left;
92 break;
93 case 1:
94 rbnode = rbnode->rb_right;
95 break;
96 }
97 }
98 obj = NULL;
99out:
100 return obj;
101}
102
103static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *);
104
105/* free an object that has no more pampds in it */
106static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
107{
108 struct tmem_pool *pool;
109
110 BUG_ON(obj == NULL);
111 ASSERT_SENTINEL(obj, OBJ);
112 BUG_ON(obj->pampd_count > 0);
113 pool = obj->pool;
114 BUG_ON(pool == NULL);
115 if (obj->objnode_tree_root != NULL) /* may be "stump" with no leaves */
116 tmem_pampd_destroy_all_in_obj(obj);
117 BUG_ON(obj->objnode_tree_root != NULL);
118 BUG_ON((long)obj->objnode_count != 0);
119 atomic_dec(&pool->obj_count);
120 BUG_ON(atomic_read(&pool->obj_count) < 0);
121 INVERT_SENTINEL(obj, OBJ);
122 obj->pool = NULL;
123 tmem_oid_set_invalid(&obj->oid);
124 rb_erase(&obj->rb_tree_node, &hb->obj_rb_root);
125}
126
127/*
128 * initialize and insert a tmem_object_root (called only if find failed)
129 */
130static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
131 struct tmem_pool *pool,
132 struct tmem_oid *oidp)
133{
134 struct rb_root *root = &hb->obj_rb_root;
135 struct rb_node **new = &(root->rb_node), *parent = NULL;
136 struct tmem_obj *this;
137
138 BUG_ON(pool == NULL);
139 atomic_inc(&pool->obj_count);
140 obj->objnode_tree_height = 0;
141 obj->objnode_tree_root = NULL;
142 obj->pool = pool;
143 obj->oid = *oidp;
144 obj->objnode_count = 0;
145 obj->pampd_count = 0;
146 (*tmem_pamops.new_obj)(obj);
147 SET_SENTINEL(obj, OBJ);
148 while (*new) {
149 BUG_ON(RB_EMPTY_NODE(*new));
150 this = rb_entry(*new, struct tmem_obj, rb_tree_node);
151 parent = *new;
152 switch (tmem_oid_compare(oidp, &this->oid)) {
153 case 0:
154 BUG(); /* already present; should never happen! */
155 break;
156 case -1:
157 new = &(*new)->rb_left;
158 break;
159 case 1:
160 new = &(*new)->rb_right;
161 break;
162 }
163 }
164 rb_link_node(&obj->rb_tree_node, parent, new);
165 rb_insert_color(&obj->rb_tree_node, root);
166}
167
168/*
169 * Tmem is managed as a set of tmem_pools with certain attributes, such as
170 * "ephemeral" vs "persistent". These attributes apply to all tmem_objs
171 * and all pampds that belong to a tmem_pool. A tmem_pool is created
172 * or deleted relatively rarely (for example, when a filesystem is
173 * mounted or unmounted).
174 */
175
176/* flush all data from a pool and, optionally, free it */
177static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
178{
179 struct rb_node *rbnode;
180 struct tmem_obj *obj;
181 struct tmem_hashbucket *hb = &pool->hashbucket[0];
182 int i;
183
184 BUG_ON(pool == NULL);
185 for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
186 spin_lock(&hb->lock);
187 rbnode = rb_first(&hb->obj_rb_root);
188 while (rbnode != NULL) {
189 obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
190 rbnode = rb_next(rbnode);
191 tmem_pampd_destroy_all_in_obj(obj);
192 tmem_obj_free(obj, hb);
193 (*tmem_hostops.obj_free)(obj, pool);
194 }
195 spin_unlock(&hb->lock);
196 }
197 if (destroy)
198 list_del(&pool->pool_list);
199}
200
201/*
202 * A tmem_obj contains a radix-tree-like tree in which the intermediate
203 * nodes are called tmem_objnodes. (The kernel lib/radix-tree.c implementation
204 * is very specialized and tuned for specific uses and is not particularly
205 * suited for use from this code, though some code from the core algorithms has
206 * been reused, thus the copyright notices below). Each tmem_objnode contains
207 * a set of pointers which point to either a set of intermediate tmem_objnodes
208 * or a set of pampds.
209 *
210 * Portions Copyright (C) 2001 Momchil Velikov
211 * Portions Copyright (C) 2001 Christoph Hellwig
212 * Portions Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
213 */
214
215struct tmem_objnode_tree_path {
216 struct tmem_objnode *objnode;
217 int offset;
218};
219
220/* objnode height_to_maxindex translation */
221static unsigned long tmem_objnode_tree_h2max[OBJNODE_TREE_MAX_PATH + 1];
222
223static void tmem_objnode_tree_init(void)
224{
225 unsigned int ht, tmp;
226
227 for (ht = 0; ht < ARRAY_SIZE(tmem_objnode_tree_h2max); ht++) {
228 tmp = ht * OBJNODE_TREE_MAP_SHIFT;
229 if (tmp >= OBJNODE_TREE_INDEX_BITS)
230 tmem_objnode_tree_h2max[ht] = ~0UL;
231 else
232 tmem_objnode_tree_h2max[ht] =
233 (~0UL >> (OBJNODE_TREE_INDEX_BITS - tmp - 1)) >> 1;
234 }
235}
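/*
 * Worked example: each tree level resolves OBJNODE_TREE_MAP_SHIFT bits
 * of the index, so tmem_objnode_tree_h2max[ht] is
 * 2^(ht * OBJNODE_TREE_MAP_SHIFT) - 1.  Assuming the 6-bit shift used
 * by the zcache code this file derives from, a height-1 tree covers
 * indices 0..63 and a height-2 tree covers 0..4095.
 */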
236
237static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
238{
239 struct tmem_objnode *objnode;
240
241 ASSERT_SENTINEL(obj, OBJ);
242 BUG_ON(obj->pool == NULL);
243 ASSERT_SENTINEL(obj->pool, POOL);
244 objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
245 if (unlikely(objnode == NULL))
246 goto out;
247 objnode->obj = obj;
248 SET_SENTINEL(objnode, OBJNODE);
249 memset(&objnode->slots, 0, sizeof(objnode->slots));
250 objnode->slots_in_use = 0;
251 obj->objnode_count++;
252out:
253 return objnode;
254}
255
256static void tmem_objnode_free(struct tmem_objnode *objnode)
257{
258 struct tmem_pool *pool;
259 int i;
260
261 BUG_ON(objnode == NULL);
262 for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++)
263 BUG_ON(objnode->slots[i] != NULL);
264 ASSERT_SENTINEL(objnode, OBJNODE);
265 INVERT_SENTINEL(objnode, OBJNODE);
266 BUG_ON(objnode->obj == NULL);
267 ASSERT_SENTINEL(objnode->obj, OBJ);
268 pool = objnode->obj->pool;
269 BUG_ON(pool == NULL);
270 ASSERT_SENTINEL(pool, POOL);
271 objnode->obj->objnode_count--;
272 objnode->obj = NULL;
273 (*tmem_hostops.objnode_free)(objnode, pool);
274}
275
276/*
277 * lookup index in object and return associated pampd (or NULL if not found)
278 */
279static void **__tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
280{
281 unsigned int height, shift;
282 struct tmem_objnode **slot = NULL;
283
284 BUG_ON(obj == NULL);
285 ASSERT_SENTINEL(obj, OBJ);
286 BUG_ON(obj->pool == NULL);
287 ASSERT_SENTINEL(obj->pool, POOL);
288
289 height = obj->objnode_tree_height;
290 if (index > tmem_objnode_tree_h2max[height])
291 goto out;
292 if (height == 0 && obj->objnode_tree_root) {
293 slot = &obj->objnode_tree_root;
294 goto out;
295 }
296 shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
297 slot = &obj->objnode_tree_root;
298 while (height > 0) {
299 if (*slot == NULL)
300 goto out;
301 slot = (struct tmem_objnode **)
302 ((*slot)->slots +
303 ((index >> shift) & OBJNODE_TREE_MAP_MASK));
304 shift -= OBJNODE_TREE_MAP_SHIFT;
305 height--;
306 }
307out:
308 return slot != NULL ? (void **)slot : NULL;
309}
310
311static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
312{
313 struct tmem_objnode **slot;
314
315 slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
316 return slot != NULL ? *slot : NULL;
317}
318
319static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
320 void *new_pampd, bool no_free)
321{
322 struct tmem_objnode **slot;
323 void *ret = NULL;
324
325 slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
326 if ((slot != NULL) && (*slot != NULL)) {
327 void *old_pampd = *(void **)slot;
328 *(void **)slot = new_pampd;
329 if (!no_free)
330 (*tmem_pamops.free)(old_pampd, obj->pool,
331 NULL, 0, false);
332 ret = new_pampd;
333 }
334 return ret;
335}
336
337static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
338 void *pampd)
339{
340 int ret = 0;
341 struct tmem_objnode *objnode = NULL, *newnode, *slot;
342 unsigned int height, shift;
343 int offset = 0;
344
345 /* if necessary, extend the tree to be higher */
346 if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height]) {
347 height = obj->objnode_tree_height + 1;
348 if (index > tmem_objnode_tree_h2max[height])
349 while (index > tmem_objnode_tree_h2max[height])
350 height++;
351 if (obj->objnode_tree_root == NULL) {
352 obj->objnode_tree_height = height;
353 goto insert;
354 }
355 do {
356 newnode = tmem_objnode_alloc(obj);
357 if (!newnode) {
358 ret = -ENOMEM;
359 goto out;
360 }
361 newnode->slots[0] = obj->objnode_tree_root;
362 newnode->slots_in_use = 1;
363 obj->objnode_tree_root = newnode;
364 obj->objnode_tree_height++;
365 } while (height > obj->objnode_tree_height);
366 }
367insert:
368 slot = obj->objnode_tree_root;
369 height = obj->objnode_tree_height;
370 shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
371 while (height > 0) {
372 if (slot == NULL) {
373 /* add a child objnode. */
374 slot = tmem_objnode_alloc(obj);
375 if (!slot) {
376 ret = -ENOMEM;
377 goto out;
378 }
379 if (objnode) {
380
381 objnode->slots[offset] = slot;
382 objnode->slots_in_use++;
383 } else
384 obj->objnode_tree_root = slot;
385 }
386 /* go down a level */
387 offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
388 objnode = slot;
389 slot = objnode->slots[offset];
390 shift -= OBJNODE_TREE_MAP_SHIFT;
391 height--;
392 }
393 BUG_ON(slot != NULL);
394 if (objnode) {
395 objnode->slots_in_use++;
396 objnode->slots[offset] = pampd;
397 } else
398 obj->objnode_tree_root = pampd;
399 obj->pampd_count++;
400out:
401 return ret;
402}
403
404static void *tmem_pampd_delete_from_obj(struct tmem_obj *obj, uint32_t index)
405{
406 struct tmem_objnode_tree_path path[OBJNODE_TREE_MAX_PATH + 1];
407 struct tmem_objnode_tree_path *pathp = path;
408 struct tmem_objnode *slot = NULL;
409 unsigned int height, shift;
410 int offset;
411
412 BUG_ON(obj == NULL);
413 ASSERT_SENTINEL(obj, OBJ);
414 BUG_ON(obj->pool == NULL);
415 ASSERT_SENTINEL(obj->pool, POOL);
416 height = obj->objnode_tree_height;
417 if (index > tmem_objnode_tree_h2max[height])
418 goto out;
419 slot = obj->objnode_tree_root;
420 if (height == 0 && obj->objnode_tree_root) {
421 obj->objnode_tree_root = NULL;
422 goto out;
423 }
424 shift = (height - 1) * OBJNODE_TREE_MAP_SHIFT;
425 pathp->objnode = NULL;
426 do {
427 if (slot == NULL)
428 goto out;
429 pathp++;
430 offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
431 pathp->offset = offset;
432 pathp->objnode = slot;
433 slot = slot->slots[offset];
434 shift -= OBJNODE_TREE_MAP_SHIFT;
435 height--;
436 } while (height > 0);
437 if (slot == NULL)
438 goto out;
439 while (pathp->objnode) {
440 pathp->objnode->slots[pathp->offset] = NULL;
441 pathp->objnode->slots_in_use--;
442 if (pathp->objnode->slots_in_use) {
443 if (pathp->objnode == obj->objnode_tree_root) {
444 while (obj->objnode_tree_height > 0 &&
445 obj->objnode_tree_root->slots_in_use == 1 &&
446 obj->objnode_tree_root->slots[0]) {
447 struct tmem_objnode *to_free =
448 obj->objnode_tree_root;
449
450 obj->objnode_tree_root =
451 to_free->slots[0];
452 obj->objnode_tree_height--;
453 to_free->slots[0] = NULL;
454 to_free->slots_in_use = 0;
455 tmem_objnode_free(to_free);
456 }
457 }
458 goto out;
459 }
460 tmem_objnode_free(pathp->objnode); /* 0 slots used, free it */
461 pathp--;
462 }
463 obj->objnode_tree_height = 0;
464 obj->objnode_tree_root = NULL;
465
466out:
467 if (slot != NULL)
468 obj->pampd_count--;
469 BUG_ON(obj->pampd_count < 0);
470 return slot;
471}
472
473/* recursively walk the objnode_tree destroying pampds and objnodes */
474static void tmem_objnode_node_destroy(struct tmem_obj *obj,
475 struct tmem_objnode *objnode,
476 unsigned int ht)
477{
478 int i;
479
480 if (ht == 0)
481 return;
482 for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++) {
483 if (objnode->slots[i]) {
484 if (ht == 1) {
485 obj->pampd_count--;
486 (*tmem_pamops.free)(objnode->slots[i],
487 obj->pool, NULL, 0, true);
488 objnode->slots[i] = NULL;
489 continue;
490 }
491 tmem_objnode_node_destroy(obj, objnode->slots[i], ht-1);
492 tmem_objnode_free(objnode->slots[i]);
493 objnode->slots[i] = NULL;
494 }
495 }
496}
497
498static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
499{
500 if (obj->objnode_tree_root == NULL)
501 return;
502 if (obj->objnode_tree_height == 0) {
503 obj->pampd_count--;
504 (*tmem_pamops.free)(obj->objnode_tree_root,
505 obj->pool, NULL, 0, true);
506 } else {
507 tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
508 obj->objnode_tree_height);
509 tmem_objnode_free(obj->objnode_tree_root);
510 obj->objnode_tree_height = 0;
511 }
512 obj->objnode_tree_root = NULL;
513 (*tmem_pamops.free_obj)(obj->pool, obj);
514}
515
516/*
517 * Tmem is operated on by a set of well-defined actions:
518 * "put", "get", "flush", "flush_object", "new pool" and "destroy pool".
519 * (The tmem ABI allows for subpages and exchanges but these operations
520 * are not included in this implementation.)
521 *
522 * These "tmem core" operations are implemented in the following functions.
523 */
524
525/*
526 * "Put" a page, e.g. copy a page from the kernel into newly allocated
527 * PAM space (if such space is available). Tmem_put is complicated by
528 * a corner case: What if a page with matching handle already exists in
529 * tmem? To guarantee coherency, one of two actions is necessary: Either
530 * the data for the page must be overwritten, or the page must be
531 * "flushed" so that the data is not accessible to a subsequent "get".
532 * Since these "duplicate puts" are relatively rare, this implementation
533 * always flushes for simplicity.
534 */
535int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
536 char *data, size_t size, bool raw, int ephemeral)
537{
538 struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
539 void *pampd = NULL, *pampd_del = NULL;
540 int ret = -ENOMEM;
541 struct tmem_hashbucket *hb;
542
543 hb = &pool->hashbucket[tmem_oid_hash(oidp)];
544 spin_lock(&hb->lock);
545 obj = objfound = tmem_obj_find(hb, oidp);
546 if (obj != NULL) {
547 pampd = tmem_pampd_lookup_in_obj(objfound, index);
548 if (pampd != NULL) {
549 /* if found, is a dup put, flush the old one */
550 pampd_del = tmem_pampd_delete_from_obj(obj, index);
551 BUG_ON(pampd_del != pampd);
552 (*tmem_pamops.free)(pampd, pool, oidp, index, true);
553 if (obj->pampd_count == 0) {
554 objnew = obj;
555 objfound = NULL;
556 }
557 pampd = NULL;
558 }
559 } else {
560 obj = objnew = (*tmem_hostops.obj_alloc)(pool);
561 if (unlikely(obj == NULL)) {
562 ret = -ENOMEM;
563 goto out;
564 }
565 tmem_obj_init(obj, hb, pool, oidp);
566 }
567 BUG_ON(obj == NULL);
568 BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
569 pampd = (*tmem_pamops.create)(data, size, raw, ephemeral,
570 obj->pool, &obj->oid, index);
571 if (unlikely(pampd == NULL))
572 goto free;
573 ret = tmem_pampd_add_to_obj(obj, index, pampd);
574 if (unlikely(ret == -ENOMEM))
575 /* may have partially built objnode tree ("stump") */
576 goto delete_and_free;
577 goto out;
578
579delete_and_free:
580 (void)tmem_pampd_delete_from_obj(obj, index);
581free:
582 if (pampd)
583 (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
584 if (objnew) {
585 tmem_obj_free(objnew, hb);
586 (*tmem_hostops.obj_free)(objnew, pool);
587 }
588out:
589 spin_unlock(&hb->lock);
590 return ret;
591}
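/*
 * A minimal caller-side sketch of the put/get pairing (the pool, the
 * oid value, and the buffer are hypothetical):
 *
 *	struct tmem_oid oid = { .oid = { 1, 2, 3 } };
 *	char page[PAGE_SIZE];
 *	size_t size = PAGE_SIZE;
 *
 *	if (tmem_put(pool, &oid, 0, page, PAGE_SIZE, 0, is_ephemeral(pool)))
 *		-- no PAM space was available; fall back to the normal path
 *	...
 *	if (tmem_get(pool, &oid, 0, page, &size, 0, 0) == 0)
 *		-- hit: page now holds the data; with get_and_free == 0,
 *		-- an ephemeral pool also flushes its copy
 */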
592
593void *tmem_localify_get_pampd(struct tmem_pool *pool, struct tmem_oid *oidp,
594 uint32_t index, struct tmem_obj **ret_obj,
595 void **saved_hb)
596{
597 struct tmem_hashbucket *hb;
598 struct tmem_obj *obj = NULL;
599 void *pampd = NULL;
600
601 hb = &pool->hashbucket[tmem_oid_hash(oidp)];
602 spin_lock(&hb->lock);
603 obj = tmem_obj_find(hb, oidp);
604 if (likely(obj != NULL))
605 pampd = tmem_pampd_lookup_in_obj(obj, index);
606 *ret_obj = obj;
607 *saved_hb = (void *)hb;
608 /* note, hashbucket remains locked */
609 return pampd;
610}
611
612void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
613 void *pampd, void *saved_hb, bool delete)
614{
615 struct tmem_hashbucket *hb = (struct tmem_hashbucket *)saved_hb;
616
617 BUG_ON(!spin_is_locked(&hb->lock));
618 if (pampd != NULL) {
619 BUG_ON(obj == NULL);
620 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
621 } else if (delete) {
622 BUG_ON(obj == NULL);
623 (void)tmem_pampd_delete_from_obj(obj, index);
624 }
625 spin_unlock(&hb->lock);
626}
627
628static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
629 struct tmem_pool *pool, struct tmem_oid *oidp,
630 uint32_t index, bool free, char *data)
631{
632 void *old_pampd = *ppampd, *new_pampd = NULL;
633 bool intransit = false;
634 int ret = 0;
635
636
637 if (!is_ephemeral(pool))
638 new_pampd = (*tmem_pamops.repatriate_preload)(
639 old_pampd, pool, oidp, index, &intransit);
640 if (intransit)
641 ret = -EAGAIN;
642 else if (new_pampd != NULL)
643 *ppampd = new_pampd;
644 /* must release the hb->lock else repatriate can't sleep */
645 spin_unlock(&hb->lock);
646 if (!intransit)
647 ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
648 oidp, index, free, data);
649 return ret;
650}
651
652/*
653 * "Get" a page, e.g. if one can be found, copy the tmem page with the
654 * matching handle from PAM space to the kernel. By tmem definition,
655 * when a "get" is successful on an ephemeral page, the page is "flushed",
656 * and when a "get" is successful on a persistent page, the page is retained
657 * in tmem.  Note that to preserve coherency, "get" can never be
658 * skipped if tmem contains the data.  That is, if a get is done
659 * with a certain handle and fails, any subsequent "get" must also
660 * fail (unless of course there is a "put" done with the same
661 * handle).
662 *
663 */
664int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
665 char *data, size_t *size, bool raw, int get_and_free)
666{
667 struct tmem_obj *obj;
668 void *pampd;
669 bool ephemeral = is_ephemeral(pool);
670 int ret = -1;
671 struct tmem_hashbucket *hb;
672 bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
673 bool lock_held = false;
674 void **ppampd;
675
676again:
677 hb = &pool->hashbucket[tmem_oid_hash(oidp)];
678 spin_lock(&hb->lock);
679 lock_held = true;
680 obj = tmem_obj_find(hb, oidp);
681 if (obj == NULL)
682 goto out;
683 ppampd = __tmem_pampd_lookup_in_obj(obj, index);
684 if (ppampd == NULL)
685 goto out;
686 if (tmem_pamops.is_remote(*ppampd)) {
687 ret = tmem_repatriate(ppampd, hb, pool, oidp,
688 index, free, data);
689 lock_held = false; /* note hb->lock has been unlocked */
690 if (ret == -EAGAIN) {
691 /* rare I think, but should cond_resched()??? */
692 usleep_range(10, 1000);
693 goto again;
694 } else if (ret != 0) {
695 if (ret != -ENOENT)
696 pr_err("UNTESTED case in tmem_get, ret=%d\n",
697 ret);
698 ret = -1;
699 goto out;
700 }
701 goto out;
702 }
703 if (free)
704 pampd = tmem_pampd_delete_from_obj(obj, index);
705 else
706 pampd = tmem_pampd_lookup_in_obj(obj, index);
707 if (pampd == NULL)
708 goto out;
709 if (free) {
710 if (obj->pampd_count == 0) {
711 tmem_obj_free(obj, hb);
712 (*tmem_hostops.obj_free)(obj, pool);
713 obj = NULL;
714 }
715 }
716 if (free)
717 ret = (*tmem_pamops.get_data_and_free)(
718 data, size, raw, pampd, pool, oidp, index);
719 else
720 ret = (*tmem_pamops.get_data)(
721 data, size, raw, pampd, pool, oidp, index);
722 if (ret < 0)
723 goto out;
724 ret = 0;
725out:
726 if (lock_held)
727 spin_unlock(&hb->lock);
728 return ret;
729}
730
731/*
732 * If a page in tmem matches the handle, "flush" this page from tmem such
733 * that any subsequent "get" does not succeed (unless, of course, there
734 * was another "put" with the same handle).
735 */
736int tmem_flush_page(struct tmem_pool *pool,
737 struct tmem_oid *oidp, uint32_t index)
738{
739 struct tmem_obj *obj;
740 void *pampd;
741 int ret = -1;
742 struct tmem_hashbucket *hb;
743
744 hb = &pool->hashbucket[tmem_oid_hash(oidp)];
745 spin_lock(&hb->lock);
746 obj = tmem_obj_find(hb, oidp);
747 if (obj == NULL)
748 goto out;
749 pampd = tmem_pampd_delete_from_obj(obj, index);
750 if (pampd == NULL)
751 goto out;
752 (*tmem_pamops.free)(pampd, pool, oidp, index, true);
753 if (obj->pampd_count == 0) {
754 tmem_obj_free(obj, hb);
755 (*tmem_hostops.obj_free)(obj, pool);
756 }
757 ret = 0;
758
759out:
760 spin_unlock(&hb->lock);
761 return ret;
762}
763
764/*
765 * If a page in tmem matches the handle, replace the page so that any
766 * subsequent "get" gets the new page.  Returns -1 if no object matches
767 * the oid; otherwise returns the pam implementation's replace_in_obj result.
768 */
769int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
770 uint32_t index, void *new_pampd)
771{
772 struct tmem_obj *obj;
773 int ret = -1;
774 struct tmem_hashbucket *hb;
775
776 hb = &pool->hashbucket[tmem_oid_hash(oidp)];
777 spin_lock(&hb->lock);
778 obj = tmem_obj_find(hb, oidp);
779 if (obj == NULL)
780 goto out;
781 new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
782 ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
783out:
784 spin_unlock(&hb->lock);
785 return ret;
786}
787
788/*
789 * "Flush" all pages in tmem matching this oid.
790 */
791int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
792{
793 struct tmem_obj *obj;
794 struct tmem_hashbucket *hb;
795 int ret = -1;
796
797 hb = &pool->hashbucket[tmem_oid_hash(oidp)];
798 spin_lock(&hb->lock);
799 obj = tmem_obj_find(hb, oidp);
800 if (obj == NULL)
801 goto out;
802 tmem_pampd_destroy_all_in_obj(obj);
803 tmem_obj_free(obj, hb);
804 (*tmem_hostops.obj_free)(obj, pool);
805 ret = 0;
806
807out:
808 spin_unlock(&hb->lock);
809 return ret;
810}
811
812/*
813 * "Flush" all pages (and tmem_objs) from this tmem_pool and disable
814 * all subsequent access to this tmem_pool.
815 */
816int tmem_destroy_pool(struct tmem_pool *pool)
817{
818 int ret = -1;
819
820 if (pool == NULL)
821 goto out;
822 tmem_pool_flush(pool, 1);
823 ret = 0;
824out:
825 return ret;
826}
827
828static LIST_HEAD(tmem_global_pool_list);
829
830/*
831 * Initialize a new tmem_pool with the provided flags; the pool id
832 * itself is assigned by the tmem host implementation.
833 */
834void tmem_new_pool(struct tmem_pool *pool, uint32_t flags)
835{
836 int persistent = flags & TMEM_POOL_PERSIST;
837 int shared = flags & TMEM_POOL_SHARED;
838 struct tmem_hashbucket *hb = &pool->hashbucket[0];
839 int i;
840
841 for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
842 hb->obj_rb_root = RB_ROOT;
843 spin_lock_init(&hb->lock);
844 }
845 INIT_LIST_HEAD(&pool->pool_list);
846 atomic_set(&pool->obj_count, 0);
847 SET_SENTINEL(pool, POOL);
848 list_add_tail(&pool->pool_list, &tmem_global_pool_list);
849 pool->persistent = persistent;
850 pool->shared = shared;
851}
diff --git a/drivers/staging/ramster/tmem.h b/drivers/staging/ramster/tmem.h
new file mode 100644
index 000000000000..47f1918c8314
--- /dev/null
+++ b/drivers/staging/ramster/tmem.h
@@ -0,0 +1,244 @@
1/*
2 * tmem.h
3 *
4 * Transcendent memory
5 *
6 * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
7 */
8
9#ifndef _TMEM_H_
10#define _TMEM_H_
11
12#include <linux/highmem.h>
13#include <linux/hash.h>
14#include <linux/atomic.h>
15
16/*
17 * These are pre-defined by the Xen<->Linux ABI
18 */
19#define TMEM_PUT_PAGE 4
20#define TMEM_GET_PAGE 5
21#define TMEM_FLUSH_PAGE 6
22#define TMEM_FLUSH_OBJECT 7
23#define TMEM_POOL_PERSIST 1
24#define TMEM_POOL_SHARED 2
25#define TMEM_POOL_PRECOMPRESSED 4
26#define TMEM_POOL_PAGESIZE_SHIFT 4
27#define TMEM_POOL_PAGESIZE_MASK 0xf
28#define TMEM_POOL_RESERVED_BITS 0x00ffff00
29
30/*
31 * sentinels have proven very useful for debugging but can be removed
32 * or disabled before final merge.
33 */
34#define SENTINELS
35#ifdef SENTINELS
36#define DECL_SENTINEL uint32_t sentinel;
37#define SET_SENTINEL(_x, _y) ((_x)->sentinel = _y##_SENTINEL)
38#define INVERT_SENTINEL(_x, _y) ((_x)->sentinel = ~_y##_SENTINEL)
39#define ASSERT_SENTINEL(_x, _y) WARN_ON((_x)->sentinel != _y##_SENTINEL)
40#define ASSERT_INVERTED_SENTINEL(_x, _y) WARN_ON((_x)->sentinel != ~_y##_SENTINEL)
41#else
42#define DECL_SENTINEL
43#define SET_SENTINEL(_x, _y) do { } while (0)
44#define INVERT_SENTINEL(_x, _y) do { } while (0)
45#define ASSERT_SENTINEL(_x, _y) do { } while (0)
46#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
47#endif
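
/*
 * Example sentinel usage (editor's sketch; "widget" and its magic value
 * are hypothetical and not part of tmem):
 */
#if 0
#define WIDGET_SENTINEL 0x57494447
struct widget {
	int payload;
	DECL_SENTINEL
};

static inline void widget_lifecycle(struct widget *w)
{
	SET_SENTINEL(w, WIDGET);		/* stamp at allocation */
	ASSERT_SENTINEL(w, WIDGET);		/* cheap liveness check */
	INVERT_SENTINEL(w, WIDGET);		/* stamp at free... */
	ASSERT_INVERTED_SENTINEL(w, WIDGET);	/* ...to catch use-after-free */
}
#endif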
48
49#define ASSERT_SPINLOCK(_l) WARN_ON(!spin_is_locked(_l))
50
51/*
52 * A pool is the highest-level data structure managed by tmem and
53 * usually corresponds to a large independent set of pages such as
54 * a filesystem. Each pool has an id, and certain attributes and counters.
55 * It also contains a set of hash buckets, each of which contains an rbtree
56 * of objects and a lock to manage concurrency within the pool.
57 */
58
59#define TMEM_HASH_BUCKET_BITS 8
60#define TMEM_HASH_BUCKETS (1<<TMEM_HASH_BUCKET_BITS)
61
62struct tmem_hashbucket {
63 struct rb_root obj_rb_root;
64 spinlock_t lock;
65};
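
/*
 * Locking pattern used by the accessors in tmem.c (editor's sketch;
 * tmem_obj_find is a tmem.c-internal helper): an oid hashes to exactly
 * one bucket, and that bucket's lock covers its rbtree of objects.
 */
#if 0
	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	obj = tmem_obj_find(hb, oidp);
	/* ... operate on obj; it is only stable while hb->lock is held ... */
	spin_unlock(&hb->lock);
#endif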
66
67struct tmem_pool {
68 void *client; /* "up" for some clients, avoids table lookup */
69 struct list_head pool_list;
70 uint32_t pool_id;
71 bool persistent;
72 bool shared;
73 atomic_t obj_count;
74 atomic_t refcount;
75 struct tmem_hashbucket hashbucket[TMEM_HASH_BUCKETS];
76 DECL_SENTINEL
77};
78
79#define is_persistent(_p) (_p->persistent)
80#define is_ephemeral(_p) (!(_p->persistent))
81
82/*
83 * An object id ("oid") is large: 192 bits (to ensure, for example, files
84 * in a modern filesystem can be uniquely identified).
85 */
86
87struct tmem_oid {
88 uint64_t oid[3];
89};
90
91struct tmem_xhandle {
92 uint8_t client_id;
93 uint8_t xh_data_cksum;
94 uint16_t xh_data_size;
95 uint16_t pool_id;
96 struct tmem_oid oid;
97 uint32_t index;
98 void *extra;
99};
100
101static inline struct tmem_xhandle tmem_xhandle_fill(uint16_t client_id,
102 struct tmem_pool *pool,
103 struct tmem_oid *oidp,
104 uint32_t index)
105{
106 struct tmem_xhandle xh;
107 xh.client_id = client_id;
108 xh.xh_data_cksum = (uint8_t)-1;
109 xh.xh_data_size = (uint16_t)-1;
110 xh.pool_id = pool->pool_id;
111 xh.oid = *oidp;
112 xh.index = index;
113 return xh;
114}
115
116static inline void tmem_oid_set_invalid(struct tmem_oid *oidp)
117{
118 oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
119}
120
121static inline bool tmem_oid_valid(struct tmem_oid *oidp)
122{
123 return oidp->oid[0] != -1UL || oidp->oid[1] != -1UL ||
124 oidp->oid[2] != -1UL;
125}
126
127static inline int tmem_oid_compare(struct tmem_oid *left,
128 struct tmem_oid *right)
129{
130 int ret;
131
132 if (left->oid[2] == right->oid[2]) {
133 if (left->oid[1] == right->oid[1]) {
134 if (left->oid[0] == right->oid[0])
135 ret = 0;
136 else if (left->oid[0] < right->oid[0])
137 ret = -1;
138 else
139 ret = 1;
140 } else if (left->oid[1] < right->oid[1])
141 ret = -1;
142 else
143 ret = 1;
144 } else if (left->oid[2] < right->oid[2])
145 ret = -1;
146 else
147 ret = 1;
148 return ret;
149}
150
151static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
152{
153 return hash_long(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
154 TMEM_HASH_BUCKET_BITS);
155}
156
157/*
158 * A tmem_obj contains an identifier (oid), pointers to the parent
159 * pool and the rb_tree to which it belongs, counters, and an ordered
160 * set of pampds, structured in a radix-tree-like tree. The intermediate
161 * nodes of the tree are called tmem_objnodes.
162 */
163
164struct tmem_objnode;
165
166struct tmem_obj {
167 struct tmem_oid oid;
168 struct tmem_pool *pool;
169 struct rb_node rb_tree_node;
170 struct tmem_objnode *objnode_tree_root;
171 unsigned int objnode_tree_height;
172 unsigned long objnode_count;
173 long pampd_count;
174 /* for current design of ramster, all pages belonging to
175 * an object reside on the same remotenode and extra is
176 * used to record the number of the remotenode so a
177 * flush-object operation can specify it */
178 void *extra; /* for use by pampd implementation */
179 DECL_SENTINEL
180};
181
182#define OBJNODE_TREE_MAP_SHIFT 6
183#define OBJNODE_TREE_MAP_SIZE (1UL << OBJNODE_TREE_MAP_SHIFT)
184#define OBJNODE_TREE_MAP_MASK (OBJNODE_TREE_MAP_SIZE-1)
185#define OBJNODE_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
186#define OBJNODE_TREE_MAX_PATH \
187 (OBJNODE_TREE_INDEX_BITS/OBJNODE_TREE_MAP_SHIFT + 2)
188
189struct tmem_objnode {
190 struct tmem_obj *obj;
191 DECL_SENTINEL
192 void *slots[OBJNODE_TREE_MAP_SIZE];
193 unsigned int slots_in_use;
194};
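
/*
 * Worked example of the radix-tree geometry (editor's sketch): each tree
 * level consumes OBJNODE_TREE_MAP_SHIFT (6) bits of the 32-bit page index,
 * so every objnode has 64 slots and an index selects one slot per level:
 */
#if 0
static inline unsigned example_slot_at_level(uint32_t index, unsigned level)
{
	/* level 0 is the leaf level */
	return (index >> (level * OBJNODE_TREE_MAP_SHIFT)) &
		OBJNODE_TREE_MAP_MASK;
}
/* e.g. index 0x12345 -> slot 0x05 at level 0, 0x0d at level 1, 0x12 at level 2 */
#endif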
195
196/* pampd abstract datatype methods provided by the PAM implementation */
197struct tmem_pamops {
198 void *(*create)(char *, size_t, bool, int,
199 struct tmem_pool *, struct tmem_oid *, uint32_t);
200 int (*get_data)(char *, size_t *, bool, void *, struct tmem_pool *,
201 struct tmem_oid *, uint32_t);
202 int (*get_data_and_free)(char *, size_t *, bool, void *,
203 struct tmem_pool *, struct tmem_oid *,
204 uint32_t);
205 void (*free)(void *, struct tmem_pool *,
206 struct tmem_oid *, uint32_t, bool);
207 void (*free_obj)(struct tmem_pool *, struct tmem_obj *);
208 bool (*is_remote)(void *);
209 void *(*repatriate_preload)(void *, struct tmem_pool *,
210 struct tmem_oid *, uint32_t, bool *);
211 int (*repatriate)(void *, void *, struct tmem_pool *,
212 struct tmem_oid *, uint32_t, bool, void *);
213 void (*new_obj)(struct tmem_obj *);
214 int (*replace_in_obj)(void *, struct tmem_obj *);
215};
216extern void tmem_register_pamops(struct tmem_pamops *m);
217
218/* memory allocation methods provided by the host implementation */
219struct tmem_hostops {
220 struct tmem_obj *(*obj_alloc)(struct tmem_pool *);
221 void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
222 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
223 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
224};
225extern void tmem_register_hostops(struct tmem_hostops *m);
226
227/* core tmem accessor functions */
228extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
229 char *, size_t, bool, int);
230extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
231 char *, size_t *, bool, int);
232extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
233 void *);
234extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
235 uint32_t index, struct tmem_obj **,
236 void **);
237extern void tmem_localify_finish(struct tmem_obj *, uint32_t index,
238 void *, void *, bool);
239extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
240 uint32_t index);
241extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
242extern int tmem_destroy_pool(struct tmem_pool *);
243extern void tmem_new_pool(struct tmem_pool *, uint32_t);
244#endif /* _TMEM_H_ */
diff --git a/drivers/staging/zram/xvmalloc.c b/drivers/staging/ramster/xvmalloc.c
index 1f9c5082b6d5..1f9c5082b6d5 100644
--- a/drivers/staging/zram/xvmalloc.c
+++ b/drivers/staging/ramster/xvmalloc.c
diff --git a/drivers/staging/zram/xvmalloc.h b/drivers/staging/ramster/xvmalloc.h
index 5b1a81aa5faf..5b1a81aa5faf 100644
--- a/drivers/staging/zram/xvmalloc.h
+++ b/drivers/staging/ramster/xvmalloc.h
diff --git a/drivers/staging/zram/xvmalloc_int.h b/drivers/staging/ramster/xvmalloc_int.h
index b5f1f7febcf6..b5f1f7febcf6 100644
--- a/drivers/staging/zram/xvmalloc_int.h
+++ b/drivers/staging/ramster/xvmalloc_int.h
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
new file mode 100644
index 000000000000..36d53ed9d71a
--- /dev/null
+++ b/drivers/staging/ramster/zcache-main.c
@@ -0,0 +1,3320 @@
1/*
2 * zcache-main.c
3 *
4 * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
5 * Copyright (c) 2010,2011, Nitin Gupta
6 *
7 * Zcache provides an in-kernel "host implementation" for transcendent memory
8 * and, thus indirectly, for cleancache and frontswap. Zcache includes two
9 * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
10 * 1) "compression buddies" ("zbud") is used for ephemeral pages
11 * 2) xvmalloc is used for persistent pages.
12 * Xvmalloc (based on the TLSF allocator) has very low fragmentation
13 * so maximizes space efficiency, while zbud allows pairs (and potentially,
14 * in the future, more than a pair of) compressed pages to be closely linked
15 * so that reclaiming can be done via the kernel's physical-page-oriented
16 * "shrinker" interface.
17 *
18 * [1] For a definition of page-accessible memory (aka PAM), see:
19 * http://marc.info/?l=linux-mm&m=127811271605009
20 * RAMSTER TODO:
21 * - handle remotifying of buddied pages (see zbud_remotify_zbpg)
22 * - kernel boot params: nocleancache/nofrontswap don't always work?!?
23 */
24
25#include <linux/module.h>
26#include <linux/cpu.h>
27#include <linux/highmem.h>
28#include <linux/list.h>
29#include <linux/lzo.h>
30#include <linux/slab.h>
31#include <linux/spinlock.h>
32#include <linux/types.h>
33#include <linux/atomic.h>
34#include <linux/math64.h>
35#include "tmem.h"
36#include "zcache.h"
37#include "ramster.h"
38#include "cluster/tcp.h"
39
40#include "xvmalloc.h" /* temporary until change to zsmalloc */
41
42#define RAMSTER_TESTING
43
44#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
45#error "ramster is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
46#endif
47#ifdef CONFIG_CLEANCACHE
48#include <linux/cleancache.h>
49#endif
50#ifdef CONFIG_FRONTSWAP
51#include <linux/frontswap.h>
52#endif
53
54enum ramster_remotify_op {
55 RAMSTER_REMOTIFY_EPH_PUT,
56 RAMSTER_REMOTIFY_PERS_PUT,
57 RAMSTER_REMOTIFY_FLUSH_PAGE,
58 RAMSTER_REMOTIFY_FLUSH_OBJ,
59 RAMSTER_INTRANSIT_PERS
60};
61
62struct ramster_remotify_hdr {
63 enum ramster_remotify_op op;
64 struct list_head list;
65};
66
67#define ZBH_SENTINEL 0x43214321
68#define ZBPG_SENTINEL 0xdeadbeef
69
70#define ZBUD_MAX_BUDS 2
71
72struct zbud_hdr {
73 struct ramster_remotify_hdr rem_op;
74 uint16_t client_id;
75 uint16_t pool_id;
76 struct tmem_oid oid;
77 uint32_t index;
78 uint16_t size; /* compressed size in bytes, zero means unused */
79 DECL_SENTINEL
80};
81
82#define ZVH_SENTINEL 0x43214321
83static const int zv_max_page_size = (PAGE_SIZE / 8) * 7;
84
85struct zv_hdr {
86 struct ramster_remotify_hdr rem_op;
87 uint16_t client_id;
88 uint16_t pool_id;
89 struct tmem_oid oid;
90 uint32_t index;
91 DECL_SENTINEL
92};
93
94struct flushlist_node {
95 struct ramster_remotify_hdr rem_op;
96 struct tmem_xhandle xh;
97};
98
99union remotify_list_node {
100 struct ramster_remotify_hdr rem_op;
101 struct zv_hdr zv;
102 struct zbud_hdr zbud;
103 struct flushlist_node flist;
104};
105
106static LIST_HEAD(zcache_rem_op_list);
107static DEFINE_SPINLOCK(zcache_rem_op_list_lock);
108
109#if 0
110/* this is more aggressive but may cause other problems? */
111#define ZCACHE_GFP_MASK (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
112#else
113#define ZCACHE_GFP_MASK \
114 (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
115#endif
116
117#define MAX_POOLS_PER_CLIENT 16
118
119#define MAX_CLIENTS 16
120#define LOCAL_CLIENT ((uint16_t)-1)
121
122MODULE_LICENSE("GPL");
123
124struct zcache_client {
125 struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
126 struct xv_pool *xvpool;
127 bool allocated;
128 atomic_t refcount;
129};
130
131static struct zcache_client zcache_host;
132static struct zcache_client zcache_clients[MAX_CLIENTS];
133
134static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
135{
136 BUG_ON(cli == NULL);
137 if (cli == &zcache_host)
138 return LOCAL_CLIENT;
139 return cli - &zcache_clients[0];
140}
141
142static inline bool is_local_client(struct zcache_client *cli)
143{
144 return cli == &zcache_host;
145}
146
147/**********
148 * Compression buddies ("zbud") provides for packing two (or, possibly
149 * in the future, more) compressed ephemeral pages into a single "raw"
150 * (physical) page and tracking them with data structures so that
151 * the raw pages can be easily reclaimed.
152 *
153 * A zbud page ("zbpg") is an aligned page containing a list_head,
154 * a lock, and two "zbud headers". The remainder of the physical
155 * page is divided up into aligned 64-byte "chunks" which contain
156 * the compressed data for zero, one, or two zbuds. Each zbpg
157 * resides on: (1) an "unused list" if it has no zbuds; (2) a
158 * "buddied" list if it is fully populated with two zbuds; or
159 * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks
160 * the one unbuddied zbud uses. The data inside a zbpg cannot be
161 * read or written unless the zbpg's lock is held.
162 */
163
164struct zbud_page {
165 struct list_head bud_list;
166 spinlock_t lock;
167 struct zbud_hdr buddy[ZBUD_MAX_BUDS];
168 DECL_SENTINEL
169 /* followed by NUM_CHUNK aligned CHUNK_SIZE-byte chunks */
170};
171
172#define CHUNK_SHIFT 6
173#define CHUNK_SIZE (1 << CHUNK_SHIFT)
174#define CHUNK_MASK (~(CHUNK_SIZE-1))
175#define NCHUNKS (((PAGE_SIZE - sizeof(struct zbud_page)) & \
176 CHUNK_MASK) >> CHUNK_SHIFT)
177#define MAX_CHUNK (NCHUNKS-1)
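
/*
 * Worked example of the chunk arithmetic (editor's note): with CHUNK_SIZE
 * of 64 bytes, a 1000-byte compressed page needs (1000 + 63) >> 6 = 16
 * chunks; on a 4K page, NCHUNKS comes out a little under 64 because the
 * struct zbud_page header consumes the start of the page.
 */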
178
179static struct {
180 struct list_head list;
181 unsigned count;
182} zbud_unbuddied[NCHUNKS];
183/* list N contains pages with N chunks USED and NCHUNKS-N unused */
184/* element 0 is never used but optimizing that isn't worth it */
185static unsigned long zbud_cumul_chunk_counts[NCHUNKS];
186
187struct list_head zbud_buddied_list;
188static unsigned long zcache_zbud_buddied_count;
189
190/* protects the buddied list and all unbuddied lists */
191static DEFINE_SPINLOCK(zbud_budlists_spinlock);
192
193static atomic_t zcache_zbud_curr_raw_pages;
194static atomic_t zcache_zbud_curr_zpages;
195static unsigned long zcache_zbud_curr_zbytes;
196static unsigned long zcache_zbud_cumul_zpages;
197static unsigned long zcache_zbud_cumul_zbytes;
198static unsigned long zcache_compress_poor;
199static unsigned long zcache_policy_percent_exceeded;
200static unsigned long zcache_mean_compress_poor;
201
202/*
203 * RAMster counters
204 * - Remote pages are pages with a local pampd but the data is remote
205 * - Foreign pages are pages stored locally but belonging to another node
206 */
207static atomic_t ramster_remote_pers_pages = ATOMIC_INIT(0);
208static unsigned long ramster_pers_remotify_enable;
209static unsigned long ramster_eph_remotify_enable;
210static unsigned long ramster_eph_pages_remoted;
211static unsigned long ramster_eph_pages_remote_failed;
212static unsigned long ramster_pers_pages_remoted;
213static unsigned long ramster_pers_pages_remote_failed;
214static unsigned long ramster_pers_pages_remote_nomem;
215static unsigned long ramster_remote_objects_flushed;
216static unsigned long ramster_remote_object_flushes_failed;
217static unsigned long ramster_remote_pages_flushed;
218static unsigned long ramster_remote_page_flushes_failed;
219static unsigned long ramster_remote_eph_pages_succ_get;
220static unsigned long ramster_remote_pers_pages_succ_get;
221static unsigned long ramster_remote_eph_pages_unsucc_get;
222static unsigned long ramster_remote_pers_pages_unsucc_get;
223static atomic_t ramster_curr_flnode_count = ATOMIC_INIT(0);
224static unsigned long ramster_curr_flnode_count_max;
225static atomic_t ramster_foreign_eph_pampd_count = ATOMIC_INIT(0);
226static unsigned long ramster_foreign_eph_pampd_count_max;
227static atomic_t ramster_foreign_pers_pampd_count = ATOMIC_INIT(0);
228static unsigned long ramster_foreign_pers_pampd_count_max;
229
230/* forward references */
231static void *zcache_get_free_page(void);
232static void zcache_free_page(void *p);
233
234/*
235 * zbud helper functions
236 */
237
238static inline unsigned zbud_max_buddy_size(void)
239{
240 return MAX_CHUNK << CHUNK_SHIFT;
241}
242
243static inline unsigned zbud_size_to_chunks(unsigned size)
244{
245 BUG_ON(size == 0 || size > zbud_max_buddy_size());
246 return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
247}
248
249static inline int zbud_budnum(struct zbud_hdr *zh)
250{
251 unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1);
252 struct zbud_page *zbpg = NULL;
253 unsigned budnum = -1U;
254 int i;
255
256 for (i = 0; i < ZBUD_MAX_BUDS; i++)
257 if (offset == offsetof(typeof(*zbpg), buddy[i])) {
258 budnum = i;
259 break;
260 }
261 BUG_ON(budnum == -1U);
262 return budnum;
263}
264
265static char *zbud_data(struct zbud_hdr *zh, unsigned size)
266{
267 struct zbud_page *zbpg;
268 char *p;
269 unsigned budnum;
270
271 ASSERT_SENTINEL(zh, ZBH);
272 budnum = zbud_budnum(zh);
273 BUG_ON(size == 0 || size > zbud_max_buddy_size());
274 zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
275 ASSERT_SPINLOCK(&zbpg->lock);
276 p = (char *)zbpg;
277 if (budnum == 0)
278 p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
279 CHUNK_MASK);
280 else if (budnum == 1)
281 p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
282 return p;
283}
284
285static void zbud_copy_from_pampd(char *data, size_t *size, struct zbud_hdr *zh)
286{
287 struct zbud_page *zbpg;
288 char *p;
289 unsigned budnum;
290
291 ASSERT_SENTINEL(zh, ZBH);
292 budnum = zbud_budnum(zh);
293 zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
294 spin_lock(&zbpg->lock);
295 BUG_ON(zh->size > *size);
296 p = (char *)zbpg;
297 if (budnum == 0)
298 p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
299 CHUNK_MASK);
300 else if (budnum == 1)
301 p += PAGE_SIZE - ((zh->size + CHUNK_SIZE - 1) & CHUNK_MASK);
302 /* client should be filled in by caller */
303 memcpy(data, p, zh->size);
304 *size = zh->size;
305 spin_unlock(&zbpg->lock);
306}
307
308/*
309 * zbud raw page management
310 */
311
312static struct zbud_page *zbud_alloc_raw_page(void)
313{
314 struct zbud_page *zbpg = NULL;
315 struct zbud_hdr *zh0, *zh1;
316 zbpg = zcache_get_free_page();
317 if (likely(zbpg != NULL)) {
318 INIT_LIST_HEAD(&zbpg->bud_list);
319 zh0 = &zbpg->buddy[0];
320 zh1 = &zbpg->buddy[1];
321 spin_lock_init(&zbpg->lock);
322 atomic_inc(&zcache_zbud_curr_raw_pages);
323 SET_SENTINEL(zbpg, ZBPG);
324 zh0->size = 0; zh1->size = 0;
325 tmem_oid_set_invalid(&zh0->oid);
326 tmem_oid_set_invalid(&zh1->oid);
327 }
328 return zbpg;
329}
330
331static void zbud_free_raw_page(struct zbud_page *zbpg)
332{
333 struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1];
334
335 ASSERT_SENTINEL(zbpg, ZBPG);
336 BUG_ON(!list_empty(&zbpg->bud_list));
337 ASSERT_SPINLOCK(&zbpg->lock);
338 BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
339 BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
340 INVERT_SENTINEL(zbpg, ZBPG);
341 spin_unlock(&zbpg->lock);
342 atomic_dec(&zcache_zbud_curr_raw_pages);
343 zcache_free_page(zbpg);
344}
345
346/*
347 * core zbud handling routines
348 */
349
350static unsigned zbud_free(struct zbud_hdr *zh)
351{
352 unsigned size;
353
354 ASSERT_SENTINEL(zh, ZBH);
355 BUG_ON(!tmem_oid_valid(&zh->oid));
356 size = zh->size;
357 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
358 zh->size = 0;
359 tmem_oid_set_invalid(&zh->oid);
360 INVERT_SENTINEL(zh, ZBH);
361 zcache_zbud_curr_zbytes -= size;
362 atomic_dec(&zcache_zbud_curr_zpages);
363 return size;
364}
365
366static void zbud_free_and_delist(struct zbud_hdr *zh)
367{
368 unsigned chunks;
369 struct zbud_hdr *zh_other;
370 unsigned budnum = zbud_budnum(zh), size;
371 struct zbud_page *zbpg =
372 container_of(zh, struct zbud_page, buddy[budnum]);
373
374 /* FIXME, should be BUG_ON, pool destruction path doesn't disable
375 * interrupts tmem_destroy_pool()->tmem_pampd_destroy_all_in_obj()->
376 * tmem_objnode_node_destroy()-> zcache_pampd_free() */
377 WARN_ON(!irqs_disabled());
378 spin_lock(&zbpg->lock);
379 if (list_empty(&zbpg->bud_list)) {
380 /* ignore zombie page... see zbud_evict_pages() */
381 spin_unlock(&zbpg->lock);
382 return;
383 }
384 size = zbud_free(zh);
385 ASSERT_SPINLOCK(&zbpg->lock);
386 zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
387 if (zh_other->size == 0) { /* was unbuddied: unlist and free */
388 chunks = zbud_size_to_chunks(size);
389 spin_lock(&zbud_budlists_spinlock);
390 BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
391 list_del_init(&zbpg->bud_list);
392 zbud_unbuddied[chunks].count--;
393 spin_unlock(&zbud_budlists_spinlock);
394 zbud_free_raw_page(zbpg);
395 } else { /* was buddied: move remaining buddy to unbuddied list */
396 chunks = zbud_size_to_chunks(zh_other->size);
397 spin_lock(&zbud_budlists_spinlock);
398 list_del_init(&zbpg->bud_list);
399 zcache_zbud_buddied_count--;
400 list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
401 zbud_unbuddied[chunks].count++;
402 spin_unlock(&zbud_budlists_spinlock);
403 spin_unlock(&zbpg->lock);
404 }
405}
406
407static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
408 struct tmem_oid *oid,
409 uint32_t index, struct page *page,
410 void *cdata, unsigned size)
411{
412 struct zbud_hdr *zh0, *zh1, *zh = NULL;
413 struct zbud_page *zbpg = NULL, *ztmp;
414 unsigned nchunks;
415 char *to;
416 int i, found_good_buddy = 0;
417
418 nchunks = zbud_size_to_chunks(size);
419 for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
420 spin_lock(&zbud_budlists_spinlock);
421 if (!list_empty(&zbud_unbuddied[i].list)) {
422 list_for_each_entry_safe(zbpg, ztmp,
423 &zbud_unbuddied[i].list, bud_list) {
424 if (spin_trylock(&zbpg->lock)) {
425 found_good_buddy = i;
426 goto found_unbuddied;
427 }
428 }
429 }
430 spin_unlock(&zbud_budlists_spinlock);
431 }
432 /* didn't find a good buddy, try allocating a new page */
433 zbpg = zbud_alloc_raw_page();
434 if (unlikely(zbpg == NULL))
435 goto out;
436 /* ok, have a page; the data is already compressed, so take locks and list it */
437 spin_lock(&zbud_budlists_spinlock);
438 spin_lock(&zbpg->lock);
439 list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
440 zbud_unbuddied[nchunks].count++;
441 zh = &zbpg->buddy[0];
442 goto init_zh;
443
444found_unbuddied:
445 ASSERT_SPINLOCK(&zbpg->lock);
446 zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
447 BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0)));
448 if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */
449 ASSERT_SENTINEL(zh0, ZBH);
450 zh = zh1;
451 } else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */
452 ASSERT_SENTINEL(zh1, ZBH);
453 zh = zh0;
454 } else
455 BUG();
456 list_del_init(&zbpg->bud_list);
457 zbud_unbuddied[found_good_buddy].count--;
458 list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
459 zcache_zbud_buddied_count++;
460
461init_zh:
462 SET_SENTINEL(zh, ZBH);
463 zh->size = size;
464 zh->index = index;
465 zh->oid = *oid;
466 zh->pool_id = pool_id;
467 zh->client_id = client_id;
468 to = zbud_data(zh, size);
469 memcpy(to, cdata, size);
470 spin_unlock(&zbpg->lock);
471 spin_unlock(&zbud_budlists_spinlock);
472 zbud_cumul_chunk_counts[nchunks]++;
473 atomic_inc(&zcache_zbud_curr_zpages);
474 zcache_zbud_cumul_zpages++;
475 zcache_zbud_curr_zbytes += size;
476 zcache_zbud_cumul_zbytes += size;
477out:
478 return zh;
479}
480
481static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
482{
483 struct zbud_page *zbpg;
484 unsigned budnum = zbud_budnum(zh);
485 size_t out_len = PAGE_SIZE;
486 char *to_va, *from_va;
487 unsigned size;
488 int ret = 0;
489
490 zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
491 spin_lock(&zbpg->lock);
492 if (list_empty(&zbpg->bud_list)) {
493 /* ignore zombie page... see zbud_evict_pages() */
494 ret = -EINVAL;
495 goto out;
496 }
497 ASSERT_SENTINEL(zh, ZBH);
498 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
499 to_va = kmap_atomic(page, KM_USER0);
500 size = zh->size;
501 from_va = zbud_data(zh, size);
502 ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
503 BUG_ON(ret != LZO_E_OK);
504 BUG_ON(out_len != PAGE_SIZE);
505 kunmap_atomic(to_va, KM_USER0);
506out:
507 spin_unlock(&zbpg->lock);
508 return ret;
509}
510
511/*
512 * The following routines handle shrinking of ephemeral pages by evicting
513 * pages "least valuable" first.
514 */
515
516static unsigned long zcache_evicted_raw_pages;
517static unsigned long zcache_evicted_buddied_pages;
518static unsigned long zcache_evicted_unbuddied_pages;
519
520static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
521 uint16_t poolid);
522static void zcache_put_pool(struct tmem_pool *pool);
523
524/*
525 * Flush and free all zbuds in a zbpg, then free the pageframe
526 */
527static void zbud_evict_zbpg(struct zbud_page *zbpg)
528{
529 struct zbud_hdr *zh;
530 int i, j;
531 uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS];
532 uint32_t index[ZBUD_MAX_BUDS];
533 struct tmem_oid oid[ZBUD_MAX_BUDS];
534 struct tmem_pool *pool;
535 unsigned long flags;
536
537 ASSERT_SPINLOCK(&zbpg->lock);
538 for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) {
539 zh = &zbpg->buddy[i];
540 if (zh->size) {
541 client_id[j] = zh->client_id;
542 pool_id[j] = zh->pool_id;
543 oid[j] = zh->oid;
544 index[j] = zh->index;
545 j++;
546 }
547 }
548 spin_unlock(&zbpg->lock);
549 for (i = 0; i < j; i++) {
550 pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
551 BUG_ON(pool == NULL);
552 local_irq_save(flags);
553 /* these flushes should dispose of any local storage */
554 tmem_flush_page(pool, &oid[i], index[i]);
555 local_irq_restore(flags);
556 zcache_put_pool(pool);
557 }
558}
559
560/*
561 * Free nr pages. This code is funky because we want to hold the locks
562 * protecting various lists for as short a time as possible, and in some
563 * circumstances the list may change asynchronously when the list lock is
564 * not held. In some cases we also trylock not only to avoid waiting on a
565 * page in use by another cpu, but also to avoid potential deadlock due to
566 * lock inversion.
567 */
568static void zbud_evict_pages(int nr)
569{
570 struct zbud_page *zbpg;
571 int i, newly_unused_pages = 0;
572
573
574 /* now try freeing unbuddied pages, starting with least space avail */
575 for (i = 0; i < MAX_CHUNK; i++) {
576retry_unbud_list_i:
577 spin_lock_bh(&zbud_budlists_spinlock);
578 if (list_empty(&zbud_unbuddied[i].list)) {
579 spin_unlock_bh(&zbud_budlists_spinlock);
580 continue;
581 }
582 list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
583 if (unlikely(!spin_trylock(&zbpg->lock)))
584 continue;
585 zbud_unbuddied[i].count--;
586 spin_unlock(&zbud_budlists_spinlock);
587 zcache_evicted_unbuddied_pages++;
588 /* want budlists unlocked when doing zbpg eviction */
589 zbud_evict_zbpg(zbpg);
590 newly_unused_pages++;
591 local_bh_enable();
592 if (--nr <= 0)
593 goto evict_unused;
594 goto retry_unbud_list_i;
595 }
596 spin_unlock_bh(&zbud_budlists_spinlock);
597 }
598
599 /* as a last resort, free buddied pages */
600retry_bud_list:
601 spin_lock_bh(&zbud_budlists_spinlock);
602 if (list_empty(&zbud_buddied_list)) {
603 spin_unlock_bh(&zbud_budlists_spinlock);
604 goto evict_unused;
605 }
606 list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
607 if (unlikely(!spin_trylock(&zbpg->lock)))
608 continue;
609 zcache_zbud_buddied_count--;
610 spin_unlock(&zbud_budlists_spinlock);
611 zcache_evicted_buddied_pages++;
612 /* want budlists unlocked when doing zbpg eviction */
613 zbud_evict_zbpg(zbpg);
614 newly_unused_pages++;
615 local_bh_enable();
616 if (--nr <= 0)
617 goto evict_unused;
618 goto retry_bud_list;
619 }
620 spin_unlock_bh(&zbud_budlists_spinlock);
621
622evict_unused:
623 return;
624}
625
626static DEFINE_PER_CPU(unsigned char *, zcache_remoteputmem);
627
628static int zbud_remotify_zbud(struct tmem_xhandle *xh, char *data,
629 size_t size)
630{
631 struct tmem_pool *pool;
632 int i, remotenode, ret = -1;
633 unsigned char cksum, *p;
634 unsigned long flags;
635
636 for (p = data, cksum = 0, i = 0; i < size; i++)
637 cksum += *p++;
638 ret = ramster_remote_put(xh, data, size, true, &remotenode);
639 if (ret == 0) {
640 /* data was successfully remoted so change the local version
641 * to point to the remote node where it landed */
642 pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh->pool_id);
643 BUG_ON(pool == NULL);
644 local_irq_save(flags);
645 /* tmem_replace will also free up any local space */
646 (void)tmem_replace(pool, &xh->oid, xh->index,
647 pampd_make_remote(remotenode, size, cksum));
648 local_irq_restore(flags);
649 zcache_put_pool(pool);
650 ramster_eph_pages_remoted++;
651 ret = 0;
652 } else
653 ramster_eph_pages_remote_failed++;
654 return ret;
655}
656
657static int zbud_remotify_zbpg(struct zbud_page *zbpg)
658{
659 struct zbud_hdr *zh1, *zh2 = NULL;
660 struct tmem_xhandle xh1, xh2 = { 0 };
661 char *data1 = NULL, *data2 = NULL;
662 size_t size1 = 0, size2 = 0;
663 int ret = 0;
664 unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem);
665
666 ASSERT_SPINLOCK(&zbpg->lock);
667 if (zbpg->buddy[0].size == 0)
668 zh1 = &zbpg->buddy[1];
669 else if (zbpg->buddy[1].size == 0)
670 zh1 = &zbpg->buddy[0];
671 else {
672 zh1 = &zbpg->buddy[0];
673 zh2 = &zbpg->buddy[1];
674 }
675 /* don't remotify pages that are already remotified */
676 if (zh1->client_id != LOCAL_CLIENT)
677 zh1 = NULL;
678 if ((zh2 != NULL) && (zh2->client_id != LOCAL_CLIENT))
679 zh2 = NULL;
680
681 /* copy the data and metadata so can release lock */
682 if (zh1 != NULL) {
683 xh1.client_id = zh1->client_id;
684 xh1.pool_id = zh1->pool_id;
685 xh1.oid = zh1->oid;
686 xh1.index = zh1->index;
687 size1 = zh1->size;
688 memcpy(tmpmem, zbud_data(zh1, size1), size1);
690 data1 = tmpmem;
691 tmpmem += size1;
692 }
693 if (zh2 != NULL) {
694 xh2.client_id = zh2->client_id;
695 xh2.pool_id = zh2->pool_id;
696 xh2.oid = zh2->oid;
697 xh2.index = zh2->index;
698 size2 = zh2->size;
699 memcpy(tmpmem, zbud_data(zh2, size2), size2);
700 data2 = tmpmem;
701 }
702 spin_unlock(&zbpg->lock);
703 preempt_enable();
704
705 /* OK, no locks held anymore, remotify one or both zbuds */
706 if (zh1 != NULL)
707 ret = zbud_remotify_zbud(&xh1, data1, size1);
708 if (zh2 != NULL)
709 ret |= zbud_remotify_zbud(&xh2, data2, size2);
710 return ret;
711}
712
713void zbud_remotify_pages(int nr)
714{
715 struct zbud_page *zbpg;
716 int i, ret;
717
718 /*
719 * for now just try remotifying unbuddied pages, starting with
720 * least space avail
721 */
722 for (i = 0; i < MAX_CHUNK; i++) {
723retry_unbud_list_i:
724 preempt_disable(); /* enable in zbud_remotify_zbpg */
725 spin_lock_bh(&zbud_budlists_spinlock);
726 if (list_empty(&zbud_unbuddied[i].list)) {
727 spin_unlock_bh(&zbud_budlists_spinlock);
728 preempt_enable();
729 continue; /* next i in for loop */
730 }
731 list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
732 if (unlikely(!spin_trylock(&zbpg->lock)))
733 continue; /* next list_for_each_entry */
734 zbud_unbuddied[i].count--;
735 /* want budlists unlocked when doing zbpg remotify */
736 spin_unlock_bh(&zbud_budlists_spinlock);
737 ret = zbud_remotify_zbpg(zbpg);
738 /* preemption is re-enabled in zbud_remotify_zbpg */
739 if (ret == 0) {
740 if (--nr <= 0)
741 goto out;
742 goto retry_unbud_list_i;
743 }
744 /* if fail to remotify any page, quit */
745 pr_err("TESTING zbud_remotify_pages failed on page,"
746 " trying to re-add\n");
747 spin_lock_bh(&zbud_budlists_spinlock);
748 spin_lock(&zbpg->lock);
749 list_add_tail(&zbpg->bud_list, &zbud_unbuddied[i].list);
750 zbud_unbuddied[i].count++;
751 spin_unlock(&zbpg->lock);
752 spin_unlock_bh(&zbud_budlists_spinlock);
753 pr_err("TESTING zbud_remotify_pages failed on page,"
754 " finished re-add\n");
755 goto out;
756 }
757 spin_unlock_bh(&zbud_budlists_spinlock);
758 preempt_enable();
759 }
760
761next_buddied_zbpg:
762 preempt_disable(); /* enable in zbud_remotify_zbpg */
763 spin_lock_bh(&zbud_budlists_spinlock);
764 if (list_empty(&zbud_buddied_list))
765 goto unlock_out;
766 list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
767 if (unlikely(!spin_trylock(&zbpg->lock)))
768 continue; /* next list_for_each_entry */
769 zcache_zbud_buddied_count--;
770 /* want budlists unlocked when doing zbpg remotify */
771 spin_unlock_bh(&zbud_budlists_spinlock);
772 ret = zbud_remotify_zbpg(zbpg);
773 /* preemption is re-enabled in zbud_remotify_zbpg */
774 if (ret == 0) {
775 if (--nr <= 0)
776 goto out;
777 goto next_buddied_zbpg;
778 }
779 /* if fail to remotify any page, quit */
780 pr_err("TESTING zbud_remotify_pages failed on BUDDIED page,"
781 " trying to re-add\n");
782 spin_lock_bh(&zbud_budlists_spinlock);
783 spin_lock(&zbpg->lock);
784 list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
785 zcache_zbud_buddied_count++;
786 spin_unlock(&zbpg->lock);
787 spin_unlock_bh(&zbud_budlists_spinlock);
788 pr_err("TESTING zbud_remotify_pages failed on BUDDIED page,"
789 " finished re-add\n");
790 goto out;
791 }
792unlock_out:
793 spin_unlock_bh(&zbud_budlists_spinlock);
794 preempt_enable();
795out:
796 return;
797}
798
799/* the "flush list" asynchronously collects pages to remotely flush */
800#define FLUSH_ENTIRE_OBJECT ((uint32_t)-1)
801static void ramster_flnode_free(struct flushlist_node *,
802 struct tmem_pool *);
803
804static void zcache_remote_flush_page(struct flushlist_node *flnode)
805{
806 struct tmem_xhandle *xh;
807 int remotenode, ret;
808
809 preempt_disable();
810 xh = &flnode->xh;
811 remotenode = flnode->xh.client_id;
812 ret = ramster_remote_flush(xh, remotenode);
813 if (ret >= 0)
814 ramster_remote_pages_flushed++;
815 else
816 ramster_remote_page_flushes_failed++;
817 preempt_enable_no_resched();
818 ramster_flnode_free(flnode, NULL);
819}
820
821static void zcache_remote_flush_object(struct flushlist_node *flnode)
822{
823 struct tmem_xhandle *xh;
824 int remotenode, ret;
825
826 preempt_disable();
827 xh = &flnode->xh;
828 remotenode = flnode->xh.client_id;
829 ret = ramster_remote_flush_object(xh, remotenode);
830 if (ret >= 0)
831 ramster_remote_objects_flushed++;
832 else
833 ramster_remote_object_flushes_failed++;
834 preempt_enable_no_resched();
835 ramster_flnode_free(flnode, NULL);
836}
837
838static void zcache_remote_eph_put(struct zbud_hdr *zbud)
839{
840 /* FIXME */
841}
842
843static void zcache_remote_pers_put(struct zv_hdr *zv)
844{
845 struct tmem_xhandle xh;
846 uint16_t size;
847 bool ephemeral;
848 int remotenode, ret = -1;
849 char *data;
850 struct tmem_pool *pool;
851 unsigned long flags;
852 unsigned char cksum;
853 char *p;
854 int i;
855 unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem);
856
857 ASSERT_SENTINEL(zv, ZVH);
858 BUG_ON(zv->client_id != LOCAL_CLIENT);
859 local_bh_disable();
860 xh.client_id = zv->client_id;
861 xh.pool_id = zv->pool_id;
862 xh.oid = zv->oid;
863 xh.index = zv->index;
864 size = xv_get_object_size(zv) - sizeof(*zv);
865 BUG_ON(size == 0 || size > zv_max_page_size);
866 data = (char *)zv + sizeof(*zv);
867 for (p = data, cksum = 0, i = 0; i < size; i++)
868 cksum += *p++;
869 memcpy(tmpmem, data, size);
870 data = tmpmem;
871 pool = zcache_get_pool_by_id(zv->client_id, zv->pool_id);
872 ephemeral = is_ephemeral(pool);
873 zcache_put_pool(pool);
874 /* now OK to release lock set in caller */
875 spin_unlock(&zcache_rem_op_list_lock);
876 local_bh_enable();
877 preempt_disable();
878 ret = ramster_remote_put(&xh, data, size, ephemeral, &remotenode);
879 preempt_enable_no_resched();
880 if (ret != 0) {
881 /*
882 * This is some form of a memory leak... if the remote put
883 * fails, there will never be another attempt to remotify
884 * this page. But since we've dropped the zv pointer,
885 * the page may have been freed or the data replaced
886 * so we can't just "put it back" in the remote op list.
887 * Even if we could, not sure where to put it in the list
888 * because there may be flushes that must be strictly
889 * ordered vs the put. So leave this as a FIXME for now.
890 * But count them so we know if it becomes a problem.
891 */
892 ramster_pers_pages_remote_failed++;
893 goto out;
894 } else
895 atomic_inc(&ramster_remote_pers_pages);
896 ramster_pers_pages_remoted++;
897 /*
898 * data was successfully remoted so change the local version to
899 * point to the remote node where it landed
900 */
901 local_bh_disable();
902 pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh.pool_id);
903 local_irq_save(flags);
904 (void)tmem_replace(pool, &xh.oid, xh.index,
905 pampd_make_remote(remotenode, size, cksum));
906 local_irq_restore(flags);
907 zcache_put_pool(pool);
908 local_bh_enable();
909out:
910 return;
911}
912
913static void zcache_do_remotify_ops(int nr)
914{
915 struct ramster_remotify_hdr *rem_op;
916 union remotify_list_node *u;
917
918 while (1) {
919 if (!nr)
920 goto out;
921 spin_lock(&zcache_rem_op_list_lock);
922 if (list_empty(&zcache_rem_op_list)) {
923 spin_unlock(&zcache_rem_op_list_lock);
924 goto out;
925 }
926 rem_op = list_first_entry(&zcache_rem_op_list,
927 struct ramster_remotify_hdr, list);
928 list_del_init(&rem_op->list);
929 if (rem_op->op != RAMSTER_REMOTIFY_PERS_PUT)
930 spin_unlock(&zcache_rem_op_list_lock);
931 u = (union remotify_list_node *)rem_op;
932 switch (rem_op->op) {
933 case RAMSTER_REMOTIFY_EPH_PUT:
934 BUG();
935 zcache_remote_eph_put((struct zbud_hdr *)rem_op);
936 break;
937 case RAMSTER_REMOTIFY_PERS_PUT:
938 zcache_remote_pers_put((struct zv_hdr *)rem_op);
939 break;
940 case RAMSTER_REMOTIFY_FLUSH_PAGE:
941 zcache_remote_flush_page((struct flushlist_node *)u);
942 break;
943 case RAMSTER_REMOTIFY_FLUSH_OBJ:
944 zcache_remote_flush_object((struct flushlist_node *)u);
945 break;
946 default:
947 BUG();
948 }
949 nr--; /* honor the work limit passed by the caller */
950 }
950out:
951 return;
952}
953
954/*
955 * Communicate interface revision with userspace
956 */
957#include "cluster/ramster_nodemanager.h"
958static unsigned long ramster_interface_revision = R2NM_API_VERSION;
959
960/*
961 * For now, just push over a few pages every few seconds to
962 * ensure that it basically works
963 */
964static struct workqueue_struct *ramster_remotify_workqueue;
965static void ramster_remotify_process(struct work_struct *work);
966static DECLARE_DELAYED_WORK(ramster_remotify_worker,
967 ramster_remotify_process);
968
969static void ramster_remotify_queue_delayed_work(unsigned long delay)
970{
971 if (!queue_delayed_work(ramster_remotify_workqueue,
972 &ramster_remotify_worker, delay))
973 pr_err("ramster_remotify: bad workqueue\n");
974}
975
976
977static int use_frontswap;
978static int use_cleancache;
979static int ramster_remote_target_nodenum = -1;
980static void ramster_remotify_process(struct work_struct *work)
981{
982 static bool remotify_in_progress;
983
984 BUG_ON(irqs_disabled());
985 if (remotify_in_progress)
986 ramster_remotify_queue_delayed_work(HZ);
987 else if (ramster_remote_target_nodenum != -1) {
988 remotify_in_progress = true;
989#ifdef CONFIG_CLEANCACHE
990 if (use_cleancache && ramster_eph_remotify_enable)
991 zbud_remotify_pages(5000); /* FIXME is this a good number? */
992#endif
993#ifdef CONFIG_FRONTSWAP
994 if (use_frontswap && ramster_pers_remotify_enable)
995 zcache_do_remotify_ops(500); /* FIXME is this a good number? */
996#endif
997 remotify_in_progress = false;
998 ramster_remotify_queue_delayed_work(HZ);
999 }
1000}
1001
1002static void ramster_remotify_init(void)
1003{
1004 unsigned long n = 60UL;
1005 ramster_remotify_workqueue =
1006 create_singlethread_workqueue("ramster_remotify");
1007 ramster_remotify_queue_delayed_work(n * HZ);
1008}
1009
1010
1011static void zbud_init(void)
1012{
1013 int i;
1014
1015 INIT_LIST_HEAD(&zbud_buddied_list);
1016 zcache_zbud_buddied_count = 0;
1017 for (i = 0; i < NCHUNKS; i++) {
1018 INIT_LIST_HEAD(&zbud_unbuddied[i].list);
1019 zbud_unbuddied[i].count = 0;
1020 }
1021}
1022
1023#ifdef CONFIG_SYSFS
1024/*
1025 * These sysfs routines show a nice distribution of how many zbpg's are
1026 * currently (and have ever been placed) in each unbuddied list. It's fun
1027 * to watch but can probably go away before final merge.
1028 */
1029static int zbud_show_unbuddied_list_counts(char *buf)
1030{
1031 int i;
1032 char *p = buf;
1033
1034 for (i = 0; i < NCHUNKS; i++)
1035 p += sprintf(p, "%u ", zbud_unbuddied[i].count);
1036 return p - buf;
1037}
1038
1039static int zbud_show_cumul_chunk_counts(char *buf)
1040{
1041 unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0;
1042 unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0;
1043 unsigned long total_chunks_lte_42 = 0;
1044 char *p = buf;
1045
1046 for (i = 0; i < NCHUNKS; i++) {
1047 p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]);
1048 chunks += zbud_cumul_chunk_counts[i];
1049 total_chunks += zbud_cumul_chunk_counts[i];
1050 sum_total_chunks += i * zbud_cumul_chunk_counts[i];
1051 if (i == 21)
1052 total_chunks_lte_21 = total_chunks;
1053 if (i == 32)
1054 total_chunks_lte_32 = total_chunks;
1055 if (i == 42)
1056 total_chunks_lte_42 = total_chunks;
1057 }
1058 p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n",
1059 total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42,
1060 chunks == 0 ? 0 : sum_total_chunks / chunks);
1061 return p - buf;
1062}
1063#endif
1064
1065/**********
1066 * This "zv" PAM implementation combines the TLSF-based xvMalloc
1067 * with lzo1x compression to maximize the amount of data that can
1068 * be packed into a physical page.
1069 *
1070 * Zv represents a PAM page with the index and object (plus a "size" value
1071 * necessary for decompression) immediately preceding the compressed data.
1072 */
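
/*
 * Layout sketch (editor's note): for a page compressing to clen bytes,
 * zv_create() asks xvmalloc for clen + sizeof(struct zv_hdr) bytes, so
 * header and payload are contiguous and decompression can start at
 * (char *)zv + sizeof(*zv) with no further lookup.
 */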
1073
1074/* rudimentary policy limits */
1075/* total number of persistent pages may not exceed this percentage */
1076static unsigned int zv_page_count_policy_percent = 75;
1077/*
1078 * byte count defining poor compression; pages with greater zsize will be
1079 * rejected
1080 */
1081static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
1082/*
1083 * byte count defining poor *mean* compression; pages with greater zsize
1084 * will be rejected until sufficient better-compressed pages are accepted
1085 * driving the mean below this threshold
1086 */
1087static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
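
/*
 * For a 4K PAGE_SIZE these defaults work out to zv_max_zsize == 3584 and
 * zv_max_mean_zsize == 2560: an individual page may compress as poorly as
 * 7/8 of a page, provided the running mean stays at or below 5/8 of a page.
 */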
1088
1089static atomic_t zv_curr_dist_counts[NCHUNKS];
1090static atomic_t zv_cumul_dist_counts[NCHUNKS];
1091
1092
1093static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
1094 struct tmem_oid *oid, uint32_t index,
1095 void *cdata, unsigned clen)
1096{
1097 struct page *page;
1098 struct zv_hdr *zv = NULL;
1099 uint32_t offset;
1100 int alloc_size = clen + sizeof(struct zv_hdr);
1101 int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
1102 int ret;
1103
1104 BUG_ON(!irqs_disabled());
1105 BUG_ON(chunks >= NCHUNKS);
1106 ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr),
1107 &page, &offset, ZCACHE_GFP_MASK);
1108 if (unlikely(ret))
1109 goto out;
1110 atomic_inc(&zv_curr_dist_counts[chunks]);
1111 atomic_inc(&zv_cumul_dist_counts[chunks]);
1112 zv = kmap_atomic(page, KM_USER0) + offset;
1113 zv->index = index;
1114 zv->oid = *oid;
1115 zv->pool_id = pool_id;
1116 SET_SENTINEL(zv, ZVH);
1117 INIT_LIST_HEAD(&zv->rem_op.list);
1118 zv->client_id = get_client_id_from_client(cli);
1119 zv->rem_op.op = RAMSTER_REMOTIFY_PERS_PUT;
1120 if (zv->client_id == LOCAL_CLIENT) {
1121 spin_lock(&zcache_rem_op_list_lock);
1122 list_add_tail(&zv->rem_op.list, &zcache_rem_op_list);
1123 spin_unlock(&zcache_rem_op_list_lock);
1124 }
1125 memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
1126 kunmap_atomic(zv, KM_USER0);
1127out:
1128 return zv;
1129}
1130
1131/* similar to zv_create, but just reserve space, no data yet */
1132static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
1133 struct tmem_oid *oid, uint32_t index,
1134 unsigned clen)
1135{
1136 struct zcache_client *cli = pool->client;
1137 struct page *page;
1138 struct zv_hdr *zv = NULL;
1139 uint32_t offset;
1140 int ret;
1141
1142 BUG_ON(!irqs_disabled());
1143 BUG_ON(!is_local_client(pool->client));
1144 ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr),
1145 &page, &offset, ZCACHE_GFP_MASK);
1146 if (unlikely(ret))
1147 goto out;
1148 zv = kmap_atomic(page, KM_USER0) + offset;
1149 SET_SENTINEL(zv, ZVH);
1150 INIT_LIST_HEAD(&zv->rem_op.list);
1151 zv->client_id = LOCAL_CLIENT;
1152 zv->rem_op.op = RAMSTER_INTRANSIT_PERS;
1153 zv->index = index;
1154 zv->oid = *oid;
1155 zv->pool_id = pool->pool_id;
1156 kunmap_atomic(zv, KM_USER0);
1157out:
1158 return zv;
1159}
1160
1161static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
1162{
1163 unsigned long flags;
1164 struct page *page;
1165 uint32_t offset;
1166 uint16_t size = xv_get_object_size(zv);
1167 int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
1168
1169 ASSERT_SENTINEL(zv, ZVH);
1170 BUG_ON(chunks >= NCHUNKS);
1171 atomic_dec(&zv_curr_dist_counts[chunks]);
1172 size -= sizeof(*zv);
1173 spin_lock(&zcache_rem_op_list_lock);
1175 BUG_ON(size == 0);
1176 INVERT_SENTINEL(zv, ZVH);
1177 if (!list_empty(&zv->rem_op.list))
1178 list_del_init(&zv->rem_op.list);
1179 spin_unlock(&zcache_rem_op_list_lock);
1180 page = virt_to_page(zv);
1181 offset = (unsigned long)zv & ~PAGE_MASK;
1182 local_irq_save(flags);
1183 xv_free(xvpool, page, offset);
1184 local_irq_restore(flags);
1185}
1186
1187static void zv_decompress(struct page *page, struct zv_hdr *zv)
1188{
1189 size_t clen = PAGE_SIZE;
1190 char *to_va;
1191 unsigned size;
1192 int ret;
1193
1194 ASSERT_SENTINEL(zv, ZVH);
1195 size = xv_get_object_size(zv) - sizeof(*zv);
1196 BUG_ON(size == 0);
1197 to_va = kmap_atomic(page, KM_USER0);
1198 ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
1199 size, to_va, &clen);
1200 kunmap_atomic(to_va, KM_USER0);
1201 BUG_ON(ret != LZO_E_OK);
1202 BUG_ON(clen != PAGE_SIZE);
1203}
1204
1205static void zv_copy_from_pampd(char *data, size_t *bufsize, struct zv_hdr *zv)
1206{
1207 unsigned size;
1208
1209 ASSERT_SENTINEL(zv, ZVH);
1210 size = xv_get_object_size(zv) - sizeof(*zv);
1211 BUG_ON(size == 0 || size > zv_max_page_size);
1212 BUG_ON(size > *bufsize);
1213 memcpy(data, (char *)zv + sizeof(*zv), size);
1214 *bufsize = size;
1215}
1216
1217static void zv_copy_to_pampd(struct zv_hdr *zv, char *data, size_t size)
1218{
1219 unsigned zv_size;
1220
1221 ASSERT_SENTINEL(zv, ZVH);
1222 zv_size = xv_get_object_size(zv) - sizeof(*zv);
1223 BUG_ON(zv_size != size);
1224 BUG_ON(zv_size == 0 || zv_size > zv_max_page_size);
1225 memcpy((char *)zv + sizeof(*zv), data, size);
1226}
1227
1228#ifdef CONFIG_SYSFS
1229/*
1230 * show a distribution of compression stats for zv pages.
1231 */
1232
1233static int zv_curr_dist_counts_show(char *buf)
1234{
1235 unsigned long i, n, chunks = 0, sum_total_chunks = 0;
1236 char *p = buf;
1237
1238 for (i = 0; i < NCHUNKS; i++) {
1239 n = atomic_read(&zv_curr_dist_counts[i]);
1240 p += sprintf(p, "%lu ", n);
1241 chunks += n;
1242 sum_total_chunks += i * n;
1243 }
1244 p += sprintf(p, "mean:%lu\n",
1245 chunks == 0 ? 0 : sum_total_chunks / chunks);
1246 return p - buf;
1247}
1248
1249static int zv_cumul_dist_counts_show(char *buf)
1250{
1251 unsigned long i, n, chunks = 0, sum_total_chunks = 0;
1252 char *p = buf;
1253
1254 for (i = 0; i < NCHUNKS; i++) {
1255 n = atomic_read(&zv_cumul_dist_counts[i]);
1256 p += sprintf(p, "%lu ", n);
1257 chunks += n;
1258 sum_total_chunks += i * n;
1259 }
1260 p += sprintf(p, "mean:%lu\n",
1261 chunks == 0 ? 0 : sum_total_chunks / chunks);
1262 return p - buf;
1263}
1264
1265/*
1266 * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
1267 * pages that don't compress to less than this value (including metadata
1268 * overhead) to be rejected. We don't allow the value to get too close
1269 * to PAGE_SIZE.
1270 */
1271static ssize_t zv_max_zsize_show(struct kobject *kobj,
1272 struct kobj_attribute *attr,
1273 char *buf)
1274{
1275 return sprintf(buf, "%u\n", zv_max_zsize);
1276}
1277
1278static ssize_t zv_max_zsize_store(struct kobject *kobj,
1279 struct kobj_attribute *attr,
1280 const char *buf, size_t count)
1281{
1282 unsigned long val;
1283 int err;
1284
1285 if (!capable(CAP_SYS_ADMIN))
1286 return -EPERM;
1287
1288 err = kstrtoul(buf, 10, &val);
1289 if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
1290 return -EINVAL;
1291 zv_max_zsize = val;
1292 return count;
1293}
1294
1295/*
1296 * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
1297 * pages that don't compress to less than this value (including metadata
1298 * overhead) to be rejected UNLESS the mean compression is also smaller
1299 * than this value. In other words, we are load-balancing-by-zsize the
1300 * accepted pages. Again, we don't allow the value to get too close
1301 * to PAGE_SIZE.
1302 */
1303static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
1304 struct kobj_attribute *attr,
1305 char *buf)
1306{
1307 return sprintf(buf, "%u\n", zv_max_mean_zsize);
1308}
1309
1310static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
1311 struct kobj_attribute *attr,
1312 const char *buf, size_t count)
1313{
1314 unsigned long val;
1315 int err;
1316
1317 if (!capable(CAP_SYS_ADMIN))
1318 return -EPERM;
1319
1320 err = kstrtoul(buf, 10, &val);
1321 if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
1322 return -EINVAL;
1323 zv_max_mean_zsize = val;
1324 return count;
1325}
1326
1327/*
1328 * setting zv_page_count_policy_percent via sysfs sets an upper bound of
1329 * persistent (e.g. swap) pages that will be retained according to:
1330 * (zv_page_count_policy_percent * totalram_pages) / 100
1331 * when that limit is reached, further puts will be rejected (until
1332 * some pages have been flushed). Note that, due to compression,
1333 * this percentage may exceed 100; it defaults to 75 and we set an
1334 * arbitrary limit of 150. A poor choice will almost certainly result
1335 * in OOMs, so this value should only be changed prudently.
1336 */
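/*
 * Worked example (editor's note): with totalram_pages == 1048576 (4GB of
 * 4K pages) and the default of 75, at most (75 * 1048576) / 100 == 786432
 * persistent pages' worth of uncompressed data is retained.
 */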
1337static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj,
1338 struct kobj_attribute *attr,
1339 char *buf)
1340{
1341 return sprintf(buf, "%u\n", zv_page_count_policy_percent);
1342}
1343
1344static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
1345 struct kobj_attribute *attr,
1346 const char *buf, size_t count)
1347{
1348 unsigned long val;
1349 int err;
1350
1351 if (!capable(CAP_SYS_ADMIN))
1352 return -EPERM;
1353
1354 err = kstrtoul(buf, 10, &val);
1355 if (err || (val == 0) || (val > 150))
1356 return -EINVAL;
1357 zv_page_count_policy_percent = val;
1358 return count;
1359}
1360
1361static struct kobj_attribute zcache_zv_max_zsize_attr = {
1362 .attr = { .name = "zv_max_zsize", .mode = 0644 },
1363 .show = zv_max_zsize_show,
1364 .store = zv_max_zsize_store,
1365};
1366
1367static struct kobj_attribute zcache_zv_max_mean_zsize_attr = {
1368 .attr = { .name = "zv_max_mean_zsize", .mode = 0644 },
1369 .show = zv_max_mean_zsize_show,
1370 .store = zv_max_mean_zsize_store,
1371};
1372
1373static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = {
1374 .attr = { .name = "zv_page_count_policy_percent",
1375 .mode = 0644 },
1376 .show = zv_page_count_policy_percent_show,
1377 .store = zv_page_count_policy_percent_store,
1378};
1379#endif
1380
1381/*
1382 * zcache core code starts here
1383 */
1384
1385/* useful stats not collected by cleancache or frontswap */
1386static unsigned long zcache_flush_total;
1387static unsigned long zcache_flush_found;
1388static unsigned long zcache_flobj_total;
1389static unsigned long zcache_flobj_found;
1390static unsigned long zcache_failed_eph_puts;
1391static unsigned long zcache_nonactive_puts;
1392static unsigned long zcache_failed_pers_puts;
1393
1394/*
1395 * Tmem operations assume the poolid implies the invoking client.
1396 * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
1397 * RAMster has each client numbered by cluster node, and a KVM version
1398 * of zcache would have one client per guest and each client might
1399 * have a poolid==N.
1400 */
1401static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
1402{
1403 struct tmem_pool *pool = NULL;
1404 struct zcache_client *cli = NULL;
1405
1406 if (cli_id == LOCAL_CLIENT)
1407 cli = &zcache_host;
1408 else {
1409 if (cli_id >= MAX_CLIENTS)
1410 goto out;
1411 cli = &zcache_clients[cli_id];
1412 if (cli == NULL)
1413 goto out;
1414 atomic_inc(&cli->refcount);
1415 }
1416 if (poolid < MAX_POOLS_PER_CLIENT) {
1417 pool = cli->tmem_pools[poolid];
1418 if (pool != NULL)
1419 atomic_inc(&pool->refcount);
1420 }
1421out:
1422 return pool;
1423}
1424
1425static void zcache_put_pool(struct tmem_pool *pool)
1426{
1427 struct zcache_client *cli = NULL;
1428
1429 if (pool == NULL)
1430 BUG();
1431 cli = pool->client;
1432 atomic_dec(&pool->refcount);
1433 atomic_dec(&cli->refcount);
1434}
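
/*
 * Usage pattern (editor's sketch): every successful zcache_get_pool_by_id()
 * must be balanced by zcache_put_pool() to drop the reference(s) it took.
 */
#if 0
static void example_pool_usage(uint16_t cli_id, uint16_t poolid)
{
	struct tmem_pool *pool = zcache_get_pool_by_id(cli_id, poolid);

	if (pool == NULL)
		return;
	/* ... tmem_put()/tmem_get()/tmem_flush_page() against pool ... */
	zcache_put_pool(pool);
}
#endif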
1435
1436int zcache_new_client(uint16_t cli_id)
1437{
1438 struct zcache_client *cli = NULL;
1439 int ret = -1;
1440
1441 if (cli_id == LOCAL_CLIENT)
1442 cli = &zcache_host;
1443 else if ((unsigned int)cli_id < MAX_CLIENTS)
1444 cli = &zcache_clients[cli_id];
1445 if (cli == NULL)
1446 goto out;
1447 if (cli->allocated)
1448 goto out;
1449 cli->allocated = 1;
1450#ifdef CONFIG_FRONTSWAP
1451 cli->xvpool = xv_create_pool();
1452 if (cli->xvpool == NULL)
1453 goto out;
1454#endif
1455 ret = 0;
1456out:
1457 return ret;
1458}
1459
1460/* counters for debugging */
1461static unsigned long zcache_failed_get_free_pages;
1462static unsigned long zcache_failed_alloc;
1463static unsigned long zcache_put_to_flush;
1464
1465/*
1466 * for now, used named slabs so can easily track usage; later can
1467 * either just use kmalloc, or perhaps add a slab-like allocator
1468 * to more carefully manage total memory utilization
1469 */
1470static struct kmem_cache *zcache_objnode_cache;
1471static struct kmem_cache *zcache_obj_cache;
1472static struct kmem_cache *ramster_flnode_cache;
1473static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0);
1474static unsigned long zcache_curr_obj_count_max;
1475static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0);
1476static unsigned long zcache_curr_objnode_count_max;
1477
1478/*
1479 * to avoid memory allocation recursion (e.g. due to direct reclaim), we
1480 * preload all necessary data structures so the hostops callbacks never
1481 * actually do a malloc
1482 */
1483struct zcache_preload {
1484 void *page;
1485 struct tmem_obj *obj;
1486 int nr;
1487 struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
1488 struct flushlist_node *flnode;
1489};
1490static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
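
/*
 * Usage sketch (editor's note): zcache_do_preload() returns with preemption
 * still disabled on success, so the subsequent tmem operation on this cpu
 * can draw obj, objnodes, flnode and a raw page from the per-cpu stash
 * without ever calling an allocator.
 */
#if 0
static int example_put_path(struct tmem_pool *pool)
{
	if (zcache_do_preload(pool) < 0)
		return -ENOMEM;		/* preemption was re-enabled on failure */
	/* ... tmem_put() here; hostops callbacks pop from zcache_preloads ... */
	preempt_enable();
	return 0;
}
#endif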
1491
1492static int zcache_do_preload(struct tmem_pool *pool)
1493{
1494 struct zcache_preload *kp;
1495 struct tmem_objnode *objnode;
1496 struct tmem_obj *obj;
1497 struct flushlist_node *flnode;
1498 void *page;
1499 int ret = -ENOMEM;
1500
1501 if (unlikely(zcache_objnode_cache == NULL))
1502 goto out;
1503 if (unlikely(zcache_obj_cache == NULL))
1504 goto out;
1505 preempt_disable();
1506 kp = &__get_cpu_var(zcache_preloads);
1507 while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
1508 preempt_enable_no_resched();
1509 objnode = kmem_cache_alloc(zcache_objnode_cache,
1510 ZCACHE_GFP_MASK);
1511 if (unlikely(objnode == NULL)) {
1512 zcache_failed_alloc++;
1513 goto out;
1514 }
1515 preempt_disable();
1516 kp = &__get_cpu_var(zcache_preloads);
1517 if (kp->nr < ARRAY_SIZE(kp->objnodes))
1518 kp->objnodes[kp->nr++] = objnode;
1519 else
1520 kmem_cache_free(zcache_objnode_cache, objnode);
1521 }
1522 preempt_enable_no_resched();
1523 obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
1524 if (unlikely(obj == NULL)) {
1525 zcache_failed_alloc++;
1526 goto out;
1527 }
1528 flnode = kmem_cache_alloc(ramster_flnode_cache, ZCACHE_GFP_MASK);
1529 if (unlikely(flnode == NULL)) {
1530 zcache_failed_alloc++;
1531 goto out;
1532 }
1533 if (is_ephemeral(pool)) {
1534 page = (void *)__get_free_page(ZCACHE_GFP_MASK);
1535 if (unlikely(page == NULL)) {
1536 zcache_failed_get_free_pages++;
1537 kmem_cache_free(zcache_obj_cache, obj);
1538 kmem_cache_free(ramster_flnode_cache, flnode);
1539 goto out;
1540 }
1541 }
1542 preempt_disable();
1543 kp = &__get_cpu_var(zcache_preloads);
1544 if (kp->obj == NULL)
1545 kp->obj = obj;
1546 else
1547 kmem_cache_free(zcache_obj_cache, obj);
1548 if (kp->flnode == NULL)
1549 kp->flnode = flnode;
1550 else
1551 kmem_cache_free(ramster_flnode_cache, flnode);
1552 if (is_ephemeral(pool)) {
1553 if (kp->page == NULL)
1554 kp->page = page;
1555 else
1556 free_page((unsigned long)page);
1557 }
1558 ret = 0;
1559out:
1560 return ret;
1561}
1562
1563static int ramster_do_preload_flnode_only(struct tmem_pool *pool)
1564{
1565 struct zcache_preload *kp;
1566 struct flushlist_node *flnode;
1567 int ret = -ENOMEM;
1568
1569 BUG_ON(!irqs_disabled());
1570 if (unlikely(ramster_flnode_cache == NULL))
1571 BUG();
1572 kp = &__get_cpu_var(zcache_preloads);
1573 flnode = kmem_cache_alloc(ramster_flnode_cache, GFP_ATOMIC);
1574 if (unlikely(flnode == NULL) && kp->flnode == NULL)
1575 BUG(); /* FIXME handle more gracefully, but how??? */
1576 else if (kp->flnode == NULL)
1577 kp->flnode = flnode;
1578 else
1579 kmem_cache_free(ramster_flnode_cache, flnode);
1580 return ret;
1581}
1582
1583static void *zcache_get_free_page(void)
1584{
1585 struct zcache_preload *kp;
1586 void *page;
1587
1588 kp = &__get_cpu_var(zcache_preloads);
1589 page = kp->page;
1590 BUG_ON(page == NULL);
1591 kp->page = NULL;
1592 return page;
1593}
1594
1595static void zcache_free_page(void *p)
1596{
1597 free_page((unsigned long)p);
1598}
1599
1600/*
1601 * zcache implementation for tmem host ops
1602 */
1603
1604static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
1605{
1606 struct tmem_objnode *objnode = NULL;
1607 unsigned long count;
1608 struct zcache_preload *kp;
1609
1610 kp = &__get_cpu_var(zcache_preloads);
1611 if (kp->nr <= 0)
1612 goto out;
1613 objnode = kp->objnodes[kp->nr - 1];
1614 BUG_ON(objnode == NULL);
1615 kp->objnodes[kp->nr - 1] = NULL;
1616 kp->nr--;
1617 count = atomic_inc_return(&zcache_curr_objnode_count);
1618 if (count > zcache_curr_objnode_count_max)
1619 zcache_curr_objnode_count_max = count;
1620out:
1621 return objnode;
1622}
1623
1624static void zcache_objnode_free(struct tmem_objnode *objnode,
1625 struct tmem_pool *pool)
1626{
1627 atomic_dec(&zcache_curr_objnode_count);
1628 BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0);
1629 kmem_cache_free(zcache_objnode_cache, objnode);
1630}
1631
1632static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
1633{
1634 struct tmem_obj *obj = NULL;
1635 unsigned long count;
1636 struct zcache_preload *kp;
1637
1638 kp = &__get_cpu_var(zcache_preloads);
1639 obj = kp->obj;
1640 BUG_ON(obj == NULL);
1641 kp->obj = NULL;
1642 count = atomic_inc_return(&zcache_curr_obj_count);
1643 if (count > zcache_curr_obj_count_max)
1644 zcache_curr_obj_count_max = count;
1645 return obj;
1646}
1647
1648static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
1649{
1650 atomic_dec(&zcache_curr_obj_count);
1651 BUG_ON(atomic_read(&zcache_curr_obj_count) < 0);
1652 kmem_cache_free(zcache_obj_cache, obj);
1653}
1654
1655static struct flushlist_node *ramster_flnode_alloc(struct tmem_pool *pool)
1656{
1657 struct flushlist_node *flnode = NULL;
1658 struct zcache_preload *kp;
1659 int count;
1660
1661 kp = &__get_cpu_var(zcache_preloads);
1662 flnode = kp->flnode;
1663 BUG_ON(flnode == NULL);
1664 kp->flnode = NULL;
1665 count = atomic_inc_return(&ramster_curr_flnode_count);
1666 if (count > ramster_curr_flnode_count_max)
1667 ramster_curr_flnode_count_max = count;
1668 return flnode;
1669}
1670
1671static void ramster_flnode_free(struct flushlist_node *flnode,
1672 struct tmem_pool *pool)
1673{
1674 atomic_dec(&ramster_curr_flnode_count);
1675 BUG_ON(atomic_read(&ramster_curr_flnode_count) < 0);
1676 kmem_cache_free(ramster_flnode_cache, flnode);
1677}
1678
1679static struct tmem_hostops zcache_hostops = {
1680 .obj_alloc = zcache_obj_alloc,
1681 .obj_free = zcache_obj_free,
1682 .objnode_alloc = zcache_objnode_alloc,
1683 .objnode_free = zcache_objnode_free,
1684};
1685
1686/*
1687 * zcache implementations for PAM page descriptor ops
1688 */
1689
1690
1691static inline void dec_and_check(atomic_t *pvar)
1692{
1693 atomic_dec(pvar);
1694 /* later when all accounting is fixed, make this a BUG */
1695 WARN_ON_ONCE(atomic_read(pvar) < 0);
1696}
1697
1698static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0);
1699static unsigned long zcache_curr_eph_pampd_count_max;
1700static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
1701static unsigned long zcache_curr_pers_pampd_count_max;
1702
1703/* forward reference */
1704static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
1705
1706static int zcache_pampd_eph_create(char *data, size_t size, bool raw,
1707 struct tmem_pool *pool, struct tmem_oid *oid,
1708 uint32_t index, void **pampd)
1709{
1710 int ret = -1;
1711 void *cdata = data;
1712 size_t clen = size;
1713 struct zcache_client *cli = pool->client;
1714 uint16_t client_id = get_client_id_from_client(cli);
1715 struct page *page = NULL;
1716 unsigned long count;
1717
1718 if (!raw) {
1719 page = virt_to_page(data);
1720 ret = zcache_compress(page, &cdata, &clen);
1721 if (ret == 0)
1722 goto out;
1723 if (clen == 0 || clen > zbud_max_buddy_size()) {
1724 zcache_compress_poor++;
1725 goto out;
1726 }
1727 }
1728 *pampd = (void *)zbud_create(client_id, pool->pool_id, oid,
1729 index, page, cdata, clen);
1730 if (*pampd == NULL) {
1731 ret = -ENOMEM;
1732 goto out;
1733 }
1734 ret = 0;
1735 count = atomic_inc_return(&zcache_curr_eph_pampd_count);
1736 if (count > zcache_curr_eph_pampd_count_max)
1737 zcache_curr_eph_pampd_count_max = count;
1738 if (client_id != LOCAL_CLIENT) {
1739 count = atomic_inc_return(&ramster_foreign_eph_pampd_count);
1740 if (count > ramster_foreign_eph_pampd_count_max)
1741 ramster_foreign_eph_pampd_count_max = count;
1742 }
1743out:
1744 return ret;
1745}
1746
1747static int zcache_pampd_pers_create(char *data, size_t size, bool raw,
1748 struct tmem_pool *pool, struct tmem_oid *oid,
1749 uint32_t index, void **pampd)
1750{
1751 int ret = -1;
1752 void *cdata = data;
1753 size_t clen = size;
1754 struct zcache_client *cli = pool->client;
1755 struct page *page;
1756 unsigned long count;
1757 unsigned long zv_mean_zsize;
1758 struct zv_hdr *zv;
1759 long curr_pers_pampd_count;
1760 u64 total_zsize;
1761#ifdef RAMSTER_TESTING
1762 static bool pampd_neg_warned;
1763#endif
1764
1765 curr_pers_pampd_count = atomic_read(&zcache_curr_pers_pampd_count) -
1766 atomic_read(&ramster_remote_pers_pages);
1767#ifdef RAMSTER_TESTING
1768 /* should always be positive, but warn if accounting is off */
1769	if (curr_pers_pampd_count < 0 && !pampd_neg_warned) {
1770 pr_warn("ramster: bad accounting for curr_pers_pampd_count\n");
1771 pampd_neg_warned = true;
1772 }
1773#endif
1774 if (curr_pers_pampd_count >
1775 (zv_page_count_policy_percent * totalram_pages) / 100) {
1776 zcache_policy_percent_exceeded++;
1777 goto out;
1778 }
1779 if (raw)
1780 goto ok_to_create;
1781 page = virt_to_page(data);
1782 if (zcache_compress(page, &cdata, &clen) == 0)
1783 goto out;
1784 /* reject if compression is too poor */
1785 if (clen > zv_max_zsize) {
1786 zcache_compress_poor++;
1787 goto out;
1788 }
1789 /* reject if mean compression is too poor */
1790 if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
1791 total_zsize = xv_get_total_size_bytes(cli->xvpool);
1792 zv_mean_zsize = div_u64(total_zsize, curr_pers_pampd_count);
1793 if (zv_mean_zsize > zv_max_mean_zsize) {
1794 zcache_mean_compress_poor++;
1795 goto out;
1796 }
1797 }
1798ok_to_create:
1799 *pampd = (void *)zv_create(cli, pool->pool_id, oid, index, cdata, clen);
1800 if (*pampd == NULL) {
1801 ret = -ENOMEM;
1802 goto out;
1803 }
1804 ret = 0;
1805 count = atomic_inc_return(&zcache_curr_pers_pampd_count);
1806 if (count > zcache_curr_pers_pampd_count_max)
1807 zcache_curr_pers_pampd_count_max = count;
1808 if (is_local_client(cli))
1809 goto out;
1810 zv = *(struct zv_hdr **)pampd;
1811 count = atomic_inc_return(&ramster_foreign_pers_pampd_count);
1812 if (count > ramster_foreign_pers_pampd_count_max)
1813 ramster_foreign_pers_pampd_count_max = count;
1814out:
1815 return ret;
1816}
1817
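/*
 * The eph argument below is a tri-state: 1 forces ephemeral treatment,
 * 0 defers to the pool type, and any other value (e.g. -1) forces
 * persistent treatment regardless of the pool.
 */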
1818static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
1819 struct tmem_pool *pool, struct tmem_oid *oid,
1820 uint32_t index)
1821{
1822 void *pampd = NULL;
1823 int ret;
1824 bool ephemeral;
1825
1826 BUG_ON(preemptible());
1827 ephemeral = (eph == 1) || ((eph == 0) && is_ephemeral(pool));
1828 if (ephemeral)
1829 ret = zcache_pampd_eph_create(data, size, raw, pool,
1830 oid, index, &pampd);
1831 else
1832 ret = zcache_pampd_pers_create(data, size, raw, pool,
1833 oid, index, &pampd);
1834 /* FIXME add some counters here for failed creates? */
1835 return pampd;
1836}
1837
1838/*
1839 * fill the pageframe corresponding to the struct page with the data
1840 * from the passed pampd
1841 */
1842static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
1843 void *pampd, struct tmem_pool *pool,
1844 struct tmem_oid *oid, uint32_t index)
1845{
1846 int ret = 0;
1847
1848 BUG_ON(preemptible());
1849 BUG_ON(is_ephemeral(pool)); /* Fix later for shared pools? */
1850 BUG_ON(pampd_is_remote(pampd));
1851 if (raw)
1852 zv_copy_from_pampd(data, bufsize, pampd);
1853 else
1854 zv_decompress(virt_to_page(data), pampd);
1855 return ret;
1856}
1857
1858static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
1859 void *pampd, struct tmem_pool *pool,
1860 struct tmem_oid *oid, uint32_t index)
1861{
1862 int ret = 0;
1863 unsigned long flags;
1864 struct zcache_client *cli = pool->client;
1865
1866 BUG_ON(preemptible());
1867 BUG_ON(pampd_is_remote(pampd));
1868 if (is_ephemeral(pool)) {
1869 local_irq_save(flags);
1870 if (raw)
1871 zbud_copy_from_pampd(data, bufsize, pampd);
1872 else
1873 ret = zbud_decompress(virt_to_page(data), pampd);
1874 zbud_free_and_delist((struct zbud_hdr *)pampd);
1875 local_irq_restore(flags);
1876 if (!is_local_client(cli))
1877 dec_and_check(&ramster_foreign_eph_pampd_count);
1878 dec_and_check(&zcache_curr_eph_pampd_count);
1879 } else {
1880 if (is_local_client(cli))
1881 BUG();
1882 if (raw)
1883 zv_copy_from_pampd(data, bufsize, pampd);
1884 else
1885 zv_decompress(virt_to_page(data), pampd);
1886 zv_free(cli->xvpool, pampd);
1887 if (!is_local_client(cli))
1888 dec_and_check(&ramster_foreign_pers_pampd_count);
1889 dec_and_check(&zcache_curr_pers_pampd_count);
1890 ret = 0;
1891 }
1892 return ret;
1893}
1894
1895static bool zcache_pampd_is_remote(void *pampd)
1896{
1897 return pampd_is_remote(pampd);
1898}
1899
1900/*
1901 * free the pampd and remove it from any zcache lists
1902 * pampd must no longer be pointed to from any tmem data structures!
1903 */
1904static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
1905 struct tmem_oid *oid, uint32_t index, bool acct)
1906{
1907 struct zcache_client *cli = pool->client;
1908 bool eph = is_ephemeral(pool);
1909 struct zv_hdr *zv;
1910
1911 BUG_ON(preemptible());
1912 if (pampd_is_remote(pampd)) {
1913 WARN_ON(acct == false);
1914 if (oid == NULL) {
1915 /*
1916 * a NULL oid means to ignore this pampd free
1917 * as the remote freeing will be handled elsewhere
1918 */
1919 } else if (eph) {
1920 /* FIXME remote flush optional but probably good idea */
1921 /* FIXME get these working properly again */
1922 dec_and_check(&zcache_curr_eph_pampd_count);
1923 } else if (pampd_is_intransit(pampd)) {
1924 /* did a pers remote get_and_free, so just free local */
1925 pampd = pampd_mask_intransit_and_remote(pampd);
1926 goto local_pers;
1927 } else {
1928 struct flushlist_node *flnode =
1929 ramster_flnode_alloc(pool);
1930
1931 flnode->xh.client_id = pampd_remote_node(pampd);
1932 flnode->xh.pool_id = pool->pool_id;
1933 flnode->xh.oid = *oid;
1934 flnode->xh.index = index;
1935 flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_PAGE;
1936 spin_lock(&zcache_rem_op_list_lock);
1937 list_add(&flnode->rem_op.list, &zcache_rem_op_list);
1938 spin_unlock(&zcache_rem_op_list_lock);
1939 dec_and_check(&zcache_curr_pers_pampd_count);
1940 dec_and_check(&ramster_remote_pers_pages);
1941 }
1942 } else if (eph) {
1943 zbud_free_and_delist((struct zbud_hdr *)pampd);
1944 if (!is_local_client(pool->client))
1945 dec_and_check(&ramster_foreign_eph_pampd_count);
1946 if (acct)
1947 /* FIXME get these working properly again */
1948 dec_and_check(&zcache_curr_eph_pampd_count);
1949 } else {
1950local_pers:
1951 zv = (struct zv_hdr *)pampd;
1952 if (!is_local_client(pool->client))
1953 dec_and_check(&ramster_foreign_pers_pampd_count);
1954 zv_free(cli->xvpool, zv);
1955 if (acct)
1956 /* FIXME get these working properly again */
1957 dec_and_check(&zcache_curr_pers_pampd_count);
1958 }
1959}
1960
1961static void zcache_pampd_free_obj(struct tmem_pool *pool,
1962 struct tmem_obj *obj)
1963{
1964 struct flushlist_node *flnode;
1965
1966 BUG_ON(preemptible());
1967 if (obj->extra == NULL)
1968 return;
1969 BUG_ON(!pampd_is_remote(obj->extra));
1970 flnode = ramster_flnode_alloc(pool);
1971 flnode->xh.client_id = pampd_remote_node(obj->extra);
1972 flnode->xh.pool_id = pool->pool_id;
1973 flnode->xh.oid = obj->oid;
1974 flnode->xh.index = FLUSH_ENTIRE_OBJECT;
1975 flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_OBJ;
1976 spin_lock(&zcache_rem_op_list_lock);
1977 list_add(&flnode->rem_op.list, &zcache_rem_op_list);
1978 spin_unlock(&zcache_rem_op_list_lock);
1979}
1980
1981void zcache_pampd_new_obj(struct tmem_obj *obj)
1982{
1983 obj->extra = NULL;
1984}
1985
1986int zcache_pampd_replace_in_obj(void *new_pampd, struct tmem_obj *obj)
1987{
1988 int ret = -1;
1989
1990 if (new_pampd != NULL) {
1991 if (obj->extra == NULL)
1992 obj->extra = new_pampd;
1993 /* enforce that all remote pages in an object reside
1994 * in the same node! */
1995 else if (pampd_remote_node(new_pampd) !=
1996 pampd_remote_node((void *)(obj->extra)))
1997 BUG();
1998 ret = 0;
1999 }
2000 return ret;
2001}
2002
2003/*
2004 * Called by the message handler after a (still compressed) page has been
2005 * fetched from the remote machine in response to an "is_remote" tmem_get
2006 * or persistent tmem_localify. For a tmem_get, "extra" is the address of
2007 * the page that is to be filled to successfully resolve the tmem_get; for
2008 * a (persistent) tmem_localify, "extra" is NULL (as the data is placed only
2009 * in the local zcache). "data" points to "size" bytes of (compressed) data
2010 * passed in the message. In the case of a persistent remote get, if
2011 * pre-allocation was successful (see zcache_repatriate_preload), the page
2012 * is placed into both local zcache and at "extra".
2013 */
2014int zcache_localify(int pool_id, struct tmem_oid *oidp,
2015 uint32_t index, char *data, size_t size,
2016 void *extra)
2017{
2018 int ret = -ENOENT;
2019 unsigned long flags;
2020 struct tmem_pool *pool;
2021 bool ephemeral, delete = false;
2022 size_t clen = PAGE_SIZE;
2023 void *pampd, *saved_hb;
2024 struct tmem_obj *obj;
2025
2026 pool = zcache_get_pool_by_id(LOCAL_CLIENT, pool_id);
2027 if (unlikely(pool == NULL))
2028 /* pool doesn't exist anymore */
2029 goto out;
2030 ephemeral = is_ephemeral(pool);
2031 local_irq_save(flags); /* FIXME: maybe only disable softirqs? */
2032 pampd = tmem_localify_get_pampd(pool, oidp, index, &obj, &saved_hb);
2033 if (pampd == NULL) {
2034 /* hmmm... must have been a flush while waiting */
2035#ifdef RAMSTER_TESTING
2036 pr_err("UNTESTED pampd==NULL in zcache_localify\n");
2037#endif
2038 if (ephemeral)
2039 ramster_remote_eph_pages_unsucc_get++;
2040 else
2041 ramster_remote_pers_pages_unsucc_get++;
2042 obj = NULL;
2043 goto finish;
2044 } else if (unlikely(!pampd_is_remote(pampd))) {
2045 /* hmmm... must have been a dup put while waiting */
2046#ifdef RAMSTER_TESTING
2047 pr_err("UNTESTED dup while waiting in zcache_localify\n");
2048#endif
2049 if (ephemeral)
2050 ramster_remote_eph_pages_unsucc_get++;
2051 else
2052 ramster_remote_pers_pages_unsucc_get++;
2053 obj = NULL;
2054 pampd = NULL;
2055 ret = -EEXIST;
2056 goto finish;
2057 } else if (size == 0) {
2058 /* no remote data, delete the local is_remote pampd */
2059 pampd = NULL;
2060 if (ephemeral)
2061 ramster_remote_eph_pages_unsucc_get++;
2062 else
2063 BUG();
2064 delete = true;
2065 goto finish;
2066 }
2067 if (!ephemeral && pampd_is_intransit(pampd)) {
2068 /* localify to zcache */
2069 pampd = pampd_mask_intransit_and_remote(pampd);
2070 zv_copy_to_pampd(pampd, data, size);
2071 } else {
2072 pampd = NULL;
2073 obj = NULL;
2074 }
2075 if (extra != NULL) {
2076 /* decompress direct-to-memory to complete remotify */
2077 ret = lzo1x_decompress_safe((char *)data, size,
2078 (char *)extra, &clen);
2079 BUG_ON(ret != LZO_E_OK);
2080 BUG_ON(clen != PAGE_SIZE);
2081 }
2082 if (ephemeral)
2083 ramster_remote_eph_pages_succ_get++;
2084 else
2085 ramster_remote_pers_pages_succ_get++;
2086 ret = 0;
2087finish:
2088 tmem_localify_finish(obj, index, pampd, saved_hb, delete);
2089 zcache_put_pool(pool);
2090 local_irq_restore(flags);
2091out:
2092 return ret;
2093}
2094
2095/*
2096 * Called on a remote persistent tmem_get to attempt to preallocate
2097 * local storage for the data contained in the remote persistent page.
2098 * If successfully preallocated, returns the pampd, marked as remote and
2099 * in_transit. Else returns NULL. Note that the appropriate tmem data
2100 * structure must be locked.
2101 */
2102static void *zcache_pampd_repatriate_preload(void *pampd,
2103 struct tmem_pool *pool,
2104 struct tmem_oid *oid,
2105 uint32_t index,
2106 bool *intransit)
2107{
2108 int clen = pampd_remote_size(pampd);
2109 void *ret_pampd = NULL;
2110 unsigned long flags;
2111
2112 if (!pampd_is_remote(pampd))
2113 BUG();
2114 if (is_ephemeral(pool))
2115 BUG();
2116 if (pampd_is_intransit(pampd)) {
2117 /*
2118 * to avoid multiple allocations (and maybe a memory leak)
2119 * don't preallocate if already in the process of being
2120 * repatriated
2121 */
2122 *intransit = true;
2123 goto out;
2124 }
2125 *intransit = false;
2126 local_irq_save(flags);
2127 ret_pampd = (void *)zv_alloc(pool, oid, index, clen);
2128 if (ret_pampd != NULL) {
2129 /*
2130 * a pampd is marked intransit if it is remote and space has
2131 * been allocated for it locally (note, only happens for
2132 * persistent pages, in which case the remote copy is freed)
2133 */
2134 ret_pampd = pampd_mark_intransit(ret_pampd);
2135 dec_and_check(&ramster_remote_pers_pages);
2136 } else
2137 ramster_pers_pages_remote_nomem++;
2138 local_irq_restore(flags);
2139out:
2140 return ret_pampd;
2141}
2142
2143/*
2144 * Called on a remote tmem_get to invoke a message to fetch the page.
2145 * Might sleep so no tmem locks can be held. "extra" is passed
2146 * all the way through the round-trip messaging to zcache_localify.
2147 */
2148static int zcache_pampd_repatriate(void *fake_pampd, void *real_pampd,
2149 struct tmem_pool *pool,
2150 struct tmem_oid *oid, uint32_t index,
2151 bool free, void *extra)
2152{
2153 struct tmem_xhandle xh;
2154 int ret;
2155
2156 if (pampd_is_intransit(real_pampd))
2157 /* have local space pre-reserved, so free remote copy */
2158 free = true;
2159 xh = tmem_xhandle_fill(LOCAL_CLIENT, pool, oid, index);
2160 /* unreliable request/response for now */
2161 ret = ramster_remote_async_get(&xh, free,
2162 pampd_remote_node(fake_pampd),
2163 pampd_remote_size(fake_pampd),
2164 pampd_remote_cksum(fake_pampd),
2165 extra);
2166#ifdef RAMSTER_TESTING
2167 if (ret != 0 && ret != -ENOENT)
2168 pr_err("TESTING zcache_pampd_repatriate returns, ret=%d\n",
2169 ret);
2170#endif
2171 return ret;
2172}
2173
2174static struct tmem_pamops zcache_pamops = {
2175 .create = zcache_pampd_create,
2176 .get_data = zcache_pampd_get_data,
2177 .free = zcache_pampd_free,
2178 .get_data_and_free = zcache_pampd_get_data_and_free,
2179 .free_obj = zcache_pampd_free_obj,
2180 .is_remote = zcache_pampd_is_remote,
2181 .repatriate_preload = zcache_pampd_repatriate_preload,
2182 .repatriate = zcache_pampd_repatriate,
2183 .new_obj = zcache_pampd_new_obj,
2184 .replace_in_obj = zcache_pampd_replace_in_obj,
2185};
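
/*
 * For a page living on a remote node, the ops above chain together as:
 * .is_remote detects the remote pampd, .repatriate_preload reserves
 * local zv space (persistent pages only), .repatriate sends the async
 * fetch message, and zcache_localify() installs the data once the
 * remote node responds.
 */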
2186
2187/*
2188 * zcache compression/decompression and related per-cpu stuff
2189 */
2190
2191#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
2192#define LZO_DSTMEM_PAGE_ORDER 1
2193static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
2194static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
2195
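/*
 * Returns 1 on success, with *out_va pointing into this CPU's dstmem
 * buffer (only valid until the next compression on this CPU, hence the
 * irqs-disabled requirement) and *out_len set to the compressed length;
 * returns 0 if the per-cpu buffers were never allocated.
 */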
2196static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
2197{
2198 int ret = 0;
2199 unsigned char *dmem = __get_cpu_var(zcache_dstmem);
2200 unsigned char *wmem = __get_cpu_var(zcache_workmem);
2201 char *from_va;
2202
2203 BUG_ON(!irqs_disabled());
2204 if (unlikely(dmem == NULL || wmem == NULL))
2205 goto out; /* no buffer, so can't compress */
2206 from_va = kmap_atomic(from, KM_USER0);
2207 mb();
2208 ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
2209 BUG_ON(ret != LZO_E_OK);
2210 *out_va = dmem;
2211 kunmap_atomic(from_va, KM_USER0);
2212 ret = 1;
2213out:
2214 return ret;
2215}
2216
2217
2218static int zcache_cpu_notifier(struct notifier_block *nb,
2219 unsigned long action, void *pcpu)
2220{
2221 int cpu = (long)pcpu;
2222 struct zcache_preload *kp;
2223
2224 switch (action) {
2225 case CPU_UP_PREPARE:
2226 per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
2227 GFP_KERNEL | __GFP_REPEAT,
2228			LZO_DSTMEM_PAGE_ORDER);
2229 per_cpu(zcache_workmem, cpu) =
2230 kzalloc(LZO1X_MEM_COMPRESS,
2231 GFP_KERNEL | __GFP_REPEAT);
2232 per_cpu(zcache_remoteputmem, cpu) =
2233 kzalloc(PAGE_SIZE, GFP_KERNEL | __GFP_REPEAT);
2234 break;
2235 case CPU_DEAD:
2236 case CPU_UP_CANCELED:
2237 kfree(per_cpu(zcache_remoteputmem, cpu));
2238 per_cpu(zcache_remoteputmem, cpu) = NULL;
2239 free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
2240 LZO_DSTMEM_PAGE_ORDER);
2241 per_cpu(zcache_dstmem, cpu) = NULL;
2242 kfree(per_cpu(zcache_workmem, cpu));
2243 per_cpu(zcache_workmem, cpu) = NULL;
2244 kp = &per_cpu(zcache_preloads, cpu);
2245 while (kp->nr) {
2246 kmem_cache_free(zcache_objnode_cache,
2247 kp->objnodes[kp->nr - 1]);
2248 kp->objnodes[kp->nr - 1] = NULL;
2249 kp->nr--;
2250 }
2251 if (kp->obj) {
2252 kmem_cache_free(zcache_obj_cache, kp->obj);
2253 kp->obj = NULL;
2254 }
2255 if (kp->flnode) {
2256 kmem_cache_free(ramster_flnode_cache, kp->flnode);
2257 kp->flnode = NULL;
2258 }
2259 if (kp->page) {
2260 free_page((unsigned long)kp->page);
2261 kp->page = NULL;
2262 }
2263 break;
2264 default:
2265 break;
2266 }
2267 return NOTIFY_OK;
2268}
2269
2270static struct notifier_block zcache_cpu_notifier_block = {
2271 .notifier_call = zcache_cpu_notifier
2272};
2273
2274#ifdef CONFIG_SYSFS
2275#define ZCACHE_SYSFS_RO(_name) \
2276 static ssize_t zcache_##_name##_show(struct kobject *kobj, \
2277 struct kobj_attribute *attr, char *buf) \
2278 { \
2279 return sprintf(buf, "%lu\n", zcache_##_name); \
2280 } \
2281 static struct kobj_attribute zcache_##_name##_attr = { \
2282 .attr = { .name = __stringify(_name), .mode = 0444 }, \
2283 .show = zcache_##_name##_show, \
2284 }
2285
2286#define ZCACHE_SYSFS_RO_ATOMIC(_name) \
2287 static ssize_t zcache_##_name##_show(struct kobject *kobj, \
2288 struct kobj_attribute *attr, char *buf) \
2289 { \
2290 return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \
2291 } \
2292 static struct kobj_attribute zcache_##_name##_attr = { \
2293 .attr = { .name = __stringify(_name), .mode = 0444 }, \
2294 .show = zcache_##_name##_show, \
2295 }
2296
2297#define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \
2298 static ssize_t zcache_##_name##_show(struct kobject *kobj, \
2299 struct kobj_attribute *attr, char *buf) \
2300 { \
2301 return _func(buf); \
2302 } \
2303 static struct kobj_attribute zcache_##_name##_attr = { \
2304 .attr = { .name = __stringify(_name), .mode = 0444 }, \
2305 .show = zcache_##_name##_show, \
2306 }
2307
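/*
 * For instance, ZCACHE_SYSFS_RO(failed_alloc) below expands to a
 * zcache_failed_alloc_show() routine printing the unsigned long counter
 * zcache_failed_alloc, wrapped in a read-only (0444) attribute named
 * "failed_alloc".
 */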
2308ZCACHE_SYSFS_RO(curr_obj_count_max);
2309ZCACHE_SYSFS_RO(curr_objnode_count_max);
2310ZCACHE_SYSFS_RO(flush_total);
2311ZCACHE_SYSFS_RO(flush_found);
2312ZCACHE_SYSFS_RO(flobj_total);
2313ZCACHE_SYSFS_RO(flobj_found);
2314ZCACHE_SYSFS_RO(failed_eph_puts);
2315ZCACHE_SYSFS_RO(nonactive_puts);
2316ZCACHE_SYSFS_RO(failed_pers_puts);
2317ZCACHE_SYSFS_RO(zbud_curr_zbytes);
2318ZCACHE_SYSFS_RO(zbud_cumul_zpages);
2319ZCACHE_SYSFS_RO(zbud_cumul_zbytes);
2320ZCACHE_SYSFS_RO(zbud_buddied_count);
2321ZCACHE_SYSFS_RO(evicted_raw_pages);
2322ZCACHE_SYSFS_RO(evicted_unbuddied_pages);
2323ZCACHE_SYSFS_RO(evicted_buddied_pages);
2324ZCACHE_SYSFS_RO(failed_get_free_pages);
2325ZCACHE_SYSFS_RO(failed_alloc);
2326ZCACHE_SYSFS_RO(put_to_flush);
2327ZCACHE_SYSFS_RO(compress_poor);
2328ZCACHE_SYSFS_RO(mean_compress_poor);
2329ZCACHE_SYSFS_RO(policy_percent_exceeded);
2330ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
2331ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages);
2332ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count);
2333ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count);
2334ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts,
2335 zbud_show_unbuddied_list_counts);
2336ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts,
2337 zbud_show_cumul_chunk_counts);
2338ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts,
2339 zv_curr_dist_counts_show);
2340ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts,
2341 zv_cumul_dist_counts_show);
2342
2343static struct attribute *zcache_attrs[] = {
2344 &zcache_curr_obj_count_attr.attr,
2345 &zcache_curr_obj_count_max_attr.attr,
2346 &zcache_curr_objnode_count_attr.attr,
2347 &zcache_curr_objnode_count_max_attr.attr,
2348 &zcache_flush_total_attr.attr,
2349 &zcache_flobj_total_attr.attr,
2350 &zcache_flush_found_attr.attr,
2351 &zcache_flobj_found_attr.attr,
2352 &zcache_failed_eph_puts_attr.attr,
2353 &zcache_nonactive_puts_attr.attr,
2354 &zcache_failed_pers_puts_attr.attr,
2355 &zcache_policy_percent_exceeded_attr.attr,
2356 &zcache_compress_poor_attr.attr,
2357 &zcache_mean_compress_poor_attr.attr,
2358 &zcache_zbud_curr_raw_pages_attr.attr,
2359 &zcache_zbud_curr_zpages_attr.attr,
2360 &zcache_zbud_curr_zbytes_attr.attr,
2361 &zcache_zbud_cumul_zpages_attr.attr,
2362 &zcache_zbud_cumul_zbytes_attr.attr,
2363 &zcache_zbud_buddied_count_attr.attr,
2364 &zcache_evicted_raw_pages_attr.attr,
2365 &zcache_evicted_unbuddied_pages_attr.attr,
2366 &zcache_evicted_buddied_pages_attr.attr,
2367 &zcache_failed_get_free_pages_attr.attr,
2368 &zcache_failed_alloc_attr.attr,
2369 &zcache_put_to_flush_attr.attr,
2370 &zcache_zbud_unbuddied_list_counts_attr.attr,
2371 &zcache_zbud_cumul_chunk_counts_attr.attr,
2372 &zcache_zv_curr_dist_counts_attr.attr,
2373 &zcache_zv_cumul_dist_counts_attr.attr,
2374 &zcache_zv_max_zsize_attr.attr,
2375 &zcache_zv_max_mean_zsize_attr.attr,
2376 &zcache_zv_page_count_policy_percent_attr.attr,
2377 NULL,
2378};
2379
2380static struct attribute_group zcache_attr_group = {
2381 .attrs = zcache_attrs,
2382 .name = "zcache",
2383};
2384
2385#define RAMSTER_SYSFS_RO(_name) \
2386 static ssize_t ramster_##_name##_show(struct kobject *kobj, \
2387 struct kobj_attribute *attr, char *buf) \
2388 { \
2389 return sprintf(buf, "%lu\n", ramster_##_name); \
2390 } \
2391 static struct kobj_attribute ramster_##_name##_attr = { \
2392 .attr = { .name = __stringify(_name), .mode = 0444 }, \
2393 .show = ramster_##_name##_show, \
2394 }
2395
2396#define RAMSTER_SYSFS_RW(_name) \
2397 static ssize_t ramster_##_name##_show(struct kobject *kobj, \
2398 struct kobj_attribute *attr, char *buf) \
2399 { \
2400 return sprintf(buf, "%lu\n", ramster_##_name); \
2401 } \
2402 static ssize_t ramster_##_name##_store(struct kobject *kobj, \
2403 struct kobj_attribute *attr, const char *buf, size_t count) \
2404 { \
2405 int err; \
2406 unsigned long enable; \
2407 err = kstrtoul(buf, 10, &enable); \
2408 if (err) \
2409 return -EINVAL; \
2410 ramster_##_name = enable; \
2411 return count; \
2412 } \
2413 static struct kobj_attribute ramster_##_name##_attr = { \
2414 .attr = { .name = __stringify(_name), .mode = 0644 }, \
2415 .show = ramster_##_name##_show, \
2416 .store = ramster_##_name##_store, \
2417 }
2418
2419#define RAMSTER_SYSFS_RO_ATOMIC(_name) \
2420 static ssize_t ramster_##_name##_show(struct kobject *kobj, \
2421 struct kobj_attribute *attr, char *buf) \
2422 { \
2423 return sprintf(buf, "%d\n", atomic_read(&ramster_##_name)); \
2424 } \
2425 static struct kobj_attribute ramster_##_name##_attr = { \
2426 .attr = { .name = __stringify(_name), .mode = 0444 }, \
2427 .show = ramster_##_name##_show, \
2428 }
2429
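/*
 * Both attribute groups are registered on mm_kobj in zcache_init(), so
 * (with sysfs mounted at /sys) the files land under /sys/kernel/mm/zcache/
 * and /sys/kernel/mm/ramster/, e.g.:
 *
 *	cat /sys/kernel/mm/ramster/remote_pers_pages
 *	echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable
 */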
2430RAMSTER_SYSFS_RO(interface_revision);
2431RAMSTER_SYSFS_RO_ATOMIC(remote_pers_pages);
2432RAMSTER_SYSFS_RW(pers_remotify_enable);
2433RAMSTER_SYSFS_RW(eph_remotify_enable);
2434RAMSTER_SYSFS_RO(eph_pages_remoted);
2435RAMSTER_SYSFS_RO(eph_pages_remote_failed);
2436RAMSTER_SYSFS_RO(pers_pages_remoted);
2437RAMSTER_SYSFS_RO(pers_pages_remote_failed);
2438RAMSTER_SYSFS_RO(pers_pages_remote_nomem);
2439RAMSTER_SYSFS_RO(remote_pages_flushed);
2440RAMSTER_SYSFS_RO(remote_page_flushes_failed);
2441RAMSTER_SYSFS_RO(remote_objects_flushed);
2442RAMSTER_SYSFS_RO(remote_object_flushes_failed);
2443RAMSTER_SYSFS_RO(remote_eph_pages_succ_get);
2444RAMSTER_SYSFS_RO(remote_eph_pages_unsucc_get);
2445RAMSTER_SYSFS_RO(remote_pers_pages_succ_get);
2446RAMSTER_SYSFS_RO(remote_pers_pages_unsucc_get);
2447RAMSTER_SYSFS_RO_ATOMIC(foreign_eph_pampd_count);
2448RAMSTER_SYSFS_RO(foreign_eph_pampd_count_max);
2449RAMSTER_SYSFS_RO_ATOMIC(foreign_pers_pampd_count);
2450RAMSTER_SYSFS_RO(foreign_pers_pampd_count_max);
2451RAMSTER_SYSFS_RO_ATOMIC(curr_flnode_count);
2452RAMSTER_SYSFS_RO(curr_flnode_count_max);
2453
2454#define MANUAL_NODES 8
2455static bool ramster_nodes_manual_up[MANUAL_NODES];
2456static ssize_t ramster_manual_node_up_show(struct kobject *kobj,
2457 struct kobj_attribute *attr, char *buf)
2458{
2459 int i;
2460 char *p = buf;
2461 for (i = 0; i < MANUAL_NODES; i++)
2462 if (ramster_nodes_manual_up[i])
2463 p += sprintf(p, "%d ", i);
2464 p += sprintf(p, "\n");
2465 return p - buf;
2466}
2467
2468static ssize_t ramster_manual_node_up_store(struct kobject *kobj,
2469 struct kobj_attribute *attr, const char *buf, size_t count)
2470{
2471 int err;
2472 unsigned long node_num;
2473
2474 err = kstrtoul(buf, 10, &node_num);
2475 if (err) {
2476 pr_err("ramster: bad strtoul?\n");
2477 return -EINVAL;
2478 }
2479 if (node_num >= MANUAL_NODES) {
2480 pr_err("ramster: bad node_num=%lu?\n", node_num);
2481 return -EINVAL;
2482 }
2483 if (ramster_nodes_manual_up[node_num]) {
2484 pr_err("ramster: node %d already up, ignoring\n",
2485 (int)node_num);
2486 } else {
2487 ramster_nodes_manual_up[node_num] = true;
2488 r2net_hb_node_up_manual((int)node_num);
2489 }
2490 return count;
2491}
2492
2493static struct kobj_attribute ramster_manual_node_up_attr = {
2494 .attr = { .name = "manual_node_up", .mode = 0644 },
2495 .show = ramster_manual_node_up_show,
2496 .store = ramster_manual_node_up_store,
2497};
2498
2499static ssize_t ramster_remote_target_nodenum_show(struct kobject *kobj,
2500 struct kobj_attribute *attr, char *buf)
2501{
2502 if (ramster_remote_target_nodenum == -1UL)
2503 return sprintf(buf, "unset\n");
2504 else
2505 return sprintf(buf, "%d\n", ramster_remote_target_nodenum);
2506}
2507
2508static ssize_t ramster_remote_target_nodenum_store(struct kobject *kobj,
2509 struct kobj_attribute *attr, const char *buf, size_t count)
2510{
2511 int err;
2512 unsigned long node_num;
2513
2514 err = kstrtoul(buf, 10, &node_num);
2515 if (err) {
2516 pr_err("ramster: bad strtoul?\n");
2517 return -EINVAL;
2518 } else if (node_num == -1UL) {
2519 pr_err("ramster: disabling all remotification, "
2520 "data may still reside on remote nodes however\n");
2521 return -EINVAL;
2522 } else if (node_num >= MANUAL_NODES) {
2523 pr_err("ramster: bad node_num=%lu?\n", node_num);
2524 return -EINVAL;
2525 } else if (!ramster_nodes_manual_up[node_num]) {
2526 pr_err("ramster: node %d not up, ignoring setting "
2527 "of remotification target\n", (int)node_num);
2528 } else if (r2net_remote_target_node_set((int)node_num) >= 0) {
2529 pr_info("ramster: node %d set as remotification target\n",
2530 (int)node_num);
2531 ramster_remote_target_nodenum = (int)node_num;
2532 } else {
2533 pr_err("ramster: bad num to node node_num=%d?\n",
2534 (int)node_num);
2535 return -EINVAL;
2536 }
2537 return count;
2538}
2539
2540static struct kobj_attribute ramster_remote_target_nodenum_attr = {
2541 .attr = { .name = "remote_target_nodenum", .mode = 0644 },
2542 .show = ramster_remote_target_nodenum_show,
2543 .store = ramster_remote_target_nodenum_store,
2544};
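
/*
 * Illustrative (hypothetical numbering) two-node bring-up: on each
 * machine, mark the peer up and then point remotification at it:
 *
 *	echo 1 > /sys/kernel/mm/ramster/manual_node_up
 *	echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
 *
 * manual_node_up calls r2net_hb_node_up_manual(), and
 * remote_target_nodenum only accepts a node already marked up.
 */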
2545
2546
2547static struct attribute *ramster_attrs[] = {
2548 &ramster_interface_revision_attr.attr,
2549 &ramster_pers_remotify_enable_attr.attr,
2550 &ramster_eph_remotify_enable_attr.attr,
2551 &ramster_remote_pers_pages_attr.attr,
2552 &ramster_eph_pages_remoted_attr.attr,
2553 &ramster_eph_pages_remote_failed_attr.attr,
2554 &ramster_pers_pages_remoted_attr.attr,
2555 &ramster_pers_pages_remote_failed_attr.attr,
2556 &ramster_pers_pages_remote_nomem_attr.attr,
2557 &ramster_remote_pages_flushed_attr.attr,
2558 &ramster_remote_page_flushes_failed_attr.attr,
2559 &ramster_remote_objects_flushed_attr.attr,
2560 &ramster_remote_object_flushes_failed_attr.attr,
2561 &ramster_remote_eph_pages_succ_get_attr.attr,
2562 &ramster_remote_eph_pages_unsucc_get_attr.attr,
2563 &ramster_remote_pers_pages_succ_get_attr.attr,
2564 &ramster_remote_pers_pages_unsucc_get_attr.attr,
2565 &ramster_foreign_eph_pampd_count_attr.attr,
2566 &ramster_foreign_eph_pampd_count_max_attr.attr,
2567 &ramster_foreign_pers_pampd_count_attr.attr,
2568 &ramster_foreign_pers_pampd_count_max_attr.attr,
2569 &ramster_curr_flnode_count_attr.attr,
2570 &ramster_curr_flnode_count_max_attr.attr,
2571 &ramster_manual_node_up_attr.attr,
2572 &ramster_remote_target_nodenum_attr.attr,
2573 NULL,
2574};
2575
2576static struct attribute_group ramster_attr_group = {
2577 .attrs = ramster_attrs,
2578 .name = "ramster",
2579};
2580
2581#endif /* CONFIG_SYSFS */
2582/*
2583 * When zcache is disabled ("frozen"), pools can be created and destroyed,
2584 * but all puts (and thus all other operations that require memory allocation)
2585 * must fail. If zcache is unfrozen, accepts puts, and is then frozen
2586 * again, data consistency requires that all puts made while frozen be
2587 * converted into flushes.
2588 */
2589static bool zcache_freeze;
2590
2591/*
2592 * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
2593 */
2594static int shrink_zcache_memory(struct shrinker *shrink,
2595 struct shrink_control *sc)
2596{
2597 int ret = -1;
2598 int nr = sc->nr_to_scan;
2599 gfp_t gfp_mask = sc->gfp_mask;
2600
2601 if (nr >= 0) {
2602 if (!(gfp_mask & __GFP_FS))
2603 /* does this case really need to be skipped? */
2604 goto out;
2605 zbud_evict_pages(nr);
2606 }
2607 ret = (int)atomic_read(&zcache_zbud_curr_raw_pages);
2608out:
2609 return ret;
2610}
2611
2612static struct shrinker zcache_shrinker = {
2613 .shrink = shrink_zcache_memory,
2614 .seeks = DEFAULT_SEEKS,
2615};
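
/*
 * Under the shrinker API of this kernel, the callback both evicts (for
 * nr_to_scan >= 0, provided the allocation context allows __GFP_FS) and
 * reports pressure: the returned raw zbud page count tells the VM how
 * much remains reclaimable.
 */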
2616
2617/*
2618 * zcache shims between cleancache/frontswap ops and tmem
2619 */
2620
2621int zcache_put(int cli_id, int pool_id, struct tmem_oid *oidp,
2622 uint32_t index, char *data, size_t size,
2623 bool raw, int ephemeral)
2624{
2625 struct tmem_pool *pool;
2626 int ret = -1;
2627
2628 BUG_ON(!irqs_disabled());
2629 pool = zcache_get_pool_by_id(cli_id, pool_id);
2630 if (unlikely(pool == NULL))
2631 goto out;
2632 if (!zcache_freeze && zcache_do_preload(pool) == 0) {
2633 /* preload does preempt_disable on success */
2634 ret = tmem_put(pool, oidp, index, data, size, raw, ephemeral);
2635 if (ret < 0) {
2636 if (is_ephemeral(pool))
2637 zcache_failed_eph_puts++;
2638 else
2639 zcache_failed_pers_puts++;
2640 }
2641 zcache_put_pool(pool);
2642 preempt_enable_no_resched();
2643 } else {
2644 zcache_put_to_flush++;
2645 if (atomic_read(&pool->obj_count) > 0)
2646 /* the put fails whether the flush succeeds or not */
2647 (void)tmem_flush_page(pool, oidp, index);
2648 zcache_put_pool(pool);
2649 }
2650out:
2651 return ret;
2652}
2653
2654int zcache_get(int cli_id, int pool_id, struct tmem_oid *oidp,
2655 uint32_t index, char *data, size_t *sizep,
2656 bool raw, int get_and_free)
2657{
2658 struct tmem_pool *pool;
2659 int ret = -1;
2660 bool eph;
2661
2662 if (!raw) {
2663 BUG_ON(irqs_disabled());
2664 BUG_ON(in_softirq());
2665 }
2666 pool = zcache_get_pool_by_id(cli_id, pool_id);
2667	eph = (pool == NULL) || is_ephemeral(pool);	/* pool may already be gone */
2668 if (likely(pool != NULL)) {
2669 if (atomic_read(&pool->obj_count) > 0)
2670 ret = tmem_get(pool, oidp, index, data, sizep,
2671 raw, get_and_free);
2672 zcache_put_pool(pool);
2673 }
2674 WARN_ONCE((!eph && (ret != 0)), "zcache_get fails on persistent pool, "
2675 "bad things are very likely to happen soon\n");
2676#ifdef RAMSTER_TESTING
2677 if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
2678 pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
2679#endif
2680 if (ret == -EAGAIN)
2681 BUG(); /* FIXME... don't need this anymore??? let's ensure */
2682 return ret;
2683}
2684
2685int zcache_flush(int cli_id, int pool_id,
2686 struct tmem_oid *oidp, uint32_t index)
2687{
2688 struct tmem_pool *pool;
2689 int ret = -1;
2690 unsigned long flags;
2691
2692 local_irq_save(flags);
2693 zcache_flush_total++;
2694 pool = zcache_get_pool_by_id(cli_id, pool_id);
2695 ramster_do_preload_flnode_only(pool);
2696 if (likely(pool != NULL)) {
2697 if (atomic_read(&pool->obj_count) > 0)
2698 ret = tmem_flush_page(pool, oidp, index);
2699 zcache_put_pool(pool);
2700 }
2701 if (ret >= 0)
2702 zcache_flush_found++;
2703 local_irq_restore(flags);
2704 return ret;
2705}
2706
2707int zcache_flush_object(int cli_id, int pool_id, struct tmem_oid *oidp)
2708{
2709 struct tmem_pool *pool;
2710 int ret = -1;
2711 unsigned long flags;
2712
2713 local_irq_save(flags);
2714 zcache_flobj_total++;
2715 pool = zcache_get_pool_by_id(cli_id, pool_id);
2716 ramster_do_preload_flnode_only(pool);
2717 if (likely(pool != NULL)) {
2718 if (atomic_read(&pool->obj_count) > 0)
2719 ret = tmem_flush_object(pool, oidp);
2720 zcache_put_pool(pool);
2721 }
2722 if (ret >= 0)
2723 zcache_flobj_found++;
2724 local_irq_restore(flags);
2725 return ret;
2726}
2727
2728int zcache_client_destroy_pool(int cli_id, int pool_id)
2729{
2730 struct tmem_pool *pool = NULL;
2731 struct zcache_client *cli = NULL;
2732 int ret = -1;
2733
2734 if (pool_id < 0)
2735 goto out;
2736 if (cli_id == LOCAL_CLIENT)
2737 cli = &zcache_host;
2738 else if ((unsigned int)cli_id < MAX_CLIENTS)
2739 cli = &zcache_clients[cli_id];
2740 if (cli == NULL)
2741 goto out;
2742 atomic_inc(&cli->refcount);
2743 pool = cli->tmem_pools[pool_id];
2744 if (pool == NULL)
2745 goto out;
2746 cli->tmem_pools[pool_id] = NULL;
2747 /* wait for pool activity on other cpus to quiesce */
2748 while (atomic_read(&pool->refcount) != 0)
2749 ;
2750 atomic_dec(&cli->refcount);
2751 local_bh_disable();
2752 ret = tmem_destroy_pool(pool);
2753 local_bh_enable();
2754 kfree(pool);
2755 pr_info("ramster: destroyed pool id=%d cli_id=%d\n", pool_id, cli_id);
2756out:
2757 return ret;
2758}
2759
2760static int zcache_destroy_pool(int pool_id)
2761{
2762 return zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
2763}
2764
2765int zcache_new_pool(uint16_t cli_id, uint32_t flags)
2766{
2767 int poolid = -1;
2768 struct tmem_pool *pool;
2769 struct zcache_client *cli = NULL;
2770
2771 if (cli_id == LOCAL_CLIENT)
2772 cli = &zcache_host;
2773 else if ((unsigned int)cli_id < MAX_CLIENTS)
2774 cli = &zcache_clients[cli_id];
2775 if (cli == NULL)
2776 goto out;
2777 atomic_inc(&cli->refcount);
2778 pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
2779 if (pool == NULL) {
2780 pr_info("ramster: pool creation failed: out of memory\n");
2781 goto out;
2782 }
2783
2784 for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
2785 if (cli->tmem_pools[poolid] == NULL)
2786 break;
2787 if (poolid >= MAX_POOLS_PER_CLIENT) {
2788 pr_info("ramster: pool creation failed: max exceeded\n");
2789 kfree(pool);
2790 poolid = -1;
2791 goto out;
2792 }
2793 atomic_set(&pool->refcount, 0);
2794 pool->client = cli;
2795 pool->pool_id = poolid;
2796 tmem_new_pool(pool, flags);
2797 cli->tmem_pools[poolid] = pool;
2798 if (cli_id == LOCAL_CLIENT)
2799 pr_info("ramster: created %s tmem pool, id=%d, local client\n",
2800 flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
2801 poolid);
2802 else
2803 pr_info("ramster: created %s tmem pool, id=%d, client=%d\n",
2804 flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
2805 poolid, cli_id);
2806out:
2807 if (cli != NULL)
2808 atomic_dec(&cli->refcount);
2809 return poolid;
2810}
2811
2812static int zcache_local_new_pool(uint32_t flags)
2813{
2814 return zcache_new_pool(LOCAL_CLIENT, flags);
2815}
2816
2817int zcache_autocreate_pool(int cli_id, int pool_id, bool ephemeral)
2818{
2819 struct tmem_pool *pool;
2820 struct zcache_client *cli = NULL;
2821 uint32_t flags = ephemeral ? 0 : TMEM_POOL_PERSIST;
2822 int ret = -1;
2823
2824 if (cli_id == LOCAL_CLIENT)
2825 goto out;
2826 if (pool_id >= MAX_POOLS_PER_CLIENT)
2827 goto out;
2828 else if ((unsigned int)cli_id < MAX_CLIENTS)
2829 cli = &zcache_clients[cli_id];
2830 if ((ephemeral && !use_cleancache) || (!ephemeral && !use_frontswap))
2831 BUG(); /* FIXME, handle more gracefully later */
2832 if (!cli->allocated) {
2833 if (zcache_new_client(cli_id))
2834 BUG(); /* FIXME, handle more gracefully later */
2835 cli = &zcache_clients[cli_id];
2836 }
2837 atomic_inc(&cli->refcount);
2838 pool = cli->tmem_pools[pool_id];
2839 if (pool != NULL) {
2840 if (pool->persistent && ephemeral) {
2841 pr_err("zcache_autocreate_pool: type mismatch\n");
2842 goto out;
2843 }
2844 ret = 0;
2845 goto out;
2846 }
2847 pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
2848 if (pool == NULL) {
2849 pr_info("ramster: pool creation failed: out of memory\n");
2850 goto out;
2851 }
2852 atomic_set(&pool->refcount, 0);
2853 pool->client = cli;
2854 pool->pool_id = pool_id;
2855 tmem_new_pool(pool, flags);
2856 cli->tmem_pools[pool_id] = pool;
2857 pr_info("ramster: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
2858 flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
2859 pool_id, cli_id);
2860 ret = 0;
2861out:
2862 if (cli == NULL)
2863 BUG(); /* FIXME, handle more gracefully later */
2864 /* pr_err("zcache_autocreate_pool: failed\n"); */
2865 if (cli != NULL)
2866 atomic_dec(&cli->refcount);
2867 return ret;
2868}
2869
2870/**********
2871 * Two kernel functionalities can currently be layered on top of tmem.
2872 * These are "cleancache" which is used as a second-chance cache for clean
2873 * page cache pages; and "frontswap" which is used for swap pages
2874 * to avoid writes to disk. A generic "shim" is provided here for each
2875 * to translate in-kernel semantics to zcache semantics.
2876 */
2877
2878#ifdef CONFIG_CLEANCACHE
2879static void zcache_cleancache_put_page(int pool_id,
2880 struct cleancache_filekey key,
2881 pgoff_t index, struct page *page)
2882{
2883 u32 ind = (u32) index;
2884 struct tmem_oid oid = *(struct tmem_oid *)&key;
2885
2886#ifdef __PG_WAS_ACTIVE
2887 if (!PageWasActive(page)) {
2888 zcache_nonactive_puts++;
2889 return;
2890 }
2891#endif
2892 if (likely(ind == index)) {
2893 char *kva = page_address(page);
2894
2895 (void)zcache_put(LOCAL_CLIENT, pool_id, &oid, index,
2896 kva, PAGE_SIZE, 0, 1);
2897 }
2898}
2899
2900static int zcache_cleancache_get_page(int pool_id,
2901 struct cleancache_filekey key,
2902 pgoff_t index, struct page *page)
2903{
2904 u32 ind = (u32) index;
2905 struct tmem_oid oid = *(struct tmem_oid *)&key;
2906 int ret = -1;
2907
2908 preempt_disable();
2909 if (likely(ind == index)) {
2910 char *kva = page_address(page);
2911 size_t size = PAGE_SIZE;
2912
2913 ret = zcache_get(LOCAL_CLIENT, pool_id, &oid, index,
2914 kva, &size, 0, 0);
2915#ifdef __PG_WAS_ACTIVE
2916 if (ret == 0)
2917 SetPageWasActive(page);
2918#endif
2919 }
2920 preempt_enable();
2921 return ret;
2922}
2923
2924static void zcache_cleancache_flush_page(int pool_id,
2925 struct cleancache_filekey key,
2926 pgoff_t index)
2927{
2928 u32 ind = (u32) index;
2929 struct tmem_oid oid = *(struct tmem_oid *)&key;
2930
2931 if (likely(ind == index))
2932 (void)zcache_flush(LOCAL_CLIENT, pool_id, &oid, ind);
2933}
2934
2935static void zcache_cleancache_flush_inode(int pool_id,
2936 struct cleancache_filekey key)
2937{
2938 struct tmem_oid oid = *(struct tmem_oid *)&key;
2939
2940 (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
2941}
2942
2943static void zcache_cleancache_flush_fs(int pool_id)
2944{
2945 if (pool_id >= 0)
2946 (void)zcache_destroy_pool(pool_id);
2947}
2948
2949static int zcache_cleancache_init_fs(size_t pagesize)
2950{
2951 BUG_ON(sizeof(struct cleancache_filekey) !=
2952 sizeof(struct tmem_oid));
2953 BUG_ON(pagesize != PAGE_SIZE);
2954 return zcache_local_new_pool(0);
2955}
2956
2957static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
2958{
2959 /* shared pools are unsupported and map to private */
2960 BUG_ON(sizeof(struct cleancache_filekey) !=
2961 sizeof(struct tmem_oid));
2962 BUG_ON(pagesize != PAGE_SIZE);
2963 return zcache_local_new_pool(0);
2964}
2965
2966static struct cleancache_ops zcache_cleancache_ops = {
2967 .put_page = zcache_cleancache_put_page,
2968 .get_page = zcache_cleancache_get_page,
2969 .invalidate_page = zcache_cleancache_flush_page,
2970 .invalidate_inode = zcache_cleancache_flush_inode,
2971 .invalidate_fs = zcache_cleancache_flush_fs,
2972 .init_shared_fs = zcache_cleancache_init_shared_fs,
2973 .init_fs = zcache_cleancache_init_fs
2974};
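
/*
 * Note the 1:1 mapping: a cleancache_filekey is reinterpreted directly
 * as a tmem_oid (the init_fs hooks BUG if the two sizes ever diverge),
 * puts are fire-and-forget, and a get returns 0 only on a hit.
 */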
2975
2976struct cleancache_ops zcache_cleancache_register_ops(void)
2977{
2978 struct cleancache_ops old_ops =
2979 cleancache_register_ops(&zcache_cleancache_ops);
2980
2981 return old_ops;
2982}
2983#endif
2984
2985#ifdef CONFIG_FRONTSWAP
2986/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
2987static int zcache_frontswap_poolid = -1;
2988
2989/*
2990 * Swizzling increases objects per swaptype, increasing tmem concurrency
2991 * for heavy swaploads; later, a larger nr_cpus can justify a larger SWIZ_BITS.
2992 */
2993#define SWIZ_BITS 8
2994#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
2995#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
2996#define iswiz(_ind) (_ind >> SWIZ_BITS)
2997
2998static inline struct tmem_oid oswiz(unsigned type, u32 ind)
2999{
3000 struct tmem_oid oid = { .oid = { 0 } };
3001 oid.oid[0] = _oswiz(type, ind);
3002 return oid;
3003}
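
/*
 * Worked example with SWIZ_BITS == 8: offset 0x12345 in swaptype 2
 * gives oswiz(2, ind).oid[0] == (2 << 8) | 0x45 and iswiz(ind) == 0x123,
 * so adjacent swap offsets scatter across up to 256 tmem objects per
 * swaptype while the high bits select the index within an object.
 */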
3004
3005static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
3006 struct page *page)
3007{
3008 u64 ind64 = (u64)offset;
3009 u32 ind = (u32)offset;
3010 struct tmem_oid oid = oswiz(type, ind);
3011 int ret = -1;
3012 unsigned long flags;
3013 char *kva;
3014
3015 BUG_ON(!PageLocked(page));
3016 if (likely(ind64 == ind)) {
3017 local_irq_save(flags);
3018 kva = page_address(page);
3019 ret = zcache_put(LOCAL_CLIENT, zcache_frontswap_poolid,
3020 &oid, iswiz(ind), kva, PAGE_SIZE, 0, 0);
3021 local_irq_restore(flags);
3022 }
3023 return ret;
3024}
3025
3026/* returns 0 if the page was successfully fetched from frontswap, -1 if
3027 * it was not present (should never happen!) */
3028static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
3029 struct page *page)
3030{
3031 u64 ind64 = (u64)offset;
3032 u32 ind = (u32)offset;
3033 struct tmem_oid oid = oswiz(type, ind);
3034 int ret = -1;
3035
3036 preempt_disable(); /* FIXME, remove this? */
3037 BUG_ON(!PageLocked(page));
3038 if (likely(ind64 == ind)) {
3039 char *kva = page_address(page);
3040 size_t size = PAGE_SIZE;
3041
3042 ret = zcache_get(LOCAL_CLIENT, zcache_frontswap_poolid,
3043 &oid, iswiz(ind), kva, &size, 0, -1);
3044 }
3045 preempt_enable(); /* FIXME, remove this? */
3046 return ret;
3047}
3048
3049/* flush a single page from frontswap */
3050static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
3051{
3052 u64 ind64 = (u64)offset;
3053 u32 ind = (u32)offset;
3054 struct tmem_oid oid = oswiz(type, ind);
3055
3056 if (likely(ind64 == ind))
3057 (void)zcache_flush(LOCAL_CLIENT, zcache_frontswap_poolid,
3058 &oid, iswiz(ind));
3059}
3060
3061/* flush all pages from the passed swaptype */
3062static void zcache_frontswap_flush_area(unsigned type)
3063{
3064 struct tmem_oid oid;
3065 int ind;
3066
3067 for (ind = SWIZ_MASK; ind >= 0; ind--) {
3068 oid = oswiz(type, ind);
3069 (void)zcache_flush_object(LOCAL_CLIENT,
3070 zcache_frontswap_poolid, &oid);
3071 }
3072}
3073
3074static void zcache_frontswap_init(unsigned ignored)
3075{
3076 /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
3077 if (zcache_frontswap_poolid < 0)
3078 zcache_frontswap_poolid =
3079 zcache_local_new_pool(TMEM_POOL_PERSIST);
3080}
3081
3082static struct frontswap_ops zcache_frontswap_ops = {
3083 .put_page = zcache_frontswap_put_page,
3084 .get_page = zcache_frontswap_get_page,
3085 .invalidate_page = zcache_frontswap_flush_page,
3086 .invalidate_area = zcache_frontswap_flush_area,
3087 .init = zcache_frontswap_init
3088};
3089
3090struct frontswap_ops zcache_frontswap_register_ops(void)
3091{
3092 struct frontswap_ops old_ops =
3093 frontswap_register_ops(&zcache_frontswap_ops);
3094
3095 return old_ops;
3096}
3097#endif
3098
3099/*
3100 * frontswap selfshrinking
3101 */
3102
3103#ifdef CONFIG_FRONTSWAP
3104/* In HZ, controls frequency of worker invocation. */
3105static unsigned int selfshrink_interval __read_mostly = 5;
3106
3107static void selfshrink_process(struct work_struct *work);
3108static DECLARE_DELAYED_WORK(selfshrink_worker, selfshrink_process);
3109
3110/* Enable/disable with sysfs. */
3111static bool frontswap_selfshrinking __read_mostly;
3112
3113/* Enable/disable with kernel boot option. */
3114static bool use_frontswap_selfshrink __initdata = true;
3115
3116/*
3117 * The default values for the following parameters were deemed reasonable
3118 * by experimentation, may be workload-dependent, and can all be
3119 * adjusted via sysfs.
3120 */
3121
3122/* Control rate for frontswap shrinking. Higher hysteresis is slower. */
3123static unsigned int frontswap_hysteresis __read_mostly = 20;
3124
3125/*
3126 * Number of selfshrink worker invocations to wait before observing that
3127 * frontswap selfshrinking should commence. Note that selfshrinking does
3128 * not use a separate worker thread.
3129 */
3130static unsigned int frontswap_inertia __read_mostly = 3;
3131
3132/* Countdown to next invocation of frontswap_shrink() */
3133static unsigned long frontswap_inertia_counter;
3134
3135/*
3136 * Invoked by the selfshrink delayed work; uses the current number of pages
3137 * in frontswap (frontswap_curr_pages()), previous status, and control
3138 * values (hysteresis and inertia) to determine if frontswap should be
3139 * shrunk and what the new frontswap size should be. Note that
3140 * frontswap_shrink is essentially a partial swapoff that immediately
3141 * transfers pages from the "swap device" (frontswap) back into kernel
3142 * RAM; despite the name, frontswap "shrinking" is very different from
3143 * the "shrinker" interface used by the kernel MM subsystem to reclaim
3144 * memory.
3145 */
3146static void frontswap_selfshrink(void)
3147{
3148 static unsigned long cur_frontswap_pages;
3149 static unsigned long last_frontswap_pages;
3150 static unsigned long tgt_frontswap_pages;
3151
3152 last_frontswap_pages = cur_frontswap_pages;
3153 cur_frontswap_pages = frontswap_curr_pages();
3154 if (!cur_frontswap_pages ||
3155 (cur_frontswap_pages > last_frontswap_pages)) {
3156 frontswap_inertia_counter = frontswap_inertia;
3157 return;
3158 }
3159 if (frontswap_inertia_counter && --frontswap_inertia_counter)
3160 return;
3161 if (cur_frontswap_pages <= frontswap_hysteresis)
3162 tgt_frontswap_pages = 0;
3163 else
3164 tgt_frontswap_pages = cur_frontswap_pages -
3165 (cur_frontswap_pages / frontswap_hysteresis);
3166 frontswap_shrink(tgt_frontswap_pages);
3167}
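
/*
 * Example with the defaults (hysteresis == 20, inertia == 3): once
 * frontswap has held steady or shrunk for three consecutive intervals,
 * a population of 10000 pages gets a target of 10000 - 10000/20 = 9500,
 * i.e. roughly 5% of frontswap is pulled back into RAM per interval.
 */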
3168
3169static int __init ramster_nofrontswap_selfshrink_setup(char *s)
3170{
3171 use_frontswap_selfshrink = false;
3172 return 1;
3173}
3174
3175__setup("noselfshrink", ramster_nofrontswap_selfshrink_setup);
3176
3177static void selfshrink_process(struct work_struct *work)
3178{
3179 if (frontswap_selfshrinking && frontswap_enabled) {
3180 frontswap_selfshrink();
3181 schedule_delayed_work(&selfshrink_worker,
3182 selfshrink_interval * HZ);
3183 }
3184}
3185
3186static int ramster_enabled;
3187
3188static int __init ramster_selfshrink_init(void)
3189{
3190 frontswap_selfshrinking = ramster_enabled && use_frontswap_selfshrink;
3191 if (frontswap_selfshrinking)
3192 pr_info("ramster: Initializing frontswap "
3193 "selfshrinking driver.\n");
3194 else
3195 return -ENODEV;
3196
3197 schedule_delayed_work(&selfshrink_worker, selfshrink_interval * HZ);
3198
3199 return 0;
3200}
3201
3202subsys_initcall(ramster_selfshrink_init);
3203#endif
3204
3205/*
3206 * zcache initialization
3207 * NOTE FOR NOW ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR
3208 * NOTHING HAPPENS!
3209 */
3210
3211static int ramster_enabled;
3212
3213static int __init enable_ramster(char *s)
3214{
3215 ramster_enabled = 1;
3216 return 1;
3217}
3218__setup("ramster", enable_ramster);
3219
3220/* allow independent dynamic disabling of cleancache and frontswap */
3221
3222static int use_cleancache = 1;
3223
3224static int __init no_cleancache(char *s)
3225{
3226 pr_info("INIT no_cleancache called\n");
3227 use_cleancache = 0;
3228 return 1;
3229}
3230
3231/*
3232 * FIXME: need to guarantee this gets checked before zcache_init is called.
3233 * What is the correct way to achieve this?
3234 */
3235early_param("nocleancache", no_cleancache);
3236
3237static int use_frontswap = 1;
3238
3239static int __init no_frontswap(char *s)
3240{
3241 pr_info("INIT no_frontswap called\n");
3242 use_frontswap = 0;
3243 return 1;
3244}
3245
3246__setup("nofrontswap", no_frontswap);
3247
3248static int __init zcache_init(void)
3249{
3250 int ret = 0;
3251
3252#ifdef CONFIG_SYSFS
3253 ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
3254	ret = ret ?: sysfs_create_group(mm_kobj, &ramster_attr_group);
3255 if (ret) {
3256 pr_err("ramster: can't create sysfs\n");
3257 goto out;
3258 }
3259#endif /* CONFIG_SYSFS */
3260#if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP)
3261 if (ramster_enabled) {
3262 unsigned int cpu;
3263
3264 (void)r2net_register_handlers();
3265 tmem_register_hostops(&zcache_hostops);
3266 tmem_register_pamops(&zcache_pamops);
3267 ret = register_cpu_notifier(&zcache_cpu_notifier_block);
3268 if (ret) {
3269 pr_err("ramster: can't register cpu notifier\n");
3270 goto out;
3271 }
3272 for_each_online_cpu(cpu) {
3273 void *pcpu = (void *)(long)cpu;
3274 zcache_cpu_notifier(&zcache_cpu_notifier_block,
3275 CPU_UP_PREPARE, pcpu);
3276 }
3277 }
3278 zcache_objnode_cache = kmem_cache_create("zcache_objnode",
3279 sizeof(struct tmem_objnode), 0, 0, NULL);
3280 zcache_obj_cache = kmem_cache_create("zcache_obj",
3281 sizeof(struct tmem_obj), 0, 0, NULL);
3282 ramster_flnode_cache = kmem_cache_create("ramster_flnode",
3283 sizeof(struct flushlist_node), 0, 0, NULL);
3284#endif
3285#ifdef CONFIG_CLEANCACHE
3286 pr_info("INIT ramster_enabled=%d use_cleancache=%d\n",
3287 ramster_enabled, use_cleancache);
3288 if (ramster_enabled && use_cleancache) {
3289 struct cleancache_ops old_ops;
3290
3291 zbud_init();
3292 register_shrinker(&zcache_shrinker);
3293 old_ops = zcache_cleancache_register_ops();
3294 pr_info("ramster: cleancache enabled using kernel "
3295 "transcendent memory and compression buddies\n");
3296 if (old_ops.init_fs != NULL)
3297 pr_warning("ramster: cleancache_ops overridden");
3298 }
3299#endif
3300#ifdef CONFIG_FRONTSWAP
3301 pr_info("INIT ramster_enabled=%d use_frontswap=%d\n",
3302 ramster_enabled, use_frontswap);
3303 if (ramster_enabled && use_frontswap) {
3304 struct frontswap_ops old_ops;
3305
3306 zcache_new_client(LOCAL_CLIENT);
3307 old_ops = zcache_frontswap_register_ops();
3308 pr_info("ramster: frontswap enabled using kernel "
3309 "transcendent memory and xvmalloc\n");
3310 if (old_ops.init != NULL)
3311			pr_warning("ramster: frontswap_ops overridden\n");
3312 }
3313 if (ramster_enabled && (use_frontswap || use_cleancache))
3314 ramster_remotify_init();
3315#endif
3316out:
3317 return ret;
3318}
3319
3320module_init(zcache_init)
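Even without the ramster boot parameter, zcache_init() still runs at module_init time and (under CONFIG_SYSFS) creates the sysfs groups and kmem caches; every registration step, however, is gated on ramster_enabled, which is what the NOTE above enable_ramster() means by "NOTHING HAPPENS".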
diff --git a/drivers/staging/ramster/zcache.h b/drivers/staging/ramster/zcache.h
new file mode 100644
index 000000000000..250b121c22e5
--- /dev/null
+++ b/drivers/staging/ramster/zcache.h
@@ -0,0 +1,22 @@
1/*
2 * zcache.h
3 *
4 * External zcache functions
5 *
6 * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
7 */
8
9#ifndef _ZCACHE_H_
10#define _ZCACHE_H_
11
12extern int zcache_put(int, int, struct tmem_oid *, uint32_t,
13 char *, size_t, bool, int);
14extern int zcache_autocreate_pool(int, int, bool);
15extern int zcache_get(int, int, struct tmem_oid *, uint32_t,
16 char *, size_t *, bool, int);
17extern int zcache_flush(int, int, struct tmem_oid *, uint32_t);
18extern int zcache_flush_object(int, int, struct tmem_oid *);
19extern int zcache_localify(int, struct tmem_oid *, uint32_t,
20 char *, size_t, void *);
21
22#endif /* _ZCACHE_H_ */
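The prototypes above are deliberately bare. As a rough orientation only (the argument names below are guesses inferred from the call sites in zcache.c, not names from the source), a local put/get round trip through this interface would look something like:

	/* Illustrative sketch; argument names are assumptions, only the
	 * prototypes come from zcache.h above. */
	struct tmem_oid oid = { .oid = { 1, 2, 3 } };
	char data[PAGE_SIZE];
	size_t size = PAGE_SIZE;
	int cli_id = LOCAL_CLIENT;	/* as used by zcache_init() */
	int pool_id = 0;		/* assumed pool created earlier */

	if (zcache_put(cli_id, pool_id, &oid, 0, data, PAGE_SIZE, false, 0))
		pr_err("zcache_put failed\n");
	else if (zcache_get(cli_id, pool_id, &oid, 0, data, &size, false, 0) == 0)
		pr_info("zcache_get returned %zu bytes\n", size);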
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 04c23919f4d6..e4ade550cfe5 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -439,8 +439,7 @@ void buffer_free(struct net_device *dev, struct buffer **buffer, int len, short
439 } 439 }
440 kfree(tmp); 440 kfree(tmp);
441 tmp = next; 441 tmp = next;
442 } 442 } while (next != *buffer);
443 while (next != *buffer);
444 443
445 *buffer = NULL; 444 *buffer = NULL;
446} 445}
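The buffer_free() hunk above is purely cosmetic: it rejoins the split do-while so the loop reads as one construct. The pattern it tidies up is freeing a circular singly linked list exactly once, starting from its head. A generic userspace sketch (plain C, not the driver's types):

	#include <stdlib.h>

	struct node { struct node *next; };

	static void free_ring(struct node **head)
	{
		struct node *tmp = *head, *next;

		do {
			next = tmp->next;
			free(tmp);
			tmp = next;
		} while (next != *head);
		*head = NULL;
	}

A do-while fits because the head node must be freed before the termination test can compare the cursor back against *head.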
@@ -1392,11 +1391,13 @@ void PerformUndecoratedSignalSmoothing8185(struct r8180_priv *priv,
1392 priv->bCurCCKPkt = bCckRate; 1391 priv->bCurCCKPkt = bCckRate;
1393 1392
1394 if (priv->UndecoratedSmoothedSS >= 0) 1393 if (priv->UndecoratedSmoothedSS >= 0)
1395 priv->UndecoratedSmoothedSS = ((priv->UndecoratedSmoothedSS * 5) + (priv->SignalStrength * 10)) / 6; 1394 priv->UndecoratedSmoothedSS = ((priv->UndecoratedSmoothedSS * 5) +
1395 (priv->SignalStrength * 10)) / 6;
1396 else 1396 else
1397 priv->UndecoratedSmoothedSS = priv->SignalStrength * 10; 1397 priv->UndecoratedSmoothedSS = priv->SignalStrength * 10;
1398 1398
1399 priv->UndercorateSmoothedRxPower = ((priv->UndercorateSmoothedRxPower * 50) + (priv->RxPower * 11)) / 60; 1399 priv->UndercorateSmoothedRxPower = ((priv->UndercorateSmoothedRxPower * 50) +
1400 (priv->RxPower * 11)) / 60;
1400 1401
1401 if (bCckRate) 1402 if (bCckRate)
1402 priv->CurCCKRSSI = priv->RSSI; 1403 priv->CurCCKRSSI = priv->RSSI;
@@ -1607,43 +1608,50 @@ void rtl8180_rx(struct net_device *dev)
1607 /* printk("==========================>rx : RXAGC is %d,signalstrength is %d\n",RXAGC,stats.signalstrength); */ 1608 /* printk("==========================>rx : RXAGC is %d,signalstrength is %d\n",RXAGC,stats.signalstrength); */
1608 stats.rssi = priv->wstats.qual.qual = priv->SignalQuality; 1609 stats.rssi = priv->wstats.qual.qual = priv->SignalQuality;
1609 stats.noise = priv->wstats.qual.noise = 100 - priv->wstats.qual.qual; 1610 stats.noise = priv->wstats.qual.noise = 100 - priv->wstats.qual.qual;
1610 bHwError = (((*(priv->rxringtail)) & (0x00000fff)) == 4080) | (((*(priv->rxringtail)) & (0x04000000)) != 0) 1611 bHwError = (((*(priv->rxringtail)) & (0x00000fff)) == 4080) |
1611 | (((*(priv->rxringtail)) & (0x08000000)) != 0) | (((~(*(priv->rxringtail))) & (0x10000000)) != 0) | (((~(*(priv->rxringtail))) & (0x20000000)) != 0); 1612 (((*(priv->rxringtail)) & (0x04000000)) != 0) |
1613 (((*(priv->rxringtail)) & (0x08000000)) != 0) |
1614 (((~(*(priv->rxringtail))) & (0x10000000)) != 0) |
1615 (((~(*(priv->rxringtail))) & (0x20000000)) != 0);
1612 bCRC = ((*(priv->rxringtail)) & (0x00002000)) >> 13; 1616 bCRC = ((*(priv->rxringtail)) & (0x00002000)) >> 13;
1613 bICV = ((*(priv->rxringtail)) & (0x00001000)) >> 12; 1617 bICV = ((*(priv->rxringtail)) & (0x00001000)) >> 12;
1614 hdr = (struct ieee80211_hdr_4addr *)priv->rxbuffer->buf; 1618 hdr = (struct ieee80211_hdr_4addr *)priv->rxbuffer->buf;
1615 fc = le16_to_cpu(hdr->frame_ctl); 1619 fc = le16_to_cpu(hdr->frame_ctl);
1616 type = WLAN_FC_GET_TYPE(fc); 1620 type = WLAN_FC_GET_TYPE(fc);
1617 1621
1618 if ((IEEE80211_FTYPE_CTL != type) && 1622 if (IEEE80211_FTYPE_CTL != type &&
1619 (eqMacAddr(priv->ieee80211->current_network.bssid, (fc & IEEE80211_FCTL_TODS) ? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : hdr->addr3)) 1623 !bHwError && !bCRC && !bICV &&
1620 && (!bHwError) && (!bCRC) && (!bICV)) { 1624 eqMacAddr(priv->ieee80211->current_network.bssid,
1621 /* Perform signal smoothing for dynamic 1625 fc & IEEE80211_FCTL_TODS ? hdr->addr1 :
1622 * mechanism on demand. This is different 1626 fc & IEEE80211_FCTL_FROMDS ? hdr->addr2 :
1623 * with PerformSignalSmoothing8185 in smoothing 1627 hdr->addr3)) {
1624 * formula. No dramatic adjustment is applied 1628
1625 * because dynamic mechanism needs some degree 1629 /* Perform signal smoothing for dynamic
1626 * of correctness. */ 1630 * mechanism on demand. This is different
1627 PerformUndecoratedSignalSmoothing8185(priv, bCckRate); 1631 * with PerformSignalSmoothing8185 in smoothing
1628 1632 * formula. No dramatic adjustment is applied
1629 /* For good-looking signal strength. */ 1633 * because dynamic mechanism needs some degree
1630 SignalStrengthIndex = NetgearSignalStrengthTranslate( 1634 * of correctness. */
1631 priv->LastSignalStrengthInPercent, 1635 PerformUndecoratedSignalSmoothing8185(priv, bCckRate);
1632 priv->SignalStrength); 1636
1633 1637 /* For good-looking signal strength. */
1634 priv->LastSignalStrengthInPercent = SignalStrengthIndex; 1638 SignalStrengthIndex = NetgearSignalStrengthTranslate(
1635 priv->Stats_SignalStrength = TranslateToDbm8185((u8)SignalStrengthIndex); 1639 priv->LastSignalStrengthInPercent,
1640 priv->SignalStrength);
1641
1642 priv->LastSignalStrengthInPercent = SignalStrengthIndex;
1643 priv->Stats_SignalStrength = TranslateToDbm8185((u8)SignalStrengthIndex);
1636 /* 1644 /*
1637 * We need more correct power of received packets and the "SignalStrength" of RxStats is beautified, 1645 * We need more correct power of received packets and the "SignalStrength" of RxStats is beautified,
1638 * so we record the correct power here. 1646 * so we record the correct power here.
1639 */ 1647 */
1640 priv->Stats_SignalQuality = (long)(priv->Stats_SignalQuality * 5 + (long)priv->SignalQuality + 5) / 6; 1648 priv->Stats_SignalQuality = (long)(priv->Stats_SignalQuality * 5 + (long)priv->SignalQuality + 5) / 6;
1641 priv->Stats_RecvSignalPower = (long)(priv->Stats_RecvSignalPower * 5 + priv->RecvSignalPower - 1) / 6; 1649 priv->Stats_RecvSignalPower = (long)(priv->Stats_RecvSignalPower * 5 + priv->RecvSignalPower - 1) / 6;
1642 1650
1643 /* Figure out which antenna received the latest packet. */ 1651 /* Figure out which antenna received the latest packet. */
1644 priv->LastRxPktAntenna = Antenna ? 1 : 0; /* 0: aux, 1: main. */ 1652 priv->LastRxPktAntenna = Antenna ? 1 : 0; /* 0: aux, 1: main. */
1645 SwAntennaDiversityRxOk8185(dev, priv->SignalStrength); 1653 SwAntennaDiversityRxOk8185(dev, priv->SignalStrength);
1646 } 1654 }
1647 1655
1648 if (first) { 1656 if (first) {
1649 if (!priv->rx_skb_complete) { 1657 if (!priv->rx_skb_complete) {
@@ -1654,7 +1662,7 @@ void rtl8180_rx(struct net_device *dev)
1654 } 1662 }
1655 /* support for prism header has been originally added by Christian */ 1663 /* support for prism header has been originally added by Christian */
1656 if (priv->prism_hdr && priv->ieee80211->iw_mode == IW_MODE_MONITOR) { 1664 if (priv->prism_hdr && priv->ieee80211->iw_mode == IW_MODE_MONITOR) {
1657 1665
1658 } else { 1666 } else {
1659 priv->rx_skb = dev_alloc_skb(len+2); 1667 priv->rx_skb = dev_alloc_skb(len+2);
1660 if (!priv->rx_skb) 1668 if (!priv->rx_skb)
@@ -1766,7 +1774,7 @@ void rtl8180_data_hard_resume(struct net_device *dev)
1766 rtl8180_set_mode(dev, EPROM_CMD_NORMAL); 1774 rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
1767} 1775}
1768 1776
1769/* 1777/*
1770 * This function transmits data frames when the ieee80211 stack requires it. 1778 * This function transmits data frames when the ieee80211 stack requires it.
1771 * It also checks whether the ieee tx queue needs to be stopped, doing so if needed. 1779 * It also checks whether the ieee tx queue needs to be stopped, doing so if needed.
1772 */ 1780 */
@@ -1810,7 +1818,7 @@ rate) {
1810 spin_unlock_irqrestore(&priv->tx_lock, flags); 1818 spin_unlock_irqrestore(&priv->tx_lock, flags);
1811} 1819}
1812 1820
1813/* 1821/*
1814 * This is a rough attempt to TX a frame 1822 * This is a rough attempt to TX a frame
1815 * This is called by the ieee 80211 stack to TX management frames. 1823 * This is called by the ieee 80211 stack to TX management frames.
1816 * If the ring is full packet are dropped (for data frame the queue 1824 * If the ring is full packet are dropped (for data frame the queue
@@ -1916,7 +1924,7 @@ void rtl8180_prepare_beacon(struct net_device *dev)
1916 } 1924 }
1917} 1925}
1918 1926
1919/* 1927/*
1920 * This function does the real dirty work: it enqueues a TX command 1928 * This function does the real dirty work: it enqueues a TX command
1921 * descriptor in the ring buffer, copies the frame into a TX buffer 1929 * descriptor in the ring buffer, copies the frame into a TX buffer
1922 * and kicks the NIC to ensure it does the DMA transfer. 1930 * and kicks the NIC to ensure it does the DMA transfer.
@@ -2002,7 +2010,8 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
2002 bRTSEnable = 0; 2010 bRTSEnable = 0;
2003 bCTSEnable = 0; 2011 bCTSEnable = 0;
2004 2012
2005 ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate), 0, bUseShortPreamble); 2013 ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate),
2014 0, bUseShortPreamble);
2006 TxDescDuration = ThisFrameTime; 2015 TxDescDuration = ThisFrameTime;
2007 } else { /* Unicast packet */ 2016 } else { /* Unicast packet */
2008 u16 AckTime; 2017 u16 AckTime;
@@ -2040,7 +2049,8 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
2040 bRTSEnable = 0; 2049 bRTSEnable = 0;
2041 RtsDur = 0; 2050 RtsDur = 0;
2042 2051
2043 ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate), 0, bUseShortPreamble); 2052 ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate),
2053 0, bUseShortPreamble);
2044 TxDescDuration = ThisFrameTime + aSifsTime + AckTime; 2054 TxDescDuration = ThisFrameTime + aSifsTime + AckTime;
2045 } 2055 }
2046 2056
@@ -2184,7 +2194,7 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
2184 priv->txhpbufstail = buflist; 2194 priv->txhpbufstail = buflist;
2185 break; 2195 break;
2186 case BEACON_PRIORITY: 2196 case BEACON_PRIORITY:
2187 /* 2197 /*
2188 * The HW seems to be happy with the 1st 2198 * The HW seems to be happy with the 1st
2189 * descriptor filled and the 2nd empty... 2199 * descriptor filled and the 2nd empty...
2190 * So always update descriptor 1 and never 2200 * So always update descriptor 1 and never
@@ -2304,13 +2314,13 @@ void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
2304 2314
2305 spin_lock_irqsave(&priv->ps_lock, flags); 2315 spin_lock_irqsave(&priv->ps_lock, flags);
2306 2316
2307 /* 2317 /*
2308 * Writing HW register with 0 equals to disable 2318 * Writing HW register with 0 equals to disable
2309 * the timer, that is not really what we want 2319 * the timer, that is not really what we want
2310 */ 2320 */
2311 tl -= MSECS(4+16+7); 2321 tl -= MSECS(4+16+7);
2312 2322
2313 /* 2323 /*
2314 * If the interval in which we are requested to sleep is too 2324 * If the interval in which we are requested to sleep is too
2315 * short then give up and remain awake 2325 * short then give up and remain awake
2316 */ 2326 */
@@ -2325,10 +2335,10 @@ void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
2325 u32 tmp = (tl > rb) ? (tl-rb) : (rb-tl); 2335 u32 tmp = (tl > rb) ? (tl-rb) : (rb-tl);
2326 2336
2327 priv->DozePeriodInPast2Sec += jiffies_to_msecs(tmp); 2337 priv->DozePeriodInPast2Sec += jiffies_to_msecs(tmp);
2328 2338 /* as tl may be less than rb */
2329 queue_delayed_work(priv->ieee80211->wq, &priv->ieee80211->hw_wakeup_wq, tmp); /* as tl may be less than rb */ 2339 queue_delayed_work(priv->ieee80211->wq, &priv->ieee80211->hw_wakeup_wq, tmp);
2330 } 2340 }
2331 /* 2341 /*
2332 * If we suspect the TimerInt is gone beyond tl 2342 * If we suspect the TimerInt is gone beyond tl
2333 * while setting it, then give up 2343 * while setting it, then give up
2334 */ 2344 */
@@ -3086,7 +3096,8 @@ void rtl8185_set_rate(struct net_device *dev)
3086 max_rr_rate = ieeerate2rtlrate(240); 3096 max_rr_rate = ieeerate2rtlrate(240);
3087 3097
3088 write_nic_byte(dev, RESP_RATE, 3098 write_nic_byte(dev, RESP_RATE,
3089 max_rr_rate<<MAX_RESP_RATE_SHIFT | min_rr_rate<<MIN_RESP_RATE_SHIFT); 3099 max_rr_rate<<MAX_RESP_RATE_SHIFT |
3100 min_rr_rate<<MIN_RESP_RATE_SHIFT);
3090 3101
3091 word = read_nic_word(dev, BRSR); 3102 word = read_nic_word(dev, BRSR);
3092 word &= ~BRSR_MBR_8185; 3103 word &= ~BRSR_MBR_8185;
@@ -3168,7 +3179,7 @@ void rtl8180_adapter_start(struct net_device *dev)
3168 netif_start_queue(dev); 3179 netif_start_queue(dev);
3169} 3180}
3170 3181
3171/* 3182/*
3172 * This configures registers for beacon tx and enables it via 3183 * This configures registers for beacon tx and enables it via
3173 * rtl8180_beacon_tx_enable(). rtl8180_beacon_tx_disable() might 3184 * rtl8180_beacon_tx_enable(). rtl8180_beacon_tx_disable() might
3174 * be used to stop beacon transmission 3185 * be used to stop beacon transmission
@@ -3227,7 +3238,8 @@ void LeisurePSEnter(struct r8180_priv *priv)
3227{ 3238{
3228 if (priv->bLeisurePs) { 3239 if (priv->bLeisurePs) {
3229 if (priv->ieee80211->ps == IEEE80211_PS_DISABLED) 3240 if (priv->ieee80211->ps == IEEE80211_PS_DISABLED)
3230 MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST); /* IEEE80211_PS_ENABLE */ 3241 /* IEEE80211_PS_ENABLE */
3242 MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST);
3231 } 3243 }
3232} 3244}
3233 3245
@@ -3299,7 +3311,10 @@ void rtl8180_watch_dog(struct net_device *dev)
3299 u16 SlotIndex = 0; 3311 u16 SlotIndex = 0;
3300 u16 i = 0; 3312 u16 i = 0;
3301 if (priv->ieee80211->actscanning == false) { 3313 if (priv->ieee80211->actscanning == false) {
3302 if ((priv->ieee80211->iw_mode != IW_MODE_ADHOC) && (priv->ieee80211->state == IEEE80211_NOLINK) && (priv->ieee80211->beinretry == false) && (priv->eRFPowerState == eRfOn)) 3314 if ((priv->ieee80211->iw_mode != IW_MODE_ADHOC) &&
3315 (priv->ieee80211->state == IEEE80211_NOLINK) &&
3316 (priv->ieee80211->beinretry == false) &&
3317 (priv->eRFPowerState == eRfOn))
3303 IPSEnter(dev); 3318 IPSEnter(dev);
3304 } 3319 }
3305 /* YJ,add,080828,for link state check */ 3320 /* YJ,add,080828,for link state check */
@@ -3732,7 +3747,7 @@ static int __init rtl8180_pci_module_init(void)
3732 DMESG("Wireless extensions version %d", WIRELESS_EXT); 3747 DMESG("Wireless extensions version %d", WIRELESS_EXT);
3733 rtl8180_proc_module_init(); 3748 rtl8180_proc_module_init();
3734 3749
3735 if (pci_register_driver(&rtl8180_pci_driver)) { 3750 if (pci_register_driver(&rtl8180_pci_driver)) {
3736 DMESG("No device found"); 3751 DMESG("No device found");
3737 return -ENODEV; 3752 return -ENODEV;
3738 } 3753 }
@@ -3839,7 +3854,7 @@ void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
3839 return; 3854 return;
3840 } 3855 }
3841 3856
3842 /* 3857 /*
3843 * We check all the descriptors between the head and the nic, 3858 * We check all the descriptors between the head and the nic,
3844 * but not the currently pointed by the nic (the next to be txed) 3859 * but not the currently pointed by the nic (the next to be txed)
3845 * and the previous of the pointed (might be in process ??) 3860 * and the previous of the pointed (might be in process ??)
@@ -3877,7 +3892,7 @@ void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
3877 head += 8; 3892 head += 8;
3878 } 3893 }
3879 3894
3880 /* 3895 /*
3881 * The head has been moved to the last certainly TXed 3896 * The head has been moved to the last certainly TXed
3882 * (or at least processed by the nic) packet. 3897 * (or at least processed by the nic) packet.
3883 * The driver forcefully takes ownership of all these packets 3898 * The driver forcefully takes ownership of all these packets
diff --git a/drivers/staging/rtl8187se/r8180_dm.c b/drivers/staging/rtl8187se/r8180_dm.c
index 261085d4b74a..4d7a5951486e 100644
--- a/drivers/staging/rtl8187se/r8180_dm.c
+++ b/drivers/staging/rtl8187se/r8180_dm.c
@@ -1,14 +1,8 @@
1//#include "r8180.h"
2#include "r8180_dm.h" 1#include "r8180_dm.h"
3#include "r8180_hw.h" 2#include "r8180_hw.h"
4#include "r8180_93cx6.h" 3#include "r8180_93cx6.h"
5//{by amy 080312
6 4
7// 5 /* Return TRUE if we shall perform High Power Mecahnism, FALSE otherwise. */
8// Description:
9// Return TRUE if we shall perform High Power Mecahnism, FALSE otherwise.
10//
11//+by amy 080312
12#define RATE_ADAPTIVE_TIMER_PERIOD 300 6#define RATE_ADAPTIVE_TIMER_PERIOD 300
13 7
14bool CheckHighPower(struct net_device *dev) 8bool CheckHighPower(struct net_device *dev)
@@ -17,33 +11,26 @@ bool CheckHighPower(struct net_device *dev)
17 struct ieee80211_device *ieee = priv->ieee80211; 11 struct ieee80211_device *ieee = priv->ieee80211;
18 12
19 if(!priv->bRegHighPowerMechanism) 13 if(!priv->bRegHighPowerMechanism)
20 {
21 return false; 14 return false;
22 }
23 15
24 if(ieee->state == IEEE80211_LINKED_SCANNING) 16 if(ieee->state == IEEE80211_LINKED_SCANNING)
25 {
26 return false; 17 return false;
27 }
28 18
29 return true; 19 return true;
30} 20}
31 21
32// 22/*
33// Description: 23 * Description:
34// Update Tx power level if necessary. 24 * Update Tx power level if necessary.
35// See also DoRxHighPower() and SetTxPowerLevel8185() for reference. 25 * See also DoRxHighPower() and SetTxPowerLevel8185() for reference.
36// 26 *
37// Note: 27 * Note:
38 // The reason why we update Tx power level here instead of DoRxHighPower() 28 * The reason why we update Tx power level here instead of DoRxHighPower()
39// is the number of IO to change Tx power is much more than channel TR switch 29 * is the number of IO to change Tx power is much more than channel TR switch
40// and they are related to OFDM and MAC registers. 30 * and they are related to OFDM and MAC registers.
41// So, we don't want to update it so frequently in per-Rx packet base. 31 * So, we don't want to update it so frequently in per-Rx packet base.
42// 32 */
43void 33void DoTxHighPower(struct net_device *dev)
44DoTxHighPower(
45 struct net_device *dev
46 )
47{ 34{
48 struct r8180_priv *priv = ieee80211_priv(dev); 35 struct r8180_priv *priv = ieee80211_priv(dev);
49 u16 HiPwrUpperTh = 0; 36 u16 HiPwrUpperTh = 0;
@@ -53,8 +40,6 @@ DoTxHighPower(
53 u8 u1bTmp; 40 u8 u1bTmp;
54 char OfdmTxPwrIdx, CckTxPwrIdx; 41 char OfdmTxPwrIdx, CckTxPwrIdx;
55 42
56 //printk("----> DoTxHighPower()\n");
57
58 HiPwrUpperTh = priv->RegHiPwrUpperTh; 43 HiPwrUpperTh = priv->RegHiPwrUpperTh;
59 HiPwrLowerTh = priv->RegHiPwrLowerTh; 44 HiPwrLowerTh = priv->RegHiPwrLowerTh;
60 45
@@ -63,526 +48,411 @@ DoTxHighPower(
63 RSSIHiPwrUpperTh = priv->RegRSSIHiPwrUpperTh; 48 RSSIHiPwrUpperTh = priv->RegRSSIHiPwrUpperTh;
64 RSSIHiPwrLowerTh = priv->RegRSSIHiPwrLowerTh; 49 RSSIHiPwrLowerTh = priv->RegRSSIHiPwrLowerTh;
65 50
66 //lzm add 080826 51 /* lzm add 080826 */
67 OfdmTxPwrIdx = priv->chtxpwr_ofdm[priv->ieee80211->current_network.channel]; 52 OfdmTxPwrIdx = priv->chtxpwr_ofdm[priv->ieee80211->current_network.channel];
68 CckTxPwrIdx = priv->chtxpwr[priv->ieee80211->current_network.channel]; 53 CckTxPwrIdx = priv->chtxpwr[priv->ieee80211->current_network.channel];
69 54
70 // printk("DoTxHighPower() - UndecoratedSmoothedSS:%d, CurCCKRSSI = %d , bCurCCKPkt= %d \n", priv->UndecoratedSmoothedSS, priv->CurCCKRSSI, priv->bCurCCKPkt ); 55 if ((priv->UndecoratedSmoothedSS > HiPwrUpperTh) ||
56 (priv->bCurCCKPkt && (priv->CurCCKRSSI > RSSIHiPwrUpperTh))) {
57 /* Stevenl suggested degrading 8dbm in high power state. 2007-12-04 Isaiah */
71 58
72 if((priv->UndecoratedSmoothedSS > HiPwrUpperTh) ||
73 (priv->bCurCCKPkt && (priv->CurCCKRSSI > RSSIHiPwrUpperTh)))
74 {
75 // Stevenl suggested that degrade 8dbm in high power sate. 2007-12-04 Isaiah
76
77 // printk("=====>DoTxHighPower() - High Power - UndecoratedSmoothedSS:%d, HiPwrUpperTh = %d \n", priv->UndecoratedSmoothedSS, HiPwrUpperTh );
78 priv->bToUpdateTxPwr = true; 59 priv->bToUpdateTxPwr = true;
79 u1bTmp= read_nic_byte(dev, CCK_TXAGC); 60 u1bTmp= read_nic_byte(dev, CCK_TXAGC);
80 61
81 // If it never enter High Power. 62 /* If it never enter High Power. */
82 if( CckTxPwrIdx == u1bTmp) 63 if (CckTxPwrIdx == u1bTmp) {
83 { 64 u1bTmp = (u1bTmp > 16) ? (u1bTmp -16): 0; /* 8dbm */
84 u1bTmp = (u1bTmp > 16) ? (u1bTmp -16): 0; // 8dbm 65 write_nic_byte(dev, CCK_TXAGC, u1bTmp);
85 write_nic_byte(dev, CCK_TXAGC, u1bTmp);
86 66
87 u1bTmp= read_nic_byte(dev, OFDM_TXAGC); 67 u1bTmp= read_nic_byte(dev, OFDM_TXAGC);
88 u1bTmp = (u1bTmp > 16) ? (u1bTmp -16): 0; // 8dbm 68 u1bTmp = (u1bTmp > 16) ? (u1bTmp -16): 0; /* 8dbm */
89 write_nic_byte(dev, OFDM_TXAGC, u1bTmp); 69 write_nic_byte(dev, OFDM_TXAGC, u1bTmp);
90 } 70 }
91 71
92 } 72 } else if ((priv->UndecoratedSmoothedSS < HiPwrLowerTh) &&
93 else if((priv->UndecoratedSmoothedSS < HiPwrLowerTh) && 73 (!priv->bCurCCKPkt || priv->CurCCKRSSI < RSSIHiPwrLowerTh)) {
94 (!priv->bCurCCKPkt || priv->CurCCKRSSI < RSSIHiPwrLowerTh)) 74 if (priv->bToUpdateTxPwr) {
95 {
96 // printk("DoTxHighPower() - lower Power - UndecoratedSmoothedSS:%d, HiPwrUpperTh = %d \n", priv->UndecoratedSmoothedSS, HiPwrLowerTh );
97 if(priv->bToUpdateTxPwr)
98 {
99 priv->bToUpdateTxPwr = false; 75 priv->bToUpdateTxPwr = false;
100 //SD3 required. 76 /* SD3 required. */
101 u1bTmp= read_nic_byte(dev, CCK_TXAGC); 77 u1bTmp= read_nic_byte(dev, CCK_TXAGC);
102 if(u1bTmp < CckTxPwrIdx) 78 if (u1bTmp < CckTxPwrIdx) {
103 { 79 write_nic_byte(dev, CCK_TXAGC, CckTxPwrIdx);
104 //u1bTmp = ((u1bTmp+16) > 35) ? 35: (u1bTmp+16); // 8dbm
105 //write_nic_byte(dev, CCK_TXAGC, u1bTmp);
106 write_nic_byte(dev, CCK_TXAGC, CckTxPwrIdx);
107 } 80 }
108 81
109 u1bTmp= read_nic_byte(dev, OFDM_TXAGC); 82 u1bTmp= read_nic_byte(dev, OFDM_TXAGC);
110 if(u1bTmp < OfdmTxPwrIdx) 83 if (u1bTmp < OfdmTxPwrIdx) {
111 { 84 write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
112 //u1bTmp = ((u1bTmp+16) > 35) ? 35: (u1bTmp+16); // 8dbm
113 //write_nic_byte(dev, OFDM_TXAGC, u1bTmp);
114 write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
115 } 85 }
116 } 86 }
117 } 87 }
118
119 //printk("<---- DoTxHighPower()\n");
120} 88}
121 89
122 90
123// 91/*
124// Description: 92 * Description:
125// Callback function of UpdateTxPowerWorkItem. 93 * Callback function of UpdateTxPowerWorkItem.
126// Because of some event happened, e.g. CCX TPC, High Power Mechanism, 94 * Because of some event happened, e.g. CCX TPC, High Power Mechanism,
127// We update Tx power of current channel again. 95 * We update Tx power of current channel again.
128// 96 */
129void rtl8180_tx_pw_wq (struct work_struct *work) 97void rtl8180_tx_pw_wq(struct work_struct *work)
130{ 98{
131// struct r8180_priv *priv = container_of(work, struct r8180_priv, watch_dog_wq);
132// struct ieee80211_device * ieee = (struct ieee80211_device*)
133// container_of(work, struct ieee80211_device, watch_dog_wq);
134 struct delayed_work *dwork = to_delayed_work(work); 99 struct delayed_work *dwork = to_delayed_work(work);
135 struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,tx_pw_wq); 100 struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,tx_pw_wq);
136 struct net_device *dev = ieee->dev; 101 struct net_device *dev = ieee->dev;
137
138// printk("----> UpdateTxPowerWorkItemCallback()\n");
139 102
140 DoTxHighPower(dev); 103 DoTxHighPower(dev);
141
142// printk("<---- UpdateTxPowerWorkItemCallback()\n");
143} 104}
144 105
145 106
146// 107/*
147// Description: 108 * Return TRUE if we shall perform DIG Mecahnism, FALSE otherwise.
148// Return TRUE if we shall perform DIG Mecahnism, FALSE otherwise. 109 */
149// 110bool CheckDig(struct net_device *dev)
150bool
151CheckDig(
152 struct net_device *dev
153 )
154{ 111{
155 struct r8180_priv *priv = ieee80211_priv(dev); 112 struct r8180_priv *priv = ieee80211_priv(dev);
156 struct ieee80211_device *ieee = priv->ieee80211; 113 struct ieee80211_device *ieee = priv->ieee80211;
157 114
158 if(!priv->bDigMechanism) 115 if (!priv->bDigMechanism)
159 return false; 116 return false;
160 117
161 if(ieee->state != IEEE80211_LINKED) 118 if (ieee->state != IEEE80211_LINKED)
162 return false; 119 return false;
163 120
164 //if(priv->CurrentOperaRate < 36) // Schedule Dig under all OFDM rates. By Bruce, 2007-06-01. 121 if ((priv->ieee80211->rate / 5) < 36) /* Schedule Dig under all OFDM rates. By Bruce, 2007-06-01. */
165 if((priv->ieee80211->rate/5) < 36) // Schedule Dig under all OFDM rates. By Bruce, 2007-06-01.
166 return false; 122 return false;
167 return true; 123 return true;
168} 124}
169// 125/*
170// Description: 126 * Implementation of DIG for Zebra and Zebra2.
171// Implementation of DIG for Zebra and Zebra2. 127 */
172// 128void DIG_Zebra(struct net_device *dev)
173void
174DIG_Zebra(
175 struct net_device *dev
176 )
177{ 129{
178 struct r8180_priv *priv = ieee80211_priv(dev); 130 struct r8180_priv *priv = ieee80211_priv(dev);
179 u16 CCKFalseAlarm, OFDMFalseAlarm; 131 u16 CCKFalseAlarm, OFDMFalseAlarm;
180 u16 OfdmFA1, OfdmFA2; 132 u16 OfdmFA1, OfdmFA2;
181 int InitialGainStep = 7; // The number of initial gain stages. 133 int InitialGainStep = 7; /* The number of initial gain stages. */
182 int LowestGainStage = 4; // The capable lowest stage of performing dig workitem. 134 int LowestGainStage = 4; /* The capable lowest stage of performing dig workitem. */
183 u32 AwakePeriodIn2Sec=0; 135 u32 AwakePeriodIn2Sec = 0;
184
185 //printk("---------> DIG_Zebra()\n");
186 136
187 CCKFalseAlarm = (u16)(priv->FalseAlarmRegValue & 0x0000ffff); 137 CCKFalseAlarm = (u16)(priv->FalseAlarmRegValue & 0x0000ffff);
188 OFDMFalseAlarm = (u16)((priv->FalseAlarmRegValue >> 16) & 0x0000ffff); 138 OFDMFalseAlarm = (u16)((priv->FalseAlarmRegValue >> 16) & 0x0000ffff);
189 OfdmFA1 = 0x15; 139 OfdmFA1 = 0x15;
190 OfdmFA2 = ((u16)(priv->RegDigOfdmFaUpTh)) << 8; 140 OfdmFA2 = ((u16)(priv->RegDigOfdmFaUpTh)) << 8;
191 141
192// printk("DIG**********CCK False Alarm: %#X \n",CCKFalseAlarm); 142 /* The number of initial gain steps is different, by Bruce, 2007-04-13. */
193// printk("DIG**********OFDM False Alarm: %#X \n",OFDMFalseAlarm); 143 if (priv->InitialGain == 0) { /* autoDIG */
194 144 /* Advised from SD3 DZ */
195 // The number of initial gain steps is different, by Bruce, 2007-04-13. 145 priv->InitialGain = 4; /* In 87B, m74dBm means State 4 (m82dBm) */
196 if (priv->InitialGain == 0 ) //autoDIG
197 { // Advised from SD3 DZ
198 priv->InitialGain = 4; // In 87B, m74dBm means State 4 (m82dBm)
199 }
200 { // Advised from SD3 DZ
201 OfdmFA1 = 0x20;
202 } 146 }
203 147 /* Advised from SD3 DZ */
204#if 1 //lzm reserved 080826 148 OfdmFA1 = 0x20;
205 AwakePeriodIn2Sec = (2000-priv ->DozePeriodInPast2Sec); 149
206 //printk("&&& DozePeriod=%d AwakePeriod=%d\n", priv->DozePeriodInPast2Sec, AwakePeriodIn2Sec); 150#if 1 /* lzm reserved 080826 */
207 priv ->DozePeriodInPast2Sec=0; 151 AwakePeriodIn2Sec = (2000 - priv->DozePeriodInPast2Sec);
208 152 priv ->DozePeriodInPast2Sec = 0;
209 if(AwakePeriodIn2Sec) 153
210 { 154 if (AwakePeriodIn2Sec) {
211 //RT_TRACE(COMP_DIG, DBG_TRACE, ("DIG: AwakePeriodIn2Sec(%d) - FATh(0x%X , 0x%X) ->",AwakePeriodIn2Sec, OfdmFA1, OfdmFA2)); 155 OfdmFA1 = (u16)((OfdmFA1 * AwakePeriodIn2Sec) / 2000) ;
212 // adjuest DIG threshold. 156 OfdmFA2 = (u16)((OfdmFA2 * AwakePeriodIn2Sec) / 2000) ;
213 OfdmFA1 = (u16)((OfdmFA1*AwakePeriodIn2Sec) / 2000) ; 157 } else {
214 OfdmFA2 = (u16)((OfdmFA2*AwakePeriodIn2Sec) / 2000) ; 158 ;
215 //RT_TRACE(COMP_DIG, DBG_TRACE, ("( 0x%X , 0x%X)\n", OfdmFA1, OfdmFA2));
216 }
217 else
218 {
219 ;//RT_TRACE(COMP_DIG, DBG_WARNING, ("ERROR!! AwakePeriodIn2Sec should not be ZERO!!\n"));
220 } 159 }
221#endif 160#endif
222 161
223 InitialGainStep = 8; 162 InitialGainStep = 8;
224 LowestGainStage = priv->RegBModeGainStage; // Lowest gain stage. 163 LowestGainStage = priv->RegBModeGainStage; /* Lowest gain stage. */
225 164
226 if (OFDMFalseAlarm > OfdmFA1) 165 if (OFDMFalseAlarm > OfdmFA1) {
227 { 166 if (OFDMFalseAlarm > OfdmFA2) {
228 if (OFDMFalseAlarm > OfdmFA2)
229 {
230 priv->DIG_NumberFallbackVote++; 167 priv->DIG_NumberFallbackVote++;
231 if (priv->DIG_NumberFallbackVote >1) 168 if (priv->DIG_NumberFallbackVote > 1) {
232 { 169 /* serious OFDM False Alarm, need fallback */
233 //serious OFDM False Alarm, need fallback 170 if (priv->InitialGain < InitialGainStep) {
234 if (priv->InitialGain < InitialGainStep) 171 priv->InitialGainBackUp = priv->InitialGain;
235 {
236 priv->InitialGainBackUp= priv->InitialGain;
237 172
238 priv->InitialGain = (priv->InitialGain + 1); 173 priv->InitialGain = (priv->InitialGain + 1);
239// printk("DIG**********OFDM False Alarm: %#X, OfdmFA1: %#X, OfdmFA2: %#X\n", OFDMFalseAlarm, OfdmFA1, OfdmFA2);
240// printk("DIG+++++++ fallback OFDM:%d \n", priv->InitialGain);
241 UpdateInitialGain(dev); 174 UpdateInitialGain(dev);
242 } 175 }
243 priv->DIG_NumberFallbackVote = 0; 176 priv->DIG_NumberFallbackVote = 0;
244 priv->DIG_NumberUpgradeVote=0; 177 priv->DIG_NumberUpgradeVote = 0;
245 } 178 }
246 } 179 } else {
247 else
248 {
249 if (priv->DIG_NumberFallbackVote) 180 if (priv->DIG_NumberFallbackVote)
250 priv->DIG_NumberFallbackVote--; 181 priv->DIG_NumberFallbackVote--;
251 } 182 }
252 priv->DIG_NumberUpgradeVote=0; 183 priv->DIG_NumberUpgradeVote = 0;
253 } 184 } else {
254 else
255 {
256 if (priv->DIG_NumberFallbackVote) 185 if (priv->DIG_NumberFallbackVote)
257 priv->DIG_NumberFallbackVote--; 186 priv->DIG_NumberFallbackVote--;
258 priv->DIG_NumberUpgradeVote++; 187 priv->DIG_NumberUpgradeVote++;
259 188
260 if (priv->DIG_NumberUpgradeVote>9) 189 if (priv->DIG_NumberUpgradeVote > 9) {
261 { 190 if (priv->InitialGain > LowestGainStage) { /* In 87B, m78dBm means State 4 (m864dBm) */
262 if (priv->InitialGain > LowestGainStage) // In 87B, m78dBm means State 4 (m864dBm) 191 priv->InitialGainBackUp = priv->InitialGain;
263 {
264 priv->InitialGainBackUp= priv->InitialGain;
265 192
266 priv->InitialGain = (priv->InitialGain - 1); 193 priv->InitialGain = (priv->InitialGain - 1);
267// printk("DIG**********OFDM False Alarm: %#X, OfdmFA1: %#X, OfdmFA2: %#X\n", OFDMFalseAlarm, OfdmFA1, OfdmFA2);
268// printk("DIG--------- Upgrade OFDM:%d \n", priv->InitialGain);
269 UpdateInitialGain(dev); 194 UpdateInitialGain(dev);
270 } 195 }
271 priv->DIG_NumberFallbackVote = 0; 196 priv->DIG_NumberFallbackVote = 0;
272 priv->DIG_NumberUpgradeVote=0; 197 priv->DIG_NumberUpgradeVote = 0;
273 } 198 }
274 } 199 }
275
276// printk("DIG+++++++ OFDM:%d\n", priv->InitialGain);
277 //printk("<--------- DIG_Zebra()\n");
278} 200}
279 201
280// 202/*
281// Description: 203 * Dispatch DIG implementation according to RF.
282// Dispatch DIG implementation according to RF. 204 */
283// 205void DynamicInitGain(struct net_device *dev)
284void
285DynamicInitGain(struct net_device *dev)
286{ 206{
287 DIG_Zebra(dev); 207 DIG_Zebra(dev);
288} 208}
289 209
290void rtl8180_hw_dig_wq (struct work_struct *work) 210void rtl8180_hw_dig_wq(struct work_struct *work)
291{ 211{
292 struct delayed_work *dwork = to_delayed_work(work); 212 struct delayed_work *dwork = to_delayed_work(work);
293 struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_dig_wq); 213 struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_dig_wq);
294 struct net_device *dev = ieee->dev; 214 struct net_device *dev = ieee->dev;
295 struct r8180_priv *priv = ieee80211_priv(dev); 215 struct r8180_priv *priv = ieee80211_priv(dev);
296 216
297 // Read CCK and OFDM False Alarm. 217 /* Read CCK and OFDM False Alarm. */
298 priv->FalseAlarmRegValue = read_nic_dword(dev, CCK_FALSE_ALARM); 218 priv->FalseAlarmRegValue = read_nic_dword(dev, CCK_FALSE_ALARM);
299 219
300 220
301 // Adjust Initial Gain dynamically. 221 /* Adjust Initial Gain dynamically. */
302 DynamicInitGain(dev); 222 DynamicInitGain(dev);
303 223
304} 224}
305 225
306int 226int IncludedInSupportedRates(struct r8180_priv *priv, u8 TxRate)
307IncludedInSupportedRates(
308 struct r8180_priv *priv,
309 u8 TxRate )
310{ 227{
311 u8 rate_len; 228 u8 rate_len;
312 u8 rate_ex_len; 229 u8 rate_ex_len;
313 u8 RateMask = 0x7F; 230 u8 RateMask = 0x7F;
314 u8 idx; 231 u8 idx;
315 unsigned short Found = 0; 232 unsigned short Found = 0;
316 u8 NaiveTxRate = TxRate&RateMask; 233 u8 NaiveTxRate = TxRate&RateMask;
317 234
318 rate_len = priv->ieee80211->current_network.rates_len; 235 rate_len = priv->ieee80211->current_network.rates_len;
319 rate_ex_len = priv->ieee80211->current_network.rates_ex_len; 236 rate_ex_len = priv->ieee80211->current_network.rates_ex_len;
320 for( idx=0; idx< rate_len; idx++ ) 237 for (idx=0; idx < rate_len; idx++) {
321 { 238 if ((priv->ieee80211->current_network.rates[idx] & RateMask) == NaiveTxRate) {
322 if( (priv->ieee80211->current_network.rates[idx] & RateMask) == NaiveTxRate ) 239 Found = 1;
323 { 240 goto found_rate;
324 Found = 1; 241 }
325 goto found_rate; 242 }
326 } 243 for (idx = 0; idx < rate_ex_len; idx++) {
327 } 244 if ((priv->ieee80211->current_network.rates_ex[idx] & RateMask) == NaiveTxRate) {
328 for( idx=0; idx< rate_ex_len; idx++ ) 245 Found = 1;
329 { 246 goto found_rate;
330 if( (priv->ieee80211->current_network.rates_ex[idx] & RateMask) == NaiveTxRate ) 247 }
331 { 248 }
332 Found = 1; 249 return Found;
333 goto found_rate; 250 found_rate:
334 } 251 return Found;
335 }
336 return Found;
337 found_rate:
338 return Found;
339} 252}
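As the right-hand column shows, the cleanup keeps the Found flag, the found_rate label, and two identical return statements. A hedged simplification (illustrative only, not part of this commit) would return as soon as a match is found:

	/* Sketch: same semantics as IncludedInSupportedRates(), no flag or label. */
	static int rate_in_array(const u8 *rates, u8 len, u8 naive_rate)
	{
		u8 idx;

		for (idx = 0; idx < len; idx++)
			if ((rates[idx] & 0x7F) == naive_rate)
				return 1;
		return 0;
	}

IncludedInSupportedRates() then reduces to checking current_network.rates[] and rates_ex[] in turn against the masked TxRate.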
340 253
341// 254/*
342 // Description: 255 * Get the Tx rate one degree up from the input rate in the supported rates.
343 // Get the Tx rate one degree up from the input rate in the supported rates. 256 * Return the upgraded rate on success, otherwise return the input rate.
344 // Return the upgraded rate on success, otherwise return the input rate. 257 */
345// By Bruce, 2007-06-05. 258u8 GetUpgradeTxRate(struct net_device *dev, u8 rate)
346//
347u8
348GetUpgradeTxRate(
349 struct net_device *dev,
350 u8 rate
351 )
352{ 259{
353 struct r8180_priv *priv = ieee80211_priv(dev); 260 struct r8180_priv *priv = ieee80211_priv(dev);
354 u8 UpRate; 261 u8 UpRate;
355 262
356 // Upgrade 1 degree. 263 /* Upgrade 1 degree. */
357 switch(rate) 264 switch (rate) {
358 { 265 case 108: /* Up to 54Mbps. */
359 case 108: // Up to 54Mbps. 266 UpRate = 108;
360 UpRate = 108; 267 break;
361 break; 268
362 269 case 96: /* Up to 54Mbps. */
363 case 96: // Up to 54Mbps. 270 UpRate = 108;
364 UpRate = 108; 271 break;
365 break; 272
366 273 case 72: /* Up to 48Mbps. */
367 case 72: // Up to 48Mbps. 274 UpRate = 96;
368 UpRate = 96; 275 break;
369 break; 276
370 277 case 48: /* Up to 36Mbps. */
371 case 48: // Up to 36Mbps. 278 UpRate = 72;
372 UpRate = 72; 279 break;
373 break; 280
374 281 case 36: /* Up to 24Mbps. */
375 case 36: // Up to 24Mbps. 282 UpRate = 48;
376 UpRate = 48; 283 break;
377 break; 284
378 285 case 22: /* Up to 18Mbps. */
379 case 22: // Up to 18Mbps. 286 UpRate = 36;
380 UpRate = 36; 287 break;
381 break; 288
382 289 case 11: /* Up to 11Mbps. */
383 case 11: // Up to 11Mbps. 290 UpRate = 22;
384 UpRate = 22; 291 break;
385 break; 292
386 293 case 4: /* Up to 5.5Mbps. */
387 case 4: // Up to 5.5Mbps. 294 UpRate = 11;
388 UpRate = 11; 295 break;
389 break; 296
390 297 case 2: /* Up to 2Mbps. */
391 case 2: // Up to 2Mbps. 298 UpRate = 4;
392 UpRate = 4; 299 break;
393 break; 300
394 301 default:
395 default: 302 printk("GetUpgradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
396 printk("GetUpgradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate); 303 return rate;
397 return rate; 304 }
398 } 305 /* Check if the rate is valid. */
399 // Check if the rate is valid. 306 if (IncludedInSupportedRates(priv, UpRate)) {
400 if(IncludedInSupportedRates(priv, UpRate)) 307 return UpRate;
401 { 308 } else {
402// printk("GetUpgradeTxRate(): GetUpgrade Tx rate(%d) from %d !\n", UpRate, priv->CurrentOperaRate); 309 return rate;
403 return UpRate; 310 }
404 } 311 return rate;
405 else
406 {
407 //printk("GetUpgradeTxRate(): Tx rate (%d) is not in supported rates\n", UpRate);
408 return rate;
409 }
410 return rate;
411} 312}
412// 313/*
413 // Description: 314 * Get the Tx rate one degree down from the input rate in the supported rates.
414 // Get the Tx rate one degree down from the input rate in the supported rates. 315 * Return the degraded rate on success, otherwise return the input rate.
415 // Return the degraded rate on success, otherwise return the input rate. 316 */
416// By Bruce, 2007-06-05. 317
417// 318u8 GetDegradeTxRate(struct net_device *dev, u8 rate)
418u8
419GetDegradeTxRate(
420 struct net_device *dev,
421 u8 rate
422 )
423{ 319{
424 struct r8180_priv *priv = ieee80211_priv(dev); 320 struct r8180_priv *priv = ieee80211_priv(dev);
425 u8 DownRate; 321 u8 DownRate;
426 322
427 // Upgrade 1 degree. 323 /* Upgrade 1 degree. */
428 switch(rate) 324 switch (rate) {
429 { 325 case 108: /* Down to 48Mbps. */
430 case 108: // Down to 48Mbps. 326 DownRate = 96;
431 DownRate = 96; 327 break;
432 break; 328
433 329 case 96: /* Down to 36Mbps. */
434 case 96: // Down to 36Mbps. 330 DownRate = 72;
435 DownRate = 72; 331 break;
436 break; 332
437 333 case 72: /* Down to 24Mbps. */
438 case 72: // Down to 24Mbps. 334 DownRate = 48;
439 DownRate = 48; 335 break;
440 break; 336
441 337 case 48: /* Down to 18Mbps. */
442 case 48: // Down to 18Mbps. 338 DownRate = 36;
443 DownRate = 36; 339 break;
444 break; 340
445 341 case 36: /* Down to 11Mbps. */
446 case 36: // Down to 11Mbps. 342 DownRate = 22;
447 DownRate = 22; 343 break;
448 break; 344
449 345 case 22: /* Down to 5.5Mbps. */
450 case 22: // Down to 5.5Mbps. 346 DownRate = 11;
451 DownRate = 11; 347 break;
452 break; 348
453 349 case 11: /* Down to 2Mbps. */
454 case 11: // Down to 2Mbps. 350 DownRate = 4;
455 DownRate = 4; 351 break;
456 break; 352
457 353 case 4: /* Down to 1Mbps. */
458 case 4: // Down to 1Mbps. 354 DownRate = 2;
459 DownRate = 2; 355 break;
460 break; 356
461 357 case 2: /* Down to 1Mbps. */
462 case 2: // Down to 1Mbps. 358 DownRate = 2;
463 DownRate = 2; 359 break;
464 break; 360
465 361 default:
466 default: 362 printk("GetDegradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
467 printk("GetDegradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate); 363 return rate;
468 return rate; 364 }
469 } 365 /* Check if the rate is valid. */
470 // Check if the rate is valid. 366 if (IncludedInSupportedRates(priv, DownRate)) {
471 if(IncludedInSupportedRates(priv, DownRate)) 367 return DownRate;
472 { 368 } else {
473// printk("GetDegradeTxRate(): GetDegrade Tx rate(%d) from %d!\n", DownRate, priv->CurrentOperaRate); 369 return rate;
474 return DownRate; 370 }
475 } 371 return rate;
476 else
477 {
478 //printk("GetDegradeTxRate(): Tx rate (%d) is not in supported rates\n", DownRate);
479 return rate;
480 }
481 return rate;
482} 372}
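The two switch statements above encode one fixed rate ladder (in the driver's 500 kbps units: 2, 4, 11, 22, 36, 48, 72, 96, 108), once walking up and once walking down, saturating at both ends. A table-driven form, sketched here as an alternative rather than as what the driver does, would keep the two directions from drifting apart:

	/* Illustrative only; rates in units of 500 kbps as in the driver. */
	static const u8 rate_ladder[] = { 2, 4, 11, 22, 36, 48, 72, 96, 108 };

	static u8 step_rate(u8 rate, int up)
	{
		size_t i;

		for (i = 0; i < ARRAY_SIZE(rate_ladder); i++) {
			if (rate_ladder[i] != rate)
				continue;
			if (up && i < ARRAY_SIZE(rate_ladder) - 1)
				return rate_ladder[i + 1];
			if (!up && i > 0)
				return rate_ladder[i - 1];
			return rate;	/* saturate at the ladder's edge */
		}
		return rate;		/* undefined input, as in the originals */
	}

This reproduces the switches exactly, including 108 and 2 mapping to themselves; the validity check against the supported-rates list would still follow, as in both functions above.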
483// 373/*
484// Helper function to determine if specified data rate is 374 * Helper function to determine if specified data rate is
485// CCK rate. 375 * CCK rate.
486// 2005.01.25, by rcnjko. 376 */
487// 377
488bool 378bool MgntIsCckRate(u16 rate)
489MgntIsCckRate(
490 u16 rate
491 )
492{ 379{
493 bool bReturn = false; 380 bool bReturn = false;
494 381
495 if((rate <= 22) && (rate != 12) && (rate != 18)) 382 if ((rate <= 22) && (rate != 12) && (rate != 18)) {
496 { 383 bReturn = true;
497 bReturn = true; 384 }
498 }
499 385
500 return bReturn; 386 return bReturn;
501} 387}
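The magic numbers here make sense once you recall that rates are in 500 kbps units: 2, 4, 11 and 22 are the CCK rates 1, 2, 5.5 and 11 Mbps, while 12 and 18 (the OFDM rates 6 and 9 Mbps) fall inside the <= 22 range and therefore must be excluded explicitly.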
502// 388/*
503// Description: 389 * Description:
504// Tx Power tracking mechanism routine on 87SE. 390 * Tx Power tracking mechanism routine on 87SE.
505// Created by Roger, 2007.12.11. 391 */
506// 392void TxPwrTracking87SE(struct net_device *dev)
507void
508TxPwrTracking87SE(
509 struct net_device *dev
510)
511{ 393{
512 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev); 394 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
513 u8 tmpu1Byte, CurrentThermal, Idx; 395 u8 tmpu1Byte, CurrentThermal, Idx;
514 char CckTxPwrIdx, OfdmTxPwrIdx; 396 char CckTxPwrIdx, OfdmTxPwrIdx;
515 //u32 u4bRfReg;
516 397
517 tmpu1Byte = read_nic_byte(dev, EN_LPF_CAL); 398 tmpu1Byte = read_nic_byte(dev, EN_LPF_CAL);
518 CurrentThermal = (tmpu1Byte & 0xf0)>>4; //[ 7:4]: thermal meter indication. 399 CurrentThermal = (tmpu1Byte & 0xf0) >> 4; /*[ 7:4]: thermal meter indication. */
519 CurrentThermal = (CurrentThermal>0x0c)? 0x0c:CurrentThermal;//lzm add 080826 400 CurrentThermal = (CurrentThermal > 0x0c) ? 0x0c:CurrentThermal;/* lzm add 080826 */
520
521 //printk("TxPwrTracking87SE(): CurrentThermal(%d)\n", CurrentThermal);
522 401
523 if( CurrentThermal != priv->ThermalMeter) 402 if (CurrentThermal != priv->ThermalMeter) {
524 { 403 /* Update Tx Power level on each channel. */
525// printk("TxPwrTracking87SE(): Thermal meter changed!!!\n"); 404 for (Idx = 1; Idx < 15; Idx++) {
526
527 // Update Tx Power level on each channel.
528 for(Idx = 1; Idx<15; Idx++)
529 {
530 CckTxPwrIdx = priv->chtxpwr[Idx]; 405 CckTxPwrIdx = priv->chtxpwr[Idx];
531 OfdmTxPwrIdx = priv->chtxpwr_ofdm[Idx]; 406 OfdmTxPwrIdx = priv->chtxpwr_ofdm[Idx];
532 407
533 if( CurrentThermal > priv->ThermalMeter ) 408 if (CurrentThermal > priv->ThermalMeter) {
534 { // higher thermal meter. 409 /* higher thermal meter. */
535 CckTxPwrIdx += (CurrentThermal - priv->ThermalMeter)*2; 410 CckTxPwrIdx += (CurrentThermal - priv->ThermalMeter) * 2;
536 OfdmTxPwrIdx += (CurrentThermal - priv->ThermalMeter)*2; 411 OfdmTxPwrIdx += (CurrentThermal - priv->ThermalMeter) * 2;
537 412
538 if(CckTxPwrIdx >35) 413 if (CckTxPwrIdx > 35)
539 CckTxPwrIdx = 35; // Force TxPower to maximal index. 414 CckTxPwrIdx = 35; /* Force TxPower to maximal index. */
540 if(OfdmTxPwrIdx >35) 415 if (OfdmTxPwrIdx > 35)
541 OfdmTxPwrIdx = 35; 416 OfdmTxPwrIdx = 35;
542 } 417 } else {
543 else 418 /* lower thermal meter. */
544 { // lower thermal meter. 419 CckTxPwrIdx -= (priv->ThermalMeter - CurrentThermal) * 2;
545 CckTxPwrIdx -= (priv->ThermalMeter - CurrentThermal)*2; 420 OfdmTxPwrIdx -= (priv->ThermalMeter - CurrentThermal) * 2;
546 OfdmTxPwrIdx -= (priv->ThermalMeter - CurrentThermal)*2;
547 421
548 if(CckTxPwrIdx <0) 422 if (CckTxPwrIdx < 0)
549 CckTxPwrIdx = 0; 423 CckTxPwrIdx = 0;
550 if(OfdmTxPwrIdx <0) 424 if (OfdmTxPwrIdx < 0)
551 OfdmTxPwrIdx = 0; 425 OfdmTxPwrIdx = 0;
552 } 426 }
553 427
554 // Update TxPower level on CCK and OFDM resp. 428 /* Update TxPower level on CCK and OFDM resp. */
555 priv->chtxpwr[Idx] = CckTxPwrIdx; 429 priv->chtxpwr[Idx] = CckTxPwrIdx;
556 priv->chtxpwr_ofdm[Idx] = OfdmTxPwrIdx; 430 priv->chtxpwr_ofdm[Idx] = OfdmTxPwrIdx;
557 } 431 }
558 432
559 // Update TxPower level immediately. 433 /* Update TxPower level immediately. */
560 rtl8225z2_SetTXPowerLevel(dev, priv->ieee80211->current_network.channel); 434 rtl8225z2_SetTXPowerLevel(dev, priv->ieee80211->current_network.channel);
561 } 435 }
562 priv->ThermalMeter = CurrentThermal; 436 priv->ThermalMeter = CurrentThermal;
563} 437}
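The compensation rule above is linear: each LSB of thermal-meter drift moves both per-channel power indices by two steps, clamped to the 0..35 index range (with the raw thermal reading itself capped at 0x0c beforehand). Pulled out as a standalone sketch, illustrative rather than driver code:

	/* Sketch of the thermal compensation applied per channel above. */
	static char compensate_pwr_idx(char idx, int cur_thermal, int old_thermal)
	{
		int v = idx + (cur_thermal - old_thermal) * 2;

		if (v > 35)
			v = 35;	/* force TxPower to maximal index */
		if (v < 0)
			v = 0;
		return (char)v;
	}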
564void 438void StaRateAdaptive87SE(struct net_device *dev)
565StaRateAdaptive87SE(
566 struct net_device *dev
567 )
568{ 439{
569 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev); 440 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
570 unsigned long CurrTxokCnt; 441 unsigned long CurrTxokCnt;
571 u16 CurrRetryCnt; 442 u16 CurrRetryCnt;
572 u16 CurrRetryRate; 443 u16 CurrRetryRate;
573 //u16 i,idx; 444 unsigned long CurrRxokCnt;
574 unsigned long CurrRxokCnt; 445 bool bTryUp = false;
575 bool bTryUp = false; 446 bool bTryDown = false;
576 bool bTryDown = false; 447 u8 TryUpTh = 1;
577 u8 TryUpTh = 1; 448 u8 TryDownTh = 2;
578 u8 TryDownTh = 2; 449 u32 TxThroughput;
579 u32 TxThroughput;
580 long CurrSignalStrength; 450 long CurrSignalStrength;
581 bool bUpdateInitialGain = false; 451 bool bUpdateInitialGain = false;
582 u8 u1bOfdm=0, u1bCck = 0; 452 u8 u1bOfdm = 0, u1bCck = 0;
583 char OfdmTxPwrIdx, CckTxPwrIdx; 453 char OfdmTxPwrIdx, CckTxPwrIdx;
584 454
585 priv->RateAdaptivePeriod= RATE_ADAPTIVE_TIMER_PERIOD; 455 priv->RateAdaptivePeriod = RATE_ADAPTIVE_TIMER_PERIOD;
586 456
587 457
588 CurrRetryCnt = priv->CurrRetryCnt; 458 CurrRetryCnt = priv->CurrRetryCnt;
@@ -591,707 +461,462 @@ StaRateAdaptive87SE(
591 CurrSignalStrength = priv->Stats_RecvSignalPower; 461 CurrSignalStrength = priv->Stats_RecvSignalPower;
592 TxThroughput = (u32)(priv->NumTxOkBytesTotal - priv->LastTxOKBytes); 462 TxThroughput = (u32)(priv->NumTxOkBytesTotal - priv->LastTxOKBytes);
593 priv->LastTxOKBytes = priv->NumTxOkBytesTotal; 463 priv->LastTxOKBytes = priv->NumTxOkBytesTotal;
594 priv->CurrentOperaRate = priv->ieee80211->rate/5; 464 priv->CurrentOperaRate = priv->ieee80211->rate / 5;
595 //printk("priv->CurrentOperaRate is %d\n",priv->CurrentOperaRate); 465 /* 2 Compute retry ratio. */
596 //2 Compute retry ratio. 466 if (CurrTxokCnt > 0) {
597 if (CurrTxokCnt>0) 467 CurrRetryRate = (u16)(CurrRetryCnt * 100 / CurrTxokCnt);
598 { 468 } else {
599 CurrRetryRate = (u16)(CurrRetryCnt*100/CurrTxokCnt); 469 /* It may be serious retry. To distinguish serious retry or no packets modified by Bruce */
470 CurrRetryRate = (u16)(CurrRetryCnt * 100 / 1);
600 } 471 }
601 else
602 { // It may be serious retry. To distinguish serious retry or no packets modified by Bruce
603 CurrRetryRate = (u16)(CurrRetryCnt*100/1);
604 }
605
606
607 //
608 // Added by Roger, 2007.01.02.
609 // For debug information.
610 //
611 //printk("\n(1) pHalData->LastRetryRate: %d \n",priv->LastRetryRate);
612 //printk("(2) RetryCnt = %d \n", CurrRetryCnt);
613 //printk("(3) TxokCnt = %d \n", CurrTxokCnt);
614 //printk("(4) CurrRetryRate = %d \n", CurrRetryRate);
615 //printk("(5) CurrSignalStrength = %d \n",CurrSignalStrength);
616 //printk("(6) TxThroughput is %d\n",TxThroughput);
617 //printk("priv->NumTxOkBytesTotal is %d\n",priv->NumTxOkBytesTotal);
618 472
619 priv->LastRetryCnt = priv->CurrRetryCnt; 473 priv->LastRetryCnt = priv->CurrRetryCnt;
620 priv->LastTxokCnt = priv->NumTxOkTotal; 474 priv->LastTxokCnt = priv->NumTxOkTotal;
621 priv->LastRxokCnt = priv->ieee80211->NumRxOkTotal; 475 priv->LastRxokCnt = priv->ieee80211->NumRxOkTotal;
622 priv->CurrRetryCnt = 0; 476 priv->CurrRetryCnt = 0;
623 477
624 //2No Tx packets, return to init_rate or not? 478 /* 2No Tx packets, return to init_rate or not? */
625 if (CurrRetryRate==0 && CurrTxokCnt == 0) 479 if (CurrRetryRate == 0 && CurrTxokCnt == 0) {
626 { 480 /*
627 // 481 * After 9 (30*300ms) seconds in this condition, we try to raise rate.
628 //After 9 (30*300ms) seconds in this condition, we try to raise rate. 482 */
629 //
630 priv->TryupingCountNoData++; 483 priv->TryupingCountNoData++;
631 484
632// printk("No Tx packets, TryupingCountNoData(%d)\n", priv->TryupingCountNoData); 485 /* [TRC Dell Lab] Extend raised period from 4.5sec to 9sec, Isaiah 2008-02-15 18:00 */
633 //[TRC Dell Lab] Extend raised period from 4.5sec to 9sec, Isaiah 2008-02-15 18:00 486 if (priv->TryupingCountNoData > 30) {
634 if (priv->TryupingCountNoData>30)
635 {
636 priv->TryupingCountNoData = 0; 487 priv->TryupingCountNoData = 0;
637 priv->CurrentOperaRate = GetUpgradeTxRate(dev, priv->CurrentOperaRate); 488 priv->CurrentOperaRate = GetUpgradeTxRate(dev, priv->CurrentOperaRate);
638 // Reset Fail Record 489 /* Reset Fail Record */
639 priv->LastFailTxRate = 0; 490 priv->LastFailTxRate = 0;
640 priv->LastFailTxRateSS = -200; 491 priv->LastFailTxRateSS = -200;
641 priv->FailTxRateCount = 0; 492 priv->FailTxRateCount = 0;
642 } 493 }
643 goto SetInitialGain; 494 goto SetInitialGain;
644 } 495 } else {
645 else 496 priv->TryupingCountNoData = 0; /*Reset trying up times. */
646 {
647 priv->TryupingCountNoData=0; //Reset trying up times.
648 } 497 }
649 498
650 499
651 // 500 /*
652 // For Netgear case, I comment out the following signal strength estimation, 501 * For Netgear case, I comment out the following signal strength estimation,
653 // which can results in lower rate to transmit when sample is NOT enough (e.g. PING request). 502 * which can results in lower rate to transmit when sample is NOT enough (e.g. PING request).
654 // 2007.04.09, by Roger. 503 *
655 // 504 * Restructure rate adaptive as the following main stages:
656 505 * (1) Add retry threshold in 54M upgrading condition with signal strength.
657 // 506 * (2) Add the mechanism to degrade to CCK rate according to signal strength
658 // Restructure rate adaptive as the following main stages: 507 * and retry rate.
659 // (1) Add retry threshold in 54M upgrading condition with signal strength. 508 * (3) Remove all Initial Gain Updates over OFDM rate. To avoid the complicated
660 // (2) Add the mechanism to degrade to CCK rate according to signal strength 509 * situation, Initial Gain Update is upon on DIG mechanism except CCK rate.
661 // and retry rate. 510 * (4) Add the mehanism of trying to upgrade tx rate.
662 // (3) Remove all Initial Gain Updates over OFDM rate. To avoid the complicated 511 * (5) Record the information of upping tx rate to avoid trying upping tx rate constantly.
663 // situation, Initial Gain Update is upon on DIG mechanism except CCK rate. 512 *
664 // (4) Add the mehanism of trying to upgrade tx rate. 513 */
665 // (5) Record the information of upping tx rate to avoid trying upping tx rate constantly. 514
666 // By Bruce, 2007-06-05. 515 /*
667 // 516 * 11Mbps or 36Mbps
668 // 517 * Check more times in these rate(key rates).
669 518 */
670 // 11Mbps or 36Mbps 519 if (priv->CurrentOperaRate == 22 || priv->CurrentOperaRate == 72)
671 // Check more times in these rate(key rates).
672 //
673 if(priv->CurrentOperaRate == 22 || priv->CurrentOperaRate == 72)
674 {
675 TryUpTh += 9; 520 TryUpTh += 9;
676 } 521 /*
677 // 522 * Let these rates down more difficult.
678 // Let these rates down more difficult. 523 */
679 // 524 if (MgntIsCckRate(priv->CurrentOperaRate) || priv->CurrentOperaRate == 36)
680 if(MgntIsCckRate(priv->CurrentOperaRate) || priv->CurrentOperaRate == 36) 525 TryDownTh += 1;
681 { 526
682 TryDownTh += 1; 527 /* 1 Adjust Rate. */
683 } 528 if (priv->bTryuping == true) {
684 529 /* 2 For Test Upgrading mechanism
685 //1 Adjust Rate. 530 * Note:
686 if (priv->bTryuping == true) 531 * Sometimes the throughput is upon on the capability bwtween the AP and NIC,
687 { 532 * thus the low data rate does not improve the performance.
688 //2 For Test Upgrading mechanism 533 * We randomly upgrade the data rate and check if the retry rate is improved.
689 // Note: 534 */
690 // Sometimes the throughput is upon on the capability bwtween the AP and NIC, 535
691 // thus the low data rate does not improve the performance. 536 /* Upgrading rate did not improve the retry rate, fallback to the original rate. */
692 // We randomly upgrade the data rate and check if the retry rate is improved. 537 if ((CurrRetryRate > 25) && TxThroughput < priv->LastTxThroughput) {
693 538 /*Not necessary raising rate, fall back rate. */
694 // Upgrading rate did not improve the retry rate, fallback to the original rate.
695 if ( (CurrRetryRate > 25) && TxThroughput < priv->LastTxThroughput)
696 {
697 //Not necessary raising rate, fall back rate.
698 bTryDown = true; 539 bTryDown = true;
699 //printk("case1-1: Not necessary raising rate, fall back rate....\n"); 540 } else {
700 //printk("case1-1: pMgntInfo->CurrentOperaRate =%d, TxThroughput = %d, LastThroughput = %d\n",
701 // priv->CurrentOperaRate, TxThroughput, priv->LastTxThroughput);
702 }
703 else
704 {
705 priv->bTryuping = false; 541 priv->bTryuping = false;
706 } 542 }
707 } 543 } else if (CurrSignalStrength > -47 && (CurrRetryRate < 50)) {
708 else if (CurrSignalStrength > -47 && (CurrRetryRate < 50)) 544 /*
709 { 545 * 2For High Power
710 //2For High Power 546 *
711 // 547 * Return to highest data rate, if signal strength is good enough.
712 // Added by Roger, 2007.04.09. 548 * SignalStrength threshold(-50dbm) is for RTL8186.
713 // Return to highest data rate, if signal strength is good enough. 549 * Revise SignalStrength threshold to -51dbm.
714 // SignalStrength threshold(-50dbm) is for RTL8186. 550 */
715 // Revise SignalStrength threshold to -51dbm. 551 /* Also need to check retry rate for safety, by Bruce, 2007-06-05. */
716 // 552 if (priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate) {
717 // Also need to check retry rate for safety, by Bruce, 2007-06-05.
718 if(priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate )
719 {
720 bTryUp = true; 553 bTryUp = true;
721 // Upgrade Tx Rate directly. 554 /* Upgrade Tx Rate directly. */
722 priv->TryupingCount += TryUpTh; 555 priv->TryupingCount += TryUpTh;
723 } 556 }
724// printk("case2: StaRateAdaptive87SE: Power(%d) is high enough!!. \n", CurrSignalStrength);

	} else if (CurrTxokCnt > 9 && CurrTxokCnt < 100 && CurrRetryRate >= 600) {
		/*
		 * 2 For Serious Retry
		 *
		 * Traffic is not busy but our Tx retry is serious.
		 */
		bTryDown = true;
		/* Let the Rate Mechanism degrade the tx rate directly. */
		priv->TryDownCountLowData += TryDownTh;
	} else if (priv->CurrentOperaRate == 108) {
		/* 2 For 54Mbps */
		/* Air Link */
		if ((CurrRetryRate > 26) && (priv->LastRetryRate > 25)) {
			/* Down to rate 48Mbps. */
			bTryDown = true;
		} else if ((CurrRetryRate > 17) && (priv->LastRetryRate > 16) && (CurrSignalStrength > -72)) {
			/* Cable Link: down to rate 48Mbps. */
			bTryDown = true;
		}

		if (bTryDown && (CurrSignalStrength < -75)) /* cable link */
			priv->TryDownCountLowData += TryDownTh;
	} else if (priv->CurrentOperaRate == 96) {
		/* 2 For 48Mbps */
		/* Air Link */
		if ((CurrRetryRate > 48) && (priv->LastRetryRate > 47)) {
			/* Down to rate 36Mbps. */
			bTryDown = true;
		} else if (((CurrRetryRate > 21) && (priv->LastRetryRate > 20)) && (CurrSignalStrength > -74)) { /* Cable Link */
			/* Down to rate 36Mbps. */
			bTryDown = true;
		} else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
			bTryDown = true;
			priv->TryDownCountLowData += TryDownTh;
		} else if ((CurrRetryRate < 8) && (priv->LastRetryRate < 8)) { /* TO DO: need to consider (RSSI) */
			bTryUp = true;
		}

		if (bTryDown && (CurrSignalStrength < -75))
			priv->TryDownCountLowData += TryDownTh;
	} else if (priv->CurrentOperaRate == 72) {
		/* 2 For 36Mbps */
		if ((CurrRetryRate > 43) && (priv->LastRetryRate > 41)) {
			/* Down to rate 24Mbps. */
			bTryDown = true;
		} else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
			bTryDown = true;
			priv->TryDownCountLowData += TryDownTh;
		} else if ((CurrRetryRate < 15) && (priv->LastRetryRate < 16)) { /* TO DO: need to consider (RSSI) */
			bTryUp = true;
		}

		if (bTryDown && (CurrSignalStrength < -80))
			priv->TryDownCountLowData += TryDownTh;
	} else if (priv->CurrentOperaRate == 48) {
		/* 2 For 24Mbps */
		/* Air Link */
		if ((CurrRetryRate > 63) && (priv->LastRetryRate > 62)) {
			/* Down to rate 18Mbps. */
			bTryDown = true;
		} else if (((CurrRetryRate > 33) && (priv->LastRetryRate > 32)) && (CurrSignalStrength > -82)) { /* Cable Link */
			/* Down to rate 18Mbps. */
			bTryDown = true;
		} else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
			bTryDown = true;
			priv->TryDownCountLowData += TryDownTh;
		} else if ((CurrRetryRate < 20) && (priv->LastRetryRate < 21)) { /* TO DO: need to consider (RSSI) */
			bTryUp = true;
		}

		if (bTryDown && (CurrSignalStrength < -82))
			priv->TryDownCountLowData += TryDownTh;
	} else if (priv->CurrentOperaRate == 36) {
		/* 2 For 18Mbps */
		if ((CurrRetryRate > 85) && (priv->LastRetryRate > 86)) {
			/* Down to rate 11Mbps. */
			bTryDown = true;
		} else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
			bTryDown = true;
			priv->TryDownCountLowData += TryDownTh;
		} else if ((CurrRetryRate < 22) && (priv->LastRetryRate < 23)) { /* TO DO: need to consider (RSSI) */
			bTryUp = true;
		}
	} else if (priv->CurrentOperaRate == 22) {
		/* 2 For 11Mbps */
		if (CurrRetryRate > 95) {
			bTryDown = true;
		} else if ((CurrRetryRate < 29) && (priv->LastRetryRate < 30)) { /* TO DO: need to consider (RSSI) */
			bTryUp = true;
		}
	} else if (priv->CurrentOperaRate == 11) {
		/* 2 For 5.5Mbps */
		if (CurrRetryRate > 149) {
			bTryDown = true;
		} else if ((CurrRetryRate < 60) && (priv->LastRetryRate < 65)) {
			bTryUp = true;
		}
	} else if (priv->CurrentOperaRate == 4) {
		/* 2 For 2 Mbps */
		if ((CurrRetryRate > 99) && (priv->LastRetryRate > 99)) {
			bTryDown = true;
		} else if ((CurrRetryRate < 65) && (priv->LastRetryRate < 70)) {
			bTryUp = true;
		}
	} else if (priv->CurrentOperaRate == 2) {
		/* 2 For 1 Mbps */
		if ((CurrRetryRate < 70) && (priv->LastRetryRate < 75)) {
			bTryUp = true;
		}
	}

	if (bTryUp && bTryDown)
		printk("StaRateAdaptive87SE(): Tx Rate tried upping and downing simultaneously!\n");

	/*
	 * 1 Test Upgrading Tx Rate
	 * Sometimes the cause of the low throughput (high retry rate) is a compatibility problem between the AP and NIC.
	 * To test whether an upper rate may give a lower retry rate, this mechanism occasionally tries upgrading the tx rate at random.
	 */
	if (!bTryUp && !bTryDown && (priv->TryupingCount == 0) && (priv->TryDownCountLowData == 0)
		&& priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate && priv->FailTxRateCount < 2) {
		if (jiffies % (CurrRetryRate + 101) == 0) {
			bTryUp = true;
			priv->bTryuping = true;
		}
	}

	/* 1 Rate Mechanism */
	if (bTryUp) {
		priv->TryupingCount++;
		priv->TryDownCountLowData = 0;

		/*
		 * Check more times whether we really need to upgrade.
		 * Because the largest value of pHalData->TryupingCount is 0xFFFF and
		 * the largest value of pHalData->FailTxRateCount is 0x14,
		 * this condition will be satisfied at most every 2 min.
		 */
		if ((priv->TryupingCount > (TryUpTh + priv->FailTxRateCount * priv->FailTxRateCount)) ||
			(CurrSignalStrength > priv->LastFailTxRateSS) || priv->bTryuping) {
			priv->TryupingCount = 0;
			/*
			 * When transferring from CCK to OFDM, DIG is an important issue.
			 */
			if (priv->CurrentOperaRate == 22)
				bUpdateInitialGain = true;

			/*
			 * The difference in throughput between 48Mbps and 36Mbps is 8M.
			 * So, we must be careful in this rate scale. Isaiah 2008-02-15.
			 */
			if (((priv->CurrentOperaRate == 72) || (priv->CurrentOperaRate == 48) || (priv->CurrentOperaRate == 36)) &&
				(priv->FailTxRateCount > 2))
				priv->RateAdaptivePeriod = (RATE_ADAPTIVE_TIMER_PERIOD / 2);

			/* (1) To avoid upgrading frequently to the failing tx rate, add the FailTxRateCount into the threshold. */
			/* (2) If the signal strength is increased, it may be able to upgrade. */

			priv->CurrentOperaRate = GetUpgradeTxRate(dev, priv->CurrentOperaRate);

			if (priv->CurrentOperaRate == 36) {
				priv->bUpdateARFR = true;
				write_nic_word(dev, ARFR, 0x0F8F); /* bypass 12/9/6 */
			} else if (priv->bUpdateARFR) {
				priv->bUpdateARFR = false;
				write_nic_word(dev, ARFR, 0x0FFF); /* set 1M ~ 54Mbps. */
			}

			/* Update Fail Tx rate and count. */
			if (priv->LastFailTxRate != priv->CurrentOperaRate) {
				priv->LastFailTxRate = priv->CurrentOperaRate;
				priv->FailTxRateCount = 0;
				priv->LastFailTxRateSS = -200; /* Set lowest power. */
			}
		}
	} else {
		if (priv->TryupingCount > 0)
			priv->TryupingCount--;
	}

	if (bTryDown) {
		priv->TryDownCountLowData++;
		priv->TryupingCount = 0;

		/* Check whether the Tx rate can be degraded, or whether a test upgrade should fall back. */
		if (priv->TryDownCountLowData > TryDownTh || priv->bTryuping) {
			priv->TryDownCountLowData = 0;
			priv->bTryuping = false;
			/* Update fail information. */
			if (priv->LastFailTxRate == priv->CurrentOperaRate) {
				priv->FailTxRateCount++;
				/* Record the Tx fail rate signal strength. */
				if (CurrSignalStrength > priv->LastFailTxRateSS)
					priv->LastFailTxRateSS = CurrSignalStrength;
			} else {
				priv->LastFailTxRate = priv->CurrentOperaRate;
				priv->FailTxRateCount = 1;
				priv->LastFailTxRateSS = CurrSignalStrength;
			}
			priv->CurrentOperaRate = GetDegradeTxRate(dev, priv->CurrentOperaRate);

			/* Reduce chariot training time in the weak signal strength situation. SD3 ED demand. */
			if ((CurrSignalStrength < -80) && (priv->CurrentOperaRate > 72)) {
				priv->CurrentOperaRate = 72;
			}

			if (priv->CurrentOperaRate == 36) {
				priv->bUpdateARFR = true;
				write_nic_word(dev, ARFR, 0x0F8F); /* bypass 12/9/6 */
			} else if (priv->bUpdateARFR) {
				priv->bUpdateARFR = false;
				write_nic_word(dev, ARFR, 0x0FFF); /* set 1M ~ 54Mbps. */
			}

			/*
			 * When it is a CCK rate, it may need to update the initial gain to receive lower power packets.
			 */
			if (MgntIsCckRate(priv->CurrentOperaRate)) {
				bUpdateInitialGain = true;
			}
		}
	} else {
		if (priv->TryDownCountLowData > 0)
			priv->TryDownCountLowData--;
	}

	/*
	 * Keep the Tx fail rate count equal to 0x15 at most.
	 * Reduce the fail count at least every 10 sec if the tx rate is tending stable.
	 */
	if (priv->FailTxRateCount >= 0x15 ||
		(!bTryUp && !bTryDown && priv->TryDownCountLowData == 0 && priv->TryupingCount && priv->FailTxRateCount > 0x6)) {
		priv->FailTxRateCount--;
	}

	OfdmTxPwrIdx = priv->chtxpwr_ofdm[priv->ieee80211->current_network.channel];
	CckTxPwrIdx = priv->chtxpwr[priv->ieee80211->current_network.channel];

	/* Mac0x9e increases 2 levels in the 36M~18M situation */
	if ((priv->CurrentOperaRate < 96) && (priv->CurrentOperaRate > 22)) {
		u1bCck = read_nic_byte(dev, CCK_TXAGC);
		u1bOfdm = read_nic_byte(dev, OFDM_TXAGC);

		/* case 1: Never enter High power */
		if (u1bCck == CckTxPwrIdx) {
			if (u1bOfdm != (OfdmTxPwrIdx + 2)) {
				priv->bEnhanceTxPwr = true;
				u1bOfdm = ((u1bOfdm + 2) > 35) ? 35 : (u1bOfdm + 2);
				write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
			}
		} else if (u1bCck < CckTxPwrIdx) {
			/* case 2: enter high power */
			if (!priv->bEnhanceTxPwr) {
				priv->bEnhanceTxPwr = true;
				u1bOfdm = ((u1bOfdm + 2) > 35) ? 35 : (u1bOfdm + 2);
				write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
			}
		}
	} else if (priv->bEnhanceTxPwr) { /* 54/48/11/5.5/2/1 */
		u1bCck = read_nic_byte(dev, CCK_TXAGC);
		u1bOfdm = read_nic_byte(dev, OFDM_TXAGC);

		/* case 1: Never enter High power */
		if (u1bCck == CckTxPwrIdx) {
			priv->bEnhanceTxPwr = false;
			write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
		} else if (u1bCck < CckTxPwrIdx) {
			/* case 2: enter high power */
			priv->bEnhanceTxPwr = false;
			u1bOfdm = ((u1bOfdm - 2) > 0) ? (u1bOfdm - 2) : 0;
			write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
		}
	}

	/*
	 * We need to update the initial gain when we set the tx rate "from OFDM to CCK" or
	 * "from CCK to OFDM".
	 */
SetInitialGain:
	if (bUpdateInitialGain) {
		if (MgntIsCckRate(priv->CurrentOperaRate)) { /* CCK */
			if (priv->InitialGain > priv->RegBModeGainStage) {
				priv->InitialGainBackUp = priv->InitialGain;

				if (CurrSignalStrength < -85) /* Low power, OFDM [0x17] = 26. */
					/* SD3 SYs suggest that CurrSignalStrength < -65, ofdm 0x17=26. */
					priv->InitialGain = priv->RegBModeGainStage;
				else if (priv->InitialGain > priv->RegBModeGainStage + 1)
					priv->InitialGain -= 2;
				else
					priv->InitialGain--;

				printk("StaRateAdaptive87SE(): update init_gain to index %d for data rate %d\n", priv->InitialGain, priv->CurrentOperaRate);
				UpdateInitialGain(dev);
			}
		} else { /* OFDM */
			if (priv->InitialGain < 4) {
				priv->InitialGainBackUp = priv->InitialGain;

				priv->InitialGain++;
				printk("StaRateAdaptive87SE(): update init_gain to index %d for data rate %d\n", priv->InitialGain, priv->CurrentOperaRate);
				UpdateInitialGain(dev);
			}
		}
	}

	/* Record the related info. */
	priv->LastRetryRate = CurrRetryRate;
	priv->LastTxThroughput = TxThroughput;
	priv->ieee80211->rate = priv->CurrentOperaRate * 5;
}
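
The quadratic back-off above (TryUpTh + FailTxRateCount^2) is the heart of the upgrade throttle: each failure at a rate makes the next probe of that rate wait longer. A minimal standalone C sketch of the same rule, where TRY_UP_TH is an assumed stand-in for the driver's TryUpTh:

#include <stdio.h>
#include <stdbool.h>

#define TRY_UP_TH 30	/* assumed stand-in for the driver's TryUpTh */

/* Mirrors the upgrade gate above: the count threshold grows
 * quadratically with the number of failures at this rate. */
static bool should_upgrade(unsigned count, unsigned fails,
			   int curr_ss, int last_fail_ss, bool trying_up)
{
	return (count > (TRY_UP_TH + fails * fails)) ||
	       (curr_ss > last_fail_ss) || trying_up;
}

int main(void)
{
	for (unsigned fails = 0; fails <= 4; fails++)
		printf("fails=%u: upgrade allowed at count %u -> %d\n",
		       fails, TRY_UP_TH + fails * fails + 1,
		       should_upgrade(TRY_UP_TH + fails * fails + 1,
				      fails, -70, -70, false));
	return 0;
}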

void rtl8180_rate_adapter(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, rate_adapter_wq);
	struct net_device *dev = ieee->dev;
	StaRateAdaptive87SE(dev);
}
void timer_rate_adaptive(unsigned long data)
{
	struct r8180_priv *priv = ieee80211_priv((struct net_device *)data);
	if (!priv->up) {
		return;
	}
	if ((priv->ieee80211->iw_mode != IW_MODE_MASTER)
		&& (priv->ieee80211->state == IEEE80211_LINKED) &&
		(priv->ForcedDataRate == 0)) {
		queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->rate_adapter_wq);
	}
	priv->rateadapter_timer.expires = jiffies + MSECS(priv->RateAdaptivePeriod);
	add_timer(&priv->rateadapter_timer);
}
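
timer_rate_adaptive() re-arms itself with the current RateAdaptivePeriod, so a period change made by StaRateAdaptive87SE() (halving it when FailTxRateCount > 2) takes effect on the very next expiry. A standalone sketch of that rearm pattern, with an assumed initial period value:

#include <stdio.h>

int main(void)
{
	unsigned period_ms = 2000;	/* assumed RATE_ADAPTIVE_TIMER_PERIOD */
	unsigned now_ms = 0;

	for (int tick = 0; tick < 4; tick++) {
		now_ms += period_ms;	/* next expiry = now + current period */
		printf("tick %d fires at t=%u ms\n", tick, now_ms);
		if (tick == 1)
			period_ms /= 2;	/* mimic halving on FailTxRateCount > 2 */
	}
	return 0;
}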

void SwAntennaDiversityRxOk8185(struct net_device *dev, u8 SignalStrength)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);

	priv->AdRxOkCnt++;

	if (priv->AdRxSignalStrength != -1) {
		priv->AdRxSignalStrength = ((priv->AdRxSignalStrength * 7) + (SignalStrength * 3)) / 10;
	} else { /* Initialization case. */
		priv->AdRxSignalStrength = SignalStrength;
	}

	if (priv->LastRxPktAntenna) /* Main antenna. */
		priv->AdMainAntennaRxOkCnt++;
	else /* Aux antenna. */
		priv->AdAuxAntennaRxOkCnt++;
}
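
The smoothed RSSI above is a 70/30 integer exponentially weighted moving average, seeded with the first sample (-1 marks the uninitialized state). The same arithmetic as a standalone program, with made-up sample values:

#include <stdio.h>

/* 70/30 integer EWMA, as in SwAntennaDiversityRxOk8185 above. */
static long smooth(long avg, long sample)
{
	return (avg * 7 + sample * 3) / 10;
}

int main(void)
{
	long avg = -1;	/* -1 means "no average yet", as in the driver */
	long samples[] = { -70, -68, -75, -90, -60 };	/* made-up RSSI values */

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		avg = (avg == -1) ? samples[i] : smooth(avg, samples[i]);
		printf("sample %ld -> avg %ld\n", samples[i], avg);
	}
	return 0;
}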
/* Change Antenna Switch. */
bool SetAntenna8185(struct net_device *dev, u8 u1bAntennaIndex)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
	bool bAntennaSwitched = false;

	switch (u1bAntennaIndex) {
	case 0:
		/* Mac register, main antenna */
		write_nic_byte(dev, ANTSEL, 0x03);
@@ -1319,64 +944,35 @@ SetAntenna8185(
	}

	if (bAntennaSwitched)
		priv->CurrAntennaIndex = u1bAntennaIndex;

	return bAntennaSwitched;
}
/* Toggle Antenna switch. */
bool SwitchAntenna(struct net_device *dev)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);

	bool bResult;

	if (priv->CurrAntennaIndex == 0) {
		bResult = SetAntenna8185(dev, 1);
	} else {
		bResult = SetAntenna8185(dev, 0);
	}

	return bResult;
}
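
SwitchAntenna() only toggles; SetAntenna8185() performs the register writes and records the new index only when the switch reports success. A small standalone model of that split, with the hardware write stubbed out:

#include <stdio.h>
#include <stdbool.h>

static int curr_antenna;	/* mirrors priv->CurrAntennaIndex */

/* Model of SetAntenna8185: do the (stubbed) hardware switch and record
 * the new index only if it succeeded. */
static bool set_antenna(int idx)
{
	bool switched = true;	/* the real code writes the antenna-select regs here */

	if (switched)
		curr_antenna = idx;
	return switched;
}

/* Model of SwitchAntenna: toggle to the other antenna. */
static bool switch_antenna(void)
{
	return set_antenna(curr_antenna == 0 ? 1 : 0);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		bool ok = switch_antenna();

		printf("switched=%d, now on antenna %d\n", ok, curr_antenna);
	}
	return 0;
}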
/*
 * Engine of the SW Antenna Diversity mechanism.
 * Since the 8187 has no Tx part information,
 * this implementation depends only on Rx part information.
 */
void SwAntennaDiversity(struct net_device *dev)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
	bool bSwCheckSS = false;
	if (bSwCheckSS) {
		priv->AdTickCount++;

		printk("(1) AdTickCount: %d, AdCheckPeriod: %d\n",
@@ -1384,246 +980,162 @@ SwAntennaDiversity(
		printk("(2) AdRxSignalStrength: %ld, AdRxSsThreshold: %ld\n",
			priv->AdRxSignalStrength, priv->AdRxSsThreshold);
	}

	/* Case 1. No Link. */
	if (priv->ieee80211->state != IEEE80211_LINKED) {
		priv->bAdSwitchedChecking = false;
		/* I switch the antenna here to prevent either antenna being broken before the link is established, 2006.04.18, by rcnjko. */
		SwitchAntenna(dev);

	/* Case 2. Linked but no packet received. */
	} else if (priv->AdRxOkCnt == 0) {
		priv->bAdSwitchedChecking = false;
		SwitchAntenna(dev);

	/* Case 3. Evaluate the last antenna switch action and undo it if necessary. */
	} else if (priv->bAdSwitchedChecking == true) {
		priv->bAdSwitchedChecking = false;

		/* Adjust the Rx signal strength threshold. */
		priv->AdRxSsThreshold = (priv->AdRxSignalStrength + priv->AdRxSsBeforeSwitched) / 2;

		priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
					priv->AdMaxRxSsThreshold : priv->AdRxSsThreshold;
		if (priv->AdRxSignalStrength < priv->AdRxSsBeforeSwitched) {
			/* Rx signal strength did not improve after we switched antennas. => Switch back. */
			/* Increase the Antenna Diversity checking period due to the bad decision. */
			priv->AdCheckPeriod *= 2;
			/* But cap the checking period at its maximum. */
			if (priv->AdCheckPeriod > priv->AdMaxCheckPeriod)
				priv->AdCheckPeriod = priv->AdMaxCheckPeriod;

			/* Wrong decision => switch back. */
			SwitchAntenna(dev);
		} else {
			/* Rx Signal Strength improved. */

			/* Reset the Antenna Diversity checking period to its min value. */
			priv->AdCheckPeriod = priv->AdMinCheckPeriod;
		}

	}
	/* Case 4. Evaluate whether we should switch the antenna now. */
	/* Because Table Speed is very fast in the TRC Dell Lab, we check it every time. */
	else {
		priv->AdTickCount = 0;

		/*
		 * <Roger_Notes> We evaluate RxOk counts for each antenna first and then
		 * evaluate signal strength.
		 * The following operation can overcome the unreliability of CCA on both antennas
		 * when the signal strength is extremely low or high.
		 * 2008.01.30.
		 */

		/*
		 * Evaluate the RxOk count from each antenna to decide whether to switch the default antenna now.
		 */
		if ((priv->AdMainAntennaRxOkCnt < priv->AdAuxAntennaRxOkCnt)
			&& (priv->CurrAntennaIndex == 0)) {
			/* We set the Main antenna as default, but its RxOk count was less than the Aux one's. */

			/* Switch to the Aux antenna. */
			SwitchAntenna(dev);
			priv->bHWAdSwitched = true;
		} else if ((priv->AdAuxAntennaRxOkCnt < priv->AdMainAntennaRxOkCnt)
			&& (priv->CurrAntennaIndex == 1)) {
			/* We set the Aux antenna as default, but its RxOk count was less than the Main one's. */

			/* Switch to the Main antenna. */
			SwitchAntenna(dev);
			priv->bHWAdSwitched = true;
		} else {
			/* The default antenna is better. */

			/* Still need to check the current signal strength. */
			priv->bHWAdSwitched = false;
		}
		/*
		 * <Roger_Notes> We evaluate Rx signal strength ONLY when the default antenna
		 * wasn't changed by the HW evaluation.
		 * 2008.02.27.
		 *
		 * [TRC Dell Lab] SignalStrength is inaccurate. Isaiah 2008-03-05
		 * For example, the throughput of aux is better than that of the main antenna (about 10M v.s. 2M),
		 * but AdRxSignalStrength is less than main's.
		 * Our guess is that the main antenna has lower throughput and gets many chances
		 * to receive more CCK packets (e.g. Beacons), which have stronger SignalStrength.
		 */
		if ((!priv->bHWAdSwitched) && (bSwCheckSS)) {
			/* Evaluate Rx signal strength to decide whether to switch the antenna now. */
			if (priv->AdRxSignalStrength < priv->AdRxSsThreshold) {
				/* Rx signal strength is weak => Switch Antenna. */
				priv->AdRxSsBeforeSwitched = priv->AdRxSignalStrength;
				priv->bAdSwitchedChecking = true;

				SwitchAntenna(dev);
			} else {
				/* Rx signal strength is OK. */
				priv->bAdSwitchedChecking = false;
				/* Increase the Rx signal strength threshold if necessary. */
				if ((priv->AdRxSignalStrength > (priv->AdRxSsThreshold + 10)) && /* Signal is much stronger than the current threshold */
					priv->AdRxSsThreshold <= priv->AdMaxRxSsThreshold) { /* Current threshold has not yet reached the upper limit. */

					priv->AdRxSsThreshold = (priv->AdRxSsThreshold + priv->AdRxSignalStrength) / 2;
					priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
								priv->AdMaxRxSsThreshold : priv->AdRxSsThreshold;
				}

				/* Reduce the Antenna Diversity checking period if possible. */
				if (priv->AdCheckPeriod > priv->AdMinCheckPeriod)
					priv->AdCheckPeriod /= 2;
			}
		}
	}
	/* Reset the antenna diversity Rx related statistics. */
	priv->AdRxOkCnt = 0;
	priv->AdMainAntennaRxOkCnt = 0;
	priv->AdAuxAntennaRxOkCnt = 0;
}
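
The probe-and-undo cycle above reduces to: switch, compare the smoothed RSSI before and after, revert and double the check period on a bad decision, or reset the period to its minimum on a good one. A standalone model of that bookkeeping, with made-up values:

#include <stdio.h>

int main(void)
{
	long ss_before = -78, ss_after = -82;	/* made-up smoothed RSSI */
	long threshold;
	unsigned period = 1, min_period = 1, max_period = 8;

	/* Evaluate the last switch: a worse signal means a wrong decision. */
	threshold = (ss_after + ss_before) / 2;
	if (ss_after < ss_before) {
		period *= 2;		/* back off after a bad decision */
		if (period > max_period)
			period = max_period;
		printf("revert switch; threshold %ld, period %u\n",
		       threshold, period);
	} else {
		period = min_period;	/* good decision: check often again */
		printf("keep switch; threshold %ld, period %u\n",
		       threshold, period);
	}
	return 0;
}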

/* Return TRUE if we should perform the Tx Power Tracking Mechanism, FALSE otherwise. */
bool CheckTxPwrTracking(struct net_device *dev)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);

	if (!priv->bTxPowerTrack)
		return false;

	/* If the 87SE is in High Power, don't do Tx Power Tracking. Asked by SD3 ED. 2008-08-08 Isaiah */
	if (priv->bToUpdateTxPwr)
		return false;

	return true;
}

/* Timer callback function of SW Antenna Diversity. */
void SwAntennaDiversityTimerCallback(struct net_device *dev)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
	RT_RF_POWER_STATE rtState;

	/* We do NOT need to switch the antenna while the RF is off. */
	rtState = priv->eRFPowerState;
	do {
		if (rtState == eRfOff) {
			break;
		} else if (rtState == eRfSleep) {
			/* Don't access BB/RF under the Disable PLL situation. */
			break;
		}
		SwAntennaDiversity(dev);

	} while (false);

	if (priv->up) {
		priv->SwAntennaDiversityTimer.expires = jiffies + MSECS(ANTENNA_DIVERSITY_TIMER_PERIOD);
		add_timer(&priv->SwAntennaDiversityTimer);
	}
}
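
The do { ... } while (false) above is a structured early-exit: break skips the diversity work in the eRfOff/eRfSleep states without a goto, while the timer rearm after the loop still runs. A standalone illustration with assumed RF states:

#include <stdio.h>

enum rf_state { RF_ON, RF_OFF, RF_SLEEP };

static void tick(enum rf_state s)
{
	do {
		if (s == RF_OFF)
			break;		/* skip the work, fall through below */
		if (s == RF_SLEEP)
			break;		/* don't touch BB/RF with the PLL down */
		printf("running antenna diversity\n");
	} while (0);
	printf("rearming timer\n");	/* runs in every case */
}

int main(void)
{
	tick(RF_ON);
	tick(RF_OFF);
	return 0;
}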
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
index 39ef7e0193fb..303ec691262a 100644
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ b/drivers/staging/rtl8187se/r8180_wx.c
@@ -23,24 +23,22 @@

#include "ieee80211/dot11d.h"

u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
	6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};

#define RATE_COUNT ARRAY_SIZE(rtl8180_rates)

static CHANNEL_LIST DefaultChannelPlan[] = {
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64}, 19},		/* FCC */
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11},						/* IC */
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21},	/* ETSI */
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21},	/* Spain. Change to ETSI. */
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21},	/* France. Change to ETSI. */
	{{14, 36, 40, 44, 48, 52, 56, 60, 64}, 9},						/* MKK */
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52, 56, 60, 64}, 22},	/* MKK1 */
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21},	/* Israel */
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 34, 38, 42, 46}, 17},			/* For 11a, TELEC */
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14}					/* For Global Domain. 1-11: active scan, 12-14: passive scan. */ /* +YJ, 080626 */
};
static int r8180_wx_get_freq(struct net_device *dev,
			     struct iw_request_info *a,
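
RATE_COUNT is now derived with ARRAY_SIZE, so the rate table and its length can never drift apart. The same idiom in a standalone sketch, with a simplified stand-in for the driver's CHANNEL_LIST type:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct channel_list {
	unsigned char channels[32];
	unsigned char len;
};

static const struct channel_list fcc = {
	{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64}, 19
};

int main(void)
{
	unsigned rates[] = {1000000, 2000000, 5500000, 11000000};

	printf("rate count: %zu\n", ARRAY_SIZE(rates));	/* stays correct if the table grows */
	for (unsigned i = 0; i < fcc.len; i++)
		printf("ch %d\n", fcc.channels[i]);
	return 0;
}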
@@ -63,14 +61,7 @@ int r8180_wx_set_key(struct net_device *dev, struct iw_request_info *info,

	if (erq->flags & IW_ENCODE_DISABLED)

	if (erq->length > 0) {
		u32 *tkey = (u32 *)key;
		priv->key0[0] = tkey[0];
		priv->key0[1] = tkey[1];
@@ -192,33 +183,32 @@ static int r8180_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
		return 0;

	down(&priv->wx_sem);
	if (priv->bInactivePs) {
		if (wrqu->mode == IW_MODE_ADHOC)
			IPSLeave(dev);
	}
	ret = ieee80211_wx_set_mode(priv->ieee80211, a, wrqu, b);

	up(&priv->wx_sem);
	return ret;
}

/* YJ,add,080819,for hidden ap */
struct iw_range_with_scan_capa {
	/* Informative stuff (to choose between different interfaces) */

	__u32 throughput;	/* To give an idea... */

	/* In theory this value should be the maximum benchmarked
	 * TCP/IP throughput, because with most of these devices the
	 * bit rate is meaningless (overhead and co) to estimate how
	 * fast the connection will go and pick the fastest one.
	 * I suggest people play with Netperf or any benchmark...
	 */

	/* NWID (or domain id) */
	__u32 min_nwid;	/* Minimal NWID we are able to set */
	__u32 max_nwid;	/* Maximal NWID we are able to set */

	/* Old Frequency (backward compat - moved lower) */
	__u16 old_num_channels;
@@ -238,7 +228,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
	struct r8180_priv *priv = ieee80211_priv(dev);
	u16 val;
	int i;

	wrqu->data.length = sizeof(*range);
	memset(range, 0, sizeof(*range));
@@ -291,14 +280,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 16;

	range->num_channels = 14;

	for (i = 0, val = 0; i < 14; i++) {
@@ -310,8 +291,8 @@ static int rtl8180_wx_get_range(struct net_device *dev,
			range->freq[val].e = 1;
			val++;
		} else {
			/* FIXME: do we need to set anything for channels */
			/* we don't use? */
		}

		if (val == IW_MAX_FREQUENCIES)
@@ -322,8 +303,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
			  IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;

	return 0;
}

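The freq[val].e = 1 assignment a few hunks up follows the usual Wireless Extensions encoding (assumed here) of value = m * 10^e; the matching mantissa assignment sits outside this hunk, so the value below is illustrative only. A quick standalone check of what a 2412 MHz channel would look like under that convention:

#include <stdio.h>

int main(void)
{
	long m = 241200000;	/* assumed mantissa for channel 1 (2412 MHz) */
	int e = 1;		/* exponent, as set in the loop above */
	double hz = m;

	while (e-- > 0)
		hz *= 10.0;	/* value = m * 10^e */
	printf("%.0f Hz = %.0f MHz\n", hz, hz / 1e6);
	return 0;
}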
@@ -339,50 +318,29 @@ static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
	if (priv->ieee80211->bHwRadioOff)
		return 0;

	if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
		struct iw_scan_req *req = (struct iw_scan_req *)b;
		if (req->essid_len) {
			ieee->current_network.ssid_len = req->essid_len;
			memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
		}
	}

	down(&priv->wx_sem);
	if (priv->up) {
		priv->ieee80211->actscanning = true;
		if (priv->bInactivePs && (priv->ieee80211->state != IEEE80211_LINKED)) {
			IPSLeave(dev);
			ieee80211_softmac_ips_scan_syncro(priv->ieee80211);
			ret = 0;
		} else {
			/* Prevent scanning while traffic is busy. */
			/* FIXME: Need to consider the last scan time. */
			if (priv->link_detect.bBusyTraffic) {
				ret = 0;
				printk("Now traffic is busy, please try later!\n");
			} else
				ret = ieee80211_wx_set_scan(priv->ieee80211, a, wrqu, b);
		}
	} else
@@ -424,10 +382,8 @@ static int r8180_wx_set_essid(struct net_device *dev,
		return 0;

	down(&priv->wx_sem);
	if (priv->bInactivePs)
		IPSLeave(dev);

	ret = ieee80211_wx_set_essid(priv->ieee80211, a, wrqu, b);

@@ -597,28 +553,6 @@ static int r8180_wx_set_scan_type(struct net_device *dev, struct iw_request_info
	return 1;
}

static int r8180_wx_set_retry(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
@@ -661,14 +595,6 @@ static int r8180_wx_set_retry(struct net_device *dev,
661 */ 595 */
662 596
663 rtl8180_commit(dev); 597 rtl8180_commit(dev);
664 /*
665 if(priv->up){
666 rtl8180_rtx_disable(dev);
667 rtl8180_rx_enable(dev);
668 rtl8180_tx_enable(dev);
669
670 }
671 */
672exit: 598exit:
673 up(&priv->wx_sem); 599 up(&priv->wx_sem);
674 600
@@ -695,8 +621,6 @@ static int r8180_wx_get_retry(struct net_device *dev,
695 wrqu->retry.flags = IW_RETRY_LIMIT & IW_RETRY_MIN; 621 wrqu->retry.flags = IW_RETRY_LIMIT & IW_RETRY_MIN;
696 wrqu->retry.value = priv->retry_data; 622 wrqu->retry.value = priv->retry_data;
697 } 623 }
698 /* DMESG("returning %d",wrqu->retry.value); */
699
700 624
701 return 0; 625 return 0;
702} 626}
@@ -726,7 +650,6 @@ static int r8180_wx_set_sens(struct net_device *dev,
726 return 0; 650 return 0;
727 651
728 down(&priv->wx_sem); 652 down(&priv->wx_sem);
729 /* DMESG("attempt to set sensivity to %ddb",wrqu->sens.value); */
730 if (priv->rf_set_sens == NULL) { 653 if (priv->rf_set_sens == NULL) {
 731	err = -1; /* this radio has no such support */			 654	err = -1; /* this radio has no such support */
732 goto exit; 655 goto exit;
@@ -847,58 +770,6 @@ static int dummy(struct net_device *dev, struct iw_request_info *a,
847 return -1; 770 return -1;
848} 771}
849 772
850/*
851static int r8180_wx_get_psmode(struct net_device *dev,
852 struct iw_request_info *info,
853 union iwreq_data *wrqu, char *extra)
854{
855 struct r8180_priv *priv = ieee80211_priv(dev);
856 struct ieee80211_device *ieee;
857 int ret = 0;
858
859
860
861 down(&priv->wx_sem);
862
863 if(priv) {
864 ieee = priv->ieee80211;
865 if(ieee->ps == IEEE80211_PS_DISABLED) {
866 *((unsigned int *)extra) = IEEE80211_PS_DISABLED;
867 goto exit;
868 }
869 *((unsigned int *)extra) = IW_POWER_TIMEOUT;
870 if (ieee->ps & IEEE80211_PS_MBCAST)
871 *((unsigned int *)extra) |= IW_POWER_ALL_R;
872 else
873 *((unsigned int *)extra) |= IW_POWER_UNICAST_R;
874 } else
875 ret = -1;
876exit:
877 up(&priv->wx_sem);
878
879 return ret;
880}
881static int r8180_wx_set_psmode(struct net_device *dev,
882 struct iw_request_info *info,
883 union iwreq_data *wrqu, char *extra)
884{
885 struct r8180_priv *priv = ieee80211_priv(dev);
886 //struct ieee80211_device *ieee;
887 int ret = 0;
888
889
890
891 down(&priv->wx_sem);
892
893 ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra);
894
895 up(&priv->wx_sem);
896
897 return ret;
898
899}
900*/
901
902static int r8180_wx_get_iwmode(struct net_device *dev, 773static int r8180_wx_get_iwmode(struct net_device *dev,
903 struct iw_request_info *info, 774 struct iw_request_info *info,
904 union iwreq_data *wrqu, char *extra) 775 union iwreq_data *wrqu, char *extra)
@@ -964,7 +835,6 @@ static int r8180_wx_set_iwmode(struct net_device *dev,
964 } else { 835 } else {
965 ieee->mode = mode; 836 ieee->mode = mode;
966 ieee->modulation = modulation; 837 ieee->modulation = modulation;
967/* ieee80211_start_protocol(ieee); */
968 } 838 }
969 839
970 up(&priv->wx_sem); 840 up(&priv->wx_sem);
@@ -1016,7 +886,6 @@ static int r8180_wx_get_siglevel(struct net_device *dev,
1016 union iwreq_data *wrqu, char *extra) 886 union iwreq_data *wrqu, char *extra)
1017{ 887{
1018 struct r8180_priv *priv = ieee80211_priv(dev); 888 struct r8180_priv *priv = ieee80211_priv(dev);
1019 /* struct ieee80211_network *network = &(priv->ieee80211->current_network); */
1020 int ret = 0; 889 int ret = 0;
1021 890
1022 891
@@ -1036,7 +905,6 @@ static int r8180_wx_get_sigqual(struct net_device *dev,
1036 union iwreq_data *wrqu, char *extra) 905 union iwreq_data *wrqu, char *extra)
1037{ 906{
1038 struct r8180_priv *priv = ieee80211_priv(dev); 907 struct r8180_priv *priv = ieee80211_priv(dev);
1039 /* struct ieee80211_network *network = &(priv->ieee80211->current_network); */
1040 int ret = 0; 908 int ret = 0;
1041 909
1042 910
@@ -1150,7 +1018,6 @@ static int r8180_wx_set_channelplan(struct net_device *dev,
1150 union iwreq_data *wrqu, char *extra) 1018 union iwreq_data *wrqu, char *extra)
1151{ 1019{
1152 struct r8180_priv *priv = ieee80211_priv(dev); 1020 struct r8180_priv *priv = ieee80211_priv(dev);
1153 /* struct ieee80211_device *ieee = netdev_priv(dev); */
1154 int *val = (int *)extra; 1021 int *val = (int *)extra;
1155 int i; 1022 int i;
1156 printk("-----in fun %s\n", __func__); 1023 printk("-----in fun %s\n", __func__);
@@ -1223,7 +1090,6 @@ static int r8180_wx_set_enc_ext(struct net_device *dev,
1223{ 1090{
1224 1091
1225 struct r8180_priv *priv = ieee80211_priv(dev); 1092 struct r8180_priv *priv = ieee80211_priv(dev);
1226 /* printk("===>%s()\n", __func__); */
1227 1093
1228 int ret = 0; 1094 int ret = 0;
1229 1095
@@ -1240,7 +1106,6 @@ static int r8180_wx_set_auth(struct net_device *dev,
1240 struct iw_request_info *info, 1106 struct iw_request_info *info,
1241 union iwreq_data *wrqu, char *extra) 1107 union iwreq_data *wrqu, char *extra)
1242{ 1108{
1243 /* printk("====>%s()\n", __func__); */
1244 struct r8180_priv *priv = ieee80211_priv(dev); 1109 struct r8180_priv *priv = ieee80211_priv(dev);
1245 int ret = 0; 1110 int ret = 0;
1246 1111
@@ -1257,8 +1122,6 @@ static int r8180_wx_set_mlme(struct net_device *dev,
1257 struct iw_request_info *info, 1122 struct iw_request_info *info,
1258 union iwreq_data *wrqu, char *extra) 1123 union iwreq_data *wrqu, char *extra)
1259{ 1124{
1260 /* printk("====>%s()\n", __func__); */
1261
1262 int ret = 0; 1125 int ret = 0;
1263 struct r8180_priv *priv = ieee80211_priv(dev); 1126 struct r8180_priv *priv = ieee80211_priv(dev);
1264 1127
@@ -1278,7 +1141,6 @@ static int r8180_wx_set_gen_ie(struct net_device *dev,
1278 struct iw_request_info *info, 1141 struct iw_request_info *info,
1279 union iwreq_data *wrqu, char *extra) 1142 union iwreq_data *wrqu, char *extra)
1280{ 1143{
1281/* printk("====>%s(), len:%d\n", __func__, data->length); */
1282 int ret = 0; 1144 int ret = 0;
1283 struct r8180_priv *priv = ieee80211_priv(dev); 1145 struct r8180_priv *priv = ieee80211_priv(dev);
1284 1146
@@ -1291,68 +1153,67 @@ static int r8180_wx_set_gen_ie(struct net_device *dev,
1291 ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, wrqu->data.length); 1153 ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, wrqu->data.length);
1292#endif 1154#endif
1293 up(&priv->wx_sem); 1155 up(&priv->wx_sem);
1294 /* printk("<======%s(), ret:%d\n", __func__, ret); */
1295 return ret; 1156 return ret;
1296 1157
1297 1158
1298} 1159}
1299static iw_handler r8180_wx_handlers[] = { 1160static iw_handler r8180_wx_handlers[] = {
1300 NULL, /* SIOCSIWCOMMIT */ 1161 NULL, /* SIOCSIWCOMMIT */
1301 r8180_wx_get_name, /* SIOCGIWNAME */ 1162 r8180_wx_get_name, /* SIOCGIWNAME */
1302 dummy, /* SIOCSIWNWID */ 1163 dummy, /* SIOCSIWNWID */
1303 dummy, /* SIOCGIWNWID */ 1164 dummy, /* SIOCGIWNWID */
1304 r8180_wx_set_freq, /* SIOCSIWFREQ */ 1165 r8180_wx_set_freq, /* SIOCSIWFREQ */
1305 r8180_wx_get_freq, /* SIOCGIWFREQ */ 1166 r8180_wx_get_freq, /* SIOCGIWFREQ */
1306 r8180_wx_set_mode, /* SIOCSIWMODE */ 1167 r8180_wx_set_mode, /* SIOCSIWMODE */
1307 r8180_wx_get_mode, /* SIOCGIWMODE */ 1168 r8180_wx_get_mode, /* SIOCGIWMODE */
1308 r8180_wx_set_sens, /* SIOCSIWSENS */ 1169 r8180_wx_set_sens, /* SIOCSIWSENS */
1309 r8180_wx_get_sens, /* SIOCGIWSENS */ 1170 r8180_wx_get_sens, /* SIOCGIWSENS */
1310 NULL, /* SIOCSIWRANGE */ 1171 NULL, /* SIOCSIWRANGE */
1311 rtl8180_wx_get_range, /* SIOCGIWRANGE */ 1172 rtl8180_wx_get_range, /* SIOCGIWRANGE */
1312 NULL, /* SIOCSIWPRIV */ 1173 NULL, /* SIOCSIWPRIV */
1313 NULL, /* SIOCGIWPRIV */ 1174 NULL, /* SIOCGIWPRIV */
1314 NULL, /* SIOCSIWSTATS */ 1175 NULL, /* SIOCSIWSTATS */
1315 NULL, /* SIOCGIWSTATS */ 1176 NULL, /* SIOCGIWSTATS */
1316 dummy, /* SIOCSIWSPY */ 1177 dummy, /* SIOCSIWSPY */
1317 dummy, /* SIOCGIWSPY */ 1178 dummy, /* SIOCGIWSPY */
 1318	NULL, /* SIOCSIWTHRSPY */	 1179	NULL, /* SIOCSIWTHRSPY */
 1319	NULL, /* SIOCGIWTHRSPY */	 1180	NULL, /* SIOCGIWTHRSPY */
1320 r8180_wx_set_wap, /* SIOCSIWAP */ 1181 r8180_wx_set_wap, /* SIOCSIWAP */
1321 r8180_wx_get_wap, /* SIOCGIWAP */ 1182 r8180_wx_get_wap, /* SIOCGIWAP */
1322 r8180_wx_set_mlme, /* SIOCSIWMLME*/ 1183 r8180_wx_set_mlme, /* SIOCSIWMLME*/
 1323	dummy, /* SIOCGIWAPLIST -- deprecated */	 1184	dummy, /* SIOCGIWAPLIST -- deprecated */
1324 r8180_wx_set_scan, /* SIOCSIWSCAN */ 1185 r8180_wx_set_scan, /* SIOCSIWSCAN */
1325 r8180_wx_get_scan, /* SIOCGIWSCAN */ 1186 r8180_wx_get_scan, /* SIOCGIWSCAN */
1326 r8180_wx_set_essid, /* SIOCSIWESSID */ 1187 r8180_wx_set_essid, /* SIOCSIWESSID */
1327 r8180_wx_get_essid, /* SIOCGIWESSID */ 1188 r8180_wx_get_essid, /* SIOCGIWESSID */
1328 dummy, /* SIOCSIWNICKN */ 1189 dummy, /* SIOCSIWNICKN */
1329 dummy, /* SIOCGIWNICKN */ 1190 dummy, /* SIOCGIWNICKN */
1330 NULL, /* -- hole -- */ 1191 NULL, /* -- hole -- */
1331 NULL, /* -- hole -- */ 1192 NULL, /* -- hole -- */
1332 r8180_wx_set_rate, /* SIOCSIWRATE */ 1193 r8180_wx_set_rate, /* SIOCSIWRATE */
1333 r8180_wx_get_rate, /* SIOCGIWRATE */ 1194 r8180_wx_get_rate, /* SIOCGIWRATE */
1334 r8180_wx_set_rts, /* SIOCSIWRTS */ 1195 r8180_wx_set_rts, /* SIOCSIWRTS */
1335 r8180_wx_get_rts, /* SIOCGIWRTS */ 1196 r8180_wx_get_rts, /* SIOCGIWRTS */
1336 r8180_wx_set_frag, /* SIOCSIWFRAG */ 1197 r8180_wx_set_frag, /* SIOCSIWFRAG */
1337 r8180_wx_get_frag, /* SIOCGIWFRAG */ 1198 r8180_wx_get_frag, /* SIOCGIWFRAG */
1338 dummy, /* SIOCSIWTXPOW */ 1199 dummy, /* SIOCSIWTXPOW */
1339 dummy, /* SIOCGIWTXPOW */ 1200 dummy, /* SIOCGIWTXPOW */
1340 r8180_wx_set_retry, /* SIOCSIWRETRY */ 1201 r8180_wx_set_retry, /* SIOCSIWRETRY */
1341 r8180_wx_get_retry, /* SIOCGIWRETRY */ 1202 r8180_wx_get_retry, /* SIOCGIWRETRY */
1342 r8180_wx_set_enc, /* SIOCSIWENCODE */ 1203 r8180_wx_set_enc, /* SIOCSIWENCODE */
1343 r8180_wx_get_enc, /* SIOCGIWENCODE */ 1204 r8180_wx_get_enc, /* SIOCGIWENCODE */
1344 r8180_wx_set_power, /* SIOCSIWPOWER */ 1205 r8180_wx_set_power, /* SIOCSIWPOWER */
1345 r8180_wx_get_power, /* SIOCGIWPOWER */ 1206 r8180_wx_get_power, /* SIOCGIWPOWER */
1346 NULL, /*---hole---*/ 1207 NULL, /*---hole---*/
1347 NULL, /*---hole---*/ 1208 NULL, /*---hole---*/
1348 r8180_wx_set_gen_ie, /* SIOCSIWGENIE */ 1209 r8180_wx_set_gen_ie, /* SIOCSIWGENIE */
 1349	NULL, /* SIOCGIWGENIE */	 1210	NULL, /* SIOCGIWGENIE */
1350 r8180_wx_set_auth, /* SIOCSIWAUTH */ 1211 r8180_wx_set_auth, /* SIOCSIWAUTH */
 1351	NULL, /* SIOCGIWAUTH */	 1212	NULL, /* SIOCGIWAUTH */
1352 r8180_wx_set_enc_ext, /* SIOCSIWENCODEEXT */ 1213 r8180_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
 1353	NULL, /* SIOCGIWENCODEEXT */	 1214	NULL, /* SIOCGIWENCODEEXT */
1354 NULL, /* SIOCSIWPMKSA */ 1215 NULL, /* SIOCSIWPMKSA */
1355 NULL, /*---hole---*/ 1216 NULL, /*---hole---*/
1356}; 1217};
1357 1218
1358 1219
@@ -1373,14 +1234,6 @@ static const struct iw_priv_args r8180_private_args[] = {
1373 0, 0, "dummy" 1234 0, 0, "dummy"
1374 1235
1375 }, 1236 },
1376 /* added by christian */
1377 /*
1378 {
1379 SIOCIWFIRSTPRIV + 0x2,
1380 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "prismhdr"
1381 },
1382 */
1383 /* end added by christian */
1384 { 1237 {
1385 SIOCIWFIRSTPRIV + 0x4, 1238 SIOCIWFIRSTPRIV + 0x4,
1386 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan" 1239 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan"
@@ -1399,18 +1252,6 @@ static const struct iw_priv_args r8180_private_args[] = {
1399 0, 0, "dummy" 1252 0, 0, "dummy"
1400 1253
1401 }, 1254 },
1402/*
1403 {
1404 SIOCIWFIRSTPRIV + 0x5,
1405 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getpsmode"
1406 },
1407 {
1408 SIOCIWFIRSTPRIV + 0x6,
1409 IW_PRIV_SIZE_FIXED, 0, "setpsmode"
1410 },
1411*/
1412/* set/get mode have been realized in public handlers */
1413
1414 { 1255 {
1415 SIOCIWFIRSTPRIV + 0x8, 1256 SIOCIWFIRSTPRIV + 0x8,
1416 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setiwmode" 1257 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setiwmode"
@@ -1481,7 +1322,7 @@ static const struct iw_priv_args r8180_private_args[] = {
1481 1322
1482 1323
1483static iw_handler r8180_private_handler[] = { 1324static iw_handler r8180_private_handler[] = {
1484 r8180_wx_set_crcmon, /*SIOCIWSECONDPRIV*/ 1325 r8180_wx_set_crcmon, /*SIOCIWSECONDPRIV*/
1485 dummy, 1326 dummy,
1486 r8180_wx_set_beaconinterval, 1327 r8180_wx_set_beaconinterval,
1487 dummy, 1328 dummy,
@@ -1513,16 +1354,15 @@ static inline int is_same_network(struct ieee80211_network *src,
1513 struct ieee80211_network *dst, 1354 struct ieee80211_network *dst,
1514 struct ieee80211_device *ieee) 1355 struct ieee80211_device *ieee)
1515{ 1356{
1516 /* A network is only a duplicate if the channel, BSSID, ESSID 1357 /* A network is only a duplicate if the channel, BSSID, ESSID
1517 * and the capability field (in particular IBSS and BSS) all match. 1358 * and the capability field (in particular IBSS and BSS) all match.
1518 * We treat all <hidden> with the same BSSID and channel 1359 * We treat all <hidden> with the same BSSID and channel
1519 * as one network */ 1360 * as one network
1520 return (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */ 1361 */
1521 /* ((src->ssid_len == dst->ssid_len) && */ 1362 return (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */
1522 (src->channel == dst->channel) && 1363 (src->channel == dst->channel) &&
1523 !memcmp(src->bssid, dst->bssid, ETH_ALEN) && 1364 !memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
1524 (!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */ 1365 (!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */
1525 /*!memcmp(src->ssid, dst->ssid, src->ssid_len) && */
1526 ((src->capability & WLAN_CAPABILITY_IBSS) == 1366 ((src->capability & WLAN_CAPABILITY_IBSS) ==
1527 (dst->capability & WLAN_CAPABILITY_IBSS)) && 1367 (dst->capability & WLAN_CAPABILITY_IBSS)) &&
1528 ((src->capability & WLAN_CAPABILITY_BSS) == 1368 ((src->capability & WLAN_CAPABILITY_BSS) ==
@@ -1535,11 +1375,9 @@ static struct iw_statistics *r8180_get_wireless_stats(struct net_device *dev)
1535 struct r8180_priv *priv = ieee80211_priv(dev); 1375 struct r8180_priv *priv = ieee80211_priv(dev);
1536 struct ieee80211_device* ieee = priv->ieee80211; 1376 struct ieee80211_device* ieee = priv->ieee80211;
1537 struct iw_statistics* wstats = &priv->wstats; 1377 struct iw_statistics* wstats = &priv->wstats;
1538 /* struct ieee80211_network* target = NULL; */
1539 int tmp_level = 0; 1378 int tmp_level = 0;
1540 int tmp_qual = 0; 1379 int tmp_qual = 0;
1541 int tmp_noise = 0; 1380 int tmp_noise = 0;
1542 /* unsigned long flag; */
1543 1381
1544 if (ieee->state < IEEE80211_LINKED) { 1382 if (ieee->state < IEEE80211_LINKED) {
1545 wstats->qual.qual = 0; 1383 wstats->qual.qual = 0;
@@ -1552,9 +1390,7 @@ static struct iw_statistics *r8180_get_wireless_stats(struct net_device *dev)
1552 tmp_level = (&ieee->current_network)->stats.signal; 1390 tmp_level = (&ieee->current_network)->stats.signal;
1553 tmp_qual = (&ieee->current_network)->stats.signalstrength; 1391 tmp_qual = (&ieee->current_network)->stats.signalstrength;
1554 tmp_noise = (&ieee->current_network)->stats.noise; 1392 tmp_noise = (&ieee->current_network)->stats.noise;
1555 /* printk("level:%d, qual:%d, noise:%d\n", tmp_level, tmp_qual, tmp_noise); */
1556 1393
1557/* printk("level:%d\n", tmp_level); */
1558 wstats->qual.level = tmp_level; 1394 wstats->qual.level = tmp_level;
1559 wstats->qual.qual = tmp_qual; 1395 wstats->qual.qual = tmp_qual;
1560 wstats->qual.noise = tmp_noise; 1396 wstats->qual.noise = tmp_noise;
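The net effect of the r8180_wx.c hunks above is the removal of dead printk()s and long-commented-out code. The one piece of live logic worth restating is the duplicate-network rule in is_same_network(); the sketch below uses simplified, hypothetical types rather than the driver's structures:

	#include <string.h>

	struct net_entry {
		int channel;
		unsigned char bssid[6];
		unsigned char ssid[32];
		size_t ssid_len;
		int is_ibss;
	};

	/* In infrastructure mode both SSID tests are skipped, so a hidden
	 * AP's zero-length-SSID beacon and its probe response (which carries
	 * the real SSID) collapse into a single scan entry keyed on BSSID,
	 * channel and the IBSS/BSS capability bits. */
	static int same_network(const struct net_entry *a,
				const struct net_entry *b, int infra_mode)
	{
		return (infra_mode || a->ssid_len == b->ssid_len) &&
		       a->channel == b->channel &&
		       !memcmp(a->bssid, b->bssid, sizeof(a->bssid)) &&
		       (infra_mode || !memcmp(a->ssid, b->ssid, a->ssid_len)) &&
		       a->is_ibss == b->is_ibss;
	}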
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 6c5061f12bad..13979b5ea32a 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -2453,7 +2453,7 @@ static inline void update_network(struct rtllib_network *dst,
2453 if (src->wmm_param[0].ac_aci_acm_aifsn || 2453 if (src->wmm_param[0].ac_aci_acm_aifsn ||
2454 src->wmm_param[1].ac_aci_acm_aifsn || 2454 src->wmm_param[1].ac_aci_acm_aifsn ||
2455 src->wmm_param[2].ac_aci_acm_aifsn || 2455 src->wmm_param[2].ac_aci_acm_aifsn ||
2456 src->wmm_param[1].ac_aci_acm_aifsn) 2456 src->wmm_param[3].ac_aci_acm_aifsn)
2457 memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN); 2457 memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
2458 2458
2459 dst->SignalStrength = src->SignalStrength; 2459 dst->SignalStrength = src->SignalStrength;
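The one-character change above is a real bug fix, not cleanup: wmm_param[1] was tested twice and wmm_param[3] never, so an AP advertising parameters only for the fourth access category would not have its WMM block copied. A loop over the categories, sketched here with a hypothetical struct, avoids that class of copy-paste slip:

	#define WME_NUM_AC 4

	struct wmm_ac_param {
		unsigned char ac_aci_acm_aifsn;
		/* remaining per-AC fields omitted */
	};

	/* True if any of the four access categories carries parameters. */
	static int wmm_params_present(const struct wmm_ac_param p[WME_NUM_AC])
	{
		int i;

		for (i = 0; i < WME_NUM_AC; i++)
			if (p[i].ac_aci_acm_aifsn)
				return 1;
		return 0;
	}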
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 1637f1110991..c5a15dba1bf5 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -2234,7 +2234,6 @@ inline int rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
2234 2234
2235 if (!network) 2235 if (!network)
2236 return 1; 2236 return 1;
2237 memset(network, 0, sizeof(*network));
2238 ieee->state = RTLLIB_LINKED; 2237 ieee->state = RTLLIB_LINKED;
2239 ieee->assoc_id = aid; 2238 ieee->assoc_id = aid;
2240 ieee->softmac_stats.rx_ass_ok++; 2239 ieee->softmac_stats.rx_ass_ok++;
@@ -2259,8 +2258,8 @@ inline int rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
2259 ieee->handle_assoc_response(ieee->dev, 2258 ieee->handle_assoc_response(ieee->dev,
2260 (struct rtllib_assoc_response_frame *)header, 2259 (struct rtllib_assoc_response_frame *)header,
2261 network); 2260 network);
2262 kfree(network);
2263 } 2261 }
2262 kfree(network);
2264 2263
2265 kfree(ieee->assocresp_ies); 2264 kfree(ieee->assocresp_ies);
2266 ieee->assocresp_ies = NULL; 2265 ieee->assocresp_ies = NULL;
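Two fixes share this hunk. The dropped memset() implies the buffer arrives pre-zeroed from its allocator (the allocation site is outside the quoted context, so that is an inference), and moving kfree(network) out of the handle_assoc_response branch closes a leak: with no callback registered, the buffer was never freed. A standalone sketch of the corrected shape:

	#include <stdlib.h>

	typedef void (*assoc_cb_t)(void *net);

	static int deliver_assoc(assoc_cb_t cb)
	{
		void *net = calloc(1, 128);	/* pre-zeroed; no memset needed */

		if (!net)
			return 1;
		if (cb)
			cb(net);	/* the callback only borrows net */
		free(net);		/* now reached on every path */
		return 0;
	}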
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index c9bdc7f6bdce..be2a28cf8edd 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -237,7 +237,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
237 237
238 #ifdef NOT_YET 238 #ifdef NOT_YET
239 if (ieee->iw_mode == IW_MODE_MASTER) { 239 if (ieee->iw_mode == IW_MODE_MASTER) {
240 printk(KERN_DEBUG "%s: Master mode not yet suppported.\n", 240 printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
241 ieee->dev->name); 241 ieee->dev->name);
242 return 0; 242 return 0;
243/* 243/*
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index c09be0a66467..9c00865f302a 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -105,7 +105,6 @@ u32 rt_global_debug_component = \
105 105
106static const struct usb_device_id rtl8192_usb_id_tbl[] = { 106static const struct usb_device_id rtl8192_usb_id_tbl[] = {
107 /* Realtek */ 107 /* Realtek */
108 {USB_DEVICE(0x0bda, 0x8192)},
109 {USB_DEVICE(0x0bda, 0x8709)}, 108 {USB_DEVICE(0x0bda, 0x8709)},
110 /* Corega */ 109 /* Corega */
111 {USB_DEVICE(0x07aa, 0x0043)}, 110 {USB_DEVICE(0x07aa, 0x0043)},
diff --git a/drivers/staging/rtl8712/Kconfig b/drivers/staging/rtl8712/Kconfig
index ea37473f71e5..6a43312380e0 100644
--- a/drivers/staging/rtl8712/Kconfig
+++ b/drivers/staging/rtl8712/Kconfig
@@ -9,13 +9,6 @@ config R8712U
9 This option adds the Realtek RTL8712 USB device such as the D-Link DWA-130. 9 This option adds the Realtek RTL8712 USB device such as the D-Link DWA-130.
10 If built as a module, it will be called r8712u. 10 If built as a module, it will be called r8712u.
11 11
12config R8712_AP
13 bool "Realtek RTL8712U AP code"
14 depends on R8712U
15 default N
16 ---help---
17 This option allows the Realtek RTL8712 USB device to be an Access Point.
18
19config R8712_TX_AGGR 12config R8712_TX_AGGR
20 bool "Realtek RTL8712U Transmit Aggregation code" 13 bool "Realtek RTL8712U Transmit Aggregation code"
21 depends on R8712U && BROKEN 14 depends on R8712U && BROKEN
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index ed85b4415207..e83665d06020 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -140,7 +140,6 @@ struct dvobj_priv {
140 u8 ishighspeed; 140 u8 ishighspeed;
141 uint(*inirp_init)(struct _adapter *adapter); 141 uint(*inirp_init)(struct _adapter *adapter);
142 uint(*inirp_deinit)(struct _adapter *adapter); 142 uint(*inirp_deinit)(struct _adapter *adapter);
143 struct semaphore usb_suspend_sema;
144 struct usb_device *pusbdev; 143 struct usb_device *pusbdev;
145}; 144};
146 145
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 98a3d684f9b2..7bbd53a410e3 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -330,7 +330,6 @@ u8 r8712_init_drv_sw(struct _adapter *padapter)
330 padapter->stapriv.padapter = padapter; 330 padapter->stapriv.padapter = padapter;
331 r8712_init_bcmc_stainfo(padapter); 331 r8712_init_bcmc_stainfo(padapter);
332 r8712_init_pwrctrl_priv(padapter); 332 r8712_init_pwrctrl_priv(padapter);
333 sema_init(&(padapter->pwrctrlpriv.pnp_pwr_mgnt_sema), 0);
334 mp871xinit(padapter); 333 mp871xinit(padapter);
335 if (init_default_value(padapter) != _SUCCESS) 334 if (init_default_value(padapter) != _SUCCESS)
336 return _FAIL; 335 return _FAIL;
@@ -476,11 +475,6 @@ static int netdev_close(struct net_device *pnetdev)
476 r8712_free_assoc_resources(padapter); 475 r8712_free_assoc_resources(padapter);
477 /*s2-4.*/ 476 /*s2-4.*/
478 r8712_free_network_queue(padapter); 477 r8712_free_network_queue(padapter);
479 /* The interface is no longer Up: */
480 padapter->bup = false;
481 release_firmware(padapter->fw);
482 /* never exit with a firmware callback pending */
483 wait_for_completion(&padapter->rtl8712_fw_ready);
484 return 0; 478 return 0;
485} 479}
486 480
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index 1ee943a58c4c..9ba603310fdc 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -72,18 +72,6 @@ static inline struct list_head *get_list_head(struct __queue *queue)
72#define LIST_CONTAINOR(ptr, type, member) \ 72#define LIST_CONTAINOR(ptr, type, member) \
73 ((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member))) 73 ((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
74 74
75static inline void _enter_hwio_critical(struct semaphore *prwlock,
76 unsigned long *pirqL)
77{
78 down(prwlock);
79}
80
81static inline void _exit_hwio_critical(struct semaphore *prwlock,
82 unsigned long *pirqL)
83{
84 up(prwlock);
85}
86
87static inline void list_delete(struct list_head *plist) 75static inline void list_delete(struct list_head *plist)
88{ 76{
89 list_del_init(plist); 77 list_del_init(plist);
@@ -152,11 +140,6 @@ static inline u32 _down_sema(struct semaphore *sema)
152 return _SUCCESS; 140 return _SUCCESS;
153} 141}
154 142
155static inline void _rtl_rwlock_init(struct semaphore *prwlock)
156{
157 sema_init(prwlock, 1);
158}
159
160static inline void _init_listhead(struct list_head *list) 143static inline void _init_listhead(struct list_head *list)
161{ 144{
162 INIT_LIST_HEAD(list); 145 INIT_LIST_HEAD(list);
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 6d692657e784..fa6dc9c09b3f 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -55,8 +55,6 @@ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
55 int alignment = 0; 55 int alignment = 0;
56 struct sk_buff *pskb = NULL; 56 struct sk_buff *pskb = NULL;
57 57
58 sema_init(&precvpriv->recv_sema, 0);
59 sema_init(&precvpriv->terminate_recvthread_sema, 0);
60 /*init recv_buf*/ 58 /*init recv_buf*/
61 _init_queue(&precvpriv->free_recv_buf_queue); 59 _init_queue(&precvpriv->free_recv_buf_queue);
62 precvpriv->pallocated_recv_buf = _malloc(NR_RECVBUFF * 60 precvpriv->pallocated_recv_buf = _malloc(NR_RECVBUFF *
diff --git a/drivers/staging/rtl8712/rtl871x_io.c b/drivers/staging/rtl8712/rtl871x_io.c
index ca84ee02eacc..abc1c97378f7 100644
--- a/drivers/staging/rtl8712/rtl871x_io.c
+++ b/drivers/staging/rtl8712/rtl871x_io.c
@@ -131,7 +131,6 @@ uint r8712_alloc_io_queue(struct _adapter *adapter)
131 pio_req = (struct io_req *)(pio_queue->free_ioreqs_buf); 131 pio_req = (struct io_req *)(pio_queue->free_ioreqs_buf);
132 for (i = 0; i < NUM_IOREQ; i++) { 132 for (i = 0; i < NUM_IOREQ; i++) {
133 _init_listhead(&pio_req->list); 133 _init_listhead(&pio_req->list);
134 sema_init(&pio_req->sema, 0);
135 list_insert_tail(&pio_req->list, &pio_queue->free_ioreqs); 134 list_insert_tail(&pio_req->list, &pio_queue->free_ioreqs);
136 pio_req++; 135 pio_req++;
137 } 136 }
diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
index 86308a0093ed..d3d8727c2ec5 100644
--- a/drivers/staging/rtl8712/rtl871x_io.h
+++ b/drivers/staging/rtl8712/rtl871x_io.h
@@ -117,7 +117,6 @@ struct io_req {
117 u32 command; 117 u32 command;
118 u32 status; 118 u32 status;
119 u8 *pbuf; 119 u8 *pbuf;
120 struct semaphore sema;
121 void (*_async_io_callback)(struct _adapter *padater, 120 void (*_async_io_callback)(struct _adapter *padater,
122 struct io_req *pio_req, u8 *cnxt); 121 struct io_req *pio_req, u8 *cnxt);
123 u8 *cnxt; 122 u8 *cnxt;
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 507584b837c3..ef35bc29a3fa 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -2380,13 +2380,7 @@ static struct iw_statistics *r871x_get_wireless_stats(struct net_device *dev)
2380 tmp_qual = padapter->recvpriv.signal; 2380 tmp_qual = padapter->recvpriv.signal;
2381 tmp_noise = padapter->recvpriv.noise; 2381 tmp_noise = padapter->recvpriv.noise;
2382 piwstats->qual.level = tmp_level; 2382 piwstats->qual.level = tmp_level;
2383 /*piwstats->qual.qual = tmp_qual; 2383 piwstats->qual.qual = tmp_qual;
2384 * The NetworkManager of Fedora 10, 13 will use the link
2385 * quality for its display.
2386 * So, use the fw_rssi on link quality variable because
2387 * fw_rssi will be updated per 2 seconds.
2388 */
2389 piwstats->qual.qual = tmp_level;
2390 piwstats->qual.noise = tmp_noise; 2384 piwstats->qual.noise = tmp_noise;
2391 } 2385 }
2392 piwstats->qual.updated = IW_QUAL_ALL_UPDATED; 2386 piwstats->qual.updated = IW_QUAL_ALL_UPDATED;
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index 23e72a0401a8..9fd2ec7596cc 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -100,7 +100,6 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
100{ 100{
101 struct pwrctrl_priv *pwrpriv = &(padapter->pwrctrlpriv); 101 struct pwrctrl_priv *pwrpriv = &(padapter->pwrctrlpriv);
102 struct cmd_priv *pcmdpriv = &(padapter->cmdpriv); 102 struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
103 struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
104 103
105 if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80)) 104 if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80))
106 return; 105 return;
@@ -110,8 +109,6 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
110 if (pwrpriv->cpwm >= PS_STATE_S2) { 109 if (pwrpriv->cpwm >= PS_STATE_S2) {
111 if (pwrpriv->alives & CMD_ALIVE) 110 if (pwrpriv->alives & CMD_ALIVE)
112 up(&(pcmdpriv->cmd_queue_sema)); 111 up(&(pcmdpriv->cmd_queue_sema));
113 if (pwrpriv->alives & XMIT_ALIVE)
114 up(&(pxmitpriv->xmit_sema));
115 } 112 }
116 pwrpriv->cpwm_tog = (preportpwrstate->state) & 0x80; 113 pwrpriv->cpwm_tog = (preportpwrstate->state) & 0x80;
117 up(&pwrpriv->lock); 114 up(&pwrpriv->lock);
@@ -145,12 +142,12 @@ static void SetPSModeWorkItemCallback(struct work_struct *work)
145 struct pwrctrl_priv, SetPSModeWorkItem); 142 struct pwrctrl_priv, SetPSModeWorkItem);
146 struct _adapter *padapter = container_of(pwrpriv, 143 struct _adapter *padapter = container_of(pwrpriv,
147 struct _adapter, pwrctrlpriv); 144 struct _adapter, pwrctrlpriv);
148 _enter_pwrlock(&pwrpriv->lock);
149 if (!pwrpriv->bSleep) { 145 if (!pwrpriv->bSleep) {
146 _enter_pwrlock(&pwrpriv->lock);
150 if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) 147 if (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
151 r8712_set_rpwm(padapter, PS_STATE_S4); 148 r8712_set_rpwm(padapter, PS_STATE_S4);
149 up(&pwrpriv->lock);
152 } 150 }
153 up(&pwrpriv->lock);
154} 151}
155 152
156static void rpwm_workitem_callback(struct work_struct *work) 153static void rpwm_workitem_callback(struct work_struct *work)
@@ -160,13 +157,13 @@ static void rpwm_workitem_callback(struct work_struct *work)
160 struct _adapter *padapter = container_of(pwrpriv, 157 struct _adapter *padapter = container_of(pwrpriv,
161 struct _adapter, pwrctrlpriv); 158 struct _adapter, pwrctrlpriv);
162 u8 cpwm = pwrpriv->cpwm; 159 u8 cpwm = pwrpriv->cpwm;
163 _enter_pwrlock(&pwrpriv->lock);
164 if (pwrpriv->cpwm != pwrpriv->rpwm) { 160 if (pwrpriv->cpwm != pwrpriv->rpwm) {
161 _enter_pwrlock(&pwrpriv->lock);
165 cpwm = r8712_read8(padapter, SDIO_HCPWM); 162 cpwm = r8712_read8(padapter, SDIO_HCPWM);
166 pwrpriv->rpwm_retry = 1; 163 pwrpriv->rpwm_retry = 1;
167 r8712_set_rpwm(padapter, pwrpriv->rpwm); 164 r8712_set_rpwm(padapter, pwrpriv->rpwm);
165 up(&pwrpriv->lock);
168 } 166 }
169 up(&pwrpriv->lock);
170} 167}
171 168
172static void rpwm_check_handler (void *FunctionContext) 169static void rpwm_check_handler (void *FunctionContext)
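Both work-item callbacks get the same treatment: the cheap state test moves ahead of _enter_pwrlock(), so the common nothing-to-do case no longer takes and releases the power semaphore. Sketched below with stub lock helpers; note the unlocked test is only safe if whoever writes the flag also holds the lock, which the quoted context does not show:

	#include <stdbool.h>

	static void pwr_lock(void)   { /* down(&pwrpriv->lock) in the driver */ }
	static void pwr_unlock(void) { /* up(&pwrpriv->lock) in the driver */ }
	static void apply_power_change(void) { }

	static void ps_work_item(bool change_needed)
	{
		if (change_needed) {	/* unlocked fast-path check */
			pwr_lock();	/* only the slow path pays for the lock */
			apply_power_change();
			pwr_unlock();
		}
	}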
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.h b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
index b41ca2892be5..6024c4f63d5b 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.h
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
@@ -133,7 +133,6 @@ struct pwrctrl_priv {
133 u8 rpwm_retry; 133 u8 rpwm_retry;
134 uint bSetPSModeWorkItemInProgress; 134 uint bSetPSModeWorkItemInProgress;
135 135
136 struct semaphore pnp_pwr_mgnt_sema;
137 spinlock_t pnp_pwr_mgnt_lock; 136 spinlock_t pnp_pwr_mgnt_lock;
138 s32 pnp_current_pwr_state; 137 s32 pnp_current_pwr_state;
139 u8 pnp_bstop_trx; 138 u8 pnp_bstop_trx;
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 7069f06d9b5d..5b03b405883e 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -93,7 +93,6 @@ sint _r8712_init_recv_priv(struct recv_priv *precvpriv,
93 precvframe++; 93 precvframe++;
94 } 94 }
95 precvpriv->rx_pending_cnt = 1; 95 precvpriv->rx_pending_cnt = 1;
96 sema_init(&precvpriv->allrxreturnevt, 0);
97 return r8712_init_recv_priv(precvpriv, padapter); 96 return r8712_init_recv_priv(precvpriv, padapter);
98} 97}
99 98
diff --git a/drivers/staging/rtl8712/rtl871x_recv.h b/drivers/staging/rtl8712/rtl871x_recv.h
index cc7a72fee1c2..e42e6f0a15e6 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.h
+++ b/drivers/staging/rtl8712/rtl871x_recv.h
@@ -85,8 +85,6 @@ using enter_critical section to protect
85*/ 85*/
86struct recv_priv { 86struct recv_priv {
87 spinlock_t lock; 87 spinlock_t lock;
88 struct semaphore recv_sema;
89 struct semaphore terminate_recvthread_sema;
90 struct __queue free_recv_queue; 88 struct __queue free_recv_queue;
91 struct __queue recv_pending_queue; 89 struct __queue recv_pending_queue;
92 u8 *pallocated_frame_buf; 90 u8 *pallocated_frame_buf;
@@ -100,7 +98,6 @@ struct recv_priv {
100 uint rx_largepacket_crcerr; 98 uint rx_largepacket_crcerr;
101 uint rx_smallpacket_crcerr; 99 uint rx_smallpacket_crcerr;
102 uint rx_middlepacket_crcerr; 100 uint rx_middlepacket_crcerr;
103 struct semaphore allrxreturnevt;
104 u8 rx_pending_cnt; 101 u8 rx_pending_cnt;
105 uint ff_hwaddr; 102 uint ff_hwaddr;
106 struct tasklet_struct recv_tasklet; 103 struct tasklet_struct recv_tasklet;
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 81bde803c59f..1247b3d9719d 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -42,10 +42,8 @@ static void _init_stainfo(struct sta_info *psta)
42 _init_listhead(&psta->hash_list); 42 _init_listhead(&psta->hash_list);
43 _r8712_init_sta_xmit_priv(&psta->sta_xmitpriv); 43 _r8712_init_sta_xmit_priv(&psta->sta_xmitpriv);
44 _r8712_init_sta_recv_priv(&psta->sta_recvpriv); 44 _r8712_init_sta_recv_priv(&psta->sta_recvpriv);
45#ifdef CONFIG_R8712_AP
46 _init_listhead(&psta->asoc_list); 45 _init_listhead(&psta->asoc_list);
47 _init_listhead(&psta->auth_list); 46 _init_listhead(&psta->auth_list);
48#endif
49} 47}
50 48
51u32 _r8712_init_sta_priv(struct sta_priv *pstapriv) 49u32 _r8712_init_sta_priv(struct sta_priv *pstapriv)
@@ -72,10 +70,8 @@ u32 _r8712_init_sta_priv(struct sta_priv *pstapriv)
72 get_list_head(&pstapriv->free_sta_queue)); 70 get_list_head(&pstapriv->free_sta_queue));
73 psta++; 71 psta++;
74 } 72 }
75#ifdef CONFIG_R8712_AP
76 _init_listhead(&pstapriv->asoc_list); 73 _init_listhead(&pstapriv->asoc_list);
77 _init_listhead(&pstapriv->auth_list); 74 _init_listhead(&pstapriv->auth_list);
78#endif
79 return _SUCCESS; 75 return _SUCCESS;
80} 76}
81 77
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index 8bbdee70f867..aa57e7754f04 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -71,8 +71,6 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
71 71
72 memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv)); 72 memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));
73 spin_lock_init(&pxmitpriv->lock); 73 spin_lock_init(&pxmitpriv->lock);
74 sema_init(&pxmitpriv->xmit_sema, 0);
75 sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
76 /* 74 /*
 77	Please insert all the queue initialization using _init_queue below	 75	Please insert all the queue initialization using _init_queue below
78 */ 76 */
@@ -121,7 +119,6 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
121 _r8712_init_hw_txqueue(&pxmitpriv->bmc_txqueue, BMC_QUEUE_INX); 119 _r8712_init_hw_txqueue(&pxmitpriv->bmc_txqueue, BMC_QUEUE_INX);
122 pxmitpriv->frag_len = MAX_FRAG_THRESHOLD; 120 pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
123 pxmitpriv->txirp_cnt = 1; 121 pxmitpriv->txirp_cnt = 1;
124 sema_init(&(pxmitpriv->tx_retevt), 0);
125 /*per AC pending irp*/ 122 /*per AC pending irp*/
126 pxmitpriv->beq_cnt = 0; 123 pxmitpriv->beq_cnt = 0;
127 pxmitpriv->bkq_cnt = 0; 124 pxmitpriv->bkq_cnt = 0;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index a034c0fec718..638b79b4c5a8 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -202,8 +202,6 @@ struct hw_txqueue {
202 202
203struct xmit_priv { 203struct xmit_priv {
204 spinlock_t lock; 204 spinlock_t lock;
205 struct semaphore xmit_sema;
206 struct semaphore terminate_xmitthread_sema;
207 struct __queue be_pending; 205 struct __queue be_pending;
208 struct __queue bk_pending; 206 struct __queue bk_pending;
209 struct __queue vi_pending; 207 struct __queue vi_pending;
@@ -233,7 +231,6 @@ struct xmit_priv {
233 uint tx_drop; 231 uint tx_drop;
234 struct hw_xmit *hwxmits; 232 struct hw_xmit *hwxmits;
235 u8 hwxmit_entry; 233 u8 hwxmit_entry;
236 struct semaphore tx_retevt;/*all tx return event;*/
237 u8 txirp_cnt; 234 u8 txirp_cnt;
238 struct tasklet_struct xmit_tasklet; 235 struct tasklet_struct xmit_tasklet;
239 _workitem xmit_pipe4_reset_wi; 236 _workitem xmit_pipe4_reset_wi;
diff --git a/drivers/staging/rtl8712/sta_info.h b/drivers/staging/rtl8712/sta_info.h
index 48d6a14c8f5f..f8016e9abffd 100644
--- a/drivers/staging/rtl8712/sta_info.h
+++ b/drivers/staging/rtl8712/sta_info.h
@@ -90,7 +90,6 @@ struct sta_info {
90 * curr_network(mlme_priv/security_priv/qos/ht) : AP CAP/INFO 90 * curr_network(mlme_priv/security_priv/qos/ht) : AP CAP/INFO
91 * sta_info: (AP & STA) CAP/INFO 91 * sta_info: (AP & STA) CAP/INFO
92 */ 92 */
93#ifdef CONFIG_R8712_AP
94 struct list_head asoc_list; 93 struct list_head asoc_list;
95 struct list_head auth_list; 94 struct list_head auth_list;
96 unsigned int expire_to; 95 unsigned int expire_to;
@@ -98,7 +97,6 @@ struct sta_info {
98 unsigned int authalg; 97 unsigned int authalg;
99 unsigned char chg_txt[128]; 98 unsigned char chg_txt[128];
100 unsigned int tx_ra_bitmap; 99 unsigned int tx_ra_bitmap;
101#endif
102}; 100};
103 101
104struct sta_priv { 102struct sta_priv {
@@ -111,13 +109,11 @@ struct sta_priv {
111 struct __queue sleep_q; 109 struct __queue sleep_q;
112 struct __queue wakeup_q; 110 struct __queue wakeup_q;
113 struct _adapter *padapter; 111 struct _adapter *padapter;
114#ifdef CONFIG_R8712_AP
115 struct list_head asoc_list; 112 struct list_head asoc_list;
116 struct list_head auth_list; 113 struct list_head auth_list;
117 unsigned int auth_to; /* sec, time to expire in authenticating. */ 114 unsigned int auth_to; /* sec, time to expire in authenticating. */
118 unsigned int assoc_to; /* sec, time to expire before associating. */ 115 unsigned int assoc_to; /* sec, time to expire before associating. */
119 unsigned int expire_to; /* sec , time to expire after associated. */ 116 unsigned int expire_to; /* sec , time to expire after associated. */
120#endif
121}; 117};
122 118
123static inline u32 wifi_mac_hash(u8 *mac) 119static inline u32 wifi_mac_hash(u8 *mac)
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 9bade184883b..e419b4fd82b9 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -30,6 +30,7 @@
30 30
31#include <linux/usb.h> 31#include <linux/usb.h>
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/firmware.h>
33 34
34#include "osdep_service.h" 35#include "osdep_service.h"
35#include "drv_types.h" 36#include "drv_types.h"
@@ -105,10 +106,10 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
105/* RTL8191SU */ 106/* RTL8191SU */
106 /* Realtek */ 107 /* Realtek */
107 {USB_DEVICE(0x0BDA, 0x8172)}, 108 {USB_DEVICE(0x0BDA, 0x8172)},
109 {USB_DEVICE(0x0BDA, 0x8192)},
108 /* Amigo */ 110 /* Amigo */
109 {USB_DEVICE(0x0EB0, 0x9061)}, 111 {USB_DEVICE(0x0EB0, 0x9061)},
110 /* ASUS/EKB */ 112 /* ASUS/EKB */
111 {USB_DEVICE(0x0BDA, 0x8172)},
112 {USB_DEVICE(0x13D3, 0x3323)}, 113 {USB_DEVICE(0x13D3, 0x3323)},
113 {USB_DEVICE(0x13D3, 0x3311)}, /* 11n mode disable */ 114 {USB_DEVICE(0x13D3, 0x3311)}, /* 11n mode disable */
114 {USB_DEVICE(0x13D3, 0x3342)}, 115 {USB_DEVICE(0x13D3, 0x3342)},
@@ -160,7 +161,6 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
160/* RTL8192SU */ 161/* RTL8192SU */
161 /* Realtek */ 162 /* Realtek */
162 {USB_DEVICE(0x0BDA, 0x8174)}, 163 {USB_DEVICE(0x0BDA, 0x8174)},
163 {USB_DEVICE(0x0BDA, 0x8174)},
164 /* Belkin */ 164 /* Belkin */
165 {USB_DEVICE(0x050D, 0x845A)}, 165 {USB_DEVICE(0x050D, 0x845A)},
166 /* Corega */ 166 /* Corega */
@@ -281,7 +281,6 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
281 } 281 }
282 if ((r8712_alloc_io_queue(padapter)) == _FAIL) 282 if ((r8712_alloc_io_queue(padapter)) == _FAIL)
283 status = _FAIL; 283 status = _FAIL;
284 sema_init(&(padapter->dvobjpriv.usb_suspend_sema), 0);
285 return status; 284 return status;
286} 285}
287 286
@@ -623,6 +622,10 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
623 622
624 usb_set_intfdata(pusb_intf, NULL); 623 usb_set_intfdata(pusb_intf, NULL);
625 if (padapter) { 624 if (padapter) {
625 if (padapter->fw_found)
626 release_firmware(padapter->fw);
627 /* never exit with a firmware callback pending */
628 wait_for_completion(&padapter->rtl8712_fw_ready);
626 if (drvpriv.drv_registered == true) 629 if (drvpriv.drv_registered == true)
627 padapter->bSurpriseRemoved = true; 630 padapter->bSurpriseRemoved = true;
628 if (pnetdev != NULL) { 631 if (pnetdev != NULL) {
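Paired with the netdev_close() hunk earlier in this series, this moves firmware teardown from interface-down to device removal. The firmware is requested once at probe, so releasing it in netdev_close() left a stale pointer across a down/up cycle and allowed the driver to exit while the asynchronous request_firmware_nowait() callback was still pending. A sketch of the corrected ordering, using the names visible in the hunk:

	/* outline of the remove path, not the full r871xu_dev_remove() */
	static void dev_remove(struct _adapter *padapter)
	{
		if (padapter->fw_found)
			release_firmware(padapter->fw);
		/* never exit with a firmware callback pending */
		wait_for_completion(&padapter->rtl8712_fw_ready);
		/* ... remaining teardown ... */
	}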
diff --git a/drivers/staging/rts5139/TODO b/drivers/staging/rts5139/TODO
index 4bde726ea5fa..dd5fabb8ea70 100644
--- a/drivers/staging/rts5139/TODO
+++ b/drivers/staging/rts5139/TODO
@@ -2,4 +2,8 @@ TODO:
2- support more USB card reader of Realtek family 2- support more USB card reader of Realtek family
3- use kernel coding style 3- use kernel coding style
4- checkpatch.pl fixes 4- checkpatch.pl fixes
5 5- stop having thousands of lines of code duplicated with staging/rts_pstor
6- This driver contains an entire SD/MMC stack -- it should use the stack in
7 drivers/mmc instead, as a host driver e.g. drivers/mmc/host/realtek-usb.c;
8 see drivers/mmc/host/ushc.c as an example.
9- This driver presents cards as SCSI devices, but they should be MMC devices.
diff --git a/drivers/staging/rts5139/ms.h b/drivers/staging/rts5139/ms.h
index f9d46d210f23..3ce1dc90f19d 100644
--- a/drivers/staging/rts5139/ms.h
+++ b/drivers/staging/rts5139/ms.h
@@ -249,9 +249,9 @@ int ms_delay_write(struct rts51x_chip *chip);
249#ifdef SUPPORT_MAGIC_GATE 249#ifdef SUPPORT_MAGIC_GATE
250 250
251int ms_switch_clock(struct rts51x_chip *chip); 251int ms_switch_clock(struct rts51x_chip *chip);
252int ms_write_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 * data, 252int ms_write_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data,
253 int data_len); 253 int data_len);
254int ms_read_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 * data, 254int ms_read_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data,
255 int data_len); 255 int data_len);
256int ms_set_rw_reg_addr(struct rts51x_chip *chip, u8 read_start, u8 read_cnt, 256int ms_set_rw_reg_addr(struct rts51x_chip *chip, u8 read_start, u8 read_cnt,
257 u8 write_start, u8 write_cnt); 257 u8 write_start, u8 write_cnt);
diff --git a/drivers/staging/rts5139/rts51x_chip.c b/drivers/staging/rts5139/rts51x_chip.c
index adc0d0005735..b3e0bb22b0ff 100644
--- a/drivers/staging/rts5139/rts51x_chip.c
+++ b/drivers/staging/rts5139/rts51x_chip.c
@@ -541,7 +541,7 @@ int rts51x_get_rsp(struct rts51x_chip *chip, int rsp_len, int timeout)
541 return STATUS_SUCCESS; 541 return STATUS_SUCCESS;
542} 542}
543 543
544int rts51x_get_card_status(struct rts51x_chip *chip, u16 * status) 544int rts51x_get_card_status(struct rts51x_chip *chip, u16 *status)
545{ 545{
546 int retval; 546 int retval;
547 u16 val; 547 u16 val;
@@ -577,7 +577,7 @@ int rts51x_write_register(struct rts51x_chip *chip, u16 addr, u8 mask, u8 data)
577 return STATUS_SUCCESS; 577 return STATUS_SUCCESS;
578} 578}
579 579
580int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 * data) 580int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 *data)
581{ 581{
582 int retval; 582 int retval;
583 583
@@ -620,7 +620,7 @@ int rts51x_ep0_write_register(struct rts51x_chip *chip, u16 addr, u8 mask,
620 return STATUS_SUCCESS; 620 return STATUS_SUCCESS;
621} 621}
622 622
623int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 * data) 623int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 *data)
624{ 624{
625 int retval; 625 int retval;
626 u16 value = 0; 626 u16 value = 0;
@@ -720,7 +720,7 @@ int rts51x_seq_read_register(struct rts51x_chip *chip, u16 addr, u16 len,
720 return STATUS_SUCCESS; 720 return STATUS_SUCCESS;
721} 721}
722 722
723int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len) 723int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 *buf, int buf_len)
724{ 724{
725 int retval; 725 int retval;
726 726
@@ -735,7 +735,7 @@ int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len)
735 return STATUS_SUCCESS; 735 return STATUS_SUCCESS;
736} 736}
737 737
738int rts51x_write_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len) 738int rts51x_write_ppbuf(struct rts51x_chip *chip, u8 *buf, int buf_len)
739{ 739{
740 int retval; 740 int retval;
741 741
@@ -776,7 +776,7 @@ int rts51x_write_phy_register(struct rts51x_chip *chip, u8 addr, u8 val)
776 return STATUS_SUCCESS; 776 return STATUS_SUCCESS;
777} 777}
778 778
779int rts51x_read_phy_register(struct rts51x_chip *chip, u8 addr, u8 * val) 779int rts51x_read_phy_register(struct rts51x_chip *chip, u8 addr, u8 *val)
780{ 780{
781 int retval; 781 int retval;
782 782
@@ -921,7 +921,7 @@ void rts51x_trace_msg(struct rts51x_chip *chip, unsigned char *buf, int clear)
921} 921}
922#endif 922#endif
923 923
924void rts51x_pp_status(struct rts51x_chip *chip, unsigned int lun, u8 * status, 924void rts51x_pp_status(struct rts51x_chip *chip, unsigned int lun, u8 *status,
925 u8 status_len) 925 u8 status_len)
926{ 926{
927 struct sd_info *sd_card = &(chip->sd_card); 927 struct sd_info *sd_card = &(chip->sd_card);
diff --git a/drivers/staging/rts5139/rts51x_chip.h b/drivers/staging/rts5139/rts51x_chip.h
index 321ece750ede..13fc2a410d90 100644
--- a/drivers/staging/rts5139/rts51x_chip.h
+++ b/drivers/staging/rts5139/rts51x_chip.h
@@ -857,12 +857,12 @@ static inline u8 *rts51x_get_rsp_data(struct rts51x_chip *chip)
857 return chip->rsp_buf; 857 return chip->rsp_buf;
858} 858}
859 859
860int rts51x_get_card_status(struct rts51x_chip *chip, u16 * status); 860int rts51x_get_card_status(struct rts51x_chip *chip, u16 *status);
861int rts51x_write_register(struct rts51x_chip *chip, u16 addr, u8 mask, u8 data); 861int rts51x_write_register(struct rts51x_chip *chip, u16 addr, u8 mask, u8 data);
862int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 * data); 862int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 *data);
863int rts51x_ep0_write_register(struct rts51x_chip *chip, u16 addr, u8 mask, 863int rts51x_ep0_write_register(struct rts51x_chip *chip, u16 addr, u8 mask,
864 u8 data); 864 u8 data);
865int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 * data); 865int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 *data);
866int rts51x_seq_write_register(struct rts51x_chip *chip, u16 addr, u16 len, 866int rts51x_seq_write_register(struct rts51x_chip *chip, u16 addr, u16 len,
867 u8 *data); 867 u8 *data);
868int rts51x_seq_read_register(struct rts51x_chip *chip, u16 addr, u16 len, 868int rts51x_seq_read_register(struct rts51x_chip *chip, u16 addr, u16 len,
diff --git a/drivers/staging/rts5139/rts51x_fop.h b/drivers/staging/rts5139/rts51x_fop.h
index 0453f57d1a84..94d75f08d255 100644
--- a/drivers/staging/rts5139/rts51x_fop.h
+++ b/drivers/staging/rts5139/rts51x_fop.h
@@ -48,7 +48,7 @@ int rts51x_open(struct inode *inode, struct file *filp);
48int rts51x_release(struct inode *inode, struct file *filp); 48int rts51x_release(struct inode *inode, struct file *filp);
49ssize_t rts51x_read(struct file *filp, char __user *buf, size_t count, 49ssize_t rts51x_read(struct file *filp, char __user *buf, size_t count,
50 loff_t *f_pos); 50 loff_t *f_pos);
51ssize_t rts51x_write(struct file *filp, const char __user * buf, size_t count, 51ssize_t rts51x_write(struct file *filp, const char __user *buf, size_t count,
52 loff_t *f_pos); 52 loff_t *f_pos);
53#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */ 53#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
54int rts51x_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, 54int rts51x_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
diff --git a/drivers/staging/rts5139/rts51x_transport.c b/drivers/staging/rts5139/rts51x_transport.c
index e11467acc57b..da9c83b49426 100644
--- a/drivers/staging/rts5139/rts51x_transport.c
+++ b/drivers/staging/rts5139/rts51x_transport.c
@@ -883,7 +883,7 @@ int rts51x_transfer_data_partial(struct rts51x_chip *chip, unsigned int pipe,
883 return result; 883 return result;
884} 884}
885 885
886int rts51x_get_epc_status(struct rts51x_chip *chip, u16 * status) 886int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status)
887{ 887{
888 unsigned int pipe = RCV_INTR_PIPE(chip); 888 unsigned int pipe = RCV_INTR_PIPE(chip);
889 struct usb_host_endpoint *ep; 889 struct usb_host_endpoint *ep;
diff --git a/drivers/staging/rts5139/rts51x_transport.h b/drivers/staging/rts5139/rts51x_transport.h
index 8464c4836d5b..9dd556ea9c08 100644
--- a/drivers/staging/rts5139/rts51x_transport.h
+++ b/drivers/staging/rts5139/rts51x_transport.h
@@ -73,7 +73,7 @@ int rts51x_start_epc_transfer(struct rts51x_chip *chip);
73void rts51x_cancel_epc_transfer(struct rts51x_chip *chip); 73void rts51x_cancel_epc_transfer(struct rts51x_chip *chip);
74#endif 74#endif
75 75
76int rts51x_get_epc_status(struct rts51x_chip *chip, u16 * status); 76int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status);
77void rts51x_invoke_transport(struct scsi_cmnd *srb, struct rts51x_chip *chip); 77void rts51x_invoke_transport(struct scsi_cmnd *srb, struct rts51x_chip *chip);
78 78
79#endif /* __RTS51X_TRANSPORT_H */ 79#endif /* __RTS51X_TRANSPORT_H */
diff --git a/drivers/staging/rts5139/sd_cprm.c b/drivers/staging/rts5139/sd_cprm.c
index 407cd43ad3b1..d5969d992d84 100644
--- a/drivers/staging/rts5139/sd_cprm.c
+++ b/drivers/staging/rts5139/sd_cprm.c
@@ -233,7 +233,7 @@ RTY_SEND_CMD:
233 return STATUS_SUCCESS; 233 return STATUS_SUCCESS;
234} 234}
235 235
236int ext_sd_get_rsp(struct rts51x_chip *chip, int len, u8 * rsp, u8 rsp_type) 236int ext_sd_get_rsp(struct rts51x_chip *chip, int len, u8 *rsp, u8 rsp_type)
237{ 237{
238 int retval, rsp_len; 238 int retval, rsp_len;
239 u16 reg_addr; 239 u16 reg_addr;
diff --git a/drivers/staging/rts_pstor/TODO b/drivers/staging/rts_pstor/TODO
index 2f93a7c1b5ad..becb95e4f2cd 100644
--- a/drivers/staging/rts_pstor/TODO
+++ b/drivers/staging/rts_pstor/TODO
@@ -2,4 +2,8 @@ TODO:
2- support more pcie card reader of Realtek family 2- support more pcie card reader of Realtek family
3- use kernel coding style 3- use kernel coding style
4- checkpatch.pl fixes 4- checkpatch.pl fixes
5 5- stop having thousands of lines of code duplicated with staging/rts5139
6- This driver contains an entire SD/MMC stack -- it should use the stack in
7 drivers/mmc instead, as a host driver e.g. drivers/mmc/host/realtek-pci.c;
8 see drivers/mmc/host/via-sdmmc.c as an example.
9- This driver presents cards as SCSI devices, but they should be MMC devices.
diff --git a/drivers/staging/sbe-2t3e3/intr.c b/drivers/staging/sbe-2t3e3/intr.c
index 7ad1a8382037..1336aab11bdd 100644
--- a/drivers/staging/sbe-2t3e3/intr.c
+++ b/drivers/staging/sbe-2t3e3/intr.c
@@ -188,7 +188,7 @@ void dc_intr_rx(struct channel *sc)
188 } 188 }
189 189
190 if (sc->s.LOS) { 190 if (sc->s.LOS) {
191 error_mask &= ~(SBE_2T3E3_RX_DESC_DRIBBLING_BIT || 191 error_mask &= ~(SBE_2T3E3_RX_DESC_DRIBBLING_BIT |
192 SBE_2T3E3_RX_DESC_MII_ERROR); 192 SBE_2T3E3_RX_DESC_MII_ERROR);
193 } 193 }
194 194
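The operator swap above matters because `||` yields a boolean: ~(A || B) is ~1, which clears only bit 0 of error_mask instead of the two descriptor-error bits. A minimal userspace demonstration with made-up bit values (not the 2t3e3 register layout):

	#include <stdio.h>

	int main(void)
	{
		unsigned int mask = 0xff;
		unsigned int A = 0x04, B = 0x10;

		printf("%#x\n", mask & ~(A || B)); /* 0xfe: only bit 0 cleared */
		printf("%#x\n", mask & ~(A | B));  /* 0xeb: bits 2 and 4 cleared */
		return 0;
	}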
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
index 92bf16667d04..185b676d858a 100644
--- a/drivers/staging/sep/Kconfig
+++ b/drivers/staging/sep/Kconfig
@@ -3,7 +3,8 @@ config DX_SEP
3 depends on PCI 3 depends on PCI
4 help 4 help
5 Discretix SEP driver; used for the security processor subsystem 5 Discretix SEP driver; used for the security processor subsystem
6 on bard the Intel Mobile Internet Device. 6 on board the Intel Mobile Internet Device and adds SEP availability
	 7	 to the kernel crypto infrastructure.
7 8
8 The driver's name is sep_driver. 9 The driver's name is sep_driver.
9 10
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
index 628d5f919414..e48a7959289e 100644
--- a/drivers/staging/sep/Makefile
+++ b/drivers/staging/sep/Makefile
@@ -1,2 +1,3 @@
1obj-$(CONFIG_DX_SEP) := sep_driver.o 1ccflags-y += -I$(srctree)/$(src)
2 2obj-$(CONFIG_DX_SEP) += sep_driver.o
3sep_driver-objs := sep_crypto.o sep_main.o
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
index 8f3b878ad8ae..3524d0cf84ba 100644
--- a/drivers/staging/sep/TODO
+++ b/drivers/staging/sep/TODO
@@ -1,4 +1,3 @@
1Todos so far (from Alan Cox)	1Todos so far (from Alan Cox)
2- Check whether it can be plugged into any of the kernel crypto API 2- Clean up unused ioctls
3 interfaces - Crypto API 'glue' is still not ready to submit 3- Clean up unused fields in ioctl structures
4- Clean up un-needed debug prints - Started to work on this
diff --git a/drivers/staging/sep/sep_crypto.c b/drivers/staging/sep/sep_crypto.c
new file mode 100644
index 000000000000..1cc790e9fa07
--- /dev/null
+++ b/drivers/staging/sep/sep_crypto.c
@@ -0,0 +1,4058 @@
1/*
2 *
3 * sep_crypto.c - Crypto interface structures
4 *
5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009-2010 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * CONTACTS:
22 *
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 * 2011.02.22 Enable Kernel Crypto
31 *
32 */
33
34/* #define DEBUG */
35#include <linux/init.h>
36#include <linux/module.h>
37#include <linux/miscdevice.h>
38#include <linux/fs.h>
39#include <linux/cdev.h>
40#include <linux/kdev_t.h>
41#include <linux/mutex.h>
42#include <linux/sched.h>
43#include <linux/mm.h>
44#include <linux/poll.h>
45#include <linux/wait.h>
46#include <linux/pci.h>
47#include <linux/pci.h>
48#include <linux/pm_runtime.h>
49#include <linux/err.h>
50#include <linux/device.h>
51#include <linux/errno.h>
52#include <linux/interrupt.h>
53#include <linux/kernel.h>
54#include <linux/clk.h>
55#include <linux/irq.h>
56#include <linux/io.h>
57#include <linux/platform_device.h>
58#include <linux/list.h>
59#include <linux/dma-mapping.h>
60#include <linux/delay.h>
61#include <linux/jiffies.h>
62#include <linux/workqueue.h>
63#include <linux/crypto.h>
64#include <crypto/internal/hash.h>
65#include <crypto/scatterwalk.h>
66#include <crypto/sha.h>
67#include <crypto/md5.h>
68#include <crypto/aes.h>
69#include <crypto/des.h>
70#include <crypto/hash.h>
71#include "sep_driver_hw_defs.h"
72#include "sep_driver_config.h"
73#include "sep_driver_api.h"
74#include "sep_dev.h"
75#include "sep_crypto.h"
76
77#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
78
79/* Globals for queuing */
80static spinlock_t queue_lock;
81static struct crypto_queue sep_queue;
82
 83/* Declaration of the dequeuer */
84static void sep_dequeuer(void *data);
85
86/* TESTING */
87/**
88 * crypto_sep_dump_message - dump the message that is pending
89 * @sep: SEP device
 90 *	This will only print a dump if DEBUG is set; it does
91 * follow kernel debug print enabling
92 */
93static void crypto_sep_dump_message(struct sep_device *sep, void *msg)
94{
95#if 0
96 u32 *p;
97 u32 *i;
98 int count;
99
100 p = sep->shared_addr;
101 i = (u32 *)msg;
102 for (count = 0; count < 10 * 4; count += 4)
103 dev_dbg(&sep->pdev->dev,
104 "[PID%d] Word %d of the message is %x (local)%x\n",
105 current->pid, count/4, *p++, *i++);
106#endif
107}
108
109/**
110 * sep_do_callback
111 * @work: pointer to work_struct
112 * This is what is called by the queue; it is generic so that it
113 * can be used by any type of operation as each different callback
114 * function can use the data parameter in its own way
115 */
116static void sep_do_callback(struct work_struct *work)
117{
118 struct sep_work_struct *sep_work = container_of(work,
119 struct sep_work_struct, work);
120 if (sep_work != NULL) {
121 (sep_work->callback)(sep_work->data);
122 kfree(sep_work);
123 } else {
124 pr_debug("sep crypto: do callback - NULL container\n");
125 }
126}
127
128/**
129 * sep_submit_work
130 * @work_queue: pointer to struct_workqueue
131 * @funct: pointer to function to execute
132 * @data: pointer to data; function will know
133 * how to use it
134 * This is a generic API to submit something to
135 * the queue. The callback function will depend
136 * on what operation is to be done
137 */
138static int sep_submit_work(struct workqueue_struct *work_queue,
139 void(*funct)(void *),
140 void *data)
141{
142 struct sep_work_struct *sep_work;
143 int result;
144
145 sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);
146
147 if (sep_work == NULL) {
		pr_debug("sep crypto: can't allocate work structure\n");
149 return -ENOMEM;
150 }
151
152 sep_work->callback = funct;
153 sep_work->data = data;
154 INIT_WORK(&sep_work->work, sep_do_callback);
155 result = queue_work(work_queue, &sep_work->work);
156 if (!result) {
157 pr_debug("sep_crypto: queue_work failed\n");
158 return -EINVAL;
159 }
160 return 0;
161}
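
/*
 * Typical use in this driver: submit the dequeuer against the global
 * request queue, e.g. sep_submit_work(sep->workqueue, sep_dequeuer,
 * (void *)&sep_queue), as is done from the hash post-op path below.
 */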
162
163/**
164 * sep_alloc_sg_buf -
165 * @sep: pointer to struct sep_device
166 * @size: total size of area
 * @block_size: minimum size of chunks;
 *	each page is a multiple of this size
169 * @returns: pointer to struct scatterlist for new
170 * buffer
171 **/
172static struct scatterlist *sep_alloc_sg_buf(
173 struct sep_device *sep,
174 size_t size,
175 size_t block_size)
176{
177 u32 nbr_pages;
178 u32 ct1;
179 void *buf;
180 size_t current_size;
181 size_t real_page_size;
182
183 struct scatterlist *sg, *sg_temp;
184
185 if (size == 0)
186 return NULL;
187
188 dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");
189
190 current_size = 0;
191 nbr_pages = 0;
192 real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
	/**
	 * The size of each page must be a multiple of the operation
	 * block size; increment by the adjusted page size until
	 * the total size is reached; that gives the number of
	 * pages
	 */
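	/*
	 * Worked example (illustrative): with 4096-byte pages,
	 * size = 5000 and block_size = 16, real_page_size is
	 * 4096 - (4096 % 16) = 4096, so the loop below runs twice
	 * and nbr_pages ends up as 2.
	 */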
199 while (current_size < size) {
200 current_size += real_page_size;
201 nbr_pages += 1;
202 }
203
204 sg = kmalloc((sizeof(struct scatterlist) * nbr_pages), GFP_ATOMIC);
205 if (!sg) {
206 dev_warn(&sep->pdev->dev, "Cannot allocate page for new sg\n");
207 return NULL;
208 }
209
210 sg_init_table(sg, nbr_pages);
211
212 current_size = 0;
213 sg_temp = sg;
214 for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
215 buf = (void *)get_zeroed_page(GFP_ATOMIC);
216 if (!buf) {
217 dev_warn(&sep->pdev->dev,
218 "Cannot allocate page for new buffer\n");
219 kfree(sg);
220 return NULL;
221 }
222
223 sg_set_buf(sg_temp, buf, real_page_size);
224 if ((size - current_size) > real_page_size) {
225 sg_temp->length = real_page_size;
226 current_size += real_page_size;
227 } else {
228 sg_temp->length = (size - current_size);
229 current_size = size;
230 }
231 sg_temp = sg_next(sg);
232 }
233 return sg;
234}
235
236/**
237 * sep_free_sg_buf -
238 * @sg: pointer to struct scatterlist; points to area to free
239 */
240static void sep_free_sg_buf(struct scatterlist *sg)
241{
242 struct scatterlist *sg_temp = sg;
243 while (sg_temp) {
244 free_page((unsigned long)sg_virt(sg_temp));
245 sg_temp = sg_next(sg_temp);
246 }
247 kfree(sg);
248}
249
250/**
251 * sep_copy_sg -
252 * @sep: pointer to struct sep_device
253 * @sg_src: pointer to struct scatterlist for source
254 * @sg_dst: pointer to struct scatterlist for destination
255 * @size: size (in bytes) of data to copy
256 *
257 * Copy data from one scatterlist to another; both must
258 * be the same size
259 */
260static void sep_copy_sg(
261 struct sep_device *sep,
262 struct scatterlist *sg_src,
263 struct scatterlist *sg_dst,
264 size_t size)
265{
266 u32 seg_size;
267 u32 in_offset, out_offset;
268
269 u32 count = 0;
270 struct scatterlist *sg_src_tmp = sg_src;
271 struct scatterlist *sg_dst_tmp = sg_dst;
272 in_offset = 0;
273 out_offset = 0;
274
275 dev_dbg(&sep->pdev->dev, "sep copy sg\n");
276
277 if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
278 return;
279
280 dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");
281
282 while (count < size) {
283 if ((sg_src_tmp->length - in_offset) >
284 (sg_dst_tmp->length - out_offset))
285 seg_size = sg_dst_tmp->length - out_offset;
286 else
287 seg_size = sg_src_tmp->length - in_offset;
288
289 if (seg_size > (size - count))
			seg_size = size - count;
291
292 memcpy(sg_virt(sg_dst_tmp) + out_offset,
293 sg_virt(sg_src_tmp) + in_offset,
294 seg_size);
295
296 in_offset += seg_size;
297 out_offset += seg_size;
298 count += seg_size;
299
300 if (in_offset >= sg_src_tmp->length) {
301 sg_src_tmp = sg_next(sg_src_tmp);
302 in_offset = 0;
303 }
304
305 if (out_offset >= sg_dst_tmp->length) {
306 sg_dst_tmp = sg_next(sg_dst_tmp);
307 out_offset = 0;
308 }
309 }
310}
311
/**
 * sep_oddball_pages -
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist - buffer to check
 * @data_size: total data size
 * @block_size: minimum block size; pages must be multiples of this size
 * @new_sg: pointer to location to put pointer to new sg area
 * @do_copy: 1 means do copy, 0 means do not copy
 * @returns: 1 if new scatterlist is needed; 0 if not needed;
 *	error value if operation failed
 *
 * The SEP device requires all pages to be multiples of the
 * minimum block size appropriate for the operation.
 * This function checks all pages; if any are oddball sizes
 * (not a multiple of the block size), it creates a new scatterlist.
 * If the do_copy parameter is set to 1, then a scatterlist
 * copy is performed. The pointer to the new scatterlist is
 * put into the address supplied by the new_sg parameter; if
 * no new scatterlist is needed, then a NULL is put into
 * the location at new_sg.
 */
334static int sep_oddball_pages(
335 struct sep_device *sep,
336 struct scatterlist *sg,
337 size_t data_size,
338 u32 block_size,
339 struct scatterlist **new_sg,
340 u32 do_copy)
341{
342 struct scatterlist *sg_temp;
343 u32 flag;
344 u32 nbr_pages, page_count;
345
346 dev_dbg(&sep->pdev->dev, "sep oddball\n");
347 if ((sg == NULL) || (data_size == 0) || (data_size < block_size))
348 return 0;
349
350 dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
351 flag = 0;
352 nbr_pages = 0;
353 page_count = 0;
354 sg_temp = sg;
355
356 while (sg_temp) {
357 nbr_pages += 1;
358 sg_temp = sg_next(sg_temp);
359 }
360
361 sg_temp = sg;
362 while ((sg_temp) && (flag == 0)) {
363 page_count += 1;
364 if (sg_temp->length % block_size)
365 flag = 1;
366 else
367 sg_temp = sg_next(sg_temp);
368 }
369
370 /* Do not process if last (or only) page is oddball */
371 if (nbr_pages == page_count)
372 flag = 0;
373
374 if (flag) {
375 dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
376 *new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
377 if (*new_sg == NULL) {
378 dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");
379 return -ENOMEM;
380 }
381
382 if (do_copy)
383 sep_copy_sg(sep, sg, *new_sg, data_size);
384
385 return 1;
386 } else {
387 return 0;
388 }
389}
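
/*
 * Example (illustrative): for a scatterlist of pages with lengths
 * 4096, 100 and 4096 and a block_size of 16, the middle page is an
 * oddball (100 % 16 != 0) and is not the last page, so a new
 * scatterlist is built via sep_alloc_sg_buf() and 1 is returned.
 * If only the final page had an odd length, 0 would be returned,
 * since an oddball tail is acceptable.
 */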
390
/**
 * sep_copy_offset_sg -
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @offset: offset into scatterlist memory
 * @dst: place to put data
 * @len: length of data
 * @returns: number of bytes copied
 *
 * This copies data from a scatterlist buffer, starting at
 * an offset from the beginning - it is needed for
 * handling tail data in hash
 */
404static size_t sep_copy_offset_sg(
405 struct sep_device *sep,
406 struct scatterlist *sg,
407 u32 offset,
408 void *dst,
409 u32 len)
410{
411 size_t page_start;
412 size_t page_end;
413 size_t offset_within_page;
414 size_t length_within_page;
415 size_t length_remaining;
416 size_t current_offset;
417
418 /* Find which page is beginning of segment */
419 page_start = 0;
420 page_end = sg->length;
421 while ((sg) && (offset > page_end)) {
422 page_start += sg->length;
423 sg = sg_next(sg);
424 if (sg)
425 page_end += sg->length;
426 }
427
428 if (sg == NULL)
429 return -ENOMEM;
430
431 offset_within_page = offset - page_start;
432 if ((sg->length - offset_within_page) >= len) {
433 /* All within this page */
434 memcpy(dst, sg_virt(sg) + offset_within_page, len);
435 return len;
436 } else {
437 /* Scattered multiple pages */
438 current_offset = 0;
439 length_remaining = len;
440 while ((sg) && (current_offset < len)) {
441 length_within_page = sg->length - offset_within_page;
442 if (length_within_page >= length_remaining) {
443 memcpy(dst+current_offset,
444 sg_virt(sg) + offset_within_page,
445 length_remaining);
446 length_remaining = 0;
447 current_offset = len;
448 } else {
449 memcpy(dst+current_offset,
450 sg_virt(sg) + offset_within_page,
451 length_within_page);
452 length_remaining -= length_within_page;
453 current_offset += length_within_page;
454 offset_within_page = 0;
455 sg = sg_next(sg);
456 }
457 }
458
459 if (sg == NULL)
460 return -ENOMEM;
461 }
462 return len;
463}
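
/*
 * Worked example (illustrative): with two 4096-byte pages in the
 * scatterlist, offset = 5000 and len = 100, the search loop advances
 * to the second page (page_start = 4096), offset_within_page becomes
 * 904, the whole 100 bytes fit within that page, and a single memcpy
 * is done before returning 100.
 */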
464
465/**
466 * partial_overlap -
467 * @src_ptr: source pointer
468 * @dst_ptr: destination pointer
469 * @nbytes: number of bytes
 * @returns: 0 for success; -EINVAL for failure
471 * We cannot have any partial overlap. Total overlap
472 * where src is the same as dst is okay
473 */
474static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
475{
476 /* Check for partial overlap */
477 if (src_ptr != dst_ptr) {
478 if (src_ptr < dst_ptr) {
479 if ((src_ptr + nbytes) > dst_ptr)
480 return -EINVAL;
481 } else {
482 if ((dst_ptr + nbytes) > src_ptr)
483 return -EINVAL;
484 }
485 }
486
487 return 0;
488}
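
/*
 * For instance, partial_overlap(buf, buf, n) returns 0 (an in-place
 * operation is total overlap), while partial_overlap(buf, buf + 1, n)
 * with n > 1 returns -EINVAL because the two regions partially
 * intersect.
 */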
489
490/* Debug - prints only if DEBUG is defined; follows kernel debug model */
491static void sep_dump(struct sep_device *sep, char *stg, void *start, int len)
492{
493#if 0
494 int ct1;
495 u8 *ptt;
496
497 dev_dbg(&sep->pdev->dev,
498 "Dump of %s starting at %08lx for %08x bytes\n",
499 stg, (unsigned long)start, len);
500 for (ct1 = 0; ct1 < len; ct1 += 1) {
501 ptt = (u8 *)(start + ct1);
502 dev_dbg(&sep->pdev->dev, "%02x ", *ptt);
503 if (ct1 % 16 == 15)
504 dev_dbg(&sep->pdev->dev, "\n");
505 }
506 dev_dbg(&sep->pdev->dev, "\n");
507#endif
508}
509
510/* Debug - prints only if DEBUG is defined; follows kernel debug model */
511static void sep_dump_sg(struct sep_device *sep, char *stg,
512 struct scatterlist *sg)
513{
514#if 0
515 int ct1, ct2;
516 u8 *ptt;
517
518 dev_dbg(&sep->pdev->dev, "Dump of scatterlist %s\n", stg);
519
520 ct1 = 0;
521 while (sg) {
522 dev_dbg(&sep->pdev->dev, "page %x\n size %x", ct1,
523 sg->length);
524 dev_dbg(&sep->pdev->dev, "phys addr is %lx",
525 (unsigned long)sg_phys(sg));
526 ptt = sg_virt(sg);
527 for (ct2 = 0; ct2 < sg->length; ct2 += 1) {
528 dev_dbg(&sep->pdev->dev, "byte %x is %02x\n",
529 ct2, (unsigned char)*(ptt + ct2));
530 }
531
532 ct1 += 1;
533 sg = sg_next(sg);
534 }
535 dev_dbg(&sep->pdev->dev, "\n");
536#endif
537}
538
539/* Debug - prints only if DEBUG is defined */
static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
{
543 unsigned char *cptr;
544 struct sep_aes_internal_context *aes_internal;
545 struct sep_des_internal_context *des_internal;
546 int ct1;
547
548 struct this_task_ctx *ta_ctx;
549 struct crypto_ablkcipher *tfm;
550 struct sep_system_ctx *sctx;
551
552 ta_ctx = ablkcipher_request_ctx(req);
553 tfm = crypto_ablkcipher_reqtfm(req);
554 sctx = crypto_ablkcipher_ctx(tfm);
555
556 dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
557 if ((ta_ctx->current_request == DES_CBC) &&
558 (ta_ctx->des_opmode == SEP_DES_CBC)) {
559
560 des_internal = (struct sep_des_internal_context *)
561 sctx->des_private_ctx.ctx_buf;
562 /* print vendor */
563 dev_dbg(&ta_ctx->sep_used->pdev->dev,
564 "sep - vendor iv for DES\n");
565 cptr = (unsigned char *)des_internal->iv_context;
566 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
567 dev_dbg(&ta_ctx->sep_used->pdev->dev,
568 "%02x\n", *(cptr + ct1));
569
570 /* print walk */
571 dev_dbg(&ta_ctx->sep_used->pdev->dev,
572 "sep - walk from kernel crypto iv for DES\n");
573 cptr = (unsigned char *)ta_ctx->walk.iv;
574 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
575 dev_dbg(&ta_ctx->sep_used->pdev->dev,
576 "%02x\n", *(cptr + ct1));
577 } else if ((ta_ctx->current_request == AES_CBC) &&
578 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
579
580 aes_internal = (struct sep_aes_internal_context *)
581 sctx->aes_private_ctx.cbuff;
582 /* print vendor */
583 dev_dbg(&ta_ctx->sep_used->pdev->dev,
584 "sep - vendor iv for AES\n");
585 cptr = (unsigned char *)aes_internal->aes_ctx_iv;
586 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
587 dev_dbg(&ta_ctx->sep_used->pdev->dev,
588 "%02x\n", *(cptr + ct1));
589
590 /* print walk */
591 dev_dbg(&ta_ctx->sep_used->pdev->dev,
592 "sep - walk from kernel crypto iv for AES\n");
593 cptr = (unsigned char *)ta_ctx->walk.iv;
594 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
595 dev_dbg(&ta_ctx->sep_used->pdev->dev,
596 "%02x\n", *(cptr + ct1));
597 }
598}
599
600/**
601 * RFC2451: Weak key check
602 * Returns: 1 (weak), 0 (not weak)
603 */
604static int sep_weak_key(const u8 *key, unsigned int keylen)
605{
606 static const u8 parity[] = {
607 8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
608 0, 8, 8, 0, 8, 0, 0, 8, 8,
609 0, 0, 8, 0, 8, 8, 3,
610 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
611 8, 0, 0, 8, 0, 8, 8, 0, 0,
612 8, 8, 0, 8, 0, 0, 8,
613 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
614 8, 0, 0, 8, 0, 8, 8, 0, 0,
615 8, 8, 0, 8, 0, 0, 8,
616 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
617 0, 8, 8, 0, 8, 0, 0, 8, 8,
618 0, 0, 8, 0, 8, 8, 0,
619 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
620 8, 0, 0, 8, 0, 8, 8, 0, 0,
621 8, 8, 0, 8, 0, 0, 8,
622 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
623 0, 8, 8, 0, 8, 0, 0, 8, 8,
624 0, 0, 8, 0, 8, 8, 0,
625 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
626 0, 8, 8, 0, 8, 0, 0, 8, 8,
627 0, 0, 8, 0, 8, 8, 0,
628 4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
629 8, 5, 0, 8, 0, 8, 8, 0, 0,
630 8, 8, 0, 8, 0, 6, 8,
631 };
632
633 u32 n, w;
634
635 n = parity[key[0]]; n <<= 4;
636 n |= parity[key[1]]; n <<= 4;
637 n |= parity[key[2]]; n <<= 4;
638 n |= parity[key[3]]; n <<= 4;
639 n |= parity[key[4]]; n <<= 4;
640 n |= parity[key[5]]; n <<= 4;
641 n |= parity[key[6]]; n <<= 4;
642 n |= parity[key[7]];
643 w = 0x88888888L;
644
645 /* 1 in 10^10 keys passes this test */
646 if (!((n - (w >> 3)) & w)) {
647 if (n < 0x41415151) {
648 if (n < 0x31312121) {
649 if (n < 0x14141515) {
650 /* 01 01 01 01 01 01 01 01 */
651 if (n == 0x11111111)
652 goto weak;
653 /* 01 1F 01 1F 01 0E 01 0E */
654 if (n == 0x13131212)
655 goto weak;
656 } else {
657 /* 01 E0 01 E0 01 F1 01 F1 */
658 if (n == 0x14141515)
659 goto weak;
660 /* 01 FE 01 FE 01 FE 01 FE */
661 if (n == 0x16161616)
662 goto weak;
663 }
664 } else {
665 if (n < 0x34342525) {
666 /* 1F 01 1F 01 0E 01 0E 01 */
667 if (n == 0x31312121)
668 goto weak;
669 /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
670 if (n == 0x33332222)
671 goto weak;
672 } else {
673 /* 1F E0 1F E0 0E F1 0E F1 */
674 if (n == 0x34342525)
675 goto weak;
676 /* 1F FE 1F FE 0E FE 0E FE */
677 if (n == 0x36362626)
678 goto weak;
679 }
680 }
681 } else {
682 if (n < 0x61616161) {
683 if (n < 0x44445555) {
684 /* E0 01 E0 01 F1 01 F1 01 */
685 if (n == 0x41415151)
686 goto weak;
687 /* E0 1F E0 1F F1 0E F1 0E */
688 if (n == 0x43435252)
689 goto weak;
690 } else {
691 /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
692 if (n == 0x44445555)
693 goto weak;
694 /* E0 FE E0 FE F1 FE F1 FE */
695 if (n == 0x46465656)
696 goto weak;
697 }
698 } else {
699 if (n < 0x64646565) {
700 /* FE 01 FE 01 FE 01 FE 01 */
701 if (n == 0x61616161)
702 goto weak;
703 /* FE 1F FE 1F FE 0E FE 0E */
704 if (n == 0x63636262)
705 goto weak;
706 } else {
707 /* FE E0 FE E0 FE F1 FE F1 */
708 if (n == 0x64646565)
709 goto weak;
710 /* FE FE FE FE FE FE FE FE */
711 if (n == 0x66666666)
712 goto weak;
713 }
714 }
715 }
716 }
717 return 0;
718weak:
719 return 1;
720}
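
/*
 * Example (taken from the inline comments in the table above): the
 * all-0x01 DES key 01 01 01 01 01 01 01 01 packs its per-byte parity
 * classes into n = 0x11111111 and is reported weak.
 */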
/**
 * sep_sg_nents -
 * @sg: pointer to struct scatterlist
 * @returns: number of entries in the scatterlist
 */
724static u32 sep_sg_nents(struct scatterlist *sg)
725{
726 u32 ct1 = 0;
727 while (sg) {
728 ct1 += 1;
729 sg = sg_next(sg);
730 }
731
732 return ct1;
733}
734
735/**
736 * sep_start_msg -
737 * @ta_ctx: pointer to struct this_task_ctx
738 * @returns: offset to place for the next word in the message
739 * Set up pointer in message pool for new message
740 */
741static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
742{
743 u32 *word_ptr;
744 ta_ctx->msg_len_words = 2;
745 ta_ctx->msgptr = ta_ctx->msg;
746 memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
747 ta_ctx->msgptr += sizeof(u32) * 2;
748 word_ptr = (u32 *)ta_ctx->msgptr;
749 *word_ptr = SEP_START_MSG_TOKEN;
750 return sizeof(u32) * 2;
751}
752
/**
 * sep_end_msg -
 * @ta_ctx: pointer to struct this_task_ctx
 * @msg_offset: current message offset
 * End the message; set the length and the (currently zero) CRC.
 * The completed message is handed to the SEP later, from
 * sep_crypto_take_sep
 */
761static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
762{
763 u32 *word_ptr;
764 /* Msg size goes into msg after token */
765 ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
766 word_ptr = (u32 *)ta_ctx->msgptr;
767 word_ptr += 1;
768 *word_ptr = ta_ctx->msg_len_words;
769
770 /* CRC (currently 0) goes at end of msg */
771 word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
772 *word_ptr = 0;
773}
774
775/**
776 * sep_start_inbound_msg -
777 * @ta_ctx: pointer to struct this_task_ctx
778 * @msg_offset: offset to place for the next word in the message
779 * @returns: 0 for success; error value for failure
780 * Set up pointer in message pool for inbound message
781 */
782static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
783{
784 u32 *word_ptr;
785 u32 token;
786 u32 error = SEP_OK;
787
788 *msg_offset = sizeof(u32) * 2;
789 word_ptr = (u32 *)ta_ctx->msgptr;
790 token = *word_ptr;
791 ta_ctx->msg_len_words = *(word_ptr + 1);
792
793 if (token != SEP_START_MSG_TOKEN) {
794 error = SEP_INVALID_START;
795 goto end_function;
796 }
797
798end_function:
799
800 return error;
801}
802
803/**
804 * sep_write_msg -
805 * @ta_ctx: pointer to struct this_task_ctx
806 * @in_addr: pointer to start of parameter
807 * @size: size of parameter to copy (in bytes)
 * @max_size: size to move up offset; the SEP message is in word sizes
 * @msg_offset: pointer to current offset (is updated)
 * @byte_array: flag to indicate whether endianness must be changed
811 * Copies data into the message area from caller
812 */
813static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
814 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
815{
816 u32 *word_ptr;
817 void *void_ptr;
818 void_ptr = ta_ctx->msgptr + *msg_offset;
819 word_ptr = (u32 *)void_ptr;
820 memcpy(void_ptr, in_addr, size);
821 *msg_offset += max_size;
822
823 /* Do we need to manipulate endian? */
824 if (byte_array) {
825 u32 i;
826 for (i = 0; i < ((size + 3) / 4); i += 1)
827 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
828 }
829}
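
/*
 * Illustrative note (assuming CHG_ENDIAN, defined in the SEP driver
 * headers, byte-swaps a 32-bit word): with byte_array set, a copied
 * word holding 0x04030201 is rewritten in place as 0x01020304 before
 * the message is handed to the SEP.
 */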
830
831/**
832 * sep_make_header
833 * @ta_ctx: pointer to struct this_task_ctx
834 * @msg_offset: pointer to current offset (is updated)
835 * @op_code: op code to put into message
836 * Puts op code into message and updates offset
837 */
838static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
839 u32 op_code)
840{
841 u32 *word_ptr;
842
843 *msg_offset = sep_start_msg(ta_ctx);
844 word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
845 *word_ptr = op_code;
846 *msg_offset += sizeof(u32);
847}
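
/*
 * Resulting message layout, relative to ta_ctx->msgptr (a sketch
 * inferred from the helpers above; offsets in 32-bit words):
 *
 *   word 0:  SEP_START_MSG_TOKEN          (sep_start_msg)
 *   word 1:  message length in words      (filled in by sep_end_msg)
 *   word 2:  op code                      (sep_make_header)
 *   word 3+: parameters and context       (sep_write_msg and friends)
 *   last word: CRC, currently always 0    (sep_end_msg)
 */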
851/**
852 * sep_read_msg -
853 * @ta_ctx: pointer to struct this_task_ctx
854 * @in_addr: pointer to start of parameter
855 * @size: size of parameter to copy (in bytes)
 * @max_size: size to move up offset; the SEP message is in word sizes
 * @msg_offset: pointer to current offset (is updated)
 * @byte_array: flag to indicate whether endianness must be changed
859 * Copies data out of the message area to caller
860 */
861static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
862 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
863{
864 u32 *word_ptr;
865 void *void_ptr;
866 void_ptr = ta_ctx->msgptr + *msg_offset;
867 word_ptr = (u32 *)void_ptr;
868
869 /* Do we need to manipulate endian? */
870 if (byte_array) {
871 u32 i;
872 for (i = 0; i < ((size + 3) / 4); i += 1)
873 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
874 }
875
876 memcpy(in_addr, void_ptr, size);
877 *msg_offset += max_size;
878}
879
880/**
881 * sep_verify_op -
882 * @ta_ctx: pointer to struct this_task_ctx
883 * @op_code: expected op_code
884 * @msg_offset: pointer to current offset (is updated)
885 * @returns: 0 for success; error for failure
886 */
887static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
888 u32 *msg_offset)
889{
890 u32 error;
891 u32 in_ary[2];
892
893 struct sep_device *sep = ta_ctx->sep_used;
894
895 dev_dbg(&sep->pdev->dev, "dumping return message\n");
896 error = sep_start_inbound_msg(ta_ctx, msg_offset);
897 if (error) {
898 dev_warn(&sep->pdev->dev,
899 "sep_start_inbound_msg error\n");
900 return error;
901 }
902
903 sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
904 msg_offset, 0);
905
906 if (in_ary[0] != op_code) {
907 dev_warn(&sep->pdev->dev,
908 "sep got back wrong opcode\n");
909 dev_warn(&sep->pdev->dev,
910 "got back %x; expected %x\n",
911 in_ary[0], op_code);
912 return SEP_WRONG_OPCODE;
913 }
914
915 if (in_ary[1] != SEP_OK) {
916 dev_warn(&sep->pdev->dev,
917 "sep execution error\n");
918 dev_warn(&sep->pdev->dev,
919 "got back %x; expected %x\n",
920 in_ary[1], SEP_OK);
921 return in_ary[0];
922 }
923
	return 0;
925}
926
927/**
928 * sep_read_context -
929 * @ta_ctx: pointer to struct this_task_ctx
930 * @msg_offset: point to current place in SEP msg; is updated
931 * @dst: pointer to place to put the context
 * @len: size of the context structure (differs for crypto/hash)
933 * This function reads the context from the msg area
934 * There is a special way the vendor needs to have the maximum
935 * length calculated so that the msg_offset is updated properly;
936 * it skips over some words in the msg area depending on the size
937 * of the context
938 */
939static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
940 void *dst, u32 len)
941{
942 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
943 sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
944}
945
946/**
947 * sep_write_context -
948 * @ta_ctx: pointer to struct this_task_ctx
949 * @msg_offset: point to current place in SEP msg; is updated
950 * @src: pointer to the current context
 * @len: size of the context structure (differs for crypto/hash)
952 * This function writes the context to the msg area
953 * There is a special way the vendor needs to have the maximum
954 * length calculated so that the msg_offset is updated properly;
955 * it skips over some words in the msg area depending on the size
956 * of the context
957 */
958static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
959 void *src, u32 len)
960{
961 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
962 sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
963}
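
/*
 * The max_length computation above rounds len up to a whole number of
 * 32-bit words; for example, an 86-byte context gives
 * (86 + 3) / 4 * 4 = 88, so msg_offset advances by 88 even though
 * only 86 bytes are copied.
 */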
964
965/**
966 * sep_clear_out -
967 * @ta_ctx: pointer to struct this_task_ctx
968 * Clear out crypto related values in sep device structure
969 * to enable device to be used by anyone; either kernel
970 * crypto or userspace app via middleware
971 */
972static void sep_clear_out(struct this_task_ctx *ta_ctx)
973{
974 if (ta_ctx->src_sg_hold) {
975 sep_free_sg_buf(ta_ctx->src_sg_hold);
976 ta_ctx->src_sg_hold = NULL;
977 }
978
979 if (ta_ctx->dst_sg_hold) {
980 sep_free_sg_buf(ta_ctx->dst_sg_hold);
981 ta_ctx->dst_sg_hold = NULL;
982 }
983
984 ta_ctx->src_sg = NULL;
985 ta_ctx->dst_sg = NULL;
986
987 sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);
988
989 if (ta_ctx->i_own_sep) {
990 /**
991 * The following unlocks the sep and makes it available
992 * to any other application
		 * First, null out crypto entries in sep before releasing it
994 */
995 ta_ctx->sep_used->current_hash_req = NULL;
996 ta_ctx->sep_used->current_cypher_req = NULL;
997 ta_ctx->sep_used->current_request = 0;
998 ta_ctx->sep_used->current_hash_stage = 0;
999 ta_ctx->sep_used->ta_ctx = NULL;
1000 ta_ctx->sep_used->in_kernel = 0;
1001
1002 ta_ctx->call_status.status = 0;
1003
		/* Remove anything confidential */
1005 memset(ta_ctx->sep_used->shared_addr, 0,
1006 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1007
1008 sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);
1009
1010#ifdef SEP_ENABLE_RUNTIME_PM
1011 ta_ctx->sep_used->in_use = 0;
1012 pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
1013 pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
1014#endif
1015
1016 clear_bit(SEP_WORKING_LOCK_BIT,
1017 &ta_ctx->sep_used->in_use_flags);
1018 ta_ctx->sep_used->pid_doing_transaction = 0;
1019
1020 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1021 "[PID%d] waking up next transaction\n",
1022 current->pid);
1023
1024 clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
1025 &ta_ctx->sep_used->in_use_flags);
1026 wake_up(&ta_ctx->sep_used->event_transactions);
1027
1028 ta_ctx->i_own_sep = 0;
1029 }
1030}
1031
1032/**
1033 * Release crypto infrastructure from EINPROGRESS and
1034 * clear sep_dev so that SEP is available to anyone
1035 */
1036static void sep_crypto_release(struct sep_system_ctx *sctx,
1037 struct this_task_ctx *ta_ctx, u32 error)
1038{
1039 struct ahash_request *hash_req = ta_ctx->current_hash_req;
1040 struct ablkcipher_request *cypher_req =
1041 ta_ctx->current_cypher_req;
1042 struct sep_device *sep = ta_ctx->sep_used;
1043
1044 sep_clear_out(ta_ctx);
1045
1046 /**
1047 * This may not yet exist depending when we
1048 * chose to bail out. If it does exist, set
1049 * it to 1
1050 */
1051 if (ta_ctx->are_we_done_yet != NULL)
1052 *ta_ctx->are_we_done_yet = 1;
1053
1054 if (cypher_req != NULL) {
1055 if ((sctx->key_sent == 1) ||
1056 ((error != 0) && (error != -EINPROGRESS))) {
1057 if (cypher_req->base.complete == NULL) {
1058 dev_dbg(&sep->pdev->dev,
1059 "release is null for cypher!");
1060 } else {
1061 cypher_req->base.complete(
1062 &cypher_req->base, error);
1063 }
1064 }
1065 }
1066
1067 if (hash_req != NULL) {
1068 if (hash_req->base.complete == NULL) {
1069 dev_dbg(&sep->pdev->dev,
1070 "release is null for hash!");
1071 } else {
1072 hash_req->base.complete(
1073 &hash_req->base, error);
1074 }
1075 }
1076}
1077
/**
 * This is where we grab the sep itself and tell it to do something.
 * It will sleep if the sep is currently busy; it returns 0 if the
 * sep is now ours, or an error value if there were problems
 */
1083 */
1084static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
1085{
1086 struct sep_device *sep = ta_ctx->sep_used;
1087 int result;
1088 struct sep_msgarea_hdr *my_msg_header;
1089
1090 my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;
1091
1092 /* add to status queue */
1093 ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
1094 ta_ctx->nbytes, current->pid,
1095 current->comm, sizeof(current->comm));
1096
1097 if (!ta_ctx->queue_elem) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] updating queue status error\n", current->pid);
1100 return -EINVAL;
1101 }
1102
1103 /* get the device; this can sleep */
1104 result = sep_wait_transaction(sep);
1105 if (result)
1106 return result;
1107
1108 if (sep_dev->power_save_setup == 1)
1109 pm_runtime_get_sync(&sep_dev->pdev->dev);
1110
1111 /* Copy in the message */
1112 memcpy(sep->shared_addr, ta_ctx->msg,
1113 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1114
1115 /* Copy in the dcb information if there is any */
1116 if (ta_ctx->dcb_region) {
1117 result = sep_activate_dcb_dmatables_context(sep,
1118 &ta_ctx->dcb_region, &ta_ctx->dmatables_region,
1119 ta_ctx->dma_ctx);
1120 if (result)
1121 return result;
1122 }
1123
1124 /* Mark the device so we know how to finish the job in the tasklet */
1125 if (ta_ctx->current_hash_req)
1126 sep->current_hash_req = ta_ctx->current_hash_req;
1127 else
1128 sep->current_cypher_req = ta_ctx->current_cypher_req;
1129
1130 sep->current_request = ta_ctx->current_request;
1131 sep->current_hash_stage = ta_ctx->current_hash_stage;
1132 sep->ta_ctx = ta_ctx;
1133 sep->in_kernel = 1;
1134 ta_ctx->i_own_sep = 1;
1135
1136 /* need to set bit first to avoid race condition with interrupt */
1137 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);
1138
1139 result = sep_send_command_handler(sep);
1140
1141 dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
1142 current->pid);
1143
1144 if (!result)
1145 dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
1146 current->pid);
1147 else {
		dev_dbg(&sep->pdev->dev, "[PID%d]: can't send command\n",
1149 current->pid);
1150 clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1151 &ta_ctx->call_status.status);
1152 }
1153
1154 return result;
1155}
1156
1157/**
1158 * This function sets things up for a crypto data block process
1159 * This does all preparation, but does not try to grab the
1160 * sep
1161 * @req: pointer to struct ablkcipher_request
1162 * returns: 0 if all went well, non zero if error
1163 */
1164static int sep_crypto_block_data(struct ablkcipher_request *req)
1165{
1166
1167 int int_error;
1168 u32 msg_offset;
1169 static u32 msg[10];
1170 void *src_ptr;
1171 void *dst_ptr;
1172
1173 static char small_buf[100];
1174 ssize_t copy_result;
1175 int result;
1176
1177 struct scatterlist *new_sg;
1178 struct this_task_ctx *ta_ctx;
1179 struct crypto_ablkcipher *tfm;
1180 struct sep_system_ctx *sctx;
1181
1182 struct sep_des_internal_context *des_internal;
1183 struct sep_aes_internal_context *aes_internal;
1184
1185 ta_ctx = ablkcipher_request_ctx(req);
1186 tfm = crypto_ablkcipher_reqtfm(req);
1187 sctx = crypto_ablkcipher_ctx(tfm);
1188
1189 /* start the walk on scatterlists */
1190 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1191 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
1192 req->nbytes);
1193
1194 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1195 if (int_error) {
1196 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1197 int_error);
1198 return -ENOMEM;
1199 }
1200
1201 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1202 "crypto block: src is %lx dst is %lx\n",
1203 (unsigned long)req->src, (unsigned long)req->dst);
1204
	/* Make sure all pages are an even multiple of the block size */
1206 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
1207 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);
1208
1209 if (int_error < 0) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
1211 return -ENOMEM;
1212 } else if (int_error == 1) {
1213 ta_ctx->src_sg = new_sg;
1214 ta_ctx->src_sg_hold = new_sg;
1215 } else {
1216 ta_ctx->src_sg = req->src;
1217 ta_ctx->src_sg_hold = NULL;
1218 }
1219
1220 int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
1221 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);
1222
1223 if (int_error < 0) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error %x\n",
			int_error);
1226 return -ENOMEM;
1227 } else if (int_error == 1) {
1228 ta_ctx->dst_sg = new_sg;
1229 ta_ctx->dst_sg_hold = new_sg;
1230 } else {
1231 ta_ctx->dst_sg = req->dst;
1232 ta_ctx->dst_sg_hold = NULL;
1233 }
1234
1235 /* set nbytes for queue status */
1236 ta_ctx->nbytes = req->nbytes;
1237
1238 /* Key already done; this is for data */
1239 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");
1240
1241 sep_dump_sg(ta_ctx->sep_used,
1242 "block sg in", ta_ctx->src_sg);
1243
1244 /* check for valid data and proper spacing */
1245 src_ptr = sg_virt(ta_ctx->src_sg);
1246 dst_ptr = sg_virt(ta_ctx->dst_sg);
1247
1248 if (!src_ptr || !dst_ptr ||
1249 (ta_ctx->current_cypher_req->nbytes %
1250 crypto_ablkcipher_blocksize(tfm))) {
1251
1252 dev_warn(&ta_ctx->sep_used->pdev->dev,
1253 "cipher block size odd\n");
1254 dev_warn(&ta_ctx->sep_used->pdev->dev,
1255 "cipher block size is %x\n",
1256 crypto_ablkcipher_blocksize(tfm));
1257 dev_warn(&ta_ctx->sep_used->pdev->dev,
1258 "cipher data size is %x\n",
1259 ta_ctx->current_cypher_req->nbytes);
1260 return -EINVAL;
1261 }
1262
1263 if (partial_overlap(src_ptr, dst_ptr,
1264 ta_ctx->current_cypher_req->nbytes)) {
1265 dev_warn(&ta_ctx->sep_used->pdev->dev,
1266 "block partial overlap\n");
1267 return -EINVAL;
1268 }
1269
1270 /* Put together the message */
1271 sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);
1272
1273 /* If des, and size is 1 block, put directly in msg */
1274 if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
1275 (req->nbytes == crypto_ablkcipher_blocksize(tfm))) {
1276
1277 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1278 "writing out one block des\n");
1279
1280 copy_result = sg_copy_to_buffer(
1281 ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
1282 small_buf, crypto_ablkcipher_blocksize(tfm));
1283
1284 if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
1285 dev_warn(&ta_ctx->sep_used->pdev->dev,
				"des block copy failed\n");
1287 return -ENOMEM;
1288 }
1289
1290 /* Put data into message */
1291 sep_write_msg(ta_ctx, small_buf,
1292 crypto_ablkcipher_blocksize(tfm),
1293 crypto_ablkcipher_blocksize(tfm) * 2,
1294 &msg_offset, 1);
1295
1296 /* Put size into message */
1297 sep_write_msg(ta_ctx, &req->nbytes,
1298 sizeof(u32), sizeof(u32), &msg_offset, 0);
1299 } else {
1300 /* Otherwise, fill out dma tables */
1301 ta_ctx->dcb_input_data.app_in_address = src_ptr;
1302 ta_ctx->dcb_input_data.data_in_size = req->nbytes;
1303 ta_ctx->dcb_input_data.app_out_address = dst_ptr;
1304 ta_ctx->dcb_input_data.block_size =
1305 crypto_ablkcipher_blocksize(tfm);
1306 ta_ctx->dcb_input_data.tail_block_size = 0;
1307 ta_ctx->dcb_input_data.is_applet = 0;
1308 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
1309 ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;
1310
1311 result = sep_create_dcb_dmatables_context_kernel(
1312 ta_ctx->sep_used,
1313 &ta_ctx->dcb_region,
1314 &ta_ctx->dmatables_region,
1315 &ta_ctx->dma_ctx,
1316 &ta_ctx->dcb_input_data,
1317 1);
1318 if (result) {
1319 dev_warn(&ta_ctx->sep_used->pdev->dev,
1320 "crypto dma table create failed\n");
1321 return -EINVAL;
1322 }
1323
1324 /* Portion of msg is nulled (no data) */
1325 msg[0] = (u32)0;
1326 msg[1] = (u32)0;
1327 msg[2] = (u32)0;
1328 msg[3] = (u32)0;
1329 msg[4] = (u32)0;
1330 sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
1331 sizeof(u32) * 5, &msg_offset, 0);
1332 }
1333
1334 /**
1335 * Before we write the message, we need to overwrite the
1336 * vendor's IV with the one from our own ablkcipher walk
1337 * iv because this is needed for dm-crypt
1338 */
1339 sep_dump_ivs(req, "sending data block to sep\n");
1340 if ((ta_ctx->current_request == DES_CBC) &&
1341 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1342
1343 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1344 "overwrite vendor iv on DES\n");
1345 des_internal = (struct sep_des_internal_context *)
1346 sctx->des_private_ctx.ctx_buf;
1347 memcpy((void *)des_internal->iv_context,
1348 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1349 } else if ((ta_ctx->current_request == AES_CBC) &&
1350 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1351
1352 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1353 "overwrite vendor iv on AES\n");
1354 aes_internal = (struct sep_aes_internal_context *)
1355 sctx->aes_private_ctx.cbuff;
1356 memcpy((void *)aes_internal->aes_ctx_iv,
1357 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1358 }
1359
1360 /* Write context into message */
1361 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1362 sep_write_context(ta_ctx, &msg_offset,
1363 &sctx->des_private_ctx,
1364 sizeof(struct sep_des_private_context));
1365 sep_dump(ta_ctx->sep_used, "ctx to block des",
1366 &sctx->des_private_ctx, 40);
1367 } else {
1368 sep_write_context(ta_ctx, &msg_offset,
1369 &sctx->aes_private_ctx,
1370 sizeof(struct sep_aes_private_context));
1371 sep_dump(ta_ctx->sep_used, "ctx to block aes",
1372 &sctx->aes_private_ctx, 20);
1373 }
1374
1375 /* conclude message */
1376 sep_end_msg(ta_ctx, msg_offset);
1377
	/* Parent (caller) is now ready to tell the sep to go ahead */
1379 return 0;
1380}
1381
1382
1383/**
1384 * This function sets things up for a crypto key submit process
1385 * This does all preparation, but does not try to grab the
1386 * sep
1387 * @req: pointer to struct ablkcipher_request
1388 * returns: 0 if all went well, non zero if error
1389 */
1390static int sep_crypto_send_key(struct ablkcipher_request *req)
1391{
1392
1393 int int_error;
1394 u32 msg_offset;
1395 static u32 msg[10];
1396
1397 u32 max_length;
1398 struct this_task_ctx *ta_ctx;
1399 struct crypto_ablkcipher *tfm;
1400 struct sep_system_ctx *sctx;
1401
1402 ta_ctx = ablkcipher_request_ctx(req);
1403 tfm = crypto_ablkcipher_reqtfm(req);
1404 sctx = crypto_ablkcipher_ctx(tfm);
1405
1406 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");
1407
1408 /* start the walk on scatterlists */
1409 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1410 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1411 "sep crypto block data size of %x\n", req->nbytes);
1412
1413 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1414 if (int_error) {
1415 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1416 int_error);
1417 return -ENOMEM;
1418 }
1419
1420 /* check iv */
1421 if ((ta_ctx->current_request == DES_CBC) &&
1422 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1423 if (!ta_ctx->walk.iv) {
1424 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1425 return -EINVAL;
1426 }
1427
1428 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
1429 sep_dump(ta_ctx->sep_used, "iv",
1430 ta_ctx->iv, SEP_DES_IV_SIZE_BYTES);
1431 }
1432
1433 if ((ta_ctx->current_request == AES_CBC) &&
1434 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1435 if (!ta_ctx->walk.iv) {
1436 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1437 return -EINVAL;
1438 }
1439
1440 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
1441 sep_dump(ta_ctx->sep_used, "iv",
1442 ta_ctx->iv, SEP_AES_IV_SIZE_BYTES);
1443 }
1444
1445 /* put together message to SEP */
1446 /* Start with op code */
1447 sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);
1448
1449 /* now deal with IV */
1450 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1451 if (ta_ctx->des_opmode == SEP_DES_CBC) {
1452 sep_write_msg(ta_ctx, ta_ctx->iv,
1453 SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
1454 &msg_offset, 1);
1455 sep_dump(ta_ctx->sep_used, "initial IV",
1456 ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
1457 } else {
1458 /* Skip if ECB */
1459 msg_offset += 4 * sizeof(u32);
1460 }
1461 } else {
1462 max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
1463 sizeof(u32)) * sizeof(u32);
1464 if (ta_ctx->aes_opmode == SEP_AES_CBC) {
1465 sep_write_msg(ta_ctx, ta_ctx->iv,
1466 SEP_AES_IV_SIZE_BYTES, max_length,
1467 &msg_offset, 1);
1468 sep_dump(ta_ctx->sep_used, "initial IV",
1469 ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
1470 } else {
1471 /* Skip if ECB */
1472 msg_offset += max_length;
1473 }
1474 }
1475
1476 /* load the key */
1477 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1478 sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
1479 sizeof(u32) * 8, sizeof(u32) * 8,
1480 &msg_offset, 1);
1481
1482 msg[0] = (u32)sctx->des_nbr_keys;
1483 msg[1] = (u32)ta_ctx->des_encmode;
1484 msg[2] = (u32)ta_ctx->des_opmode;
1485
1486 sep_write_msg(ta_ctx, (void *)msg,
1487 sizeof(u32) * 3, sizeof(u32) * 3,
1488 &msg_offset, 0);
1489 } else {
1490 sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
1491 sctx->keylen,
1492 SEP_AES_MAX_KEY_SIZE_BYTES,
1493 &msg_offset, 1);
1494
1495 msg[0] = (u32)sctx->aes_key_size;
1496 msg[1] = (u32)ta_ctx->aes_encmode;
1497 msg[2] = (u32)ta_ctx->aes_opmode;
1498 msg[3] = (u32)0; /* Secret key is not used */
1499 sep_write_msg(ta_ctx, (void *)msg,
1500 sizeof(u32) * 4, sizeof(u32) * 4,
1501 &msg_offset, 0);
1502 }
1503
1504 /* conclude message */
1505 sep_end_msg(ta_ctx, msg_offset);
1506
	/* Parent (caller) is now ready to tell the sep to go ahead */
1508 return 0;
1509}
1510
1511
1512/* This needs to be run as a work queue as it can be put asleep */
1513static void sep_crypto_block(void *data)
1514{
1515 unsigned long end_time;
1516
1517 int result;
1518
1519 struct ablkcipher_request *req;
1520 struct this_task_ctx *ta_ctx;
1521 struct crypto_ablkcipher *tfm;
1522 struct sep_system_ctx *sctx;
1523 int are_we_done_yet;
1524
1525 req = (struct ablkcipher_request *)data;
1526 ta_ctx = ablkcipher_request_ctx(req);
1527 tfm = crypto_ablkcipher_reqtfm(req);
1528 sctx = crypto_ablkcipher_ctx(tfm);
1529
1530 ta_ctx->are_we_done_yet = &are_we_done_yet;
1531
1532 pr_debug("sep_crypto_block\n");
1533 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
1534 tfm, sctx, ta_ctx);
1535 pr_debug("key_sent is %d\n", sctx->key_sent);
1536
1537 /* do we need to send the key */
1538 if (sctx->key_sent == 0) {
1539 are_we_done_yet = 0;
1540 result = sep_crypto_send_key(req); /* prep to send key */
1541 if (result != 0) {
1542 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1543 "could not prep key %x\n", result);
1544 sep_crypto_release(sctx, ta_ctx, result);
1545 return;
1546 }
1547
1548 result = sep_crypto_take_sep(ta_ctx);
1549 if (result) {
1550 dev_warn(&ta_ctx->sep_used->pdev->dev,
1551 "sep_crypto_take_sep for key send failed\n");
1552 sep_crypto_release(sctx, ta_ctx, result);
1553 return;
1554 }
1555
1556 /* now we sit and wait up to a fixed time for completion */
1557 end_time = jiffies + (WAIT_TIME * HZ);
1558 while ((time_before(jiffies, end_time)) &&
1559 (are_we_done_yet == 0))
1560 schedule();
1561
1562 /* Done waiting; still not done yet? */
1563 if (are_we_done_yet == 0) {
1564 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1565 "Send key job never got done\n");
1566 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1567 return;
1568 }
1569
1570 /* Set the key sent variable so this can be skipped later */
1571 sctx->key_sent = 1;
1572 }
1573
1574 /* Key sent (or maybe not if we did not have to), now send block */
1575 are_we_done_yet = 0;
1576
1577 result = sep_crypto_block_data(req);
1578
1579 if (result != 0) {
1580 dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"could not prep send block %x\n", result);
1582 sep_crypto_release(sctx, ta_ctx, result);
1583 return;
1584 }
1585
1586 result = sep_crypto_take_sep(ta_ctx);
1587 if (result) {
1588 dev_warn(&ta_ctx->sep_used->pdev->dev,
1589 "sep_crypto_take_sep for block send failed\n");
1590 sep_crypto_release(sctx, ta_ctx, result);
1591 return;
1592 }
1593
1594 /* now we sit and wait up to a fixed time for completion */
1595 end_time = jiffies + (WAIT_TIME * HZ);
1596 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
1597 schedule();
1598
1599 /* Done waiting; still not done yet? */
1600 if (are_we_done_yet == 0) {
1601 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1602 "Send block job never got done\n");
1603 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1604 return;
1605 }
1606
1607 /* That's it; entire thing done, get out of queue */
1608
1609 pr_debug("crypto_block leaving\n");
1610 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
1611}
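
/*
 * Note on the completion idiom above: are_we_done_yet lives on this
 * worker's stack, ta_ctx->are_we_done_yet points at it, and the SEP
 * tasklet (sep_finish leading to sep_crypto_release) sets it to 1;
 * the loop simply yields via schedule() until that happens or until
 * WAIT_TIME seconds' worth of jiffies elapse.
 */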
1612
1613/**
1614 * Post operation (after interrupt) for crypto block
1615 */
1616static u32 crypto_post_op(struct sep_device *sep)
1617{
1619 u32 u32_error;
1620 u32 msg_offset;
1621
1622 ssize_t copy_result;
1623 static char small_buf[100];
1624
1625 struct ablkcipher_request *req;
1626 struct this_task_ctx *ta_ctx;
1627 struct sep_system_ctx *sctx;
1628 struct crypto_ablkcipher *tfm;
1629
1630 struct sep_des_internal_context *des_internal;
1631 struct sep_aes_internal_context *aes_internal;
1632
1633 if (!sep->current_cypher_req)
1634 return -EINVAL;
1635
1636 /* hold req since we need to submit work after clearing sep */
1637 req = sep->current_cypher_req;
1638
1639 ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
1640 tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
1641 sctx = crypto_ablkcipher_ctx(tfm);
1642
1643 pr_debug("crypto_post op\n");
1644 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1645 sctx->key_sent, tfm, sctx, ta_ctx);
1646
1647 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
1648 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");
1649 crypto_sep_dump_message(ta_ctx->sep_used, ta_ctx->msg);
1650
1651 /* first bring msg from shared area to local area */
1652 memcpy(ta_ctx->msg, sep->shared_addr,
1653 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1654
	/* Is this the result of performing init (sending the key to SEP)? */
1656 if (sctx->key_sent == 0) {
1657
1658 /* Did SEP do it okay */
1659 u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
1660 &msg_offset);
1661 if (u32_error) {
1662 dev_warn(&ta_ctx->sep_used->pdev->dev,
				"cipher init error %x\n", u32_error);
1664 sep_crypto_release(sctx, ta_ctx, u32_error);
1665 return u32_error;
1666 }
1667
1668 /* Read Context */
1669 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1670 sep_read_context(ta_ctx, &msg_offset,
1671 &sctx->des_private_ctx,
1672 sizeof(struct sep_des_private_context));
1673
1674 sep_dump(ta_ctx->sep_used, "ctx init des",
1675 &sctx->des_private_ctx, 40);
1676 } else {
1677 sep_read_context(ta_ctx, &msg_offset,
1678 &sctx->aes_private_ctx,
1679 sizeof(struct sep_aes_private_context));
1680
1681 sep_dump(ta_ctx->sep_used, "ctx init aes",
1682 &sctx->aes_private_ctx, 20);
1683 }
1684
1685 sep_dump_ivs(req, "after sending key to sep\n");
1686
1687 /* key sent went okay; release sep, and set are_we_done_yet */
1688 sctx->key_sent = 1;
1689 sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);
1690
1691 } else {
1692
1693 /**
1694 * This is the result of a block request
1695 */
1696 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1697 "crypto_post_op block response\n");
1698
1699 u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
1700 &msg_offset);
1701
1702 if (u32_error) {
1703 dev_warn(&ta_ctx->sep_used->pdev->dev,
1704 "sep block error %x\n", u32_error);
1705 sep_crypto_release(sctx, ta_ctx, u32_error);
1706 return -EINVAL;
1707 }
1708
1709 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1710
1711 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1712 "post op for DES\n");
1713
1714 /* special case for 1 block des */
1715 if (sep->current_cypher_req->nbytes ==
1716 crypto_ablkcipher_blocksize(tfm)) {
1717
1718 sep_read_msg(ta_ctx, small_buf,
1719 crypto_ablkcipher_blocksize(tfm),
1720 crypto_ablkcipher_blocksize(tfm) * 2,
1721 &msg_offset, 1);
1722
1723 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1724 "reading in block des\n");
1725
1726 copy_result = sg_copy_from_buffer(
1727 ta_ctx->dst_sg,
1728 sep_sg_nents(ta_ctx->dst_sg),
1729 small_buf,
1730 crypto_ablkcipher_blocksize(tfm));
1731
1732 if (copy_result !=
1733 crypto_ablkcipher_blocksize(tfm)) {
1734
					dev_warn(&ta_ctx->sep_used->pdev->dev,
						"des block copy failed\n");
1737 sep_crypto_release(sctx, ta_ctx,
1738 -ENOMEM);
1739 return -ENOMEM;
1740 }
1741 }
1742
1743 /* Read Context */
1744 sep_read_context(ta_ctx, &msg_offset,
1745 &sctx->des_private_ctx,
1746 sizeof(struct sep_des_private_context));
1747 } else {
1748
1749 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1750 "post op for AES\n");
1751
1752 /* Skip the MAC Output */
1753 msg_offset += (sizeof(u32) * 4);
1754
1755 /* Read Context */
1756 sep_read_context(ta_ctx, &msg_offset,
1757 &sctx->aes_private_ctx,
1758 sizeof(struct sep_aes_private_context));
1759 }
1760
1761 sep_dump_sg(ta_ctx->sep_used,
1762 "block sg out", ta_ctx->dst_sg);
1763
1764 /* Copy to correct sg if this block had oddball pages */
1765 if (ta_ctx->dst_sg_hold)
1766 sep_copy_sg(ta_ctx->sep_used,
1767 ta_ctx->dst_sg,
1768 ta_ctx->current_cypher_req->dst,
1769 ta_ctx->current_cypher_req->nbytes);
1770
		/**
		 * Copy the IVs back to the walk.iv;
		 * this is required for dm-crypt
		 */
1775 sep_dump_ivs(req, "got data block from sep\n");
1776 if ((ta_ctx->current_request == DES_CBC) &&
1777 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1778
1779 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1780 "returning result iv to walk on DES\n");
1781 des_internal = (struct sep_des_internal_context *)
1782 sctx->des_private_ctx.ctx_buf;
1783 memcpy(ta_ctx->walk.iv,
1784 (void *)des_internal->iv_context,
1785 crypto_ablkcipher_ivsize(tfm));
1786 } else if ((ta_ctx->current_request == AES_CBC) &&
1787 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1788
1789 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1790 "returning result iv to walk on AES\n");
1791 aes_internal = (struct sep_aes_internal_context *)
1792 sctx->aes_private_ctx.cbuff;
1793 memcpy(ta_ctx->walk.iv,
1794 (void *)aes_internal->aes_ctx_iv,
1795 crypto_ablkcipher_ivsize(tfm));
1796 }
1797
1798 /* finished, release everything */
1799 sep_crypto_release(sctx, ta_ctx, 0);
1800 }
1801 pr_debug("crypto_post_op done\n");
1802 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1803 sctx->key_sent, tfm, sctx, ta_ctx);
1804
1805 return 0;
1806}
1807
1808static u32 hash_init_post_op(struct sep_device *sep)
1809{
1810 u32 u32_error;
1811 u32 msg_offset;
1812 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1813 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1814 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1815 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1816 "hash init post op\n");
1817
1818 /* first bring msg from shared area to local area */
1819 memcpy(ta_ctx->msg, sep->shared_addr,
1820 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1821
1822 u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
1823 &msg_offset);
1824
1825 if (u32_error) {
1826 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
1827 u32_error);
1828 sep_crypto_release(sctx, ta_ctx, u32_error);
1829 return u32_error;
1830 }
1831
1832 /* Read Context */
1833 sep_read_context(ta_ctx, &msg_offset,
1834 &sctx->hash_private_ctx,
1835 sizeof(struct sep_hash_private_context));
1836
1837 /* Signal to crypto infrastructure and clear out */
1838 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
1839 sep_crypto_release(sctx, ta_ctx, 0);
1840 return 0;
1841}
1842
1843static u32 hash_update_post_op(struct sep_device *sep)
1844{
1845 u32 u32_error;
1846 u32 msg_offset;
1847 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1848 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1849 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1850 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1851 "hash update post op\n");
1852
1853 /* first bring msg from shared area to local area */
1854 memcpy(ta_ctx->msg, sep->shared_addr,
1855 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1856
1857 u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
1858 &msg_offset);
1859
1860 if (u32_error) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "hash update error %x\n",
1862 u32_error);
1863 sep_crypto_release(sctx, ta_ctx, u32_error);
1864 return u32_error;
1865 }
1866
1867 /* Read Context */
1868 sep_read_context(ta_ctx, &msg_offset,
1869 &sctx->hash_private_ctx,
1870 sizeof(struct sep_hash_private_context));
1871
	/**
	 * The following is only for finup; if we just completed the
	 * data portion of finup, we now need to kick off the
	 * finish portion of finup.
	 */
1877
1878 if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
1879
1880 /* first reset stage to HASH_FINUP_FINISH */
1881 ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
1882
1883 /* now enqueue the finish operation */
1884 spin_lock_irq(&queue_lock);
1885 u32_error = crypto_enqueue_request(&sep_queue,
1886 &ta_ctx->sep_used->current_hash_req->base);
1887 spin_unlock_irq(&queue_lock);
1888
1889 if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
1890 dev_warn(&ta_ctx->sep_used->pdev->dev,
				"sep cypher post op can't queue\n");
1892 sep_crypto_release(sctx, ta_ctx, u32_error);
1893 return u32_error;
1894 }
1895
1896 /* schedule the data send */
1897 u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
1898 sep_dequeuer, (void *)&sep_queue);
1899
1900 if (u32_error) {
1901 dev_warn(&ta_ctx->sep_used->pdev->dev,
				"can't submit work sep_crypto_block\n");
1903 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1904 return -EINVAL;
1905 }
1906 }
1907
1908 /* Signal to crypto infrastructure and clear out */
1909 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
1910 sep_crypto_release(sctx, ta_ctx, 0);
1911 return 0;
1912}
1913
1914static u32 hash_final_post_op(struct sep_device *sep)
1915{
1916 int max_length;
1917 u32 u32_error;
1918 u32 msg_offset;
1919 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1920 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1921 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1922 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1923 "hash final post op\n");
1924
1925 /* first bring msg from shared area to local area */
1926 memcpy(ta_ctx->msg, sep->shared_addr,
1927 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1928
1929 u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
1930 &msg_offset);
1931
1932 if (u32_error) {
1933 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
1934 u32_error);
1935 sep_crypto_release(sctx, ta_ctx, u32_error);
1936 return u32_error;
1937 }
1938
1939 /* Grab the result */
1940 if (ta_ctx->current_hash_req->result == NULL) {
1941 /* Oops, null buffer; error out here */
1942 dev_warn(&ta_ctx->sep_used->pdev->dev,
1943 "hash finish null buffer\n");
1944 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1945 return -ENOMEM;
1946 }
1947
1948 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1949 sizeof(u32)) * sizeof(u32);
1950
1951 sep_read_msg(ta_ctx,
1952 ta_ctx->current_hash_req->result,
1953 crypto_ahash_digestsize(tfm), max_length,
1954 &msg_offset, 0);
1955
1956 /* Signal to crypto infrastructure and clear out */
1957 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
1958 sep_crypto_release(sctx, ta_ctx, 0);
1959 return 0;
1960}
1961
1962static u32 hash_digest_post_op(struct sep_device *sep)
1963{
1964 int max_length;
1965 u32 u32_error;
1966 u32 msg_offset;
1967 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1968 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1969 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1970 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1971 "hash digest post op\n");
1972
1973 /* first bring msg from shared area to local area */
1974 memcpy(ta_ctx->msg, sep->shared_addr,
1975 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1976
1977 u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
1978 &msg_offset);
1979
1980 if (u32_error) {
1981 dev_warn(&ta_ctx->sep_used->pdev->dev,
1982 "hash digest finish error %x\n", u32_error);
1983
1984 sep_crypto_release(sctx, ta_ctx, u32_error);
1985 return u32_error;
1986 }
1987
1988 /* Grab the result */
1989 if (ta_ctx->current_hash_req->result == NULL) {
1990 /* Oops, null buffer; error out here */
1991 dev_warn(&ta_ctx->sep_used->pdev->dev,
1992 "hash digest finish null buffer\n");
1993 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1994 return -ENOMEM;
1995 }
1996
1997 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1998 sizeof(u32)) * sizeof(u32);
1999
2000 sep_read_msg(ta_ctx,
2001 ta_ctx->current_hash_req->result,
2002 crypto_ahash_digestsize(tfm), max_length,
2003 &msg_offset, 0);
2004
2005 /* Signal to crypto infrastructure and clear out */
2006 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2007 "hash digest finish post op done\n");
2008
2009 sep_crypto_release(sctx, ta_ctx, 0);
2010 return 0;
2011}
2012
/**
 * The sep_finish function is scheduled (via tasklet) by the
 * interrupt service routine when the SEP sends an interrupt.
 * It is only called by the interrupt handler as a tasklet.
 */
2018static void sep_finish(unsigned long data)
2019{
2020 struct sep_device *sep_dev;
2021 int res;
2022
2023 res = 0;
2024
2025 if (data == 0) {
2026 pr_debug("sep_finish called with null data\n");
2027 return;
2028 }
2029
2030 sep_dev = (struct sep_device *)data;
2031 if (sep_dev == NULL) {
2032 pr_debug("sep_finish; sep_dev is NULL\n");
2033 return;
2034 }
2035
2036 if (sep_dev->in_kernel == (u32)0) {
2037 dev_warn(&sep_dev->pdev->dev,
2038 "sep_finish; not in kernel operation\n");
2039 return;
2040 }
2041
2042 /* Did we really do a sep command prior to this? */
2043 if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
2044 &sep_dev->ta_ctx->call_status.status)) {
2045
2046 dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
2047 current->pid);
2048 return;
2049 }
2050
2051 if (sep_dev->send_ct != sep_dev->reply_ct) {
2052 dev_warn(&sep_dev->pdev->dev,
2053 "[PID%d] poll; no message came back\n",
2054 current->pid);
2055 return;
2056 }
2057
2058 /* Check for error (In case time ran out) */
2059 if ((res != 0x0) && (res != 0x8)) {
2060 dev_warn(&sep_dev->pdev->dev,
2061 "[PID%d] poll; poll error GPR3 is %x\n",
2062 current->pid, res);
2063 return;
2064 }
2065
2066 /* What kind of interrupt from sep was this? */
2067 res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
2068
2069 dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
2070 current->pid, res);
2071
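	/*
	 * GPR2 layout as decoded below: bit 30 set is a printf request
	 * from the SEP firmware, bit 31 set is a request aimed at the
	 * SEP daemon; any other value is treated as the reply to the
	 * command that was sent.
	 */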
2072 /* Print request? */
2073 if ((res >> 30) & 0x1) {
2074 dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
2075 current->pid);
2076 dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
2077 current->pid,
2078 (char *)(sep_dev->shared_addr +
2079 SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
2080 return;
2081 }
2082
2083 /* Request for daemon (not currently in POR)? */
2084 if (res >> 31) {
2085 dev_dbg(&sep_dev->pdev->dev,
2086 "[PID%d] sep request; ignoring\n",
2087 current->pid);
2088 return;
2089 }
2090
2091 /* If we got here, then we have a reply to a sep command */
2092
2093 dev_dbg(&sep_dev->pdev->dev,
2094 "[PID%d] sep reply to command; processing request: %x\n",
2095 current->pid, sep_dev->current_request);
2096
2097 switch (sep_dev->current_request) {
2098 case AES_CBC:
2099 case AES_ECB:
2100 case DES_CBC:
2101 case DES_ECB:
2102 res = crypto_post_op(sep_dev);
2103 break;
2104 case SHA1:
2105 case MD5:
2106 case SHA224:
2107 case SHA256:
2108 switch (sep_dev->current_hash_stage) {
2109 case HASH_INIT:
2110 res = hash_init_post_op(sep_dev);
2111 break;
2112 case HASH_UPDATE:
2113 case HASH_FINUP_DATA:
2114 res = hash_update_post_op(sep_dev);
2115 break;
2116 case HASH_FINUP_FINISH:
2117 case HASH_FINISH:
2118 res = hash_final_post_op(sep_dev);
2119 break;
2120 case HASH_DIGEST:
2121 res = hash_digest_post_op(sep_dev);
2122 break;
2123 default:
2124 pr_debug("sep - invalid stage for hash finish\n");
2125 }
2126 break;
2127 default:
2128 pr_debug("sep - invalid request for finish\n");
2129 }
2130
2131 if (res)
2132 pr_debug("sep - finish returned error %x\n", res);
2133}
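/*
 * Illustrative sketch (assumption, not code from this file): the
 * interrupt service routine referred to above is expected to kick this
 * tasklet roughly as follows. The handler name sep_isr and its wiring
 * are hypothetical; only tasklet_schedule() on finish_tasklet
 * (initialized in sep_crypto_setup() below) is implied here:
 *
 *	static irqreturn_t sep_isr(int irq, void *dev_id)
 *	{
 *		struct sep_device *sep = dev_id;
 *
 *		// defer completion handling out of hard-irq context
 *		tasklet_schedule(&sep->finish_tasklet);
 *		return IRQ_HANDLED;
 *	}
 */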
2134
2135static int sep_hash_cra_init(struct crypto_tfm *tfm)
2136{
2137 const char *alg_name = crypto_tfm_alg_name(tfm);
2138
2139 pr_debug("sep_hash_cra_init name is %s\n", alg_name);
2140
2141 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2142 sizeof(struct this_task_ctx));
2143 return 0;
2144}
2145
2146static void sep_hash_cra_exit(struct crypto_tfm *tfm)
2147{
2148 pr_debug("sep_hash_cra_exit\n");
2149}
2150
2151static void sep_hash_init(void *data)
2152{
2153 u32 msg_offset;
2154 int result;
2155 struct ahash_request *req;
2156 struct crypto_ahash *tfm;
2157 struct this_task_ctx *ta_ctx;
2158 struct sep_system_ctx *sctx;
2159 unsigned long end_time;
2160 int are_we_done_yet;
2161
2162 req = (struct ahash_request *)data;
2163 tfm = crypto_ahash_reqtfm(req);
2164 sctx = crypto_ahash_ctx(tfm);
2165 ta_ctx = ahash_request_ctx(req);
2166 ta_ctx->sep_used = sep_dev;
2167
2168 ta_ctx->are_we_done_yet = &are_we_done_yet;
2169
2170 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2171 "sep_hash_init\n");
2172 ta_ctx->current_hash_stage = HASH_INIT;
2173 /* opcode and mode */
2174 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
2175 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2176 sizeof(u32), sizeof(u32), &msg_offset, 0);
2177 sep_end_msg(ta_ctx, msg_offset);
2178
2179 are_we_done_yet = 0;
2180 result = sep_crypto_take_sep(ta_ctx);
2181 if (result) {
2182 dev_warn(&ta_ctx->sep_used->pdev->dev,
2183 "sep_hash_init take sep failed\n");
2184 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2185 }
2186
2187 /* now we sit and wait up to a fixed time for completion */
2188 end_time = jiffies + (WAIT_TIME * HZ);
2189 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2190 schedule();
2191
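	/*
	 * Design note on the polling loop above: WAIT_TIME is a timeout
	 * in seconds (hence WAIT_TIME * HZ jiffies), and
	 * *are_we_done_yet points at the local flag registered in
	 * ta_ctx above, which the completion path driven from the
	 * sep_finish() tasklet is expected to set to 1.
	 */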
2192 /* Done waiting; still not done yet? */
2193 if (are_we_done_yet == 0) {
2194 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2195 "hash init never got done\n");
2196 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2197 return;
2198 }
2199
2200}
2201
2202static void sep_hash_update(void *data)
2203{
2204 int int_error;
2205 u32 msg_offset;
2206 u32 len;
2207 struct sep_hash_internal_context *int_ctx;
2208 u32 block_size;
2209 u32 head_len;
2210 u32 tail_len;
2211 int are_we_done_yet;
2212
2213 static u32 msg[10];
2214 static char small_buf[100];
2215 void *src_ptr;
2216 struct scatterlist *new_sg;
2217 ssize_t copy_result;
2218 struct ahash_request *req;
2219 struct crypto_ahash *tfm;
2220 struct this_task_ctx *ta_ctx;
2221 struct sep_system_ctx *sctx;
2222 unsigned long end_time;
2223
2224 req = (struct ahash_request *)data;
2225 tfm = crypto_ahash_reqtfm(req);
2226 sctx = crypto_ahash_ctx(tfm);
2227 ta_ctx = ahash_request_ctx(req);
2228 ta_ctx->sep_used = sep_dev;
2229
2230 ta_ctx->are_we_done_yet = &are_we_done_yet;
2231
2232 /* length for queue status */
2233 ta_ctx->nbytes = req->nbytes;
2234
2235 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2236 "sep_hash_update\n");
2237 ta_ctx->current_hash_stage = HASH_UPDATE;
2238 len = req->nbytes;
2239
2240 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2241 tail_len = req->nbytes % block_size;
2242 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
2243 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2244 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2245
2246 /* Compute header/tail sizes */
2247 int_ctx = (struct sep_hash_internal_context *)&sctx->
2248 hash_private_ctx.internal_context;
2249 head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
2250 tail_len = (req->nbytes - head_len) % block_size;
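	/*
	 * Worked example of the remainder math above (illustrative
	 * numbers): with block_size = 64 and 20 bytes carried over from
	 * the previous update, head_len = (64 - 20) % 64 = 44 bytes
	 * complete the old partial block; with req->nbytes = 200,
	 * tail_len = (200 - 44) % 64 = 28 trailing bytes are carried
	 * inside the message rather than through the DMA tables.
	 */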
2251
2252 /* Make sure all pages are an even multiple of the block size */
2253 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2254 req->nbytes,
2255 block_size, &new_sg, 1);
2256
2257 if (int_error < 0) {
2258 dev_warn(&ta_ctx->sep_used->pdev->dev,
2259 "oddball pages error in crash update\n");
2260 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2261 return;
2262 } else if (int_error == 1) {
2263 ta_ctx->src_sg = new_sg;
2264 ta_ctx->src_sg_hold = new_sg;
2265 } else {
2266 ta_ctx->src_sg = req->src;
2267 ta_ctx->src_sg_hold = NULL;
2268 }
2269
2270 src_ptr = sg_virt(ta_ctx->src_sg);
2271
2272 if ((!req->nbytes) || (!ta_ctx->src_sg)) {
2273 /* null data */
2274 src_ptr = NULL;
2275 }
2276
2277 sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
2278
2279 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2280 ta_ctx->dcb_input_data.data_in_size =
2281 req->nbytes - (head_len + tail_len);
2282 ta_ctx->dcb_input_data.app_out_address = NULL;
2283 ta_ctx->dcb_input_data.block_size = block_size;
2284 ta_ctx->dcb_input_data.tail_block_size = 0;
2285 ta_ctx->dcb_input_data.is_applet = 0;
2286 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2287 ta_ctx->dcb_input_data.dst_sg = NULL;
2288
2289 int_error = sep_create_dcb_dmatables_context_kernel(
2290 ta_ctx->sep_used,
2291 &ta_ctx->dcb_region,
2292 &ta_ctx->dmatables_region,
2293 &ta_ctx->dma_ctx,
2294 &ta_ctx->dcb_input_data,
2295 1);
2296 if (int_error) {
2297 dev_warn(&ta_ctx->sep_used->pdev->dev,
2298 "hash update dma table create failed\n");
2299 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2300 return;
2301 }
2302
2303 /* Construct message to SEP */
2304 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
2305
2306 msg[0] = (u32)0;
2307 msg[1] = (u32)0;
2308 msg[2] = (u32)0;
2309
2310 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2311 &msg_offset, 0);
2312
2313 /* Handle remainders */
2314
2315 /* Head */
2316 sep_write_msg(ta_ctx, &head_len, sizeof(u32),
2317 sizeof(u32), &msg_offset, 0);
2318
2319 if (head_len) {
2320 copy_result = sg_copy_to_buffer(
2321 req->src,
2322 sep_sg_nents(ta_ctx->src_sg),
2323 small_buf, head_len);
2324
2325 if (copy_result != head_len) {
2326 dev_warn(&ta_ctx->sep_used->pdev->dev,
2327 "sg head copy failure in hash block\n");
2328 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2329 return;
2330 }
2331
2332 sep_write_msg(ta_ctx, small_buf, head_len,
2333 sizeof(u32) * 32, &msg_offset, 1);
2334 } else {
2335 msg_offset += sizeof(u32) * 32;
2336 }
2337
2338 /* Tail */
2339 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2340 sizeof(u32), &msg_offset, 0);
2341
2342 if (tail_len) {
2343 copy_result = sep_copy_offset_sg(
2344 ta_ctx->sep_used,
2345 ta_ctx->src_sg,
2346 req->nbytes - tail_len,
2347 small_buf, tail_len);
2348
2349 if (copy_result != tail_len) {
2350 dev_warn(&ta_ctx->sep_used->pdev->dev,
2351 "sg tail copy failure in hash block\n");
2352 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2353 return;
2354 }
2355
2356 sep_write_msg(ta_ctx, small_buf, tail_len,
2357 sizeof(u32) * 32, &msg_offset, 1);
2358 } else {
2359 msg_offset += sizeof(u32) * 32;
2360 }
2361
2362 /* Context */
2363 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2364 sizeof(struct sep_hash_private_context));
2365
2366 sep_end_msg(ta_ctx, msg_offset);
2367 are_we_done_yet = 0;
2368 int_error = sep_crypto_take_sep(ta_ctx);
2369 if (int_error) {
2370 dev_warn(&ta_ctx->sep_used->pdev->dev,
2371 "sep_hash_update take sep failed\n");
2372 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2373 }
2374
2375 /* now we sit and wait up to a fixed time for completion */
2376 end_time = jiffies + (WAIT_TIME * HZ);
2377 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2378 schedule();
2379
2380 /* Done waiting; still not done yet? */
2381 if (are_we_done_yet == 0) {
2382 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2383 "hash update never got done\n");
2384 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2385 return;
2386 }
2387
2388}
2389
2390static void sep_hash_final(void *data)
2391{
2392 u32 msg_offset;
2393 struct ahash_request *req;
2394 struct crypto_ahash *tfm;
2395 struct this_task_ctx *ta_ctx;
2396 struct sep_system_ctx *sctx;
2397 int result;
2398 unsigned long end_time;
2399 int are_we_done_yet;
2400
2401 req = (struct ahash_request *)data;
2402 tfm = crypto_ahash_reqtfm(req);
2403 sctx = crypto_ahash_ctx(tfm);
2404 ta_ctx = ahash_request_ctx(req);
2405 ta_ctx->sep_used = sep_dev;
2406
2407 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2408 "sep_hash_final\n");
2409 ta_ctx->current_hash_stage = HASH_FINISH;
2410
2411 ta_ctx->are_we_done_yet = &are_we_done_yet;
2412
2413 /* opcode and mode */
2414 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
2415
2416 /* Context */
2417 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2418 sizeof(struct sep_hash_private_context));
2419
2420 sep_end_msg(ta_ctx, msg_offset);
2421 are_we_done_yet = 0;
2422 result = sep_crypto_take_sep(ta_ctx);
2423 if (result) {
2424 dev_warn(&ta_ctx->sep_used->pdev->dev,
2425 "sep_hash_final take sep failed\n");
2426 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2427 }
2428
2429 /* now we sit and wait up to a fixed time for completion */
2430 end_time = jiffies + (WAIT_TIME * HZ);
2431 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2432 schedule();
2433
2434 /* Done waiting; still not done yet? */
2435 if (are_we_done_yet == 0) {
2436 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2437 "hash final job never got done\n");
2438 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2439 return;
2440 }
2441
2442}
2443
2444static void sep_hash_digest(void *data)
2445{
2446 int int_error;
2447 u32 msg_offset;
2448 u32 block_size;
2449 u32 msg[10];
2450 size_t copy_result;
2451 int result;
2452 int are_we_done_yet;
2453 u32 tail_len;
2454 static char small_buf[100];
2455 struct scatterlist *new_sg;
2456 void *src_ptr;
2457
2458 struct ahash_request *req;
2459 struct crypto_ahash *tfm;
2460 struct this_task_ctx *ta_ctx;
2461 struct sep_system_ctx *sctx;
2462 unsigned long end_time;
2463
2464 req = (struct ahash_request *)data;
2465 tfm = crypto_ahash_reqtfm(req);
2466 sctx = crypto_ahash_ctx(tfm);
2467 ta_ctx = ahash_request_ctx(req);
2468 ta_ctx->sep_used = sep_dev;
2469
2470 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2471 "sep_hash_digest\n");
2472 ta_ctx->current_hash_stage = HASH_DIGEST;
2473
2474 ta_ctx->are_we_done_yet = &are_we_done_yet;
2475
2476 /* length for queue status */
2477 ta_ctx->nbytes = req->nbytes;
2478
2479 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2480 tail_len = req->nbytes % block_size;
2481 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
2482 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2483 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2484
2485 /* Make sure all pages are an even multiple of the block size */
2486 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2487 req->nbytes,
2488 block_size, &new_sg, 1);
2489
2490 if (int_error < 0) {
2491 dev_warn(&ta_ctx->sep_used->pdev->dev,
2492 "oddball pages error in crash update\n");
2493 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2494 return;
2495 } else if (int_error == 1) {
2496 ta_ctx->src_sg = new_sg;
2497 ta_ctx->src_sg_hold = new_sg;
2498 } else {
2499 ta_ctx->src_sg = req->src;
2500 ta_ctx->src_sg_hold = NULL;
2501 }
2502
2503 src_ptr = sg_virt(ta_ctx->src_sg);
2504
2505 if ((!req->nbytes) || (!ta_ctx->src_sg)) {
2506 /* null data */
2507 src_ptr = NULL;
2508 }
2509
2510 sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
2511
2512 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2513 ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
2514 ta_ctx->dcb_input_data.app_out_address = NULL;
2515 ta_ctx->dcb_input_data.block_size = block_size;
2516 ta_ctx->dcb_input_data.tail_block_size = 0;
2517 ta_ctx->dcb_input_data.is_applet = 0;
2518 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2519 ta_ctx->dcb_input_data.dst_sg = NULL;
2520
2521 int_error = sep_create_dcb_dmatables_context_kernel(
2522 ta_ctx->sep_used,
2523 &ta_ctx->dcb_region,
2524 &ta_ctx->dmatables_region,
2525 &ta_ctx->dma_ctx,
2526 &ta_ctx->dcb_input_data,
2527 1);
2528 if (int_error) {
2529 dev_warn(&ta_ctx->sep_used->pdev->dev,
2530 "hash update dma table create failed\n");
2531 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2532 return;
2533 }
2534
2535 /* Construct message to SEP */
2536 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
2537 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2538 sizeof(u32), sizeof(u32), &msg_offset, 0);
2539
2540 msg[0] = (u32)0;
2541 msg[1] = (u32)0;
2542 msg[2] = (u32)0;
2543
2544 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2545 &msg_offset, 0);
2546
2547 /* Tail */
2548 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2549 sizeof(u32), &msg_offset, 0);
2550
2551 if (tail_len) {
2552 copy_result = sep_copy_offset_sg(
2553 ta_ctx->sep_used,
2554 ta_ctx->src_sg,
2555 req->nbytes - tail_len,
2556 small_buf, tail_len);
2557
2558 if (copy_result != tail_len) {
2559 dev_warn(&ta_ctx->sep_used->pdev->dev,
2560 "sg tail copy failure in hash block\n");
2561 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2562 return;
2563 }
2564
2565 sep_write_msg(ta_ctx, small_buf, tail_len,
2566 sizeof(u32) * 32, &msg_offset, 1);
2567 } else {
2568 msg_offset += sizeof(u32) * 32;
2569 }
2570
2571 sep_end_msg(ta_ctx, msg_offset);
2572
2573 are_we_done_yet = 0;
2574 result = sep_crypto_take_sep(ta_ctx);
2575 if (result) {
2576 dev_warn(&ta_ctx->sep_used->pdev->dev,
2577 "sep_hash_digest take sep failed\n");
2578 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2579 }
2580
2581 /* now we sit and wait up to a fixed time for completion */
2582 end_time = jiffies + (WAIT_TIME * HZ);
2583 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2584 schedule();
2585
2586 /* Done waiting; still not done yet? */
2587 if (are_we_done_yet == 0) {
2588 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2589 "hash digest job never got done\n");
2590 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2591 return;
2592 }
2593
2594}
2595
2596/**
2597 * This is what is called by each of the APIs provided
2598 * in the kernel crypto descriptors. It is run in a process
2599 * context using the kernel workqueues. Therefore it can
2600 * be put to sleep.
2601 */
2602static void sep_dequeuer(void *data)
2603{
2604 struct crypto_queue *this_queue;
2605 struct crypto_async_request *async_req;
2606 struct crypto_async_request *backlog;
2607 struct ablkcipher_request *cypher_req;
2608 struct ahash_request *hash_req;
2609 struct sep_system_ctx *sctx;
2610 struct crypto_ahash *hash_tfm;
2611 struct this_task_ctx *ta_ctx;
2612
2613
2614 this_queue = (struct crypto_queue *)data;
2615
2616 spin_lock_irq(&queue_lock);
2617 backlog = crypto_get_backlog(this_queue);
2618 async_req = crypto_dequeue_request(this_queue);
2619 spin_unlock_irq(&queue_lock);
2620
2621 if (!async_req) {
2622 pr_debug("sep crypto queue is empty\n");
2623 return;
2624 }
2625
2626 if (backlog) {
2627 pr_debug("sep crypto backlog set\n");
2628 if (backlog->complete)
2629 backlog->complete(backlog, -EINPROGRESS);
2630 backlog = NULL;
2631 }
2632
2633 if (!async_req->tfm) {
2634 pr_debug("sep crypto queue null tfm\n");
2635 return;
2636 }
2637
2638 if (!async_req->tfm->__crt_alg) {
2639 pr_debug("sep crypto queue null __crt_alg\n");
2640 return;
2641 }
2642
2643 if (!async_req->tfm->__crt_alg->cra_type) {
2644 pr_debug("sep crypto queue null cra_type\n");
2645 return;
2646 }
2647
2648 /* we have stuff in the queue */
2649 if (async_req->tfm->__crt_alg->cra_type !=
2650 &crypto_ahash_type) {
2651 /* This is for a cipher */
2652 pr_debug("sep crypto queue doing cipher\n");
2653 cypher_req = container_of(async_req,
2654 struct ablkcipher_request,
2655 base);
2656 if (!cypher_req) {
2657 pr_debug("sep crypto queue null cypher_req\n");
2658 return;
2659 }
2660
2661 sep_crypto_block((void *)cypher_req);
2662 return;
2663 } else {
2664 /* This is a hash */
2665 pr_debug("sep crypto queue doing hash\n");
2666 /**
2667 * This is a bit more complex than cipher; we
2668 * need to figure out what type of hash operation it is
2669 */
2670 hash_req = ahash_request_cast(async_req);
2671 if (!hash_req) {
2672 pr_debug("sep crypto queue null hash_req\n");
2673 return;
2674 }
2675
2676 hash_tfm = crypto_ahash_reqtfm(hash_req);
2677 if (!hash_tfm) {
2678 pr_debug("sep crypto queue null hash_tfm\n");
2679 return;
2680 }
2681
2682
2683 sctx = crypto_ahash_ctx(hash_tfm);
2684 if (!sctx) {
2685 pr_debug("sep crypto queue null sctx\n");
2686 return;
2687 }
2688
2689 ta_ctx = ahash_request_ctx(hash_req);
2690
2691 if (ta_ctx->current_hash_stage == HASH_INIT) {
2692 pr_debug("sep crypto queue hash init\n");
2693 sep_hash_init((void *)hash_req);
2694 return;
2695 } else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
2696 pr_debug("sep crypto queue hash update\n");
2697 sep_hash_update((void *)hash_req);
2698 return;
2699 } else if (ta_ctx->current_hash_stage == HASH_FINISH) {
2700 pr_debug("sep crypto queue hash final\n");
2701 sep_hash_final((void *)hash_req);
2702 return;
2703 } else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
2704 pr_debug("sep crypto queue hash digest\n");
2705 sep_hash_digest((void *)hash_req);
2706 return;
2707 } else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
2708 pr_debug("sep crypto queue hash digest\n");
2709 sep_hash_update((void *)hash_req);
2710 return;
2711 } else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
2712 pr_debug("sep crypto queue hash digest\n");
2713 sep_hash_final((void *)hash_req);
2714 return;
2715 } else {
2716 pr_debug("sep crypto queue hash oops nothing\n");
2717 return;
2718 }
2719 }
2720}
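/*
 * Request lifecycle, as visible in this file (orientation comment
 * added): an entry point such as sep_sha1_init() enqueues the request
 * on sep_queue and submits sep_dequeuer() to the driver workqueue;
 * sep_dequeuer() runs in process context, builds the SEP message
 * (sep_hash_* or sep_crypto_block) and claims the device via
 * sep_crypto_take_sep(); when SEP interrupts, the sep_finish() tasklet
 * dispatches to the matching *_post_op() handler, which copies results
 * back and completes the request via sep_crypto_release().
 */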
2721
2722static int sep_sha1_init(struct ahash_request *req)
2723{
2724 int error;
2725 int error1;
2726 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2727
2728 pr_debug("sep - doing sha1 init\n");
2729
2730 /* Clear out task context */
2731 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2732
2733 ta_ctx->sep_used = sep_dev;
2734 ta_ctx->current_request = SHA1;
2735 ta_ctx->current_hash_req = req;
2736 ta_ctx->current_cypher_req = NULL;
2737 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2738 ta_ctx->current_hash_stage = HASH_INIT;
2739
2740 /* lock necessary so that only one entity touches the queues */
2741 spin_lock_irq(&queue_lock);
2742 error = crypto_enqueue_request(&sep_queue, &req->base);
2743
2744 if ((error != 0) && (error != -EINPROGRESS))
2745 pr_debug(" sep - crypto enqueue failed: %x\n",
2746 error);
2747 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2748 sep_dequeuer, (void *)&sep_queue);
2749 if (error1)
2750 pr_debug(" sep - workqueue submit failed: %x\n",
2751 error1);
2752 spin_unlock_irq(&queue_lock);
2753 /* We return result of crypto enqueue */
2754 return error;
2755}
2756
2757static int sep_sha1_update(struct ahash_request *req)
2758{
2759 int error;
2760 int error1;
2761 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2762
2763 pr_debug("sep - doing sha1 update\n");
2764
2765 ta_ctx->sep_used = sep_dev;
2766 ta_ctx->current_request = SHA1;
2767 ta_ctx->current_hash_req = req;
2768 ta_ctx->current_cypher_req = NULL;
2769 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2770 ta_ctx->current_hash_stage = HASH_UPDATE;
2771
2772 /* lock necessary so that only one entity touches the queues */
2773 spin_lock_irq(&queue_lock);
2774 error = crypto_enqueue_request(&sep_queue, &req->base);
2775
2776 if ((error != 0) && (error != -EINPROGRESS))
2777 pr_debug(" sep - crypto enqueue failed: %x\n",
2778 error);
2779 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2780 sep_dequeuer, (void *)&sep_queue);
2781 if (error1)
2782 pr_debug(" sep - workqueue submit failed: %x\n",
2783 error1);
2784 spin_unlock_irq(&queue_lock);
2785 /* We return result of crypto enqueue */
2786 return error;
2787}
2788
2789static int sep_sha1_final(struct ahash_request *req)
2790{
2791 int error;
2792 int error1;
2793 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2794 pr_debug("sep - doing sha1 final\n");
2795
2796 ta_ctx->sep_used = sep_dev;
2797 ta_ctx->current_request = SHA1;
2798 ta_ctx->current_hash_req = req;
2799 ta_ctx->current_cypher_req = NULL;
2800 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2801 ta_ctx->current_hash_stage = HASH_FINISH;
2802
2803 /* lock necessary so that only one entity touches the queues */
2804 spin_lock_irq(&queue_lock);
2805 error = crypto_enqueue_request(&sep_queue, &req->base);
2806
2807 if ((error != 0) && (error != -EINPROGRESS))
2808 pr_debug(" sep - crypto enqueue failed: %x\n",
2809 error);
2810 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2811 sep_dequeuer, (void *)&sep_queue);
2812 if (error1)
2813 pr_debug(" sep - workqueue submit failed: %x\n",
2814 error1);
2815 spin_unlock_irq(&queue_lock);
2816 /* We return result of crypto enqueue */
2817 return error;
2818}
2819
2820static int sep_sha1_digest(struct ahash_request *req)
2821{
2822 int error;
2823 int error1;
2824 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2825 pr_debug("sep - doing sha1 digest\n");
2826
2827 /* Clear out task context */
2828 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2829
2830 ta_ctx->sep_used = sep_dev;
2831 ta_ctx->current_request = SHA1;
2832 ta_ctx->current_hash_req = req;
2833 ta_ctx->current_cypher_req = NULL;
2834 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2835 ta_ctx->current_hash_stage = HASH_DIGEST;
2836
2837 /* lock necessary so that only one entity touches the queues */
2838 spin_lock_irq(&queue_lock);
2839 error = crypto_enqueue_request(&sep_queue, &req->base);
2840
2841 if ((error != 0) && (error != -EINPROGRESS))
2842 pr_debug(" sep - crypto enqueue failed: %x\n",
2843 error);
2844 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2845 sep_dequeuer, (void *)&sep_queue);
2846 if (error1)
2847 pr_debug(" sep - workqueue submit failed: %x\n",
2848 error1);
2849 spin_unlock_irq(&queue_lock);
2850 /* We return result of crypto enqueue */
2851 return error;
2852}
2853
2854static int sep_sha1_finup(struct ahash_request *req)
2855{
2856 int error;
2857 int error1;
2858 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2859 pr_debug("sep - doing sha1 finup\n");
2860
2861 ta_ctx->sep_used = sep_dev;
2862 ta_ctx->current_request = SHA1;
2863 ta_ctx->current_hash_req = req;
2864 ta_ctx->current_cypher_req = NULL;
2865 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2866 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2867
2868 /* lock necessary so that only one entity touches the queues */
2869 spin_lock_irq(&queue_lock);
2870 error = crypto_enqueue_request(&sep_queue, &req->base);
2871
2872 if ((error != 0) && (error != -EINPROGRESS))
2873 pr_debug(" sep - crypto enqueue failed: %x\n",
2874 error);
2875 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2876 sep_dequeuer, (void *)&sep_queue);
2877 if (error1)
2878 pr_debug(" sep - workqueue submit failed: %x\n",
2879 error1);
2880 spin_unlock_irq(&queue_lock);
2881 /* We return result of crypto enqueue */
2882 return error;
2883}
2884
2885static int sep_md5_init(struct ahash_request *req)
2886{
2887 int error;
2888 int error1;
2889 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2890 pr_debug("sep - doing md5 init\n");
2891
2892 /* Clear out task context */
2893 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2894
2895 ta_ctx->sep_used = sep_dev;
2896 ta_ctx->current_request = MD5;
2897 ta_ctx->current_hash_req = req;
2898 ta_ctx->current_cypher_req = NULL;
2899 ta_ctx->hash_opmode = SEP_HASH_MD5;
2900 ta_ctx->current_hash_stage = HASH_INIT;
2901
2902 /* lock necessary so that only one entity touches the queues */
2903 spin_lock_irq(&queue_lock);
2904 error = crypto_enqueue_request(&sep_queue, &req->base);
2905
2906 if ((error != 0) && (error != -EINPROGRESS))
2907 pr_debug(" sep - crypto enqueue failed: %x\n",
2908 error);
2909 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2910 sep_dequeuer, (void *)&sep_queue);
2911 if (error1)
2912 pr_debug(" sep - workqueue submit failed: %x\n",
2913 error1);
2914 spin_unlock_irq(&queue_lock);
2915 /* We return result of crypto enqueue */
2916 return error;
2917}
2918
2919static int sep_md5_update(struct ahash_request *req)
2920{
2921 int error;
2922 int error1;
2923 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2924 pr_debug("sep - doing md5 update\n");
2925
2926 ta_ctx->sep_used = sep_dev;
2927 ta_ctx->current_request = MD5;
2928 ta_ctx->current_hash_req = req;
2929 ta_ctx->current_cypher_req = NULL;
2930 ta_ctx->hash_opmode = SEP_HASH_MD5;
2931 ta_ctx->current_hash_stage = HASH_UPDATE;
2932
2933 /* lock necessary so that only one entity touches the queues */
2934 spin_lock_irq(&queue_lock);
2935 error = crypto_enqueue_request(&sep_queue, &req->base);
2936
2937 if ((error != 0) && (error != -EINPROGRESS))
2938 pr_debug(" sep - crypto enqueue failed: %x\n",
2939 error);
2940 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2941 sep_dequeuer, (void *)&sep_queue);
2942 if (error1)
2943 pr_debug(" sep - workqueue submit failed: %x\n",
2944 error1);
2945 spin_unlock_irq(&queue_lock);
2946 /* We return result of crypto enqueue */
2947 return error;
2948}
2949
2950static int sep_md5_final(struct ahash_request *req)
2951{
2952 int error;
2953 int error1;
2954 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2955 pr_debug("sep - doing md5 final\n");
2956
2957 ta_ctx->sep_used = sep_dev;
2958 ta_ctx->current_request = MD5;
2959 ta_ctx->current_hash_req = req;
2960 ta_ctx->current_cypher_req = NULL;
2961 ta_ctx->hash_opmode = SEP_HASH_MD5;
2962 ta_ctx->current_hash_stage = HASH_FINISH;
2963
2964 /* lock necessary so that only one entity touches the queues */
2965 spin_lock_irq(&queue_lock);
2966 error = crypto_enqueue_request(&sep_queue, &req->base);
2967
2968 if ((error != 0) && (error != -EINPROGRESS))
2969 pr_debug(" sep - crypto enqueue failed: %x\n",
2970 error);
2971 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2972 sep_dequeuer, (void *)&sep_queue);
2973 if (error1)
2974 pr_debug(" sep - workqueue submit failed: %x\n",
2975 error1);
2976 spin_unlock_irq(&queue_lock);
2977 /* We return result of crypto enqueue */
2978 return error;
2979}
2980
2981static int sep_md5_digest(struct ahash_request *req)
2982{
2983 int error;
2984 int error1;
2985 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2986
2987 pr_debug("sep - doing md5 digest\n");
2988
2989 /* Clear out task context */
2990 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2991
2992 ta_ctx->sep_used = sep_dev;
2993 ta_ctx->current_request = MD5;
2994 ta_ctx->current_hash_req = req;
2995 ta_ctx->current_cypher_req = NULL;
2996 ta_ctx->hash_opmode = SEP_HASH_MD5;
2997 ta_ctx->current_hash_stage = HASH_DIGEST;
2998
2999 /* lock necessary so that only one entity touches the queues */
3000 spin_lock_irq(&queue_lock);
3001 error = crypto_enqueue_request(&sep_queue, &req->base);
3002
3003 if ((error != 0) && (error != -EINPROGRESS))
3004 pr_debug(" sep - crypto enqueue failed: %x\n",
3005 error);
3006 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3007 sep_dequeuer, (void *)&sep_queue);
3008 if (error1)
3009 pr_debug(" sep - workqueue submit failed: %x\n",
3010 error1);
3011 spin_unlock_irq(&queue_lock);
3012 /* We return result of crypto enqueue */
3013 return error;
3014}
3015
3016static int sep_md5_finup(struct ahash_request *req)
3017{
3018 int error;
3019 int error1;
3020 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3021
3022 pr_debug("sep - doing md5 finup\n");
3023
3024 ta_ctx->sep_used = sep_dev;
3025 ta_ctx->current_request = MD5;
3026 ta_ctx->current_hash_req = req;
3027 ta_ctx->current_cypher_req = NULL;
3028 ta_ctx->hash_opmode = SEP_HASH_MD5;
3029 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3030
3031 /* lock necessary so that only one entity touches the queues */
3032 spin_lock_irq(&queue_lock);
3033 error = crypto_enqueue_request(&sep_queue, &req->base);
3034
3035 if ((error != 0) && (error != -EINPROGRESS))
3036 pr_debug(" sep - crypto enqueue failed: %x\n",
3037 error);
3038 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3039 sep_dequeuer, (void *)&sep_queue);
3040 if (error1)
3041 pr_debug(" sep - workqueue submit failed: %x\n",
3042 error1);
3043 spin_unlock_irq(&queue_lock);
3044 /* We return result of crypto enqueue */
3045 return error;
3046}
3047
3048static int sep_sha224_init(struct ahash_request *req)
3049{
3050 int error;
3051 int error1;
3052 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3053 pr_debug("sep - doing sha224 init\n");
3054
3055 /* Clear out task context */
3056 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3057
3058 ta_ctx->sep_used = sep_dev;
3059 ta_ctx->current_request = SHA224;
3060 ta_ctx->current_hash_req = req;
3061 ta_ctx->current_cypher_req = NULL;
3062 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3063 ta_ctx->current_hash_stage = HASH_INIT;
3064
3065 /* lock necessary so that only one entity touches the queues */
3066 spin_lock_irq(&queue_lock);
3067 error = crypto_enqueue_request(&sep_queue, &req->base);
3068
3069 if ((error != 0) && (error != -EINPROGRESS))
3070 pr_debug(" sep - crypto enqueue failed: %x\n",
3071 error);
3072 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3073 sep_dequeuer, (void *)&sep_queue);
3074 if (error1)
3075 pr_debug(" sep - workqueue submit failed: %x\n",
3076 error1);
3077 spin_unlock_irq(&queue_lock);
3078 /* We return result of crypto enqueue */
3079 return error;
3080}
3081
3082static int sep_sha224_update(struct ahash_request *req)
3083{
3084 int error;
3085 int error1;
3086 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3087 pr_debug("sep - doing sha224 update\n");
3088
3089 ta_ctx->sep_used = sep_dev;
3090 ta_ctx->current_request = SHA224;
3091 ta_ctx->current_hash_req = req;
3092 ta_ctx->current_cypher_req = NULL;
3093 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3094 ta_ctx->current_hash_stage = HASH_UPDATE;
3095
3096 /* lock necessary so that only one entity touches the queues */
3097 spin_lock_irq(&queue_lock);
3098 error = crypto_enqueue_request(&sep_queue, &req->base);
3099
3100 if ((error != 0) && (error != -EINPROGRESS))
3101 pr_debug(" sep - crypto enqueue failed: %x\n",
3102 error);
3103 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3104 sep_dequeuer, (void *)&sep_queue);
3105 if (error1)
3106 pr_debug(" sep - workqueue submit failed: %x\n",
3107 error1);
3108 spin_unlock_irq(&queue_lock);
3109 /* We return result of crypto enqueue */
3110 return error;
3111}
3112
3113static int sep_sha224_final(struct ahash_request *req)
3114{
3115 int error;
3116 int error1;
3117 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3118 pr_debug("sep - doing sha224 final\n");
3119
3120 ta_ctx->sep_used = sep_dev;
3121 ta_ctx->current_request = SHA224;
3122 ta_ctx->current_hash_req = req;
3123 ta_ctx->current_cypher_req = NULL;
3124 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3125 ta_ctx->current_hash_stage = HASH_FINISH;
3126
3127 /* lock necessary so that only one entity touches the queues */
3128 spin_lock_irq(&queue_lock);
3129 error = crypto_enqueue_request(&sep_queue, &req->base);
3130
3131 if ((error != 0) && (error != -EINPROGRESS))
3132 pr_debug(" sep - crypto enqueue failed: %x\n",
3133 error);
3134 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3135 sep_dequeuer, (void *)&sep_queue);
3136 if (error1)
3137 pr_debug(" sep - workqueue submit failed: %x\n",
3138 error1);
3139 spin_unlock_irq(&queue_lock);
3140 /* We return result of crypto enqueue */
3141 return error;
3142}
3143
3144static int sep_sha224_digest(struct ahash_request *req)
3145{
3146 int error;
3147 int error1;
3148 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3149
3150 pr_debug("sep - doing sha224 digest\n");
3151
3152 /* Clear out task context */
3153 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3154
3155 ta_ctx->sep_used = sep_dev;
3156 ta_ctx->current_request = SHA224;
3157 ta_ctx->current_hash_req = req;
3158 ta_ctx->current_cypher_req = NULL;
3159 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3160 ta_ctx->current_hash_stage = HASH_DIGEST;
3161
3162 /* lock necessary so that only one entity touches the queues */
3163 spin_lock_irq(&queue_lock);
3164 error = crypto_enqueue_request(&sep_queue, &req->base);
3165
3166 if ((error != 0) && (error != -EINPROGRESS))
3167 pr_debug(" sep - crypto enqueue failed: %x\n",
3168 error);
3169 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3170 sep_dequeuer, (void *)&sep_queue);
3171 if (error1)
3172 pr_debug(" sep - workqueue submit failed: %x\n",
3173 error1);
3174 spin_unlock_irq(&queue_lock);
3175 /* We return result of crypto enqueue */
3176 return error;
3177}
3178
3179static int sep_sha224_finup(struct ahash_request *req)
3180{
3181 int error;
3182 int error1;
3183 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3184
3185 pr_debug("sep - doing sha224 finup\n");
3186
3187 ta_ctx->sep_used = sep_dev;
3188 ta_ctx->current_request = SHA224;
3189 ta_ctx->current_hash_req = req;
3190 ta_ctx->current_cypher_req = NULL;
3191 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3192 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3193
3194 /* lock necessary so that only one entity touches the queues */
3195 spin_lock_irq(&queue_lock);
3196 error = crypto_enqueue_request(&sep_queue, &req->base);
3197
3198 if ((error != 0) && (error != -EINPROGRESS))
3199 pr_debug(" sep - crypto enqueue failed: %x\n",
3200 error);
3201 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3202 sep_dequeuer, (void *)&sep_queue);
3203 if (error1)
3204 pr_debug(" sep - workqueue submit failed: %x\n",
3205 error1);
3206 spin_unlock_irq(&queue_lock);
3207 /* We return result of crypto enqueue */
3208 return error;
3209}
3210
3211static int sep_sha256_init(struct ahash_request *req)
3212{
3213 int error;
3214 int error1;
3215 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3216 pr_debug("sep - doing sha256 init\n");
3217
3218 /* Clear out task context */
3219 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3220
3221 ta_ctx->sep_used = sep_dev;
3222 ta_ctx->current_request = SHA256;
3223 ta_ctx->current_hash_req = req;
3224 ta_ctx->current_cypher_req = NULL;
3225 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3226 ta_ctx->current_hash_stage = HASH_INIT;
3227
3228 /* lock necessary so that only one entity touches the queues */
3229 spin_lock_irq(&queue_lock);
3230 error = crypto_enqueue_request(&sep_queue, &req->base);
3231
3232 if ((error != 0) && (error != -EINPROGRESS))
3233 pr_debug(" sep - crypto enqueue failed: %x\n",
3234 error);
3235 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3236 sep_dequeuer, (void *)&sep_queue);
3237 if (error1)
3238 pr_debug(" sep - workqueue submit failed: %x\n",
3239 error1);
3240 spin_unlock_irq(&queue_lock);
3241 /* We return result of crypto enqueue */
3242 return error;
3243}
3244
3245static int sep_sha256_update(struct ahash_request *req)
3246{
3247 int error;
3248 int error1;
3249 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3250 pr_debug("sep - doing sha256 update\n");
3251
3252 ta_ctx->sep_used = sep_dev;
3253 ta_ctx->current_request = SHA256;
3254 ta_ctx->current_hash_req = req;
3255 ta_ctx->current_cypher_req = NULL;
3256 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3257 ta_ctx->current_hash_stage = HASH_UPDATE;
3258
3259 /* lock necessary so that only one entity touches the queues */
3260 spin_lock_irq(&queue_lock);
3261 error = crypto_enqueue_request(&sep_queue, &req->base);
3262
3263 if ((error != 0) && (error != -EINPROGRESS))
3264 pr_debug(" sep - crypto enqueue failed: %x\n",
3265 error);
3266 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3267 sep_dequeuer, (void *)&sep_queue);
3268 if (error1)
3269 pr_debug(" sep - workqueue submit failed: %x\n",
3270 error1);
3271 spin_unlock_irq(&queue_lock);
3272 /* We return result of crypto enqueue */
3273 return error;
3274}
3275
3276static int sep_sha256_final(struct ahash_request *req)
3277{
3278 int error;
3279 int error1;
3280 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3281 pr_debug("sep - doing sha256 final\n");
3282
3283 ta_ctx->sep_used = sep_dev;
3284 ta_ctx->current_request = SHA256;
3285 ta_ctx->current_hash_req = req;
3286 ta_ctx->current_cypher_req = NULL;
3287 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3288 ta_ctx->current_hash_stage = HASH_FINISH;
3289
3290 /* lock necessary so that only one entity touches the queues */
3291 spin_lock_irq(&queue_lock);
3292 error = crypto_enqueue_request(&sep_queue, &req->base);
3293
3294 if ((error != 0) && (error != -EINPROGRESS))
3295 pr_debug(" sep - crypto enqueue failed: %x\n",
3296 error);
3297 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3298 sep_dequeuer, (void *)&sep_queue);
3299 if (error1)
3300 pr_debug(" sep - workqueue submit failed: %x\n",
3301 error1);
3302 spin_unlock_irq(&queue_lock);
3303 /* We return result of crypto enqueue */
3304 return error;
3305}
3306
3307static int sep_sha256_digest(struct ahash_request *req)
3308{
3309 int error;
3310 int error1;
3311 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3312
3313 pr_debug("sep - doing sha256 digest\n");
3314
3315 /* Clear out task context */
3316 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3317
3318 ta_ctx->sep_used = sep_dev;
3319 ta_ctx->current_request = SHA256;
3320 ta_ctx->current_hash_req = req;
3321 ta_ctx->current_cypher_req = NULL;
3322 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3323 ta_ctx->current_hash_stage = HASH_DIGEST;
3324
3325 /* lock necessary so that only one entity touches the queues */
3326 spin_lock_irq(&queue_lock);
3327 error = crypto_enqueue_request(&sep_queue, &req->base);
3328
3329 if ((error != 0) && (error != -EINPROGRESS))
3330 pr_debug(" sep - crypto enqueue failed: %x\n",
3331 error);
3332 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3333 sep_dequeuer, (void *)&sep_queue);
3334 if (error1)
3335 pr_debug(" sep - workqueue submit failed: %x\n",
3336 error1);
3337 spin_unlock_irq(&queue_lock);
3338 /* We return result of crypto enqueue */
3339 return error;
3340}
3341
3342static int sep_sha256_finup(struct ahash_request *req)
3343{
3344 int error;
3345 int error1;
3346 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3347
3348 pr_debug("sep - doing sha256 finup\n");
3349
3350 ta_ctx->sep_used = sep_dev;
3351 ta_ctx->current_request = SHA256;
3352 ta_ctx->current_hash_req = req;
3353 ta_ctx->current_cypher_req = NULL;
3354 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3355 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3356
3357 /* lock necessary so that only one entity touches the queues */
3358 spin_lock_irq(&queue_lock);
3359 error = crypto_enqueue_request(&sep_queue, &req->base);
3360
3361 if ((error != 0) && (error != -EINPROGRESS))
3362 pr_debug(" sep - crypto enqueue failed: %x\n",
3363 error);
3364 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3365 sep_dequeuer, (void *)&sep_queue);
3366 if (error1)
3367 pr_debug(" sep - workqueue submit failed: %x\n",
3368 error1);
3369 spin_unlock_irq(&queue_lock);
3370 /* We return result of crypto enqueue */
3371 return error;
3372}
3373
3374static int sep_crypto_init(struct crypto_tfm *tfm)
3375{
3376 const char *alg_name = crypto_tfm_alg_name(tfm);
3377
3378 if (alg_name == NULL)
3379 pr_debug("sep_crypto_init alg is NULL\n");
3380 else
3381 pr_debug("sep_crypto_init alg is %s\n", alg_name);
3382
3383 tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
3384 return 0;
3385}
3386
3387static void sep_crypto_exit(struct crypto_tfm *tfm)
3388{
3389 pr_debug("sep_crypto_exit\n");
3390}
3391
3392static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3393 unsigned int keylen)
3394{
3395 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3396
3397 pr_debug("sep aes setkey\n");
3398
3399 pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
3400 switch (keylen) {
3401 case SEP_AES_KEY_128_SIZE:
3402 sctx->aes_key_size = AES_128;
3403 break;
3404 case SEP_AES_KEY_192_SIZE:
3405 sctx->aes_key_size = AES_192;
3406 break;
3407 case SEP_AES_KEY_256_SIZE:
3408 sctx->aes_key_size = AES_256;
3409 break;
3410 case SEP_AES_KEY_512_SIZE:
3411 sctx->aes_key_size = AES_512;
3412 break;
3413 default:
3414 pr_debug("invalid sep aes key size %x\n",
3415 keylen);
3416 return -EINVAL;
3417 }
3418
3419 memset(&sctx->key.aes, 0, sizeof(u32) *
3420 SEP_AES_MAX_KEY_SIZE_WORDS);
3421 memcpy(&sctx->key.aes, key, keylen);
3422 sctx->keylen = keylen;
3423 /* Indicate to encrypt/decrypt function to send key to SEP */
3424 sctx->key_sent = 0;
3425
3426 return 0;
3427}
3428
3429static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
3430{
3431 int error;
3432 int error1;
3433 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3434
3435 pr_debug("sep - doing aes ecb encrypt\n");
3436
3437 /* Clear out task context */
3438 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3439
3440 ta_ctx->sep_used = sep_dev;
3441 ta_ctx->current_request = AES_ECB;
3442 ta_ctx->current_hash_req = NULL;
3443 ta_ctx->current_cypher_req = req;
3444 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3445 ta_ctx->aes_opmode = SEP_AES_ECB;
3446 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3447 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3448
3449 /* lock necessary so that only one entity touches the queues */
3450 spin_lock_irq(&queue_lock);
3451 error = crypto_enqueue_request(&sep_queue, &req->base);
3452
3453 if ((error != 0) && (error != -EINPROGRESS))
3454 pr_debug(" sep - crypto enqueue failed: %x\n",
3455 error);
3456 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3457 sep_dequeuer, (void *)&sep_queue);
3458 if (error1)
3459 pr_debug(" sep - workqueue submit failed: %x\n",
3460 error1);
3461 spin_unlock_irq(&queue_lock);
3462 /* We return result of crypto enqueue */
3463 return error;
3464}
3465
3466static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
3467{
3468 int error;
3469 int error1;
3470 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3471
3472 pr_debug("sep - doing aes ecb decrypt\n");
3473
3474 /* Clear out task context */
3475 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3476
3477 ta_ctx->sep_used = sep_dev;
3478 ta_ctx->current_request = AES_ECB;
3479 ta_ctx->current_hash_req = NULL;
3480 ta_ctx->current_cypher_req = req;
3481 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3482 ta_ctx->aes_opmode = SEP_AES_ECB;
3483 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3484 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3485
3486 /* lock necessary so that only one entity touches the queues */
3487 spin_lock_irq(&queue_lock);
3488 error = crypto_enqueue_request(&sep_queue, &req->base);
3489
3490 if ((error != 0) && (error != -EINPROGRESS))
3491 pr_debug(" sep - crypto enqueue failed: %x\n",
3492 error);
3493 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3494 sep_dequeuer, (void *)&sep_queue);
3495 if (error1)
3496 pr_debug(" sep - workqueue submit failed: %x\n",
3497 error1);
3498 spin_unlock_irq(&queue_lock);
3499 /* We return result of crypto enqueue */
3500 return error;
3501}
3502
3503static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
3504{
3505 int error;
3506 int error1;
3507 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3508 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3509 crypto_ablkcipher_reqtfm(req));
3510
3511 pr_debug("sep - doing aes cbc encrypt\n");
3512
3513 /* Clear out task context */
3514 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3515
3516 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3517 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3518
3519 ta_ctx->sep_used = sep_dev;
3520 ta_ctx->current_request = AES_CBC;
3521 ta_ctx->current_hash_req = NULL;
3522 ta_ctx->current_cypher_req = req;
3523 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3524 ta_ctx->aes_opmode = SEP_AES_CBC;
3525 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3526 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3527
3528 /* lock necessary so that only one entity touches the queues */
3529 spin_lock_irq(&queue_lock);
3530 error = crypto_enqueue_request(&sep_queue, &req->base);
3531
3532 if ((error != 0) && (error != -EINPROGRESS))
3533 pr_debug(" sep - crypto enqueue failed: %x\n",
3534 error);
3535 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3536 sep_dequeuer, (void *)&sep_queue);
3537 if (error1)
3538 pr_debug(" sep - workqueue submit failed: %x\n",
3539 error1);
3540 spin_unlock_irq(&queue_lock);
3541 /* We return result of crypto enqueue */
3542 return error;
3543}
3544
3545static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
3546{
3547 int error;
3548 int error1;
3549 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3550 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3551 crypto_ablkcipher_reqtfm(req));
3552
3553 pr_debug("sep - doing aes cbc decrypt\n");
3554
3555 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3556 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3557
3558 /* Clear out task context */
3559 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3560
3561 ta_ctx->sep_used = sep_dev;
3562 ta_ctx->current_request = AES_CBC;
3563 ta_ctx->current_hash_req = NULL;
3564 ta_ctx->current_cypher_req = req;
3565 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3566 ta_ctx->aes_opmode = SEP_AES_CBC;
3567 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3568 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3569
3570 /* lock necessary so that only one entity touches the queues */
3571 spin_lock_irq(&queue_lock);
3572 error = crypto_enqueue_request(&sep_queue, &req->base);
3573
3574 if ((error != 0) && (error != -EINPROGRESS))
3575 pr_debug(" sep - crypto enqueue failed: %x\n",
3576 error);
3577 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3578 sep_dequeuer, (void *)&sep_queue);
3579 if (error1)
3580 pr_debug(" sep - workqueue submit failed: %x\n",
3581 error1);
3582 spin_unlock_irq(&queue_lock);
3583 /* We return result of crypto enqueue */
3584 return error;
3585}
3586
3587static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3588 unsigned int keylen)
3589{
3590 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3591 struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
3592 u32 *flags = &ctfm->crt_flags;
3593
3594 pr_debug("sep des setkey\n");
3595
3596 switch (keylen) {
3597 case DES_KEY_SIZE:
3598 sctx->des_nbr_keys = DES_KEY_1;
3599 break;
3600 case DES_KEY_SIZE * 2:
3601 sctx->des_nbr_keys = DES_KEY_2;
3602 break;
3603 case DES_KEY_SIZE * 3:
3604 sctx->des_nbr_keys = DES_KEY_3;
3605 break;
3606 default:
3607 pr_debug("invalid key size %x\n",
3608 keylen);
3609 return -EINVAL;
3610 }
3611
3612 if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
3613 (sep_weak_key(key, keylen))) {
3614
3615 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
3616 pr_debug("weak key\n");
3617 return -EINVAL;
3618 }
3619
3620 memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
3621 memcpy(&sctx->key.des.key1, key, keylen);
3622 sctx->keylen = keylen;
3623 /* Indicate to encrypt/decrypt function to send key to SEP */
3624 sctx->key_sent = 0;
3625
3626 return 0;
3627}
3628
3629static int sep_des_ecb_encrypt(struct ablkcipher_request *req)
3630{
3631 int error;
3632 int error1;
3633 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3634
3635 pr_debug("sep - doing des ecb encrypt\n");
3636
3637 /* Clear out task context */
3638 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3639
3640 ta_ctx->sep_used = sep_dev;
3641 ta_ctx->current_request = DES_ECB;
3642 ta_ctx->current_hash_req = NULL;
3643 ta_ctx->current_cypher_req = req;
3644 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3645 ta_ctx->des_opmode = SEP_DES_ECB;
3646 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3647 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3648
3649 /* lock necessary so that only one entity touches the queues */
3650 spin_lock_irq(&queue_lock);
3651 error = crypto_enqueue_request(&sep_queue, &req->base);
3652
3653 if ((error != 0) && (error != -EINPROGRESS))
3654 pr_debug(" sep - crypto enqueue failed: %x\n",
3655 error);
3656 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3657 sep_dequeuer, (void *)&sep_queue);
3658 if (error1)
3659 pr_debug(" sep - workqueue submit failed: %x\n",
3660 error1);
3661 spin_unlock_irq(&queue_lock);
3662 /* We return result of crypto enqueue */
3663 return error;
3664}
3665
3666static int sep_des_ecb_decrypt(struct ablkcipher_request *req)
3667{
3668 int error;
3669 int error1;
3670 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3671
3672 pr_debug("sep - doing des ecb decrypt\n");
3673
3674 /* Clear out task context */
3675 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3676
3677 ta_ctx->sep_used = sep_dev;
3678 ta_ctx->current_request = DES_ECB;
3679 ta_ctx->current_hash_req = NULL;
3680 ta_ctx->current_cypher_req = req;
3681 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3682 ta_ctx->des_opmode = SEP_DES_ECB;
3683 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3684 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3685
3686 /* lock necessary so that only one entity touches the queues */
3687 spin_lock_irq(&queue_lock);
3688 error = crypto_enqueue_request(&sep_queue, &req->base);
3689
3690 if ((error != 0) && (error != -EINPROGRESS))
3691 pr_debug(" sep - crypto enqueue failed: %x\n",
3692 error);
3693 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3694 sep_dequeuer, (void *)&sep_queue);
3695 if (error1)
3696 pr_debug(" sep - workqueue submit failed: %x\n",
3697 error1);
3698 spin_unlock_irq(&queue_lock);
3699 /* We return result of crypto enqueue */
3700 return error;
3701}
3702
3703static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
3704{
3705 int error;
3706 int error1;
3707 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3708
3709 pr_debug("sep - doing des cbc encrypt\n");
3710
3711 /* Clear out task context */
3712 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3713
3714 ta_ctx->sep_used = sep_dev;
3715 ta_ctx->current_request = DES_CBC;
3716 ta_ctx->current_hash_req = NULL;
3717 ta_ctx->current_cypher_req = req;
3718 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3719 ta_ctx->des_opmode = SEP_DES_CBC;
3720 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3721 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3722
3723 /* lock necessary so that only one entity touches the queues */
3724 spin_lock_irq(&queue_lock);
3725 error = crypto_enqueue_request(&sep_queue, &req->base);
3726
3727 if ((error != 0) && (error != -EINPROGRESS))
3728 pr_debug(" sep - crypto enqueue failed: %x\n",
3729 error);
3730 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3731 sep_dequeuer, (void *)&sep_queue);
3732 if (error1)
3733 pr_debug(" sep - workqueue submit failed: %x\n",
3734 error1);
3735 spin_unlock_irq(&queue_lock);
3736 /* We return result of crypto enqueue */
3737 return error;
3738}
3739
3740static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
3741{
3742 int error;
3743 int error1;
3744 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3745
3746 pr_debug("sep - doing des ecb decrypt\n");
3747
3748 /* Clear out task context */
3749 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3750
3751 ta_ctx->sep_used = sep_dev;
3752 ta_ctx->current_request = DES_CBC;
3753 ta_ctx->current_hash_req = NULL;
3754 ta_ctx->current_cypher_req = req;
3755 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3756 ta_ctx->des_opmode = SEP_DES_CBC;
3757 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3758 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3759
3760 /* lock necessary so that only one entity touches the queues */
3761 spin_lock_irq(&queue_lock);
3762 error = crypto_enqueue_request(&sep_queue, &req->base);
3763
3764 if ((error != 0) && (error != -EINPROGRESS))
3765 pr_debug(" sep - crypto enqueue failed: %x\n",
3766 error);
3767 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3768 sep_dequeuer, (void *)&sep_queue);
3769 if (error1)
3770 pr_debug(" sep - workqueue submit failed: %x\n",
3771 error1);
3772 spin_unlock_irq(&queue_lock);
3773 /* We return result of crypto enqueue */
3774 return error;
3775}
3776
3777static struct ahash_alg hash_algs[] = {
3778{
3779 .init = sep_sha1_init,
3780 .update = sep_sha1_update,
3781 .final = sep_sha1_final,
3782 .digest = sep_sha1_digest,
3783 .finup = sep_sha1_finup,
3784 .halg = {
3785 .digestsize = SHA1_DIGEST_SIZE,
3786 .base = {
3787 .cra_name = "sha1",
3788 .cra_driver_name = "sha1-sep",
3789 .cra_priority = 100,
3790 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3791 CRYPTO_ALG_ASYNC,
3792 .cra_blocksize = SHA1_BLOCK_SIZE,
3793 .cra_ctxsize = sizeof(struct sep_system_ctx),
3794 .cra_alignmask = 0,
3795 .cra_module = THIS_MODULE,
3796 .cra_init = sep_hash_cra_init,
3797 .cra_exit = sep_hash_cra_exit,
3798 }
3799 }
3800},
3801{
3802 .init = sep_md5_init,
3803 .update = sep_md5_update,
3804 .final = sep_md5_final,
3805 .digest = sep_md5_digest,
3806 .finup = sep_md5_finup,
3807 .halg = {
3808 .digestsize = MD5_DIGEST_SIZE,
3809 .base = {
3810 .cra_name = "md5",
3811 .cra_driver_name = "md5-sep",
3812 .cra_priority = 100,
3813 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3814 CRYPTO_ALG_ASYNC,
3815 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
3816 .cra_ctxsize = sizeof(struct sep_system_ctx),
3817 .cra_alignmask = 0,
3818 .cra_module = THIS_MODULE,
3819 .cra_init = sep_hash_cra_init,
3820 .cra_exit = sep_hash_cra_exit,
3821 }
3822 }
3823},
3824{
3825 .init = sep_sha224_init,
3826 .update = sep_sha224_update,
3827 .final = sep_sha224_final,
3828 .digest = sep_sha224_digest,
3829 .finup = sep_sha224_finup,
3830 .halg = {
3831 .digestsize = SHA224_DIGEST_SIZE,
3832 .base = {
3833 .cra_name = "sha224",
3834 .cra_driver_name = "sha224-sep",
3835 .cra_priority = 100,
3836 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3837 CRYPTO_ALG_ASYNC,
3838 .cra_blocksize = SHA224_BLOCK_SIZE,
3839 .cra_ctxsize = sizeof(struct sep_system_ctx),
3840 .cra_alignmask = 0,
3841 .cra_module = THIS_MODULE,
3842 .cra_init = sep_hash_cra_init,
3843 .cra_exit = sep_hash_cra_exit,
3844 }
3845 }
3846},
3847{
3848 .init = sep_sha256_init,
3849 .update = sep_sha256_update,
3850 .final = sep_sha256_final,
3851 .digest = sep_sha256_digest,
3852 .finup = sep_sha256_finup,
3853 .halg = {
3854 .digestsize = SHA256_DIGEST_SIZE,
3855 .base = {
3856 .cra_name = "sha256",
3857 .cra_driver_name = "sha256-sep",
3858 .cra_priority = 100,
3859 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3860 CRYPTO_ALG_ASYNC,
3861 .cra_blocksize = SHA256_BLOCK_SIZE,
3862 .cra_ctxsize = sizeof(struct sep_system_ctx),
3863 .cra_alignmask = 0,
3864 .cra_module = THIS_MODULE,
3865 .cra_init = sep_hash_cra_init,
3866 .cra_exit = sep_hash_cra_exit,
3867 }
3868 }
3869}
3870};
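/*
 * Consumer-side usage sketch (assumption: standard kernel crypto API
 * calls, not code from this driver; my_done, my_ctx, sg, out and
 * nbytes are hypothetical caller-owned names). Once the algorithms
 * above are registered, "sha1-sep" is reached through the generic
 * ahash API, e.g.:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, my_ctx);
 *	ahash_request_set_crypt(req, sg, out, nbytes);
 *	crypto_ahash_digest(req);	// ends up in sep_sha1_digest()
 */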
3871
3872static struct crypto_alg crypto_algs[] = {
3873{
3874 .cra_name = "ecb(aes)",
3875 .cra_driver_name = "ecb-aes-sep",
3876 .cra_priority = 100,
3877 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3878 .cra_blocksize = AES_BLOCK_SIZE,
3879 .cra_ctxsize = sizeof(struct sep_system_ctx),
3880 .cra_alignmask = 0,
3881 .cra_type = &crypto_ablkcipher_type,
3882 .cra_module = THIS_MODULE,
3883 .cra_init = sep_crypto_init,
3884 .cra_exit = sep_crypto_exit,
3885 .cra_u.ablkcipher = {
3886 .min_keysize = AES_MIN_KEY_SIZE,
3887 .max_keysize = AES_MAX_KEY_SIZE,
3888 .setkey = sep_aes_setkey,
3889 .encrypt = sep_aes_ecb_encrypt,
3890 .decrypt = sep_aes_ecb_decrypt,
3891 }
3892},
3893{
3894 .cra_name = "cbc(aes)",
3895 .cra_driver_name = "cbc-aes-sep",
3896 .cra_priority = 100,
3897 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3898 .cra_blocksize = AES_BLOCK_SIZE,
3899 .cra_ctxsize = sizeof(struct sep_system_ctx),
3900 .cra_alignmask = 0,
3901 .cra_type = &crypto_ablkcipher_type,
3902 .cra_module = THIS_MODULE,
3903 .cra_init = sep_crypto_init,
3904 .cra_exit = sep_crypto_exit,
3905 .cra_u.ablkcipher = {
3906 .min_keysize = AES_MIN_KEY_SIZE,
3907 .max_keysize = AES_MAX_KEY_SIZE,
3908 .setkey = sep_aes_setkey,
3909 .encrypt = sep_aes_cbc_encrypt,
3910 .ivsize = AES_BLOCK_SIZE,
3911 .decrypt = sep_aes_cbc_decrypt,
3912 }
3913},
3914{
3915	.cra_name = "ecb(des)",
3916	.cra_driver_name = "ecb-des-sep",
3917 .cra_priority = 100,
3918 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3919 .cra_blocksize = DES_BLOCK_SIZE,
3920 .cra_ctxsize = sizeof(struct sep_system_ctx),
3921 .cra_alignmask = 0,
3922 .cra_type = &crypto_ablkcipher_type,
3923 .cra_module = THIS_MODULE,
3924 .cra_init = sep_crypto_init,
3925 .cra_exit = sep_crypto_exit,
3926 .cra_u.ablkcipher = {
3927 .min_keysize = DES_KEY_SIZE,
3928 .max_keysize = DES_KEY_SIZE,
3929 .setkey = sep_des_setkey,
3930 .encrypt = sep_des_ebc_encrypt,
3931 .decrypt = sep_des_ebc_decrypt,
3932 }
3933},
3934{
3935 .cra_name = "cbc(des)",
3936 .cra_driver_name = "cbc-des-sep",
3937 .cra_priority = 100,
3938 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3939 .cra_blocksize = DES_BLOCK_SIZE,
3940 .cra_ctxsize = sizeof(struct sep_system_ctx),
3941 .cra_alignmask = 0,
3942 .cra_type = &crypto_ablkcipher_type,
3943 .cra_module = THIS_MODULE,
3944 .cra_init = sep_crypto_init,
3945 .cra_exit = sep_crypto_exit,
3946 .cra_u.ablkcipher = {
3947 .min_keysize = DES_KEY_SIZE,
3948 .max_keysize = DES_KEY_SIZE,
3949 .setkey = sep_des_setkey,
3950 .encrypt = sep_des_cbc_encrypt,
3951 .ivsize = DES_BLOCK_SIZE,
3952 .decrypt = sep_des_cbc_decrypt,
3953 }
3954},
3955{
3956	.cra_name = "ecb(des3_ede)",
3957	.cra_driver_name = "ecb-des3-ede-sep",
3958 .cra_priority = 100,
3959 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3960 .cra_blocksize = DES_BLOCK_SIZE,
3961 .cra_ctxsize = sizeof(struct sep_system_ctx),
3962 .cra_alignmask = 0,
3963 .cra_type = &crypto_ablkcipher_type,
3964 .cra_module = THIS_MODULE,
3965 .cra_init = sep_crypto_init,
3966 .cra_exit = sep_crypto_exit,
3967 .cra_u.ablkcipher = {
3968 .min_keysize = DES3_EDE_KEY_SIZE,
3969 .max_keysize = DES3_EDE_KEY_SIZE,
3970 .setkey = sep_des_setkey,
3971 .encrypt = sep_des_ebc_encrypt,
3972 .decrypt = sep_des_ebc_decrypt,
3973 }
3974},
3975{
3976	.cra_name = "cbc(des3_ede)",
3977	.cra_driver_name = "cbc-des3-ede-sep",
3978 .cra_priority = 100,
3979 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3980 .cra_blocksize = DES_BLOCK_SIZE,
3981 .cra_ctxsize = sizeof(struct sep_system_ctx),
3982 .cra_alignmask = 0,
3983 .cra_type = &crypto_ablkcipher_type,
3984 .cra_module = THIS_MODULE,
3985 .cra_init = sep_crypto_init,
3986 .cra_exit = sep_crypto_exit,
3987 .cra_u.ablkcipher = {
3988 .min_keysize = DES3_EDE_KEY_SIZE,
3989 .max_keysize = DES3_EDE_KEY_SIZE,
3990 .setkey = sep_des_setkey,
3991	.encrypt = sep_des_cbc_encrypt,
	.ivsize = DES3_EDE_BLOCK_SIZE,
3992 .decrypt = sep_des_cbc_decrypt,
3993 }
3994}
3995};
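
The block ciphers are reached the same way through the ablkcipher interface current in this kernel generation. A hypothetical client sketch follows (the sample_* name is illustrative only; -EINPROGRESS completion handling is again omitted):

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>

	static int sample_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
					  u8 *iv, struct scatterlist *src,
					  struct scatterlist *dst,
					  unsigned int nbytes)
	{
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		int ret;

		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_ablkcipher_setkey(tfm, key, keylen);
		if (ret)
			goto out_free_tfm;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_free_tfm;
		}

		/* iv must be AES_BLOCK_SIZE bytes for cbc(aes) */
		ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
		ret = crypto_ablkcipher_encrypt(req);

		ablkcipher_request_free(req);
	out_free_tfm:
		crypto_free_ablkcipher(tfm);
		return ret;
	}
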
3996
3997int sep_crypto_setup(void)
3998{
3999 int err, i, j, k;
4000 tasklet_init(&sep_dev->finish_tasklet, sep_finish,
4001 (unsigned long)sep_dev);
4002
4003 crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);
4004
4005 sep_dev->workqueue = create_singlethread_workqueue(
4006 "sep_crypto_workqueue");
4007 if (!sep_dev->workqueue) {
4008		dev_warn(&sep_dev->pdev->dev, "can't create workqueue\n");
4009 return -ENOMEM;
4010 }
4011
4012 i = 0;
4013 j = 0;
4014
4015 spin_lock_init(&queue_lock);
4016
4017 err = 0;
4018
4019 for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
4020 err = crypto_register_ahash(&hash_algs[i]);
4021 if (err)
4022 goto err_algs;
4023 }
4024
4025 err = 0;
4026 for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
4027 err = crypto_register_alg(&crypto_algs[j]);
4028 if (err)
4029 goto err_crypto_algs;
4030 }
4031
4032 return err;
4033
4034err_algs:
4035 for (k = 0; k < i; k++)
4036 crypto_unregister_ahash(&hash_algs[k]);
4037 return err;
4038
4039err_crypto_algs:
4040 for (k = 0; k < j; k++)
4041 crypto_unregister_alg(&crypto_algs[k]);
4042 goto err_algs;
4043}
4044
4045void sep_crypto_takedown(void)
4046{
4047
4048 int i;
4049
4050 for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
4051 crypto_unregister_ahash(&hash_algs[i]);
4052 for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
4053 crypto_unregister_alg(&crypto_algs[i]);
4054
4055 tasklet_kill(&sep_dev->finish_tasklet);
4056}
4057
4058#endif
diff --git a/drivers/staging/sep/sep_crypto.h b/drivers/staging/sep/sep_crypto.h
new file mode 100644
index 000000000000..155c3c9b87c2
--- /dev/null
+++ b/drivers/staging/sep/sep_crypto.h
@@ -0,0 +1,359 @@
1/*
2 *
3 * sep_crypto.h - Crypto interface structures
4 *
5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009-2010 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * CONTACTS:
22 *
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 * 2011.02.22 Enable Kernel Crypto
30 *
31 */
32
33/* Constants for SEP (from vendor) */
34#define SEP_START_MSG_TOKEN 0x02558808
35
36#define SEP_DES_IV_SIZE_WORDS 2
37#define SEP_DES_IV_SIZE_BYTES (SEP_DES_IV_SIZE_WORDS * \
38 sizeof(u32))
39#define SEP_DES_KEY_SIZE_WORDS 2
40#define SEP_DES_KEY_SIZE_BYTES (SEP_DES_KEY_SIZE_WORDS * \
41 sizeof(u32))
42#define SEP_DES_BLOCK_SIZE 8
43#define SEP_DES_DUMMY_SIZE 16
44
45#define SEP_DES_INIT_OPCODE 0x10
46#define SEP_DES_BLOCK_OPCODE 0x11
47
48#define SEP_AES_BLOCK_SIZE_WORDS 4
49#define SEP_AES_BLOCK_SIZE_BYTES \
50 (SEP_AES_BLOCK_SIZE_WORDS * sizeof(u32))
51
52#define SEP_AES_DUMMY_BLOCK_SIZE 16
53#define SEP_AES_IV_SIZE_WORDS SEP_AES_BLOCK_SIZE_WORDS
54#define SEP_AES_IV_SIZE_BYTES \
55 (SEP_AES_IV_SIZE_WORDS * sizeof(u32))
56
57#define SEP_AES_KEY_128_SIZE 16
58#define SEP_AES_KEY_192_SIZE 24
59#define SEP_AES_KEY_256_SIZE 32
60#define SEP_AES_KEY_512_SIZE 64
61#define SEP_AES_MAX_KEY_SIZE_WORDS 16
62#define SEP_AES_MAX_KEY_SIZE_BYTES \
63 (SEP_AES_MAX_KEY_SIZE_WORDS * sizeof(u32))
64
65#define SEP_AES_WRAP_MIN_SIZE 8
66#define SEP_AES_WRAP_MAX_SIZE 0x10000000
67
68#define SEP_AES_WRAP_BLOCK_SIZE_WORDS 2
69#define SEP_AES_WRAP_BLOCK_SIZE_BYTES \
70 (SEP_AES_WRAP_BLOCK_SIZE_WORDS * sizeof(u32))
71
72#define SEP_AES_SECRET_RKEK1 0x1
73#define SEP_AES_SECRET_RKEK2 0x2
74
75#define SEP_AES_INIT_OPCODE 0x2
76#define SEP_AES_BLOCK_OPCODE 0x3
77#define SEP_AES_FINISH_OPCODE 0x4
78#define SEP_AES_WRAP_OPCODE 0x6
79#define SEP_AES_UNWRAP_OPCODE 0x7
80#define SEP_AES_XTS_FINISH_OPCODE 0x8
81
82#define SEP_HASH_RESULT_SIZE_WORDS 16
83#define SEP_MD5_DIGEST_SIZE_WORDS 4
84#define SEP_MD5_DIGEST_SIZE_BYTES \
85 (SEP_MD5_DIGEST_SIZE_WORDS * sizeof(u32))
86#define SEP_SHA1_DIGEST_SIZE_WORDS 5
87#define SEP_SHA1_DIGEST_SIZE_BYTES \
88 (SEP_SHA1_DIGEST_SIZE_WORDS * sizeof(u32))
89#define SEP_SHA224_DIGEST_SIZE_WORDS 7
90#define SEP_SHA224_DIGEST_SIZE_BYTES \
91 (SEP_SHA224_DIGEST_SIZE_WORDS * sizeof(u32))
92#define SEP_SHA256_DIGEST_SIZE_WORDS 8
93#define SEP_SHA256_DIGEST_SIZE_BYTES \
94 (SEP_SHA256_DIGEST_SIZE_WORDS * sizeof(u32))
95#define SEP_SHA384_DIGEST_SIZE_WORDS 12
96#define SEP_SHA384_DIGEST_SIZE_BYTES \
97 (SEP_SHA384_DIGEST_SIZE_WORDS * sizeof(u32))
98#define SEP_SHA512_DIGEST_SIZE_WORDS 16
99#define SEP_SHA512_DIGEST_SIZE_BYTES \
100 (SEP_SHA512_DIGEST_SIZE_WORDS * sizeof(u32))
101#define SEP_HASH_BLOCK_SIZE_WORDS 16
102#define SEP_HASH_BLOCK_SIZE_BYTES \
103 (SEP_HASH_BLOCK_SIZE_WORDS * sizeof(u32))
104#define SEP_SHA2_BLOCK_SIZE_WORDS 32
105#define SEP_SHA2_BLOCK_SIZE_BYTES \
106 (SEP_SHA2_BLOCK_SIZE_WORDS * sizeof(u32))
107
108#define SEP_HASH_INIT_OPCODE 0x20
109#define SEP_HASH_UPDATE_OPCODE 0x21
110#define SEP_HASH_FINISH_OPCODE 0x22
111#define SEP_HASH_SINGLE_OPCODE 0x23
112
113#define SEP_HOST_ERROR 0x0b000000
114#define SEP_OK 0x0
115#define SEP_INVALID_START (SEP_HOST_ERROR + 0x3)
116#define SEP_WRONG_OPCODE (SEP_HOST_ERROR + 0x1)
117
118#define SEP_TRANSACTION_WAIT_TIME 5
119
120#define SEP_QUEUE_LENGTH 2
121/* Macros */
122#ifndef __LITTLE_ENDIAN
123#define CHG_ENDIAN(val) \
124 (((val) >> 24) | \
125 (((val) & 0x00FF0000) >> 8) | \
126 (((val) & 0x0000FF00) << 8) | \
127 (((val) & 0x000000FF) << 24))
128#else
129#define CHG_ENDIAN(val) val
130#endif
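
The CHG_ENDIAN macro above byte-swaps a 32-bit word on big-endian builds and is a no-op on little-endian ones, where the host layout already matches SEP's wire format. A hypothetical userspace check of the swap itself:

	#include <assert.h>
	#include <stdint.h>

	#define CHG_ENDIAN(val) \
		(((val) >> 24) | \
		(((val) & 0x00FF0000) >> 8) | \
		(((val) & 0x0000FF00) << 8) | \
		(((val) & 0x000000FF) << 24))

	int main(void)
	{
		/* 0x12 34 56 78 becomes 0x78 56 34 12 */
		assert(CHG_ENDIAN(0x12345678u) == 0x78563412u);
		/* swapping twice is the identity */
		assert(CHG_ENDIAN(CHG_ENDIAN(0xDEADBEEFu)) == 0xDEADBEEFu);
		return 0;
	}
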
131/* Enums for SEP (from vendor) */
132enum des_numkey {
133 DES_KEY_1 = 1,
134 DES_KEY_2 = 2,
135 DES_KEY_3 = 3,
136 SEP_NUMKEY_OPTIONS,
137 SEP_NUMKEY_LAST = 0x7fffffff,
138};
139
140enum des_enc_mode {
141 SEP_DES_ENCRYPT = 0,
142 SEP_DES_DECRYPT = 1,
143 SEP_DES_ENC_OPTIONS,
144 SEP_DES_ENC_LAST = 0x7fffffff,
145};
146
147enum des_op_mode {
148 SEP_DES_ECB = 0,
149 SEP_DES_CBC = 1,
150 SEP_OP_OPTIONS,
151 SEP_OP_LAST = 0x7fffffff,
152};
153
154enum aes_keysize {
155 AES_128 = 0,
156 AES_192 = 1,
157 AES_256 = 2,
158 AES_512 = 3,
159 AES_SIZE_OPTIONS,
160 AEA_SIZE_LAST = 0x7FFFFFFF,
161};
162
163enum aes_enc_mode {
164 SEP_AES_ENCRYPT = 0,
165 SEP_AES_DECRYPT = 1,
166 SEP_AES_ENC_OPTIONS,
167 SEP_AES_ENC_LAST = 0x7FFFFFFF,
168};
169
170enum aes_op_mode {
171 SEP_AES_ECB = 0,
172 SEP_AES_CBC = 1,
173 SEP_AES_MAC = 2,
174 SEP_AES_CTR = 3,
175 SEP_AES_XCBC = 4,
176 SEP_AES_CMAC = 5,
177 SEP_AES_XTS = 6,
178 SEP_AES_OP_OPTIONS,
179 SEP_AES_OP_LAST = 0x7FFFFFFF,
180};
181
182enum hash_op_mode {
183 SEP_HASH_SHA1 = 0,
184 SEP_HASH_SHA224 = 1,
185 SEP_HASH_SHA256 = 2,
186 SEP_HASH_SHA384 = 3,
187 SEP_HASH_SHA512 = 4,
188 SEP_HASH_MD5 = 5,
189 SEP_HASH_OPTIONS,
190 SEP_HASH_LAST_MODE = 0x7FFFFFFF,
191};
192
193/* Structures for SEP (from vendor) */
194struct sep_des_internal_key {
195 u32 key1[SEP_DES_KEY_SIZE_WORDS];
196 u32 key2[SEP_DES_KEY_SIZE_WORDS];
197 u32 key3[SEP_DES_KEY_SIZE_WORDS];
198};
199
200struct sep_des_internal_context {
201 u32 iv_context[SEP_DES_IV_SIZE_WORDS];
202 struct sep_des_internal_key context_key;
203 enum des_numkey nbr_keys;
204 enum des_enc_mode encryption;
205 enum des_op_mode operation;
206 u8 dummy_block[SEP_DES_DUMMY_SIZE];
207};
208
209struct sep_des_private_context {
210 u32 valid_tag;
211 u32 iv;
212 u8 ctx_buf[sizeof(struct sep_des_internal_context)];
213};
214
215/* This is the structure passed to SEP via msg area */
216struct sep_des_key {
217 u32 key1[SEP_DES_KEY_SIZE_WORDS];
218 u32 key2[SEP_DES_KEY_SIZE_WORDS];
219 u32 key3[SEP_DES_KEY_SIZE_WORDS];
220 u32 pad[SEP_DES_KEY_SIZE_WORDS];
221};
222
223struct sep_aes_internal_context {
224 u32 aes_ctx_iv[SEP_AES_IV_SIZE_WORDS];
225 u32 aes_ctx_key[SEP_AES_MAX_KEY_SIZE_WORDS / 2];
226 enum aes_keysize keysize;
227 enum aes_enc_mode encmode;
228 enum aes_op_mode opmode;
229 u8 secret_key;
230 u32 no_add_blocks;
231 u32 last_block_size;
232 u32 last_block[SEP_AES_BLOCK_SIZE_WORDS];
233 u32 prev_iv[SEP_AES_BLOCK_SIZE_WORDS];
234 u32 remaining_size;
235 union {
236 struct {
237 u32 dkey1[SEP_AES_BLOCK_SIZE_WORDS];
238 u32 dkey2[SEP_AES_BLOCK_SIZE_WORDS];
239 u32 dkey3[SEP_AES_BLOCK_SIZE_WORDS];
240 } cmac_data;
241 struct {
242 u32 xts_key[SEP_AES_MAX_KEY_SIZE_WORDS / 2];
243 u32 temp1[SEP_AES_BLOCK_SIZE_WORDS];
244 u32 temp2[SEP_AES_BLOCK_SIZE_WORDS];
245 } xtx_data;
246 } s_data;
247 u8 dummy_block[SEP_AES_DUMMY_BLOCK_SIZE];
248};
249
250struct sep_aes_private_context {
251 u32 valid_tag;
252 u32 aes_iv;
253 u32 op_mode;
254 u8 cbuff[sizeof(struct sep_aes_internal_context)];
255};
256
257struct sep_hash_internal_context {
258 u32 hash_result[SEP_HASH_RESULT_SIZE_WORDS];
259 enum hash_op_mode hash_opmode;
260 u32 previous_data[SEP_SHA2_BLOCK_SIZE_WORDS];
261 u16 prev_update_bytes;
262 u32 total_proc_128bit[4];
263 u16 op_mode_block_size;
264 u8 dummy_aes_block[SEP_AES_DUMMY_BLOCK_SIZE];
265};
266
267struct sep_hash_private_context {
268 u32 valid_tag;
269 u32 iv;
270 u8 internal_context[sizeof(struct sep_hash_internal_context)];
271};
272
273union key_t {
274 struct sep_des_key des;
275 u32 aes[SEP_AES_MAX_KEY_SIZE_WORDS];
276};
277
278/* Context structures for crypto API */
279/**
280 * Structure for this current task context
281 * This same structure is used for both hash
282 * and crypt in order to reduce duplicate code
283 * for stuff that is done for both hash operations
284 * and crypto operations. We cannot trust that the
285 * system context is not pulled out from under
286 * us during operation to operation, so all
287 * critical stuff such as data pointers must
288 * be in a context that is exclusive for this
289 * particular task at hand.
290 */
291struct this_task_ctx {
292 struct sep_device *sep_used;
293 u32 done;
294 unsigned char iv[100];
295 enum des_enc_mode des_encmode;
296 enum des_op_mode des_opmode;
297 enum aes_enc_mode aes_encmode;
298 enum aes_op_mode aes_opmode;
299 u32 init_opcode;
300 u32 block_opcode;
301 size_t data_length;
302 size_t ivlen;
303 struct ablkcipher_walk walk;
304 int i_own_sep; /* Do I have custody of the sep? */
305 struct sep_call_status call_status;
306 struct build_dcb_struct_kernel dcb_input_data;
307 struct sep_dma_context *dma_ctx;
308 void *dmatables_region;
309 size_t nbytes;
310 struct sep_dcblock *dcb_region;
311 struct sep_queue_info *queue_elem;
312 int msg_len_words;
313 unsigned char msg[SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES];
314 void *msgptr;
315 struct scatterlist *src_sg;
316 struct scatterlist *dst_sg;
317 struct scatterlist *src_sg_hold;
318 struct scatterlist *dst_sg_hold;
319 struct ahash_request *current_hash_req;
320 struct ablkcipher_request *current_cypher_req;
321 enum type_of_request current_request;
322 int digest_size_words;
323 int digest_size_bytes;
324 int block_size_words;
325 int block_size_bytes;
326 enum hash_op_mode hash_opmode;
327 enum hash_stage current_hash_stage;
328 /**
329 * Note that this is a pointer. The are_we_done_yet variable is
330 * allocated by the task function. This way, even if the kernel
331 * crypto infrastructure has grabbed the task structure out from
332 * under us, the task function can still see this variable.
333 */
334 int *are_we_done_yet;
335 unsigned long end_time;
336 };
337
338struct sep_system_ctx {
339 union key_t key;
340 size_t keylen;
341 int key_sent;
342 enum des_numkey des_nbr_keys;
343 enum aes_keysize aes_key_size;
344 unsigned long end_time;
345 struct sep_des_private_context des_private_ctx;
346 struct sep_aes_private_context aes_private_ctx;
347 struct sep_hash_private_context hash_private_ctx;
348 };
349
350/* work queue structures */
351struct sep_work_struct {
352 struct work_struct work;
353 void (*callback)(void *);
354 void *data;
355 };
356
357/* Functions */
358int sep_crypto_setup(void);
359void sep_crypto_takedown(void);
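
These two entry points pair up in the device bring-up and teardown paths. A hypothetical sketch of the pairing (the sample_* names are illustrative; in the driver itself the calls are made from the probe and shutdown code):

	#include <linux/printk.h>
	#include "sep_crypto.h"

	static int sample_probe_tail(void)
	{
		int ret = sep_crypto_setup();

		if (ret)
			pr_err("sep: crypto registration failed: %d\n", ret);
		return ret;
	}

	static void sample_remove(void)
	{
		/* Mirror setup: unregister the algs before the device goes away */
		sep_crypto_takedown();
	}
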
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
index 696ab0dd2b79..5f6a07f59dd7 100644
--- a/drivers/staging/sep/sep_dev.h
+++ b/drivers/staging/sep/sep_dev.h
@@ -5,8 +5,8 @@
  *
  * sep_dev.h - Security Processor Device Structures
  *
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -28,6 +28,7 @@
  *
  * CHANGES
  * 2010.09.14 upgrade to Medfield
+ * 2011.02.22 enable kernel crypto
  */
 
 struct sep_device {
@@ -36,33 +37,21 @@ struct sep_device {
 
 	/* character device file */
 	struct cdev sep_cdev;
-	struct cdev sep_daemon_cdev;
-	struct cdev sep_singleton_cdev;
 
 	/* devices (using misc dev) */
 	struct miscdevice miscdev_sep;
-	struct miscdevice miscdev_singleton;
-	struct miscdevice miscdev_daemon;
 
 	/* major / minor numbers of device */
 	dev_t sep_devno;
-	dev_t sep_daemon_devno;
-	dev_t sep_singleton_devno;
-
-	struct mutex sep_mutex;
-	struct mutex ioctl_mutex;
+	/* guards command sent counter */
 	spinlock_t snd_rply_lck;
+	/* guards driver memory usage in fastcall if */
+	struct semaphore sep_doublebuf;
 
 	/* flags to indicate use and lock status of sep */
 	u32 pid_doing_transaction;
 	unsigned long in_use_flags;
 
-	/* request daemon alread open */
-	unsigned long request_daemon_open;
-
-	/* 1 = Moorestown; 0 = Medfield */
-	int mrst;
-
 	/* address of the shared memory allocated during init for SEP driver
 	   (coherent alloc) */
 	dma_addr_t shared_bus;
@@ -74,36 +63,77 @@ struct sep_device {
 	dma_addr_t reg_physical_end;
 	void __iomem *reg_addr;
 
-	/* wait queue head (event) of the driver */
-	wait_queue_head_t event;
-	wait_queue_head_t event_request_daemon;
-	wait_queue_head_t event_mmap;
+	/* wait queue heads of the driver */
+	wait_queue_head_t event_interrupt;
+	wait_queue_head_t event_transactions;
 
-	struct sep_caller_id_entry
-		caller_id_table[SEP_CALLER_ID_TABLE_NUM_ENTRIES];
+	struct list_head sep_queue_status;
+	u32 sep_queue_num;
+	spinlock_t sep_queue_lock;
 
-	/* access flag for singleton device */
-	unsigned long singleton_access_flag;
+	/* Is this in use? */
+	u32 in_use;
+
+	/* indicates whether power save is set up */
+	u32 power_save_setup;
+
+	/* Power state */
+	u32 power_state;
 
 	/* transaction counter that coordinates the
 	   transactions between SEP and HOST */
 	unsigned long send_ct;
 	/* counter for the messages from sep */
 	unsigned long reply_ct;
-	/* counter for the number of bytes allocated in the pool for the
-	   current transaction */
-	long data_pool_bytes_allocated;
 
-	u32 num_of_data_allocations;
+	/* The following are used for kernel crypto client requests */
+	u32 in_kernel; /* Set for kernel client request */
+	struct tasklet_struct finish_tasklet;
+	enum type_of_request current_request;
+	enum hash_stage current_hash_stage;
+	struct ahash_request *current_hash_req;
+	struct ablkcipher_request *current_cypher_req;
+	struct this_task_ctx *ta_ctx;
+	struct workqueue_struct *workqueue;
+};
 
-	/* number of the lli tables created in the current transaction */
-	u32 num_lli_tables_created;
+extern struct sep_device *sep_dev;
 
-	/* number of data control blocks */
-	u32 nr_dcb_creat;
+/**
+ * SEP message header for a transaction
+ * @reserved: reserved memory (two words)
+ * @token: SEP message token
+ * @msg_len: message length
+ * @opcode: message opcode
+ */
+struct sep_msgarea_hdr {
+	u32 reserved[2];
+	u32 token;
+	u32 msg_len;
+	u32 opcode;
+};
 
-	struct sep_dma_resource dma_res_arr[SEP_MAX_NUM_SYNC_DMA_OPS];
+/**
+ * sep_queue_data - data to be maintained in status queue for a transaction
+ * @opcode: transaction opcode
+ * @size: message size
+ * @pid: owner process
+ * @name: owner process name
+ */
+struct sep_queue_data {
+	u32 opcode;
+	u32 size;
+	s32 pid;
+	u8 name[TASK_COMM_LEN];
+};
 
+/**
+ * sep_queue_info - maintains status info of all transactions
+ * @list: head of list
+ * @sep_queue_data: data for transaction
+ */
+struct sep_queue_info {
+	struct list_head list;
+	struct sep_queue_data data;
 };
 
 static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644
index 6b3d156d4140..000000000000
--- a/drivers/staging/sep/sep_driver.c
+++ /dev/null
@@ -1,2932 +0,0 @@
1/*
2 *
3 * sep_driver.c - Security Processor Driver main group of functions
4 *
5 * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009,2010 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * CONTACTS:
22 *
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 *
31 */
32#include <linux/init.h>
33#include <linux/module.h>
34#include <linux/miscdevice.h>
35#include <linux/fs.h>
36#include <linux/cdev.h>
37#include <linux/kdev_t.h>
38#include <linux/mutex.h>
39#include <linux/sched.h>
40#include <linux/mm.h>
41#include <linux/poll.h>
42#include <linux/wait.h>
43#include <linux/pci.h>
44#include <linux/firmware.h>
45#include <linux/slab.h>
46#include <linux/ioctl.h>
47#include <asm/current.h>
48#include <linux/ioport.h>
49#include <linux/io.h>
50#include <linux/interrupt.h>
51#include <linux/pagemap.h>
52#include <asm/cacheflush.h>
53#include <linux/delay.h>
54#include <linux/jiffies.h>
55#include <linux/rar_register.h>
56
57#include "sep_driver_hw_defs.h"
58#include "sep_driver_config.h"
59#include "sep_driver_api.h"
60#include "sep_dev.h"
61
62/*----------------------------------------
63 DEFINES
64-----------------------------------------*/
65
66#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
67
68/*--------------------------------------------
69 GLOBAL variables
70--------------------------------------------*/
71
72/* Keep this a single static object for now to keep the conversion easy */
73
74static struct sep_device *sep_dev;
75
76/**
77 * sep_dump_message - dump the message that is pending
78 * @sep: SEP device
79 */
80static void sep_dump_message(struct sep_device *sep)
81{
82 int count;
83 u32 *p = sep->shared_addr;
84 for (count = 0; count < 12 * 4; count += 4)
85 dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
86 count, *p++);
87}
88
89/**
90 * sep_map_and_alloc_shared_area - allocate shared block
91 * @sep: security processor
92 * The size allocated is taken from sep->shared_size.
93 */
94static int sep_map_and_alloc_shared_area(struct sep_device *sep)
95{
96 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
97 sep->shared_size,
98 &sep->shared_bus, GFP_KERNEL);
99
100 if (!sep->shared_addr) {
101 dev_warn(&sep->pdev->dev,
102 "shared memory dma_alloc_coherent failed\n");
103 return -ENOMEM;
104 }
105 dev_dbg(&sep->pdev->dev,
106 "shared_addr %zx bytes @%p (bus %llx)\n",
107 sep->shared_size, sep->shared_addr,
108 (unsigned long long)sep->shared_bus);
109 return 0;
110}
111
112/**
113 * sep_unmap_and_free_shared_area - free shared block
114 * @sep: security processor
115 */
116static void sep_unmap_and_free_shared_area(struct sep_device *sep)
117{
118 dma_free_coherent(&sep->pdev->dev, sep->shared_size,
119 sep->shared_addr, sep->shared_bus);
120}
121
122/**
123 * sep_shared_bus_to_virt - convert bus/virt addresses
124 * @sep: pointer to struct sep_device
125 * @bus_address: address to convert
126 *
127 * Returns virtual address inside the shared area according
128 * to the bus address.
129 */
130static void *sep_shared_bus_to_virt(struct sep_device *sep,
131 dma_addr_t bus_address)
132{
133 return sep->shared_addr + (bus_address - sep->shared_bus);
134}
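
sep_shared_bus_to_virt() relies on the shared area being one physically contiguous coherent allocation, so the inverse mapping is the same offset arithmetic in the other direction. A sketch of the symmetric helper (the driver defines an equivalent sep_shared_virt_to_bus elsewhere):

	static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
						 void *virt_address)
	{
		return sep->shared_bus + (virt_address - sep->shared_addr);
	}
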
135
136/**
137 * sep_singleton_open - open function for the singleton driver
138 * @inode_ptr: struct inode *
139 * @file_ptr: struct file *
140 *
141 * Called when the user opens the singleton device interface
142 */
143static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
144{
145 struct sep_device *sep;
146
147 /*
148 * Get the SEP device structure and use it for the
149 * private_data field in filp for other methods
150 */
151 sep = sep_dev;
152
153 file_ptr->private_data = sep;
154
155 if (test_and_set_bit(0, &sep->singleton_access_flag))
156 return -EBUSY;
157 return 0;
158}
159
160/**
161 * sep_open - device open method
162 * @inode: inode of SEP device
163 * @filp: file handle to SEP device
164 *
165 * Open method for the SEP device. Called when userspace opens
166 * the SEP device node.
167 *
168 * Returns zero on success otherwise an error code.
169 */
170static int sep_open(struct inode *inode, struct file *filp)
171{
172 struct sep_device *sep;
173
174 /*
175 * Get the SEP device structure and use it for the
176 * private_data field in filp for other methods
177 */
178 sep = sep_dev;
179 filp->private_data = sep;
180
181 /* Anyone can open; locking takes place at transaction level */
182 return 0;
183}
184
185/**
186 * sep_singleton_release - close a SEP singleton device
187 * @inode: inode of SEP device
188 * @filp: file handle being closed
189 *
190 * Called on the final close of a SEP device. As the open protects against
191 * multiple simultaneous opens, this method is called when the
192 * final reference to the open handle is dropped.
193 */
194static int sep_singleton_release(struct inode *inode, struct file *filp)
195{
196 struct sep_device *sep = filp->private_data;
197
198 clear_bit(0, &sep->singleton_access_flag);
199 return 0;
200}
201
202/**
203 * sep_request_daemon_open - request daemon open method
204 * @inode: inode of SEP device
205 * @filp: file handle to SEP device
206 *
207 * Open method for the SEP request daemon. Called when
208 * request daemon in userspace opens the SEP device node.
209 *
210 * Returns zero on success otherwise an error code.
211 */
212static int sep_request_daemon_open(struct inode *inode, struct file *filp)
213{
214 struct sep_device *sep = sep_dev;
215 int error = 0;
216
217 filp->private_data = sep;
218
219 /* There is supposed to be only one request daemon */
220 if (test_and_set_bit(0, &sep->request_daemon_open))
221 error = -EBUSY;
222 return error;
223}
224
225/**
226 * sep_request_daemon_release - close a SEP daemon
227 * @inode: inode of SEP device
228 * @filp: file handle being closed
229 *
230 * Called on the final close of a SEP daemon.
231 */
232static int sep_request_daemon_release(struct inode *inode, struct file *filp)
233{
234 struct sep_device *sep = filp->private_data;
235
236 dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n",
237 current->pid);
238
239 /* Clear the request_daemon_open flag */
240 clear_bit(0, &sep->request_daemon_open);
241 return 0;
242}
243
244/**
245 * sep_req_daemon_send_reply_command_handler - poke the SEP
246 * @sep: struct sep_device *
247 *
248 * This function raises an interrupt to SEP that signals that it has a
249 * new command from the HOST
250 */
251static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
252{
253 unsigned long lck_flags;
254
255 sep_dump_message(sep);
256
257 /* Counters are lockable region */
258 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
259 sep->send_ct++;
260 sep->reply_ct++;
261
262 /* Send the interrupt to SEP */
263 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
264 sep->send_ct++;
265
266 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
267
268 dev_dbg(&sep->pdev->dev,
269 "sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
270 sep->send_ct, sep->reply_ct);
271
272 return 0;
273}
274
275
276/**
277 * sep_free_dma_table_data_handler - free DMA table
278 * @sep: pointer to struct sep_device
279 *
280 * Handles the request to free DMA table for synchronic actions
281 */
282static int sep_free_dma_table_data_handler(struct sep_device *sep)
283{
284 int count;
285 int dcb_counter;
286 /* Pointer to the current dma_resource struct */
287 struct sep_dma_resource *dma;
288
289 for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
290 dma = &sep->dma_res_arr[dcb_counter];
291
292 /* Unmap and free input map array */
293 if (dma->in_map_array) {
294 for (count = 0; count < dma->in_num_pages; count++) {
295 dma_unmap_page(&sep->pdev->dev,
296 dma->in_map_array[count].dma_addr,
297 dma->in_map_array[count].size,
298 DMA_TO_DEVICE);
299 }
300 kfree(dma->in_map_array);
301 }
302
303 /* Unmap output map array, DON'T free it yet */
304 if (dma->out_map_array) {
305 for (count = 0; count < dma->out_num_pages; count++) {
306 dma_unmap_page(&sep->pdev->dev,
307 dma->out_map_array[count].dma_addr,
308 dma->out_map_array[count].size,
309 DMA_FROM_DEVICE);
310 }
311 kfree(dma->out_map_array);
312 }
313
314 /* Free page cache for output */
315 if (dma->in_page_array) {
316 for (count = 0; count < dma->in_num_pages; count++) {
317 flush_dcache_page(dma->in_page_array[count]);
318 page_cache_release(dma->in_page_array[count]);
319 }
320 kfree(dma->in_page_array);
321 }
322
323 if (dma->out_page_array) {
324 for (count = 0; count < dma->out_num_pages; count++) {
325 if (!PageReserved(dma->out_page_array[count]))
326 SetPageDirty(dma->out_page_array[count]);
327 flush_dcache_page(dma->out_page_array[count]);
328 page_cache_release(dma->out_page_array[count]);
329 }
330 kfree(dma->out_page_array);
331 }
332
333 /* Reset all the values */
334 dma->in_page_array = NULL;
335 dma->out_page_array = NULL;
336 dma->in_num_pages = 0;
337 dma->out_num_pages = 0;
338 dma->in_map_array = NULL;
339 dma->out_map_array = NULL;
340 dma->in_map_num_entries = 0;
341 dma->out_map_num_entries = 0;
342 }
343
344 sep->nr_dcb_creat = 0;
345 sep->num_lli_tables_created = 0;
346
347 return 0;
348}
349
350/**
351 * sep_request_daemon_mmap - maps the shared area to user space
352 * @filp: pointer to struct file
353 * @vma: pointer to vm_area_struct
354 *
355 * Called by the kernel when the daemon attempts an mmap() syscall
356 * using our handle.
357 */
358static int sep_request_daemon_mmap(struct file *filp,
359 struct vm_area_struct *vma)
360{
361 struct sep_device *sep = filp->private_data;
362 dma_addr_t bus_address;
363 int error = 0;
364
365 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
366 error = -EINVAL;
367 goto end_function;
368 }
369
370 /* Get physical address */
371 bus_address = sep->shared_bus;
372
373 if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
374 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
375
376 dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
377 error = -EAGAIN;
378 goto end_function;
379 }
380
381end_function:
382 return error;
383}
384
385/**
386 * sep_request_daemon_poll - poll implementation
387 * @sep: struct sep_device * for current SEP device
388 * @filp: struct file * for open file
389 * @wait: poll_table * for poll
390 *
391 * Called when our device is part of a poll() or select() syscall
392 */
393static unsigned int sep_request_daemon_poll(struct file *filp,
394 poll_table *wait)
395{
396 u32 mask = 0;
397 /* GPR2 register */
398 u32 retval2;
399 unsigned long lck_flags;
400 struct sep_device *sep = filp->private_data;
401
402 poll_wait(filp, &sep->event_request_daemon, wait);
403
404 dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
405 sep->send_ct, sep->reply_ct);
406
407 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
408 /* Check if the data is ready */
409 if (sep->send_ct == sep->reply_ct) {
410 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
411
412 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
413 dev_dbg(&sep->pdev->dev,
414 "daemon poll: data check (GPR2) is %x\n", retval2);
415
416 /* Check if PRINT request */
417 if ((retval2 >> 30) & 0x1) {
418 dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
419 mask |= POLLIN;
420 goto end_function;
421 }
422 /* Check if NVS request */
423 if (retval2 >> 31) {
424 dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
425 mask |= POLLPRI | POLLWRNORM;
426 }
427 } else {
428 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
429 dev_dbg(&sep->pdev->dev,
430 "daemon poll: no reply received; returning 0\n");
431 mask = 0;
432 }
433end_function:
434 return mask;
435}
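
On the other side of this interface, the userspace request daemon multiplexes on exactly the two conditions computed above: POLLIN for a printf request and POLLPRI for an NVS request. A hypothetical userspace skeleton (the device node path is an assumption for illustration):

	#include <fcntl.h>
	#include <poll.h>

	static void daemon_loop(const char *devnode)	/* path of the daemon node */
	{
		struct pollfd pfd;

		pfd.fd = open(devnode, O_RDWR);
		if (pfd.fd < 0)
			return;
		pfd.events = POLLIN | POLLPRI;

		for (;;) {
			if (poll(&pfd, 1, -1) <= 0)
				continue;
			if (pfd.revents & POLLIN)
				;	/* service the SEP printf request */
			if (pfd.revents & POLLPRI)
				;	/* service the NVS request */
		}
	}
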
436
437/**
438 * sep_release - close a SEP device
439 * @inode: inode of SEP device
440 * @filp: file handle being closed
441 *
442 * Called on the final close of a SEP device.
443 */
444static int sep_release(struct inode *inode, struct file *filp)
445{
446 struct sep_device *sep = filp->private_data;
447
448 dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);
449
450 mutex_lock(&sep->sep_mutex);
451 /* Is this the process that has a transaction open?
452 * If so, lets reset pid_doing_transaction to 0 and
453 * clear the in use flags, and then wake up sep_event
454 * so that other processes can do transactions
455 */
456 if (sep->pid_doing_transaction == current->pid) {
457 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
458 clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
459 sep_free_dma_table_data_handler(sep);
460 wake_up(&sep->event);
461 sep->pid_doing_transaction = 0;
462 }
463
464 mutex_unlock(&sep->sep_mutex);
465 return 0;
466}
467
468/**
469 * sep_mmap - maps the shared area to user space
470 * @filp: pointer to struct file
471 * @vma: pointer to vm_area_struct
472 *
473 * Called on an mmap of our space via the normal SEP device
474 */
475static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
476{
477 dma_addr_t bus_addr;
478 struct sep_device *sep = filp->private_data;
479 unsigned long error = 0;
480
481 /* Set the transaction busy (own the device) */
482 wait_event_interruptible(sep->event,
483 test_and_set_bit(SEP_MMAP_LOCK_BIT,
484 &sep->in_use_flags) == 0);
485
486 if (signal_pending(current)) {
487 error = -EINTR;
488 goto end_function_with_error;
489 }
490 /*
491 * The pid_doing_transaction indicates that this process
492	 * now owns the facilities to perform a transaction with
493 * the SEP. While this process is performing a transaction,
494 * no other process who has the SEP device open can perform
495 * any transactions. This method allows more than one process
496 * to have the device open at any given time, which provides
497 * finer granularity for device utilization by multiple
498 * processes.
499 */
500 mutex_lock(&sep->sep_mutex);
501 sep->pid_doing_transaction = current->pid;
502 mutex_unlock(&sep->sep_mutex);
503
504	/* Zero the pools and the number of data pool allocation pointers */
505 sep->data_pool_bytes_allocated = 0;
506 sep->num_of_data_allocations = 0;
507
508 /*
509 * Check that the size of the mapped range is as the size of the message
510 * shared area
511 */
512 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
513 error = -EINVAL;
514 goto end_function_with_error;
515 }
516
517 dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);
518
519 /* Get bus address */
520 bus_addr = sep->shared_bus;
521
522 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
523 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
524 dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
525 error = -EAGAIN;
526 goto end_function_with_error;
527 }
528 goto end_function;
529
530end_function_with_error:
531 /* Clear the bit */
532 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
533 mutex_lock(&sep->sep_mutex);
534 sep->pid_doing_transaction = 0;
535 mutex_unlock(&sep->sep_mutex);
536
537	/* Raise event for stuck contexts */
538
539 wake_up(&sep->event);
540
541end_function:
542 return error;
543}
544
545/**
546 * sep_poll - poll handler
547 * @filp: pointer to struct file
548 * @wait: pointer to poll_table
549 *
550 * Called by the OS when the kernel is asked to do a poll on
551 * a SEP file handle.
552 */
553static unsigned int sep_poll(struct file *filp, poll_table *wait)
554{
555 u32 mask = 0;
556 u32 retval = 0;
557 u32 retval2 = 0;
558 unsigned long lck_flags;
559
560 struct sep_device *sep = filp->private_data;
561
562 /* Am I the process that owns the transaction? */
563 mutex_lock(&sep->sep_mutex);
564 if (current->pid != sep->pid_doing_transaction) {
565 dev_dbg(&sep->pdev->dev, "poll; wrong pid\n");
566 mask = POLLERR;
567 mutex_unlock(&sep->sep_mutex);
568 goto end_function;
569 }
570 mutex_unlock(&sep->sep_mutex);
571
572 /* Check if send command or send_reply were activated previously */
573 if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
574 mask = POLLERR;
575 goto end_function;
576 }
577
578 /* Add the event to the polling wait table */
579 dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");
580
581 poll_wait(filp, &sep->event, wait);
582
583 dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
584 sep->send_ct, sep->reply_ct);
585
586 /* Check if error occurred during poll */
587 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
588 if (retval2 != 0x0) {
589 dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
590 mask |= POLLERR;
591 goto end_function;
592 }
593
594 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
595
596 if (sep->send_ct == sep->reply_ct) {
597 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
598 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
599 dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n",
600 retval);
601
602 /* Check if printf request */
603 if ((retval >> 30) & 0x1) {
604 dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n");
605 wake_up(&sep->event_request_daemon);
606 goto end_function;
607 }
608
609		/* Check if this is a SEP reply or a request */
610 if (retval >> 31) {
611 dev_dbg(&sep->pdev->dev, "poll: SEP request\n");
612 wake_up(&sep->event_request_daemon);
613 } else {
614 dev_dbg(&sep->pdev->dev, "poll: normal return\n");
615			/* In case it is set again by send_reply_command */
616 clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
617 sep_dump_message(sep);
618 dev_dbg(&sep->pdev->dev,
619 "poll; SEP reply POLLIN | POLLRDNORM\n");
620 mask |= POLLIN | POLLRDNORM;
621 }
622 } else {
623 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
624 dev_dbg(&sep->pdev->dev,
625 "poll; no reply received; returning mask of 0\n");
626 mask = 0;
627 }
628
629end_function:
630 return mask;
631}
632
633/**
634 * sep_time_address - address in SEP memory of time
635 * @sep: SEP device we want the address from
636 *
637 * Return the address of the two dwords in memory used for time
638 * setting.
639 */
640static u32 *sep_time_address(struct sep_device *sep)
641{
642 return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
643}
644
645/**
646 * sep_set_time - set the SEP time
647 * @sep: the SEP we are setting the time for
648 *
649 * Calculates time and sets it at the predefined address.
650 * Called with the SEP mutex held.
651 */
652static unsigned long sep_set_time(struct sep_device *sep)
653{
654 struct timeval time;
655 u32 *time_addr; /* Address of time as seen by the kernel */
656
657
658 do_gettimeofday(&time);
659
660 /* Set value in the SYSTEM MEMORY offset */
661 time_addr = sep_time_address(sep);
662
663 time_addr[0] = SEP_TIME_VAL_TOKEN;
664 time_addr[1] = time.tv_sec;
665
666 dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
667 dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
668 dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);
669
670 return time.tv_sec;
671}
672
673/**
674 * sep_set_caller_id_handler - insert caller id entry
675 * @sep: SEP device
676 * @arg: pointer to struct caller_id_struct
677 *
678 * Inserts the data into the caller id table. Note that this function
679 * falls under the ioctl lock
680 */
681static int sep_set_caller_id_handler(struct sep_device *sep, unsigned long arg)
682{
683 void __user *hash;
684 int error = 0;
685 int i;
686 struct caller_id_struct command_args;
687
688 for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
689 if (sep->caller_id_table[i].pid == 0)
690 break;
691 }
692
693 if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
694 dev_dbg(&sep->pdev->dev, "no more caller id entries left\n");
695 dev_dbg(&sep->pdev->dev, "maximum number is %d\n",
696 SEP_CALLER_ID_TABLE_NUM_ENTRIES);
697 error = -EUSERS;
698 goto end_function;
699 }
700
701 /* Copy the data */
702 if (copy_from_user(&command_args, (void __user *)arg,
703 sizeof(command_args))) {
704 error = -EFAULT;
705 goto end_function;
706 }
707
708 hash = (void __user *)(unsigned long)command_args.callerIdAddress;
709
710 if (!command_args.pid || !command_args.callerIdSizeInBytes) {
711 error = -EINVAL;
712 goto end_function;
713 }
714
715 dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
716 dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
717 command_args.callerIdSizeInBytes);
718
719 if (command_args.callerIdSizeInBytes >
720 SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
721 error = -EMSGSIZE;
722 goto end_function;
723 }
724
725 sep->caller_id_table[i].pid = command_args.pid;
726
727 if (copy_from_user(sep->caller_id_table[i].callerIdHash,
728 hash, command_args.callerIdSizeInBytes))
729 error = -EFAULT;
730end_function:
731 return error;
732}
733
734/**
735 * sep_set_current_caller_id - set the caller id
736 * @sep: pointer to struct_sep_device
737 *
738 * Set the caller ID (if it exists) to the SEP. Note that this
739 * function falls under the ioctl lock
740 */
741static int sep_set_current_caller_id(struct sep_device *sep)
742{
743 int i;
744 u32 *hash_buf_ptr;
745
746 /* Zero the previous value */
747 memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
748 0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
749
750 for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
751 if (sep->caller_id_table[i].pid == current->pid) {
752 dev_dbg(&sep->pdev->dev, "Caller Id found\n");
753
754 memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
755 (void *)(sep->caller_id_table[i].callerIdHash),
756 SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
757 break;
758 }
759 }
760 /* Ensure data is in little endian */
761 hash_buf_ptr = (u32 *)sep->shared_addr +
762 SEP_CALLER_ID_OFFSET_BYTES;
763
764 for (i = 0; i < SEP_CALLER_ID_HASH_SIZE_IN_WORDS; i++)
765 hash_buf_ptr[i] = cpu_to_le32(hash_buf_ptr[i]);
766
767 return 0;
768}
769
770/**
771 * sep_send_command_handler - kick off a command
772 * @sep: SEP being signalled
773 *
774 * This function raises an interrupt to SEP that signals that it has a new
775 * command from the host
776 *
777 * Note that this function does fall under the ioctl lock
778 */
779static int sep_send_command_handler(struct sep_device *sep)
780{
781 unsigned long lck_flags;
782 int error = 0;
783
784 if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
785 error = -EPROTO;
786 goto end_function;
787 }
788 sep_set_time(sep);
789
790 sep_set_current_caller_id(sep);
791
792 sep_dump_message(sep);
793
794 /* Update counter */
795 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
796 sep->send_ct++;
797 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
798
799 dev_dbg(&sep->pdev->dev,
800 "sep_send_command_handler send_ct %lx reply_ct %lx\n",
801 sep->send_ct, sep->reply_ct);
802
803 /* Send interrupt to SEP */
804 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
805
806end_function:
807 return error;
808}
809
810/**
811 * sep_allocate_data_pool_memory_handler -allocate pool memory
812 * @sep: pointer to struct sep_device
813 * @arg: pointer to struct alloc_struct
814 *
815 * This function handles the allocate data pool memory request
816 * This function calculates the bus address of the
817 * allocated memory, and the offset of this area from the mapped address.
818 * Therefore, the FVOs in user space can calculate the exact virtual
819 * address of this allocated memory
820 */
821static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
822 unsigned long arg)
823{
824 int error = 0;
825 struct alloc_struct command_args;
826
827 /* Holds the allocated buffer address in the system memory pool */
828 u32 *token_addr;
829
830 if (copy_from_user(&command_args, (void __user *)arg,
831 sizeof(struct alloc_struct))) {
832 error = -EFAULT;
833 goto end_function;
834 }
835
836 /* Allocate memory */
837 if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
838 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
839 error = -ENOMEM;
840 goto end_function;
841 }
842
843 dev_dbg(&sep->pdev->dev,
844 "data pool bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
845 dev_dbg(&sep->pdev->dev,
846 "offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);
847 /* Set the virtual and bus address */
848 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
849 sep->data_pool_bytes_allocated;
850
851 /* Place in the shared area that is known by the SEP */
852 token_addr = (u32 *)(sep->shared_addr +
853 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
854 (sep->num_of_data_allocations)*2*sizeof(u32));
855
856 token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
857 token_addr[1] = (u32)sep->shared_bus +
858 SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
859 sep->data_pool_bytes_allocated;
860
861 /* Write the memory back to the user space */
862	error = copy_to_user((void __user *)arg, (void *)&command_args,
863 sizeof(struct alloc_struct));
864 if (error) {
865 error = -EFAULT;
866 goto end_function;
867 }
868
869 /* Update the allocation */
870 sep->data_pool_bytes_allocated += command_args.num_bytes;
871 sep->num_of_data_allocations += 1;
872
873end_function:
874 return error;
875}
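
The returned offset is all userspace needs to find its allocation: it indexes into the same shared area the caller has already mmap()ed. A hypothetical userspace sketch (struct alloc_struct and the ioctl number come from the driver's API header, which is not part of this hunk; the ioctl name below is an assumption):

	#include <stdint.h>
	#include <sys/ioctl.h>

	/* assumed: SEP_IOCALLOCDATAPOLL and struct alloc_struct are taken
	 * from sep_driver_api.h (not shown in this diff) */
	static void *pool_alloc(int fd, uint8_t *mapped_base, uint32_t num_bytes)
	{
		struct alloc_struct args = { .num_bytes = num_bytes };

		if (ioctl(fd, SEP_IOCALLOCDATAPOLL, &args) < 0)
			return NULL;
		/* offset is relative to the start of the mmap()ed shared area */
		return mapped_base + args.offset;
	}
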
876
877/**
878 * sep_lock_kernel_pages - map kernel pages for DMA
879 * @sep: pointer to struct sep_device
880 * @kernel_virt_addr: address of data buffer in kernel
881 * @data_size: size of data
882 * @lli_array_ptr: lli array
883 * @in_out_flag: input into device or output from device
884 *
885 * This function locks all the physical pages of the kernel virtual buffer
886 * and constructs a basic lli array, where each entry holds the physical
887 * page address and the size that application data holds in this page
888 * This function is used only during kernel crypto mod calls from within
889 * the kernel (when ioctl is not used)
890 */
891static int sep_lock_kernel_pages(struct sep_device *sep,
892 unsigned long kernel_virt_addr,
893 u32 data_size,
894 struct sep_lli_entry **lli_array_ptr,
895 int in_out_flag)
896
897{
898 int error = 0;
899 /* Array of lli */
900 struct sep_lli_entry *lli_array;
901 /* Map array */
902 struct sep_dma_map *map_array;
903
904 dev_dbg(&sep->pdev->dev, "lock kernel pages kernel_virt_addr is %08lx\n",
905 (unsigned long)kernel_virt_addr);
906 dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
907
908 lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
909 if (!lli_array) {
910 error = -ENOMEM;
911 goto end_function;
912 }
913 map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
914 if (!map_array) {
915 error = -ENOMEM;
916 goto end_function_with_error;
917 }
918
919 map_array[0].dma_addr =
920 dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
921 data_size, DMA_BIDIRECTIONAL);
922 map_array[0].size = data_size;
923
924
925 /*
926 * Set the start address of the first page - app data may start not at
927 * the beginning of the page
928 */
929 lli_array[0].bus_address = (u32)map_array[0].dma_addr;
930 lli_array[0].block_size = map_array[0].size;
931
932 dev_dbg(&sep->pdev->dev,
933 "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
934 (unsigned long)lli_array[0].bus_address,
935 lli_array[0].block_size);
936
937 /* Set the output parameters */
938 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
939 *lli_array_ptr = lli_array;
940 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
941 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
942 sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
943 sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
944 } else {
945 *lli_array_ptr = lli_array;
946 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
947 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
948 sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
949 sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
950 }
951 goto end_function;
952
953end_function_with_error:
954 kfree(lli_array);
955
956end_function:
957 return error;
958}
959
960/**
961 * sep_lock_user_pages - lock and map user pages for DMA
962 * @sep: pointer to struct sep_device
963 * @app_virt_addr: user memory data buffer
964 * @data_size: size of data buffer
965 * @lli_array_ptr: lli array
966 * @in_out_flag: input or output to device
967 *
968 * This function locks all the physical pages of the application
969 * virtual buffer and constructs a basic lli array, where each entry
970 * holds the physical page address and the size that application
971 * data holds in these physical pages
972 */
973static int sep_lock_user_pages(struct sep_device *sep,
974 u32 app_virt_addr,
975 u32 data_size,
976 struct sep_lli_entry **lli_array_ptr,
977 int in_out_flag)
978
979{
980 int error = 0;
981 u32 count;
982 int result;
983	/* The page of the end address of the user space buffer */
984 u32 end_page;
985 /* The page of the start address of the user space buffer */
986 u32 start_page;
987 /* The range in pages */
988 u32 num_pages;
989 /* Array of pointers to page */
990 struct page **page_array;
991 /* Array of lli */
992 struct sep_lli_entry *lli_array;
993 /* Map array */
994 struct sep_dma_map *map_array;
995 /* Direction of the DMA mapping for locked pages */
996 enum dma_data_direction dir;
997
998 /* Set start and end pages and num pages */
999 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1000 start_page = app_virt_addr >> PAGE_SHIFT;
1001 num_pages = end_page - start_page + 1;
1002
1003 dev_dbg(&sep->pdev->dev, "lock user pages app_virt_addr is %x\n", app_virt_addr);
1004 dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
1005 dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
1006 dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
1007 dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);
1008
1009 /* Allocate array of pages structure pointers */
1010 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
1011 if (!page_array) {
1012 error = -ENOMEM;
1013 goto end_function;
1014 }
1015 map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
1016 if (!map_array) {
1017 dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
1018 error = -ENOMEM;
1019 goto end_function_with_error1;
1020 }
1021
1022 lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
1023 GFP_ATOMIC);
1024
1025 if (!lli_array) {
1026 dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
1027 error = -ENOMEM;
1028 goto end_function_with_error2;
1029 }
1030
1031 /* Convert the application virtual address into a set of physical */
1032 down_read(&current->mm->mmap_sem);
1033 result = get_user_pages(current, current->mm, app_virt_addr,
1034 num_pages,
1035 ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
1036 0, page_array, NULL);
1037
1038 up_read(&current->mm->mmap_sem);
1039
1040 /* Check the number of pages locked - if not all then exit with error */
1041 if (result != num_pages) {
1042 dev_warn(&sep->pdev->dev,
1043 "not all pages locked by get_user_pages\n");
1044 error = -ENOMEM;
1045 goto end_function_with_error3;
1046 }
1047
1048 dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");
1049
1050 /* Set direction */
1051 if (in_out_flag == SEP_DRIVER_IN_FLAG)
1052 dir = DMA_TO_DEVICE;
1053 else
1054 dir = DMA_FROM_DEVICE;
1055
1056 /*
1057 * Fill the array using page array data and
1058 * map the pages - this action will also flush the cache as needed
1059 */
1060 for (count = 0; count < num_pages; count++) {
1061 /* Fill the map array */
1062 map_array[count].dma_addr =
1063 dma_map_page(&sep->pdev->dev, page_array[count],
1064 0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL);
1065
1066 map_array[count].size = PAGE_SIZE;
1067
1068 /* Fill the lli array entry */
1069 lli_array[count].bus_address = (u32)map_array[count].dma_addr;
1070 lli_array[count].block_size = PAGE_SIZE;
1071
1072 dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
1073 count, (unsigned long)lli_array[count].bus_address,
1074 count, lli_array[count].block_size);
1075 }
1076
1077 /* Check the offset for the first page */
1078 lli_array[0].bus_address =
1079 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1080
1081	/* Check whether all the data fits in the first page only */
1082 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1083 lli_array[0].block_size = data_size;
1084 else
1085 lli_array[0].block_size =
1086 PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1087
1088 dev_dbg(&sep->pdev->dev,
1089 "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
1090		(unsigned long)lli_array[0].bus_address,
1091		lli_array[0].block_size);
1092
1093 /* Check the size of the last page */
1094 if (num_pages > 1) {
1095 lli_array[num_pages - 1].block_size =
1096 (app_virt_addr + data_size) & (~PAGE_MASK);
1097 if (lli_array[num_pages - 1].block_size == 0)
1098 lli_array[num_pages - 1].block_size = PAGE_SIZE;
1099
1100 dev_warn(&sep->pdev->dev,
1101 "lli_array[%x].bus_address is "
1102 "%08lx, lli_array[%x].block_size is %x\n",
1103 num_pages - 1,
1104 (unsigned long)lli_array[num_pages - 1].bus_address,
1105 num_pages - 1,
1106 lli_array[num_pages - 1].block_size);
1107 }
1108
1109 /* Set output params according to the in_out flag */
1110 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1111 *lli_array_ptr = lli_array;
1112 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
1113 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
1114 sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
1115 sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
1116 num_pages;
1117 } else {
1118 *lli_array_ptr = lli_array;
1119 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
1120 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
1121 page_array;
1122 sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
1123 sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
1124 num_pages;
1125 }
1126 goto end_function;
1127
1128end_function_with_error3:
1129 /* Free lli array */
1130 kfree(lli_array);
1131
1132end_function_with_error2:
1133 kfree(map_array);
1134
1135end_function_with_error1:
1136 /* Free page array */
1137 kfree(page_array);
1138
1139end_function:
1140 return error;
1141}
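
The start/end page arithmetic above deliberately counts partial pages at both ends. A quick self-contained check with hypothetical numbers (4 KiB pages assumed):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		const uint32_t page_shift = 12;		/* assumed 4 KiB pages */
		const uint32_t page_size = 1u << page_shift;
		const uint32_t addr = 0x1000ff0, size = 0x30;

		uint32_t start_page = addr >> page_shift;		/* 0x1000 */
		uint32_t end_page = (addr + size - 1) >> page_shift;	/* 0x1001 */
		uint32_t num_pages = end_page - start_page + 1;		/* 2 */

		/* the same first/last block sizing the function applies */
		uint32_t first_block = page_size - (addr & (page_size - 1)); /* 0x10 */
		uint32_t last_block = (addr + size) & (page_size - 1);	     /* 0x20 */

		assert(num_pages == 2);
		assert(first_block + last_block == size);
		return 0;
	}
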
1142
1143/**
1144 * sep_calculate_lli_table_max_size - size the LLI table
1145 * @sep: pointer to struct sep_device
1146 * @lli_in_array_ptr: input lli array
1147 * @num_array_entries: number of entries in the lli array
1148 * @last_table_flag: set to 1 when this is the last table
1149 *
1150 * This function calculates the size of data that can be inserted into
1151 * the lli table from this array, such that either the table is full
1152 * (all entries are entered), or there are no more entries in the
1153 * lli array
1154 */
1155static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
1156 struct sep_lli_entry *lli_in_array_ptr,
1157 u32 num_array_entries,
1158 u32 *last_table_flag)
1159{
1160 u32 counter;
1161 /* Table data size */
1162 u32 table_data_size = 0;
1163 /* Data size for the next table */
1164 u32 next_table_data_size;
1165
1166 *last_table_flag = 0;
1167
1168 /*
1169	 * Calculate the data in the output lli table until we fill the whole
1170	 * table or until the data has ended
1171 */
1172 for (counter = 0;
1173 (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
1174 (counter < num_array_entries); counter++)
1175 table_data_size += lli_in_array_ptr[counter].block_size;
1176
1177 /*
1178 * Check if we reached the last entry,
1179	 * meaning this is the last table to build,
1180	 * and there is no need to check the block alignment
1181 */
1182 if (counter == num_array_entries) {
1183 /* Set the last table flag */
1184 *last_table_flag = 1;
1185 goto end_function;
1186 }
1187
1188 /*
1189 * Calculate the data size of the next table.
1190	 * Stop if no entries are left or if the data size exceeds the DMA restriction
1191 */
1192 next_table_data_size = 0;
1193 for (; counter < num_array_entries; counter++) {
1194 next_table_data_size += lli_in_array_ptr[counter].block_size;
1195 if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1196 break;
1197 }
1198
1199 /*
1200	 * Check if the next table data size is less than the DMA restriction.
1201	 * If it is, recalculate the current table size so that the next
1202	 * table data size will be adequate for DMA
1203 */
1204 if (next_table_data_size &&
1205 next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1206
1207 table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
1208 next_table_data_size);
1209
1210end_function:
1211 return table_data_size;
1212}
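
/*
 * A worked example with hypothetical numbers: if a table holds up to 23
 * data entries and SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE is 16KB, then when
 * the blocks left over after filling this table sum to only 10KB, the
 * current table gives up (16KB - 10KB) = 6KB of data so that the next
 * table still meets the DMA minimum.
 */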
1213
1214/**
1215 * sep_build_lli_table - build an lli array for the given table
1216 * @sep: pointer to struct sep_device
1217 * @lli_array_ptr: pointer to lli array
1218 * @lli_table_ptr: pointer to lli table
1219 * @num_processed_entries_ptr: pointer to the number of array entries processed
1220 * @num_table_entries_ptr: pointer to the number of entries in the built table
1221 * @table_data_size: total data size
1222 *
1223 * Builds an lli table from the lli_array according to
1224 * the given size of data
1225 */
1226static void sep_build_lli_table(struct sep_device *sep,
1227 struct sep_lli_entry *lli_array_ptr,
1228 struct sep_lli_entry *lli_table_ptr,
1229 u32 *num_processed_entries_ptr,
1230 u32 *num_table_entries_ptr,
1231 u32 table_data_size)
1232{
1233 /* Current table data size */
1234 u32 curr_table_data_size;
1235 /* Counter of lli array entry */
1236 u32 array_counter;
1237
1238 /* Init current table data size and lli array entry counter */
1239 curr_table_data_size = 0;
1240 array_counter = 0;
1241 *num_table_entries_ptr = 1;
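	/* The count starts at 1 to account for the trailing info entry */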
1242
1243 dev_dbg(&sep->pdev->dev, "build lli table table_data_size is %x\n", table_data_size);
1244
1245	/* Fill the table until the table size reaches the needed amount */
1246 while (curr_table_data_size < table_data_size) {
1247 /* Update the number of entries in table */
1248 (*num_table_entries_ptr)++;
1249
1250 lli_table_ptr->bus_address =
1251 cpu_to_le32(lli_array_ptr[array_counter].bus_address);
1252
1253 lli_table_ptr->block_size =
1254 cpu_to_le32(lli_array_ptr[array_counter].block_size);
1255
1256 curr_table_data_size += lli_array_ptr[array_counter].block_size;
1257
1258 dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
1259 lli_table_ptr);
1260 dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
1261 (unsigned long)lli_table_ptr->bus_address);
1262 dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
1263 lli_table_ptr->block_size);
1264
1265 /* Check for overflow of the table data */
1266 if (curr_table_data_size > table_data_size) {
1267 dev_dbg(&sep->pdev->dev,
1268 "curr_table_data_size too large\n");
1269
1270 /* Update the size of block in the table */
1271 lli_table_ptr->block_size -=
1272 cpu_to_le32((curr_table_data_size - table_data_size));
1273
1274 /* Update the physical address in the lli array */
1275 lli_array_ptr[array_counter].bus_address +=
1276 cpu_to_le32(lli_table_ptr->block_size);
1277
1278 /* Update the block size left in the lli array */
1279 lli_array_ptr[array_counter].block_size =
1280 (curr_table_data_size - table_data_size);
1281 } else
1282 /* Advance to the next entry in the lli_array */
1283 array_counter++;
1284
1285 dev_dbg(&sep->pdev->dev,
1286 "lli_table_ptr->bus_address is %08lx\n",
1287 (unsigned long)lli_table_ptr->bus_address);
1288 dev_dbg(&sep->pdev->dev,
1289 "lli_table_ptr->block_size is %x\n",
1290 lli_table_ptr->block_size);
1291
1292 /* Move to the next entry in table */
1293 lli_table_ptr++;
1294 }
1295
1296 /* Set the info entry to default */
1297 lli_table_ptr->bus_address = 0xffffffff;
1298 lli_table_ptr->block_size = 0;
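	/*
	 * The 0xffffffff terminator may later be overwritten by the caller
	 * with the bus address of the next table, chaining the tables.
	 */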
1299
1300 /* Set the output parameter */
1301 *num_processed_entries_ptr += array_counter;
1302
1303}
1304
1305/**
1306 * sep_shared_area_virt_to_bus - map shared area to bus address
1307 * @sep: pointer to struct sep_device
1308 * @virt_address: virtual address to convert
1309 *
1310 * This function returns the physical address inside the shared area according
1311 * to the virtual address. It can be either on the external RAM device
1312 * (ioremapped), or on the system RAM
1313 * This implementation is for the external RAM
1314 */
1315static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
1316 void *virt_address)
1317{
1318 dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
1319 dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
1320 (unsigned long)
1321 sep->shared_bus + (virt_address - sep->shared_addr));
1322
1323 return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
1324}
1325
1326/**
1327 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1328 * @sep: pointer to struct sep_device
1329 * @bus_address: bus address to convert
1330 *
1331 * This function returns the virtual address inside the shared area
1332 * according to the physical address. It can be either on the
1333 * external RAM device (ioremapped), or on the system RAM
1334 * This implementation is for the external RAM
1335 */
1336static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
1337 dma_addr_t bus_address)
1338{
1339 dev_dbg(&sep->pdev->dev, "shared bus to virt b=%lx v=%lx\n",
1340 (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
1341 (size_t)(bus_address - sep->shared_bus)));
1342
1343 return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
1344}
1345
1346/**
1347 * sep_debug_print_lli_tables - dump LLI table
1348 * @sep: pointer to struct sep_device
1349 * @lli_table_ptr: pointer to sep_lli_entry
1350 * @num_table_entries: number of entries
1351 * @table_data_size: total data size
1352 *
1353 * Walk the list of the created tables and print all the data
1354 */
1355static void sep_debug_print_lli_tables(struct sep_device *sep,
1356 struct sep_lli_entry *lli_table_ptr,
1357 unsigned long num_table_entries,
1358 unsigned long table_data_size)
1359{
1360 unsigned long table_count = 1;
1361 unsigned long entries_count = 0;
1362
1363 dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");
1364
1365 while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
1366 dev_dbg(&sep->pdev->dev,
1367 "lli table %08lx, table_data_size is %lu\n",
1368 table_count, table_data_size);
1369 dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
1370 num_table_entries);
1371
1372 /* Print entries of the table (without info entry) */
1373 for (entries_count = 0; entries_count < num_table_entries;
1374 entries_count++, lli_table_ptr++) {
1375
1376 dev_dbg(&sep->pdev->dev,
1377 "lli_table_ptr address is %08lx\n",
1378 (unsigned long) lli_table_ptr);
1379
1380 dev_dbg(&sep->pdev->dev,
1381 "phys address is %08lx block size is %x\n",
1382 (unsigned long)lli_table_ptr->bus_address,
1383 lli_table_ptr->block_size);
1384 }
1385 /* Point to the info entry */
1386 lli_table_ptr--;
1387
1388 dev_dbg(&sep->pdev->dev,
1389 "phys lli_table_ptr->block_size is %x\n",
1390 lli_table_ptr->block_size);
1391
1392 dev_dbg(&sep->pdev->dev,
1393 "phys lli_table_ptr->physical_address is %08lu\n",
1394 (unsigned long)lli_table_ptr->bus_address);
1395
1396
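		/*
		 * An info entry packs the next table's descriptor into one
		 * 32-bit word: bits 31-24 hold the entry count and bits 23-0
		 * the data size, so 0x05001000 means 5 entries covering
		 * 0x1000 bytes.
		 */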
1397 table_data_size = lli_table_ptr->block_size & 0xffffff;
1398 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1399
1400 dev_dbg(&sep->pdev->dev,
1401 "phys table_data_size is %lu num_table_entries is"
1402 " %lu bus_address is%lu\n", table_data_size,
1403 num_table_entries, (unsigned long)lli_table_ptr->bus_address);
1404
1405 if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
1406 lli_table_ptr = (struct sep_lli_entry *)
1407 sep_shared_bus_to_virt(sep,
1408 (unsigned long)lli_table_ptr->bus_address);
1409
1410 table_count++;
1411 }
1412 dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
1413}
1414
1415
1416/**
1417 * sep_prepare_empty_lli_table - create a blank LLI table
1418 * @sep: pointer to struct sep_device
1419 * @lli_table_addr_ptr: pointer to the returned lli table bus address
1420 * @num_entries_ptr: pointer to number of entries
1421 * @table_data_size_ptr: pointer to table data size
1422 *
1423 * This function creates empty lli tables when there is no data
1424 */
1425static void sep_prepare_empty_lli_table(struct sep_device *sep,
1426 dma_addr_t *lli_table_addr_ptr,
1427 u32 *num_entries_ptr,
1428 u32 *table_data_size_ptr)
1429{
1430 struct sep_lli_entry *lli_table_ptr;
1431
1432 /* Find the area for new table */
1433 lli_table_ptr =
1434 (struct sep_lli_entry *)(sep->shared_addr +
1435 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1436 sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1437 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1438
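	/* Empty table layout: one zero-length data entry plus the info entry */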
1439 lli_table_ptr->bus_address = 0;
1440 lli_table_ptr->block_size = 0;
1441
1442 lli_table_ptr++;
1443 lli_table_ptr->bus_address = 0xFFFFFFFF;
1444 lli_table_ptr->block_size = 0;
1445
1446 /* Set the output parameter value */
1447 *lli_table_addr_ptr = sep->shared_bus +
1448 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1449 sep->num_lli_tables_created *
1450 sizeof(struct sep_lli_entry) *
1451 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1452
1453 /* Set the num of entries and table data size for empty table */
1454 *num_entries_ptr = 2;
1455 *table_data_size_ptr = 0;
1456
1457 /* Update the number of created tables */
1458 sep->num_lli_tables_created++;
1459}
1460
1461/**
1462 * sep_prepare_input_dma_table - prepare input DMA mappings
1463 * @sep: pointer to struct sep_device
1464 * @data_size: size of the input data
1465 * @block_size: block size of the operation
1466 * @lli_table_ptr: returned bus address of the first lli table
1467 * @num_entries_ptr: returned number of entries in the first table
1468 * @table_data_size_ptr: returned data size of the first table
1469 * @is_kva: set for kernel data (kernel crypto call)
1470 *
1471 * This function prepares only the input DMA table for synchronous symmetric
1472 * operations (HASH)
1473 * Note that all bus addresses that are passed to the SEP
1474 * are in 32 bit format; the SEP is a 32 bit device
1475 */
1476static int sep_prepare_input_dma_table(struct sep_device *sep,
1477 unsigned long app_virt_addr,
1478 u32 data_size,
1479 u32 block_size,
1480 dma_addr_t *lli_table_ptr,
1481 u32 *num_entries_ptr,
1482 u32 *table_data_size_ptr,
1483 bool is_kva)
1484{
1485 int error = 0;
1486 /* Pointer to the info entry of the table - the last entry */
1487 struct sep_lli_entry *info_entry_ptr;
1488 /* Array of pointers to page */
1489 struct sep_lli_entry *lli_array_ptr;
1490 /* Points to the first entry to be processed in the lli_in_array */
1491 u32 current_entry = 0;
1492 /* Num entries in the virtual buffer */
1493 u32 sep_lli_entries = 0;
1494 /* Lli table pointer */
1495 struct sep_lli_entry *in_lli_table_ptr;
1496 /* The total data in one table */
1497 u32 table_data_size = 0;
1498 /* Flag for last table */
1499 u32 last_table_flag = 0;
1500 /* Number of entries in lli table */
1501 u32 num_entries_in_table = 0;
1502 /* Next table address */
1503 void *lli_table_alloc_addr = 0;
1504
1505 dev_dbg(&sep->pdev->dev, "prepare intput dma table data_size is %x\n", data_size);
1506 dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);
1507
1508 /* Initialize the pages pointers */
1509 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
1510 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;
1511
1512 /* Set the kernel address for first table to be allocated */
1513 lli_table_alloc_addr = (void *)(sep->shared_addr +
1514 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1515 sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1516 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1517
1518 if (data_size == 0) {
1519		/* Special case - create an empty table - 2 entries, zero data */
1520 sep_prepare_empty_lli_table(sep, lli_table_ptr,
1521 num_entries_ptr, table_data_size_ptr);
1522 goto update_dcb_counter;
1523 }
1524
1525 /* Check if the pages are in Kernel Virtual Address layout */
1526 if (is_kva == true)
1527 /* Lock the pages in the kernel */
1528 error = sep_lock_kernel_pages(sep, app_virt_addr,
1529 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
1530 else
1531 /*
1532 * Lock the pages of the user buffer
1533 * and translate them to pages
1534 */
1535 error = sep_lock_user_pages(sep, app_virt_addr,
1536 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
1537
1538 if (error)
1539 goto end_function;
1540
1541 dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
1542 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
1543
1544 current_entry = 0;
1545 info_entry_ptr = NULL;
1546
1547 sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;
1548
1549	/* Loop until all the entries in the in array are processed */
1550 while (current_entry < sep_lli_entries) {
1551
1552 /* Set the new input and output tables */
1553 in_lli_table_ptr =
1554 (struct sep_lli_entry *)lli_table_alloc_addr;
1555
1556 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1557 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1558
1559 if (lli_table_alloc_addr >
1560 ((void *)sep->shared_addr +
1561 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1562 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1563
1564 error = -ENOMEM;
1565 goto end_function_error;
1566
1567 }
1568
1569 /* Update the number of created tables */
1570 sep->num_lli_tables_created++;
1571
1572 /* Calculate the maximum size of data for input table */
1573 table_data_size = sep_calculate_lli_table_max_size(sep,
1574 &lli_array_ptr[current_entry],
1575 (sep_lli_entries - current_entry),
1576 &last_table_flag);
1577
1578 /*
1579 * If this is not the last table -
1580 * then align it to the block size
1581 */
1582 if (!last_table_flag)
1583 table_data_size =
1584 (table_data_size / block_size) * block_size;
1585
1586 dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
1587 table_data_size);
1588
1589 /* Construct input lli table */
1590 sep_build_lli_table(sep, &lli_array_ptr[current_entry],
1591 in_lli_table_ptr,
1592 &current_entry, &num_entries_in_table, table_data_size);
1593
1594 if (info_entry_ptr == NULL) {
1595
1596 /* Set the output parameters to physical addresses */
1597 *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
1598 in_lli_table_ptr);
1599 *num_entries_ptr = num_entries_in_table;
1600 *table_data_size_ptr = table_data_size;
1601
1602 dev_dbg(&sep->pdev->dev,
1603 "output lli_table_in_ptr is %08lx\n",
1604 (unsigned long)*lli_table_ptr);
1605
1606 } else {
1607 /* Update the info entry of the previous in table */
1608 info_entry_ptr->bus_address =
1609 sep_shared_area_virt_to_bus(sep,
1610 in_lli_table_ptr);
1611 info_entry_ptr->block_size =
1612 ((num_entries_in_table) << 24) |
1613 (table_data_size);
1614 }
1615		/* Save the pointer to the info entry of the current table */
1616 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
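		/*
		 * On the next iteration this info entry is patched to point
		 * at the table built then; otherwise it keeps its terminator
		 * value from sep_build_lli_table().
		 */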
1617 }
1618 /* Print input tables */
1619 sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
1620 sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
1621 *num_entries_ptr, *table_data_size_ptr);
1622 /* The array of the pages */
1623 kfree(lli_array_ptr);
1624
1625update_dcb_counter:
1626 /* Update DCB counter */
1627 sep->nr_dcb_creat++;
1628 goto end_function;
1629
1630end_function_error:
1631 /* Free all the allocated resources */
1632 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
1633 kfree(lli_array_ptr);
1634 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
1635
1636end_function:
1637 return error;
1638
1639}
1640/**
1641 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
1642 * @sep: pointer to struct sep_device
1643 * @lli_in_array: array of input lli entries
1644 * @sep_in_lli_entries: number of entries in lli_in_array
1645 * @lli_out_array: array of output lli entries
1646 * @sep_out_lli_entries: number of entries in lli_out_array
1647 * @block_size: block size of the operation
1648 * @lli_table_in_ptr: returned bus address of the first input table
1649 * @lli_table_out_ptr: returned bus address of the first output table
1650 * @in_num_entries_ptr: returned number of entries in the first input table
1651 * @out_num_entries_ptr: returned number of entries in the first output table
1652 * @table_data_size_ptr: returned data size of the first tables
1653 *
1654 * This function creates the input and output DMA tables for
1655 * symmetric operations (AES/DES) according to the block
1656 * size from LLI arrays
1657 * Note that all bus addresses that are passed to the SEP
1658 * are in 32 bit format; the SEP is a 32 bit device
1659 */
1660static int sep_construct_dma_tables_from_lli(
1661 struct sep_device *sep,
1662 struct sep_lli_entry *lli_in_array,
1663 u32 sep_in_lli_entries,
1664 struct sep_lli_entry *lli_out_array,
1665 u32 sep_out_lli_entries,
1666 u32 block_size,
1667 dma_addr_t *lli_table_in_ptr,
1668 dma_addr_t *lli_table_out_ptr,
1669 u32 *in_num_entries_ptr,
1670 u32 *out_num_entries_ptr,
1671 u32 *table_data_size_ptr)
1672{
1673 /* Points to the area where next lli table can be allocated */
1674 void *lli_table_alloc_addr = 0;
1675 /* Input lli table */
1676 struct sep_lli_entry *in_lli_table_ptr = NULL;
1677 /* Output lli table */
1678 struct sep_lli_entry *out_lli_table_ptr = NULL;
1679 /* Pointer to the info entry of the table - the last entry */
1680 struct sep_lli_entry *info_in_entry_ptr = NULL;
1681 /* Pointer to the info entry of the table - the last entry */
1682 struct sep_lli_entry *info_out_entry_ptr = NULL;
1683 /* Points to the first entry to be processed in the lli_in_array */
1684 u32 current_in_entry = 0;
1685 /* Points to the first entry to be processed in the lli_out_array */
1686 u32 current_out_entry = 0;
1687 /* Max size of the input table */
1688 u32 in_table_data_size = 0;
1689 /* Max size of the output table */
1690 u32 out_table_data_size = 0;
1691	/* Flag that signifies if this is the last table built */
1692 u32 last_table_flag = 0;
1693 /* The data size that should be in table */
1694 u32 table_data_size = 0;
1695	/* Number of entries in the input table */
1696 u32 num_entries_in_table = 0;
1697	/* Number of entries in the output table */
1698 u32 num_entries_out_table = 0;
1699
1700 /* Initiate to point after the message area */
1701 lli_table_alloc_addr = (void *)(sep->shared_addr +
1702 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1703 (sep->num_lli_tables_created *
1704 (sizeof(struct sep_lli_entry) *
1705 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
1706
1707	/* Loop until all the entries in the in array are processed */
1708 while (current_in_entry < sep_in_lli_entries) {
1709 /* Set the new input and output tables */
1710 in_lli_table_ptr =
1711 (struct sep_lli_entry *)lli_table_alloc_addr;
1712
1713 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1714 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1715
1716 /* Set the first output tables */
1717 out_lli_table_ptr =
1718 (struct sep_lli_entry *)lli_table_alloc_addr;
1719
1720 /* Check if the DMA table area limit was overrun */
1721 if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
1722 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
1723 ((void *)sep->shared_addr +
1724 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1725 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1726
1727 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
1728 return -ENOMEM;
1729 }
1730
1731 /* Update the number of the lli tables created */
1732 sep->num_lli_tables_created += 2;
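		/* Two tables were carved from the pool: one input, one output */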
1733
1734 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1735 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1736
1737 /* Calculate the maximum size of data for input table */
1738 in_table_data_size =
1739 sep_calculate_lli_table_max_size(sep,
1740 &lli_in_array[current_in_entry],
1741 (sep_in_lli_entries - current_in_entry),
1742 &last_table_flag);
1743
1744 /* Calculate the maximum size of data for output table */
1745 out_table_data_size =
1746 sep_calculate_lli_table_max_size(sep,
1747 &lli_out_array[current_out_entry],
1748 (sep_out_lli_entries - current_out_entry),
1749 &last_table_flag);
1750
1751 dev_dbg(&sep->pdev->dev,
1752 "construct tables from lli in_table_data_size is %x\n",
1753 in_table_data_size);
1754
1755 dev_dbg(&sep->pdev->dev,
1756 "construct tables from lli out_table_data_size is %x\n",
1757 out_table_data_size);
1758
1759 table_data_size = in_table_data_size;
1760
1761 if (!last_table_flag) {
1762 /*
1763 * If this is not the last table,
1764 * then must check where the data is smallest
1765 * and then align it to the block size
1766 */
1767 if (table_data_size > out_table_data_size)
1768 table_data_size = out_table_data_size;
1769
1770 /*
1771 * Now calculate the table size so that
1772			 * it will be a multiple of the block size
1773 */
1774 table_data_size = (table_data_size / block_size) *
1775 block_size;
1776 }
1777
1778 /* Construct input lli table */
1779 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
1780 in_lli_table_ptr,
1781 &current_in_entry,
1782 &num_entries_in_table,
1783 table_data_size);
1784
1785 /* Construct output lli table */
1786 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
1787 out_lli_table_ptr,
1788 &current_out_entry,
1789 &num_entries_out_table,
1790 table_data_size);
1791
1792 /* If info entry is null - this is the first table built */
1793 if (info_in_entry_ptr == NULL) {
1794 /* Set the output parameters to physical addresses */
1795 *lli_table_in_ptr =
1796 sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1797
1798 *in_num_entries_ptr = num_entries_in_table;
1799
1800 *lli_table_out_ptr =
1801 sep_shared_area_virt_to_bus(sep,
1802 out_lli_table_ptr);
1803
1804 *out_num_entries_ptr = num_entries_out_table;
1805 *table_data_size_ptr = table_data_size;
1806
1807 dev_dbg(&sep->pdev->dev,
1808 "output lli_table_in_ptr is %08lx\n",
1809 (unsigned long)*lli_table_in_ptr);
1810 dev_dbg(&sep->pdev->dev,
1811 "output lli_table_out_ptr is %08lx\n",
1812 (unsigned long)*lli_table_out_ptr);
1813 } else {
1814 /* Update the info entry of the previous in table */
1815 info_in_entry_ptr->bus_address =
1816 sep_shared_area_virt_to_bus(sep,
1817 in_lli_table_ptr);
1818
1819 info_in_entry_ptr->block_size =
1820 ((num_entries_in_table) << 24) |
1821 (table_data_size);
1822
1823			/* Update the info entry of the previous out table */
1824 info_out_entry_ptr->bus_address =
1825 sep_shared_area_virt_to_bus(sep,
1826 out_lli_table_ptr);
1827
1828 info_out_entry_ptr->block_size =
1829 ((num_entries_out_table) << 24) |
1830 (table_data_size);
1831
1832 dev_dbg(&sep->pdev->dev,
1833 "output lli_table_in_ptr:%08lx %08x\n",
1834 (unsigned long)info_in_entry_ptr->bus_address,
1835 info_in_entry_ptr->block_size);
1836
1837 dev_dbg(&sep->pdev->dev,
1838 "output lli_table_out_ptr:%08lx %08x\n",
1839 (unsigned long)info_out_entry_ptr->bus_address,
1840 info_out_entry_ptr->block_size);
1841 }
1842
1843 /* Save the pointer to the info entry of the current tables */
1844 info_in_entry_ptr = in_lli_table_ptr +
1845 num_entries_in_table - 1;
1846 info_out_entry_ptr = out_lli_table_ptr +
1847 num_entries_out_table - 1;
1848
1849 dev_dbg(&sep->pdev->dev,
1850 "output num_entries_out_table is %x\n",
1851 (u32)num_entries_out_table);
1852 dev_dbg(&sep->pdev->dev,
1853 "output info_in_entry_ptr is %lx\n",
1854 (unsigned long)info_in_entry_ptr);
1855 dev_dbg(&sep->pdev->dev,
1856 "output info_out_entry_ptr is %lx\n",
1857 (unsigned long)info_out_entry_ptr);
1858 }
1859
1860 /* Print input tables */
1861 sep_debug_print_lli_tables(sep,
1862 (struct sep_lli_entry *)
1863 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
1864 *in_num_entries_ptr,
1865 *table_data_size_ptr);
1866
1867 /* Print output tables */
1868 sep_debug_print_lli_tables(sep,
1869 (struct sep_lli_entry *)
1870 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
1871 *out_num_entries_ptr,
1872 *table_data_size_ptr);
1873
1874 return 0;
1875}
1876
1877/**
1878 * sep_prepare_input_output_dma_table - prepare DMA I/O table
1879 * @app_virt_in_addr: virtual address of the input buffer
1880 * @app_virt_out_addr: virtual address of the output buffer
1881 * @data_size: size of the data
1882 * @block_size: block size of the operation
1883 * @lli_table_in_ptr: returned bus address of the first input table
1884 * @lli_table_out_ptr: returned bus address of the first output table
1885 * @in_num_entries_ptr: returned number of entries in the first input table
1886 * @out_num_entries_ptr: returned number of entries in the first output table
1887 * @table_data_size_ptr: returned data size of the first tables
1888 * @is_kva: set for kernel data; used only for kernel crypto module
1889 *
1890 * This function builds input and output DMA tables for synchronous
1891 * symmetric operations (AES, DES, HASH). It also checks that each table
1892 * holds a multiple of the block size
1893 * Note that all bus addresses that are passed to the SEP
1894 * are in 32 bit format; the SEP is a 32 bit device
1895 */
1896static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1897 unsigned long app_virt_in_addr,
1898 unsigned long app_virt_out_addr,
1899 u32 data_size,
1900 u32 block_size,
1901 dma_addr_t *lli_table_in_ptr,
1902 dma_addr_t *lli_table_out_ptr,
1903 u32 *in_num_entries_ptr,
1904 u32 *out_num_entries_ptr,
1905 u32 *table_data_size_ptr,
1906 bool is_kva)
1907
1908{
1909 int error = 0;
1910 /* Array of pointers of page */
1911 struct sep_lli_entry *lli_in_array;
1912 /* Array of pointers of page */
1913 struct sep_lli_entry *lli_out_array;
1914
1915 if (data_size == 0) {
1916 /* Prepare empty table for input and output */
1917 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
1918 in_num_entries_ptr, table_data_size_ptr);
1919
1920 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
1921 out_num_entries_ptr, table_data_size_ptr);
1922
1923 goto update_dcb_counter;
1924 }
1925
1926 /* Initialize the pages pointers */
1927 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
1928 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
1929
1930 /* Lock the pages of the buffer and translate them to pages */
1931 if (is_kva == true) {
1932 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
1933 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
1934
1935 if (error) {
1936 dev_warn(&sep->pdev->dev,
1937 "lock kernel for in failed\n");
1938 goto end_function;
1939 }
1940
1941 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
1942 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
1943
1944 if (error) {
1945 dev_warn(&sep->pdev->dev,
1946 "lock kernel for out failed\n");
1947 goto end_function;
1948 }
1949 }
1950
1951 else {
1952 error = sep_lock_user_pages(sep, app_virt_in_addr,
1953 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
1954 if (error) {
1955 dev_warn(&sep->pdev->dev,
1956 "sep_lock_user_pages for input virtual buffer failed\n");
1957 goto end_function;
1958 }
1959
1960 error = sep_lock_user_pages(sep, app_virt_out_addr,
1961 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
1962
1963 if (error) {
1964 dev_warn(&sep->pdev->dev,
1965 "sep_lock_user_pages for output virtual buffer failed\n");
1966 goto end_function_free_lli_in;
1967 }
1968 }
1969
1970 dev_dbg(&sep->pdev->dev, "prep input output dma table sep_in_num_pages is %x\n",
1971 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
1972 dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
1973 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
1974 dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
1975 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1976
1977 /* Call the function that creates table from the lli arrays */
1978 error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
1979 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
1980 lli_out_array,
1981 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
1982 block_size, lli_table_in_ptr, lli_table_out_ptr,
1983 in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1984
1985 if (error) {
1986 dev_warn(&sep->pdev->dev,
1987 "sep_construct_dma_tables_from_lli failed\n");
1988 goto end_function_with_error;
1989 }
1990
1991 kfree(lli_out_array);
1992 kfree(lli_in_array);
1993
1994update_dcb_counter:
1995 /* Update DCB counter */
1996 sep->nr_dcb_creat++;
1997
1998 goto end_function;
1999
2000end_function_with_error:
2001 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
2002 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
2003 kfree(lli_out_array);
2004
2005
2006end_function_free_lli_in:
2007 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
2008 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
2009 kfree(lli_in_array);
2010
2011end_function:
2012
2013 return error;
2014
2015}
2016
2017/**
2018 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2019 * @app_in_address: unsigned long; for data buffer in (user space)
2020 * @app_out_address: unsigned long; for data buffer out (user space)
2021 * @data_in_size: u32; for size of data
2022 * @block_size: u32; for block size
2023 * @tail_block_size: u32; for size of tail block
2024 * @isapplet: bool; to indicate external app
2025 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2026 *
2027 * This function prepares the linked DMA tables and puts the
2028 * address for the linked list of tables into a DCB (data control
2029 * block), the address of which is known by the SEP hardware
2030 * Note that all bus addresses that are passed to the SEP
2031 * are in 32 bit format; the SEP is a 32 bit device
2032 */
2033static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2034 unsigned long app_in_address,
2035 unsigned long app_out_address,
2036 u32 data_in_size,
2037 u32 block_size,
2038 u32 tail_block_size,
2039 bool isapplet,
2040 bool is_kva)
2041{
2042 int error = 0;
2043 /* Size of tail */
2044 u32 tail_size = 0;
2045 /* Address of the created DCB table */
2046 struct sep_dcblock *dcb_table_ptr = NULL;
2047 /* The physical address of the first input DMA table */
2048 dma_addr_t in_first_mlli_address = 0;
2049 /* Number of entries in the first input DMA table */
2050 u32 in_first_num_entries = 0;
2051 /* The physical address of the first output DMA table */
2052 dma_addr_t out_first_mlli_address = 0;
2053 /* Number of entries in the first output DMA table */
2054 u32 out_first_num_entries = 0;
2055 /* Data in the first input/output table */
2056 u32 first_data_size = 0;
2057
2058 if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2059 /* No more DCBs to allocate */
2060 dev_warn(&sep->pdev->dev, "no more DCBs available\n");
2061 error = -ENOSPC;
2062 goto end_function;
2063 }
2064
2065 /* Allocate new DCB */
2066 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2067 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2068 (sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
2069
2070 /* Set the default values in the DCB */
2071 dcb_table_ptr->input_mlli_address = 0;
2072 dcb_table_ptr->input_mlli_num_entries = 0;
2073 dcb_table_ptr->input_mlli_data_size = 0;
2074 dcb_table_ptr->output_mlli_address = 0;
2075 dcb_table_ptr->output_mlli_num_entries = 0;
2076 dcb_table_ptr->output_mlli_data_size = 0;
2077 dcb_table_ptr->tail_data_size = 0;
2078 dcb_table_ptr->out_vr_tail_pt = 0;
2079
2080 if (isapplet == true) {
2081
2082 /* Check if there is enough data for DMA operation */
2083 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2084 if (is_kva == true) {
2085 memcpy(dcb_table_ptr->tail_data,
2086 (void *)app_in_address, data_in_size);
2087 } else {
2088 if (copy_from_user(dcb_table_ptr->tail_data,
2089 (void __user *)app_in_address,
2090 data_in_size)) {
2091 error = -EFAULT;
2092 goto end_function;
2093 }
2094 }
2095
2096 dcb_table_ptr->tail_data_size = data_in_size;
2097
2098 /* Set the output user-space address for mem2mem op */
2099 if (app_out_address)
2100 dcb_table_ptr->out_vr_tail_pt =
2101 (aligned_u64)app_out_address;
2102
2103 /*
2104 * Update both data length parameters in order to avoid
2105 * second data copy and allow building of empty mlli
2106 * tables
2107 */
2108 tail_size = 0x0;
2109 data_in_size = 0x0;
2110
2111 } else {
2112 if (!app_out_address) {
2113 tail_size = data_in_size % block_size;
2114 if (!tail_size) {
2115 if (tail_block_size == block_size)
2116 tail_size = block_size;
2117 }
2118 } else {
2119 tail_size = 0;
2120 }
2121 }
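		/*
		 * At this point tail_size is non-zero only when the applet
		 * input ends in a partial block, or in a final full block
		 * when tail_block_size equals block_size.
		 */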
2122 if (tail_size) {
2123 if (tail_size > sizeof(dcb_table_ptr->tail_data))
2124 return -EINVAL;
2125 if (is_kva == true) {
2126 memcpy(dcb_table_ptr->tail_data,
2127 (void *)(app_in_address + data_in_size -
2128 tail_size), tail_size);
2129 } else {
2130 /* We have tail data - copy it to DCB */
2131 if (copy_from_user(dcb_table_ptr->tail_data,
2132 (void *)(app_in_address +
2133 data_in_size - tail_size), tail_size)) {
2134 error = -EFAULT;
2135 goto end_function;
2136 }
2137 }
2138 if (app_out_address)
2139 /*
2140 * Calculate the output address
2141 * according to tail data size
2142 */
2143 dcb_table_ptr->out_vr_tail_pt =
2144 (aligned_u64)app_out_address + data_in_size
2145 - tail_size;
2146
2147 /* Save the real tail data size */
2148 dcb_table_ptr->tail_data_size = tail_size;
2149 /*
2150 * Update the data size without the tail
2151 * data size AKA data for the dma
2152 */
2153 data_in_size = (data_in_size - tail_size);
2154 }
2155 }
2156 /* Check if we need to build only input table or input/output */
2157 if (app_out_address) {
2158 /* Prepare input/output tables */
2159 error = sep_prepare_input_output_dma_table(sep,
2160 app_in_address,
2161 app_out_address,
2162 data_in_size,
2163 block_size,
2164 &in_first_mlli_address,
2165 &out_first_mlli_address,
2166 &in_first_num_entries,
2167 &out_first_num_entries,
2168 &first_data_size,
2169 is_kva);
2170 } else {
2171 /* Prepare input tables */
2172 error = sep_prepare_input_dma_table(sep,
2173 app_in_address,
2174 data_in_size,
2175 block_size,
2176 &in_first_mlli_address,
2177 &in_first_num_entries,
2178 &first_data_size,
2179 is_kva);
2180 }
2181
2182 if (error) {
2183 dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n");
2184 goto end_function;
2185 }
2186
2187 /* Set the DCB values */
2188 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2189 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2190 dcb_table_ptr->input_mlli_data_size = first_data_size;
2191 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2192 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2193 dcb_table_ptr->output_mlli_data_size = first_data_size;
2194
2195end_function:
2196 return error;
2197
2198}
2199
2200/**
2201 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2202 * @sep: pointer to struct sep_device
2203 * @isapplet: indicates external application (used for kernel access)
2204 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2205 *
2206 * This function frees the DMA tables and DCB
2207 */
2208static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2209 bool is_kva)
2210{
2211 int i = 0;
2212 int error = 0;
2213 int error_temp = 0;
2214 struct sep_dcblock *dcb_table_ptr;
2215 unsigned long pt_hold;
2216 void *tail_pt;
2217
2218 if (isapplet == true) {
2219 /* Set pointer to first DCB table */
2220 dcb_table_ptr = (struct sep_dcblock *)
2221 (sep->shared_addr +
2222 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2223
2224 /* Go over each DCB and see if tail pointer must be updated */
2225 for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
2226 if (dcb_table_ptr->out_vr_tail_pt) {
2227 pt_hold = (unsigned long)dcb_table_ptr->out_vr_tail_pt;
2228 tail_pt = (void *)pt_hold;
2229 if (is_kva == true) {
2230 memcpy(tail_pt,
2231 dcb_table_ptr->tail_data,
2232 dcb_table_ptr->tail_data_size);
2233 } else {
2234 error_temp = copy_to_user(
2235 tail_pt,
2236 dcb_table_ptr->tail_data,
2237 dcb_table_ptr->tail_data_size);
2238 }
2239 if (error_temp) {
2240 /* Release the DMA resource */
2241 error = -EFAULT;
2242 break;
2243 }
2244 }
2245 }
2246 }
2247 /* Free the output pages, if any */
2248 sep_free_dma_table_data_handler(sep);
2249
2250 return error;
2251}
2252
2253/**
2254 * sep_get_static_pool_addr_handler - get static pool address
2255 * @sep: pointer to struct sep_device
2256 *
2257 * This function sets the bus and virtual addresses of the static pool
2258 */
2259static int sep_get_static_pool_addr_handler(struct sep_device *sep)
2260{
2261 u32 *static_pool_addr = NULL;
2262
2263 static_pool_addr = (u32 *)(sep->shared_addr +
2264 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2265
2266 static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
2267 static_pool_addr[1] = (u32)sep->shared_bus +
2268 SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2269
2270 dev_dbg(&sep->pdev->dev, "static pool segment: physical %x\n",
2271 (u32)static_pool_addr[1]);
2272
2273 return 0;
2274}
2275
2276/**
2277 * sep_end_transaction_handler - end transaction
2278 * @sep: pointer to struct sep_device
2279 *
2280 * This API handles the end transaction request
2281 */
2282static int sep_end_transaction_handler(struct sep_device *sep)
2283{
2284 /* Clear the data pool pointers Token */
2285 memset((void *)(sep->shared_addr +
2286 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
2287 0, sep->num_of_data_allocations*2*sizeof(u32));
2288
2289 /* Check that all the DMA resources were freed */
2290 sep_free_dma_table_data_handler(sep);
2291
2292 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
2293
2294 /*
2295 * We are now through with the transaction. Let's
2296 * allow other processes who have the device open
2297 * to perform transactions
2298 */
2299 mutex_lock(&sep->sep_mutex);
2300 sep->pid_doing_transaction = 0;
2301 mutex_unlock(&sep->sep_mutex);
2302	/* Raise event for stuck contexts */
2303 wake_up(&sep->event);
2304
2305 return 0;
2306}
2307
2308/**
2309 * sep_prepare_dcb_handler - prepare a control block
2310 * @sep: pointer to struct sep_device
2311 * @arg: pointer to user parameters
2312 *
2313 * This function builds the DCB, and the input/output DMA tables it
2314 * points to, from the user-space parameters passed in @arg.
2315 */
2316static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
2317{
2318 int error;
2319 /* Command arguments */
2320 struct build_dcb_struct command_args;
2321
2322 /* Get the command arguments */
2323 if (copy_from_user(&command_args, (void __user *)arg,
2324 sizeof(struct build_dcb_struct))) {
2325 error = -EFAULT;
2326 goto end_function;
2327 }
2328
2329 dev_dbg(&sep->pdev->dev, "prep dcb handler app_in_address is %08llx\n",
2330 command_args.app_in_address);
2331 dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
2332 command_args.app_out_address);
2333 dev_dbg(&sep->pdev->dev, "data_size is %x\n",
2334 command_args.data_in_size);
2335 dev_dbg(&sep->pdev->dev, "block_size is %x\n",
2336 command_args.block_size);
2337 dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
2338 command_args.tail_block_size);
2339
2340 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2341 (unsigned long)command_args.app_in_address,
2342 (unsigned long)command_args.app_out_address,
2343 command_args.data_in_size, command_args.block_size,
2344 command_args.tail_block_size, true, false);
2345
2346end_function:
2347 return error;
2348
2349}
2350
2351/**
2352 * sep_free_dcb_handler - free control block resources
2353 * @sep: pointer to struct sep_device
2354 *
2355 * This function frees the DCB resources and updates the needed
2356 * user-space buffers.
2357 */
2358static int sep_free_dcb_handler(struct sep_device *sep)
2359{
2360 return sep_free_dma_tables_and_dcb(sep, false, false);
2361}
2362
2363/**
2364 * sep_rar_prepare_output_msg_handler - prepare an output message
2365 * @sep: pointer to struct sep_device
2366 * @arg: pointer to user parameters
2367 *
2368 * This function will retrieve the RAR buffer physical addresses, type
2369 * & size corresponding to the RAR handles provided in the buffers vector.
2370 */
2371static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
2372 unsigned long arg)
2373{
2374 int error = 0;
2375 /* Command args */
2376 struct rar_hndl_to_bus_struct command_args;
2377 /* Bus address */
2378 dma_addr_t rar_bus = 0;
2379 /* Holds the RAR address in the system memory offset */
2380 u32 *rar_addr;
2381
2382 /* Copy the data */
2383 if (copy_from_user(&command_args, (void __user *)arg,
2384 sizeof(command_args))) {
2385 error = -EFAULT;
2386 goto end_function;
2387 }
2388
2389 /* Call to translation function only if user handle is not NULL */
2390 if (command_args.rar_handle)
2391 return -EOPNOTSUPP;
2392 dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
2393
2394 /* Set value in the SYSTEM MEMORY offset */
2395 rar_addr = (u32 *)(sep->shared_addr +
2396 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2397
2398 /* Copy the physical address to the System Area for the SEP */
2399 rar_addr[0] = SEP_RAR_VAL_TOKEN;
2400 rar_addr[1] = rar_bus;
2401
2402end_function:
2403 return error;
2404}
2405
2406/**
2407 * sep_ioctl - ioctl api
2408 * @filp: pointer to struct file
2409 * @cmd: command
2410 * @arg: pointer to argument structure
2411 *
2412 * Implement the ioctl methods available on the SEP device.
2413 */
2414static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2415{
2416 int error = 0;
2417 struct sep_device *sep = filp->private_data;
2418
2419 /* Make sure we own this device */
2420 mutex_lock(&sep->sep_mutex);
2421 if ((current->pid != sep->pid_doing_transaction) &&
2422 (sep->pid_doing_transaction != 0)) {
2423 dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
2424 error = -EACCES;
2425 }
2426 mutex_unlock(&sep->sep_mutex);
2427
2428 if (error)
2429 return error;
2430
2431 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2432 return -ENOTTY;
2433
2434	/* Lock to prevent the daemon from interfering with the operation */
2435 mutex_lock(&sep->ioctl_mutex);
2436
2437 switch (cmd) {
2438 case SEP_IOCSENDSEPCOMMAND:
2439 /* Send command to SEP */
2440 error = sep_send_command_handler(sep);
2441 break;
2442 case SEP_IOCALLOCDATAPOLL:
2443 /* Allocate data pool */
2444 error = sep_allocate_data_pool_memory_handler(sep, arg);
2445 break;
2446 case SEP_IOCGETSTATICPOOLADDR:
2447 /* Inform the SEP the bus address of the static pool */
2448 error = sep_get_static_pool_addr_handler(sep);
2449 break;
2450 case SEP_IOCENDTRANSACTION:
2451 error = sep_end_transaction_handler(sep);
2452 break;
2453 case SEP_IOCRARPREPAREMESSAGE:
2454 error = sep_rar_prepare_output_msg_handler(sep, arg);
2455 break;
2456 case SEP_IOCPREPAREDCB:
2457 error = sep_prepare_dcb_handler(sep, arg);
2458 break;
2459 case SEP_IOCFREEDCB:
2460 error = sep_free_dcb_handler(sep);
2461 break;
2462 default:
2463 error = -ENOTTY;
2464 break;
2465 }
2466
2467 mutex_unlock(&sep->ioctl_mutex);
2468 return error;
2469}
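
/*
 * A sketch of the expected user-space call sequence, inferred from the
 * handlers above (not a normative API description): open the SEP node,
 * mmap() the shared area, SEP_IOCPREPAREDCB to build the DMA tables,
 * SEP_IOCSENDSEPCOMMAND to start the operation, poll() for the reply,
 * then SEP_IOCFREEDCB and SEP_IOCENDTRANSACTION to release ownership.
 */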
2470
2471/**
2472 * sep_singleton_ioctl - ioctl api for singleton interface
2473 * @filp: pointer to struct file
2474 * @cmd: command
2475 * @arg: pointer to argument structure
2476 *
2477 * Implement the additional ioctls for the singleton device
2478 */
2479static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg)
2480{
2481 long error = 0;
2482 struct sep_device *sep = filp->private_data;
2483
2484 /* Check that the command is for the SEP device */
2485 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2486 return -ENOTTY;
2487
2488 /* Make sure we own this device */
2489 mutex_lock(&sep->sep_mutex);
2490 if ((current->pid != sep->pid_doing_transaction) &&
2491 (sep->pid_doing_transaction != 0)) {
2492 dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
2493 mutex_unlock(&sep->sep_mutex);
2494 return -EACCES;
2495 }
2496
2497 mutex_unlock(&sep->sep_mutex);
2498
2499 switch (cmd) {
2500 case SEP_IOCTLSETCALLERID:
2501 mutex_lock(&sep->ioctl_mutex);
2502 error = sep_set_caller_id_handler(sep, arg);
2503 mutex_unlock(&sep->ioctl_mutex);
2504 break;
2505 default:
2506 error = sep_ioctl(filp, cmd, arg);
2507 break;
2508 }
2509 return error;
2510}
2511
2512/**
2513 * sep_request_daemon_ioctl - ioctl for daemon
2514 * @filp: pointer to struct file
2515 * @cmd: command
2516 * @arg: pointer to argument structure
2517 *
2518 * Called by the request daemon to perform ioctls on the daemon device
2519 */
2520static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
2521 unsigned long arg)
2522{
2523
2524 long error;
2525 struct sep_device *sep = filp->private_data;
2526
2527 /* Check that the command is for SEP device */
2528 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2529 return -ENOTTY;
2530
2531 /* Only one process can access ioctl at any given time */
2532 mutex_lock(&sep->ioctl_mutex);
2533
2534 switch (cmd) {
2535 case SEP_IOCSENDSEPRPLYCOMMAND:
2536 /* Send reply command to SEP */
2537 error = sep_req_daemon_send_reply_command_handler(sep);
2538 break;
2539 case SEP_IOCENDTRANSACTION:
2540 /*
2541 * End req daemon transaction, do nothing
2542 * will be removed upon update in middleware
2543 * API library
2544 */
2545 error = 0;
2546 break;
2547 default:
2548 error = -ENOTTY;
2549 }
2550 mutex_unlock(&sep->ioctl_mutex);
2551 return error;
2552}
2553
2554/**
2555 * sep_inthandler - interrupt handler
2556 * @irq: interrupt
2557 * @dev_id: device id
2558 */
2559static irqreturn_t sep_inthandler(int irq, void *dev_id)
2560{
2561 irqreturn_t int_error = IRQ_HANDLED;
2562 unsigned long lck_flags;
2563 u32 reg_val, reg_val2 = 0;
2564 struct sep_device *sep = dev_id;
2565
2566	/* Read the IRR register to check if this is a SEP interrupt */
2567 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2568
2569 if (reg_val & (0x1 << 13)) {
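		/* Bit 13 is the GPR2 source, the only one unmasked in the IMR */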
2570 /* Lock and update the counter of reply messages */
2571 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
2572 sep->reply_ct++;
2573 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
2574
2575 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
2576 sep->send_ct, sep->reply_ct);
2577
2578 /* Is this printf or daemon request? */
2579 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
2580 dev_dbg(&sep->pdev->dev,
2581 "SEP Interrupt - reg2 is %08x\n", reg_val2);
2582
2583 if ((reg_val2 >> 30) & 0x1) {
2584 dev_dbg(&sep->pdev->dev, "int: printf request\n");
2585 wake_up(&sep->event_request_daemon);
2586 } else if (reg_val2 >> 31) {
2587 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
2588 wake_up(&sep->event_request_daemon);
2589 } else {
2590 dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
2591 wake_up(&sep->event);
2592 }
2593 } else {
2594 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
2595 int_error = IRQ_NONE;
2596 }
2597 if (int_error == IRQ_HANDLED)
2598 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
2599
2600 return int_error;
2601}
2602
2603/**
2604 * sep_reconfig_shared_area - reconfigure shared area
2605 * @sep: pointer to struct sep_device
2606 *
2607 * Reconfig the shared area between HOST and SEP - needed in case
2608 * the DX_CC_Init function was called before OS loading.
2609 */
2610static int sep_reconfig_shared_area(struct sep_device *sep)
2611{
2612 int ret_val;
2613
2614	/* used to limit waiting for SEP */
2615 unsigned long end_time;
2616
2617 /* Send the new SHARED MESSAGE AREA to the SEP */
2618 dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
2619 (unsigned long long)sep->shared_bus);
2620
2621 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2622
2623 /* Poll for SEP response */
2624 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2625
2626 end_time = jiffies + (WAIT_TIME * HZ);
2627
2628 while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
2629 (ret_val != sep->shared_bus))
2630 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2631
2632 /* Check the return value (register) */
2633 if (ret_val != sep->shared_bus) {
2634 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
2635 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
2636 ret_val = -ENOMEM;
2637 } else
2638 ret_val = 0;
2639
2640 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
2641 return ret_val;
2642}
2643
2644/* File operation for singleton SEP operations */
2645static const struct file_operations singleton_file_operations = {
2646 .owner = THIS_MODULE,
2647 .unlocked_ioctl = sep_singleton_ioctl,
2648 .poll = sep_poll,
2649 .open = sep_singleton_open,
2650 .release = sep_singleton_release,
2651 .mmap = sep_mmap,
2652};
2653
2654/* File operation for daemon operations */
2655static const struct file_operations daemon_file_operations = {
2656 .owner = THIS_MODULE,
2657 .unlocked_ioctl = sep_request_daemon_ioctl,
2658 .poll = sep_request_daemon_poll,
2659 .open = sep_request_daemon_open,
2660 .release = sep_request_daemon_release,
2661 .mmap = sep_request_daemon_mmap,
2662};
2663
2664/* The files operations structure of the driver */
2665static const struct file_operations sep_file_operations = {
2666 .owner = THIS_MODULE,
2667 .unlocked_ioctl = sep_ioctl,
2668 .poll = sep_poll,
2669 .open = sep_open,
2670 .release = sep_release,
2671 .mmap = sep_mmap,
2672};
2673
2674/**
2675 * sep_register_driver_with_fs - register misc devices
2676 * @sep: pointer to struct sep_device
2677 *
2678 * This function registers the driver with the file system
2679 */
2680static int sep_register_driver_with_fs(struct sep_device *sep)
2681{
2682 int ret_val;
2683
2684 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
2685 sep->miscdev_sep.name = SEP_DEV_NAME;
2686 sep->miscdev_sep.fops = &sep_file_operations;
2687
2688 sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
2689 sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
2690 sep->miscdev_singleton.fops = &singleton_file_operations;
2691
2692 sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
2693 sep->miscdev_daemon.name = SEP_DEV_DAEMON;
2694 sep->miscdev_daemon.fops = &daemon_file_operations;
2695
2696 ret_val = misc_register(&sep->miscdev_sep);
2697 if (ret_val) {
2698 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
2699 ret_val);
2700 return ret_val;
2701 }
2702
2703 ret_val = misc_register(&sep->miscdev_singleton);
2704 if (ret_val) {
2705 dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
2706 ret_val);
2707 misc_deregister(&sep->miscdev_sep);
2708 return ret_val;
2709 }
2710
2711 ret_val = misc_register(&sep->miscdev_daemon);
2712 if (ret_val) {
2713 dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
2714 ret_val);
2715 misc_deregister(&sep->miscdev_sep);
2716 misc_deregister(&sep->miscdev_singleton);
2717
2718 return ret_val;
2719 }
2720 return ret_val;
2721}
2722
2723
2724/**
2725 * sep_probe - probe a matching PCI device
2726 * @pdev: pci_device
2727 * @end: pci_device_id
2728 *
2729 * Attempt to set up and configure a SEP device that has been
2730 * discovered by the PCI layer.
2731 */
2732static int __devinit sep_probe(struct pci_dev *pdev,
2733 const struct pci_device_id *ent)
2734{
2735 int error = 0;
2736 struct sep_device *sep;
2737
2738 if (sep_dev != NULL) {
2739 dev_warn(&pdev->dev, "only one SEP supported.\n");
2740 return -EBUSY;
2741 }
2742
2743 /* Enable the device */
2744 error = pci_enable_device(pdev);
2745 if (error) {
2746 dev_warn(&pdev->dev, "error enabling pci device\n");
2747 goto end_function;
2748 }
2749
2750 /* Allocate the sep_device structure for this device */
2751 sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
2752 if (sep_dev == NULL) {
2753 dev_warn(&pdev->dev,
2754 "can't kmalloc the sep_device structure\n");
2755 error = -ENOMEM;
2756 goto end_function_disable_device;
2757 }
2758
2759 /*
2760 * We're going to use another variable for actually
2761 * working with the device; this way, if we have
2762 * multiple devices in the future, it would be easier
2763 * to make appropriate changes
2764 */
2765 sep = sep_dev;
2766
2767 sep->pdev = pci_dev_get(pdev);
2768
2769 init_waitqueue_head(&sep->event);
2770 init_waitqueue_head(&sep->event_request_daemon);
2771 spin_lock_init(&sep->snd_rply_lck);
2772 mutex_init(&sep->sep_mutex);
2773 mutex_init(&sep->ioctl_mutex);
2774
2775 dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, device being prepared\n");
2776 dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
2777
2778 /* Set up our register area */
2779 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
2780 if (!sep->reg_physical_addr) {
2781 dev_warn(&sep->pdev->dev, "Error getting register start\n");
2782 error = -ENODEV;
2783 goto end_function_free_sep_dev;
2784 }
2785
2786 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
2787 if (!sep->reg_physical_end) {
2788 dev_warn(&sep->pdev->dev, "Error getting register end\n");
2789 error = -ENODEV;
2790 goto end_function_free_sep_dev;
2791 }
2792
2793 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
2794 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
2795 if (!sep->reg_addr) {
2796 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
2797 error = -ENODEV;
2798 goto end_function_free_sep_dev;
2799 }
2800
2801 dev_dbg(&sep->pdev->dev,
2802 "Register area start %llx end %llx virtual %p\n",
2803 (unsigned long long)sep->reg_physical_addr,
2804 (unsigned long long)sep->reg_physical_end,
2805 sep->reg_addr);
2806
2807 /* Allocate the shared area */
2808 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2809 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
2810 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
2811 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
2812 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2813
2814 if (sep_map_and_alloc_shared_area(sep)) {
2815 error = -ENOMEM;
2816 /* Allocation failed */
2817 goto end_function_error;
2818 }
2819
2820 /* Clear ICR register */
2821 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2822
2823 /* Set the IMR register - open only GPR 2 */
2824 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2825
2826 /* Read send/receive counters from SEP */
2827 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
2828 sep->reply_ct &= 0x3FFFFFFF;
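	/*
	 * Mask off the top two GPR2 bits; the interrupt handler uses them
	 * as printf/daemon request flags, not as counter state.
	 */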
2829 sep->send_ct = sep->reply_ct;
2830
2831 /* Get the interrupt line */
2832 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
2833 "sep_driver", sep);
2834
2835 if (error)
2836 goto end_function_deallocate_sep_shared_area;
2837
2838 /* The new chip requires a shared area reconfigure */
2839 if (sep->pdev->revision == 4) { /* Only for new chip */
2840 error = sep_reconfig_shared_area(sep);
2841 if (error)
2842 goto end_function_free_irq;
2843 }
2844 /* Finally magic up the device nodes */
2845 /* Register driver with the fs */
2846 error = sep_register_driver_with_fs(sep);
2847 if (error == 0)
2848 /* Success */
2849 return 0;
2850
2851end_function_free_irq:
2852 free_irq(pdev->irq, sep);
2853
2854end_function_deallocate_sep_shared_area:
2855 /* De-allocate shared area */
2856 sep_unmap_and_free_shared_area(sep);
2857
2858end_function_error:
2859 iounmap(sep->reg_addr);
2860
2861end_function_free_sep_dev:
2862 pci_dev_put(sep_dev->pdev);
2863 kfree(sep_dev);
2864 sep_dev = NULL;
2865
2866end_function_disable_device:
2867 pci_disable_device(pdev);
2868
2869end_function:
2870 return error;
2871}
2872
2873static void sep_remove(struct pci_dev *pdev)
2874{
2875 struct sep_device *sep = sep_dev;
2876
2877 /* Unregister from fs */
2878 misc_deregister(&sep->miscdev_sep);
2879 misc_deregister(&sep->miscdev_singleton);
2880 misc_deregister(&sep->miscdev_daemon);
2881
2882 /* Free the irq */
2883 free_irq(sep->pdev->irq, sep);
2884
2885 /* Free the shared area */
2886 sep_unmap_and_free_shared_area(sep_dev);
2887 iounmap((void *) sep_dev->reg_addr);
2888}
2889
2890static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
2891 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
2892 {0}
2893};
2894
2895MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2896
2897/* Field for registering driver to PCI device */
2898static struct pci_driver sep_pci_driver = {
2899 .name = "sep_sec_driver",
2900 .id_table = sep_pci_id_tbl,
2901 .probe = sep_probe,
2902 .remove = sep_remove
2903};
2904
2905
2906/**
2907 * sep_init - init function
2908 *
2909 * Module load time. Register the PCI device driver.
2910 */
2911static int __init sep_init(void)
2912{
2913 return pci_register_driver(&sep_pci_driver);
2914}
2915
2916
2917/**
2918 * sep_exit - called to unload driver
2919 *
2920 * Drop the misc devices then remove and unmap the various resources
2921 * that are not released by the driver remove method.
2922 */
2923static void __exit sep_exit(void)
2924{
2925 pci_unregister_driver(&sep_pci_driver);
2926}
2927
2928
2929module_init(sep_init);
2930module_exit(sep_exit);
2931
2932MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
index c3aacfcc8ac6..8b797d5388bb 100644
--- a/drivers/staging/sep/sep_driver_api.h
+++ b/drivers/staging/sep/sep_driver_api.h
@@ -2,8 +2,8 @@
2 * 2 *
3 * sep_driver_api.h - Security Processor Driver api definitions 3 * sep_driver_api.h - Security Processor Driver api definitions
4 * 4 *
5 * Copyright(c) 2009,2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009,2010 Discretix. All rights reserved. 6 * Contributions(c) 2009-2011 Discretix. All rights reserved.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free 9 * under the terms of the GNU General Public License as published by the Free
@@ -26,6 +26,7 @@
26 * CHANGES: 26 * CHANGES:
27 * 27 *
28 * 2010.09.14 Upgrade to Medfield 28 * 2010.09.14 Upgrade to Medfield
29 * 2011.02.22 Enable kernel crypto
29 * 30 *
30 */ 31 */
31 32
@@ -37,26 +38,32 @@
37#define SEP_DRIVER_SRC_REQ 2 38#define SEP_DRIVER_SRC_REQ 2
38#define SEP_DRIVER_SRC_PRINTF 3 39#define SEP_DRIVER_SRC_PRINTF 3
39 40
40 41/* Power state */
41/*------------------------------------------- 42#define SEP_DRIVER_POWERON 1
42 TYPEDEFS 43#define SEP_DRIVER_POWEROFF 2
43----------------------------------------------*/ 44
44 45/* Following enums are used only for kernel crypto api */
45struct alloc_struct { 46enum type_of_request {
46 /* offset from start of shared pool area */ 47 NO_REQUEST,
47 u32 offset; 48 AES_CBC,
48 /* number of bytes to allocate */ 49 AES_ECB,
49 u32 num_bytes; 50 DES_CBC,
50}; 51 DES_ECB,
51 52 DES3_ECB,
52/* command struct for getting caller id value and address */ 53 DES3_CBC,
53struct caller_id_struct { 54 SHA1,
54 /* pid of the process */ 55 MD5,
55 u32 pid; 56 SHA224,
56 /* virtual address of the caller id hash */ 57 SHA256
57 aligned_u64 callerIdAddress; 58 };
58 /* caller id hash size in bytes */ 59
59 u32 callerIdSizeInBytes; 60enum hash_stage {
61 HASH_INIT,
62 HASH_UPDATE,
63 HASH_FINISH,
64 HASH_DIGEST,
65 HASH_FINUP_DATA,
66 HASH_FINUP_FINISH
60}; 67};
61 68
62/* 69/*
@@ -83,11 +90,6 @@ struct sep_dcblock {
83 u8 tail_data[68]; 90 u8 tail_data[68];
84}; 91};
85 92
86struct sep_caller_id_entry {
87 int pid;
88 unsigned char callerIdHash[SEP_CALLER_ID_HASH_SIZE_IN_BYTES];
89};
90
91/* 93/*
92 command structure for building dcb block (currently for ext app only 94 command structure for building dcb block (currently for ext app only
93*/ 95*/
@@ -104,6 +106,33 @@ struct build_dcb_struct {
104 /* the size of the block of the operation - if needed, 106 /* the size of the block of the operation - if needed,
105 every table will be modulo this parameter */ 107 every table will be modulo this parameter */
106 u32 tail_block_size; 108 u32 tail_block_size;
109
110 /* which application calls the driver DX or applet */
111 u32 is_applet;
112};
113
114/*
115 command structure for building dcb block for kernel crypto
116*/
117struct build_dcb_struct_kernel {
118 /* address value of the data in */
119 void *app_in_address;
120 /* size of data in */
121 ssize_t data_in_size;
122 /* address of the data out */
123 void *app_out_address;
124 /* the size of the block of the operation - if needed,
125 every table will be modulo this parameter */
126 u32 block_size;
 127 /* the size of the tail block of the operation - if needed,
128 every table will be modulo this parameter */
129 u32 tail_block_size;
130
131 /* which application calls the driver DX or applet */
132 u32 is_applet;
133
134 struct scatterlist *src_sg;
135 struct scatterlist *dst_sg;
107}; 136};
108 137
109/** 138/**
@@ -147,6 +176,10 @@ struct sep_dma_resource {
147 176
148 /* number of entries of the output mapp array */ 177 /* number of entries of the output mapp array */
149 u32 out_map_num_entries; 178 u32 out_map_num_entries;
179
180 /* Scatter list for kernel operations */
181 struct scatterlist *src_sg;
182 struct scatterlist *dst_sg;
150}; 183};
151 184
152 185
@@ -169,47 +202,201 @@ struct sep_lli_entry {
169 u32 block_size; 202 u32 block_size;
170}; 203};
171 204
172/*---------------------------------------------------------------- 205/*
173 IOCTL command defines 206 * header format for each fastcall write operation
174 -----------------------------------------------------------------*/ 207 */
208struct sep_fastcall_hdr {
209 u32 magic;
210 u32 secure_dma;
211 u32 msg_len;
212 u32 num_dcbs;
213};
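
A fastcall transaction begins with userspace write()ing this header, followed by the message payload, to the SEP device node. A minimal sketch of composing such a write; the payload layout after the header (message, then any DCB arguments) and the 1 KB scratch size are assumptions, and sep_fastcall_send() is a hypothetical helper:

	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	#define SEP_FC_MAGIC 0xFFAACCAA	/* from sep_driver_config.h */

	struct sep_fastcall_hdr {
		uint32_t magic;
		uint32_t secure_dma;
		uint32_t msg_len;
		uint32_t num_dcbs;
	};

	/* Hypothetical helper: send one message with no DCBs attached */
	static ssize_t sep_fastcall_send(int fd, const void *msg,
					 uint32_t msg_len)
	{
		unsigned char buf[sizeof(struct sep_fastcall_hdr) + 1024];
		struct sep_fastcall_hdr hdr = {
			.magic      = SEP_FC_MAGIC,
			.secure_dma = 0,	/* ordinary, non-IMR DMA */
			.msg_len    = msg_len,
			.num_dcbs   = 0,
		};

		if (msg_len > 1024)
			return -1;
		memcpy(buf, &hdr, sizeof(hdr));
		memcpy(buf + sizeof(hdr), msg, msg_len);
		return write(fd, buf, sizeof(hdr) + msg_len);
	}
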
175 214
176/* magic number 1 of the sep IOCTL command */ 215/*
177#define SEP_IOC_MAGIC_NUMBER 's' 216 * structure used in file pointer's private data field
217 * to track the status of the calls to the various
 218 * driver interfaces
219 */
220struct sep_call_status {
221 unsigned long status;
222};
178 223
179/* sends interrupt to sep that message is ready */ 224/*
180#define SEP_IOCSENDSEPCOMMAND \ 225 * format of dma context buffer used to store all DMA-related
181 _IO(SEP_IOC_MAGIC_NUMBER, 0) 226 * context information of a particular transaction
227 */
228struct sep_dma_context {
229 /* number of data control blocks */
230 u32 nr_dcb_creat;
231 /* number of the lli tables created in the current transaction */
232 u32 num_lli_tables_created;
233 /* size of currently allocated dma tables region */
234 u32 dmatables_len;
235 /* size of input data */
236 u32 input_data_len;
 237 /* secure dma use (for IMR memory restricted area in output) */
238 bool secure_dma;
239 struct sep_dma_resource dma_res_arr[SEP_MAX_NUM_SYNC_DMA_OPS];
240 /* Scatter gather for kernel crypto */
241 struct scatterlist *src_sg;
242 struct scatterlist *dst_sg;
243};
182 244
183/* sends interrupt to sep that message is ready */ 245/*
184#define SEP_IOCSENDSEPRPLYCOMMAND \ 246 * format for file pointer's private_data field
185 _IO(SEP_IOC_MAGIC_NUMBER, 1) 247 */
248struct sep_private_data {
249 struct sep_queue_info *my_queue_elem;
250 struct sep_device *device;
251 struct sep_call_status call_status;
252 struct sep_dma_context *dma_ctx;
253};
186 254
187/* allocate memory in data pool */
188#define SEP_IOCALLOCDATAPOLL \
189 _IOW(SEP_IOC_MAGIC_NUMBER, 2, struct alloc_struct)
190 255
191/* free dynamic data aalocated during table creation */ 256/* Functions used by sep_crypto */
192#define SEP_IOCFREEDMATABLEDATA \
193 _IO(SEP_IOC_MAGIC_NUMBER, 7)
194 257
195/* get the static pool area addersses (physical and virtual) */ 258/**
196#define SEP_IOCGETSTATICPOOLADDR \ 259 * sep_queue_status_remove - Removes transaction from status queue
197 _IO(SEP_IOC_MAGIC_NUMBER, 8) 260 * @sep: SEP device
261 * @sep_queue_info: pointer to status queue
262 *
 263 * This function removes information about a transaction from the queue.
264 */
265void sep_queue_status_remove(struct sep_device *sep,
266 struct sep_queue_info **queue_elem);
267/**
268 * sep_queue_status_add - Adds transaction to status queue
269 * @sep: SEP device
270 * @opcode: transaction opcode
271 * @size: input data size
272 * @pid: pid of current process
273 * @name: current process name
274 * @name_len: length of name (current process)
275 *
 276 * This function adds information about a started transaction to the
 277 * status queue.
278 */
279struct sep_queue_info *sep_queue_status_add(
280 struct sep_device *sep,
281 u32 opcode,
282 u32 size,
283 u32 pid,
284 u8 *name, size_t name_len);
285
286/**
287 * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
288 * for kernel crypto
289 * @sep: SEP device
290 * @dcb_region: DCB region buf to create for current transaction
291 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
292 * @dma_ctx: DMA context buf to create for current transaction
293 * @user_dcb_args: User arguments for DCB/MLLI creation
294 * @num_dcbs: Number of DCBs to create
295 */
296int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
297 struct sep_dcblock **dcb_region,
298 void **dmatables_region,
299 struct sep_dma_context **dma_ctx,
300 const struct build_dcb_struct_kernel *dcb_data,
301 const u32 num_dcbs);
302
303/**
304 * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
305 * contexts into use
306 * @sep: SEP device
307 * @dcb_region: DCB region copy
308 * @dmatables_region: MLLI/DMA tables copy
309 * @dma_ctx: DMA context for current transaction
310 */
311ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
312 struct sep_dcblock **dcb_region,
313 void **dmatables_region,
314 struct sep_dma_context *dma_ctx);
315
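
Taken together, the two prototypes above imply the kernel-crypto call sequence: fill a build_dcb_struct_kernel from the request's scatterlists, create the DCB and DMA-table context, then activate it into the shared area. A sketch under those assumptions; sep_kernel_crypto_prep() is a hypothetical caller, the 16-byte block size is only illustrative, and error unwinding is trimmed:

	static int sep_kernel_crypto_prep(struct sep_device *sep,
					  struct scatterlist *src_sg,
					  struct scatterlist *dst_sg,
					  ssize_t nbytes)
	{
		struct sep_dcblock *dcb_region = NULL;
		void *dmatables_region = NULL;
		struct sep_dma_context *dma_ctx = NULL;
		struct build_dcb_struct_kernel dcb_data = {
			.data_in_size    = nbytes,
			.block_size      = 16,	/* illustrative: AES block */
			.tail_block_size = 0,
			.is_applet       = 0,
			.src_sg          = src_sg,
			.dst_sg          = dst_sg,
		};
		int error;

		error = sep_create_dcb_dmatables_context_kernel(sep,
				&dcb_region, &dmatables_region,
				&dma_ctx, &dcb_data, 1);
		if (error)
			return error;

		/* Copy the prepared DCB/MLLI tables into the shared area */
		return (int)sep_activate_dcb_dmatables_context(sep,
				&dcb_region, &dmatables_region, dma_ctx);
	}
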
316/**
317 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
318 * @app_in_address: unsigned long; for data buffer in (user space)
319 * @app_out_address: unsigned long; for data buffer out (user space)
320 * @data_in_size: u32; for size of data
321 * @block_size: u32; for block size
322 * @tail_block_size: u32; for size of tail block
323 * @isapplet: bool; to indicate external app
324 * @is_kva: bool; kernel buffer; only used for kernel crypto module
 325 * @secure_dma: indicates whether this is secure_dma using IMR
326 *
327 * This function prepares the linked DMA tables and puts the
 328 * address for the linked list of tables into a DCB (data control
 329 * block), the address of which is known by the SEP hardware.
330 * Note that all bus addresses that are passed to the SEP
331 * are in 32 bit format; the SEP is a 32 bit device
332 */
333int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
334 unsigned long app_in_address,
335 unsigned long app_out_address,
336 u32 data_in_size,
337 u32 block_size,
338 u32 tail_block_size,
339 bool isapplet,
340 bool is_kva,
341 bool secure_dma,
342 struct sep_dcblock *dcb_region,
343 void **dmatables_region,
344 struct sep_dma_context **dma_ctx,
345 struct scatterlist *src_sg,
346 struct scatterlist *dst_sg);
347
348/**
349 * sep_free_dma_table_data_handler - free DMA table
 350 * @sep: pointer to struct sep_device
 351 * @dma_ctx: dma context
 352 *
 353 * Handles the request to free the DMA table for synchronous actions
354 */
355int sep_free_dma_table_data_handler(struct sep_device *sep,
356 struct sep_dma_context **dma_ctx);
357/**
358 * sep_send_command_handler - kick off a command
359 * @sep: SEP being signalled
360 *
 361 * This function raises an interrupt to the SEP, signalling that it has
 362 * a new command from the host
363 *
364 * Note that this function does fall under the ioctl lock
365 */
366int sep_send_command_handler(struct sep_device *sep);
367
368/**
369 * sep_wait_transaction - Used for synchronizing transactions
370 * @sep: SEP device
371 */
372int sep_wait_transaction(struct sep_device *sep);
373
374/**
375 * IOCTL command defines
376 */
377/* magic number 1 of the sep IOCTL command */
378#define SEP_IOC_MAGIC_NUMBER 's'
379
380/* sends interrupt to sep that message is ready */
381#define SEP_IOCSENDSEPCOMMAND \
382 _IO(SEP_IOC_MAGIC_NUMBER, 0)
198 383
199/* end transaction command */ 384/* end transaction command */
200#define SEP_IOCENDTRANSACTION \ 385#define SEP_IOCENDTRANSACTION \
201 _IO(SEP_IOC_MAGIC_NUMBER, 15) 386 _IO(SEP_IOC_MAGIC_NUMBER, 15)
202 387
203#define SEP_IOCRARPREPAREMESSAGE \
204 _IOW(SEP_IOC_MAGIC_NUMBER, 20, struct rar_hndl_to_bus_struct)
205
206#define SEP_IOCTLSETCALLERID \
207 _IOW(SEP_IOC_MAGIC_NUMBER, 34, struct caller_id_struct)
208
209#define SEP_IOCPREPAREDCB \ 388#define SEP_IOCPREPAREDCB \
210 _IOW(SEP_IOC_MAGIC_NUMBER, 35, struct build_dcb_struct) 389 _IOW(SEP_IOC_MAGIC_NUMBER, 35, struct build_dcb_struct)
211 390
212#define SEP_IOCFREEDCB \ 391#define SEP_IOCFREEDCB \
213 _IO(SEP_IOC_MAGIC_NUMBER, 36) 392 _IO(SEP_IOC_MAGIC_NUMBER, 36)
214 393
394struct sep_device;
395
396#define SEP_IOCPREPAREDCB_SECURE_DMA \
397 _IOW(SEP_IOC_MAGIC_NUMBER, 38, struct build_dcb_struct)
398
399#define SEP_IOCFREEDCB_SECURE_DMA \
400 _IO(SEP_IOC_MAGIC_NUMBER, 39)
401
215#endif 402#endif
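
Together with the call-status bit offsets defined in sep_driver_config.h (mmap, send-message, poll, end-transaction), these ioctls imply the legacy userspace sequence below. A sketch only: the mapped window size is an assumption, sep_legacy_transaction() is a hypothetical helper, and error unwinding is trimmed:

	#include <poll.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	/* SEP_IOC* defines come from sep_driver_api.h */

	#define SEP_MSG_AREA_SIZE 8192	/* assumed mmap window size */

	static int sep_legacy_transaction(int fd, const void *msg,
					  size_t msg_len)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		void *shared;
		int err;

		/* mmap owns the device for this transaction */
		shared = mmap(NULL, SEP_MSG_AREA_SIZE,
			      PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (shared == MAP_FAILED)
			return -1;

		memcpy(shared, msg, msg_len);

		/* Raise the doorbell interrupt to the SEP */
		err = ioctl(fd, SEP_IOCSENDSEPCOMMAND);
		if (!err)
			poll(&pfd, 1, -1);	/* wait for the SEP reply */

		/* Reply (if any) is in the shared area; release the device */
		ioctl(fd, SEP_IOCENDTRANSACTION);
		munmap(shared, SEP_MSG_AREA_SIZE);
		return err;
	}
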
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
index d6bfd2455222..fa7c0d09bfa5 100644
--- a/drivers/staging/sep/sep_driver_config.h
+++ b/drivers/staging/sep/sep_driver_config.h
@@ -2,8 +2,8 @@
2 * 2 *
3 * sep_driver_config.h - Security Processor Driver configuration 3 * sep_driver_config.h - Security Processor Driver configuration
4 * 4 *
5 * Copyright(c) 2009,2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009,2010 Discretix. All rights reserved. 6 * Contributions(c) 2009-2011 Discretix. All rights reserved.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free 9 * under the terms of the GNU General Public License as published by the Free
@@ -26,6 +26,7 @@
26 * CHANGES: 26 * CHANGES:
27 * 27 *
28 * 2010.06.26 Upgrade to Medfield 28 * 2010.06.26 Upgrade to Medfield
29 * 2011.02.22 Enable kernel crypto
29 * 30 *
30 */ 31 */
31 32
@@ -48,6 +49,8 @@
48/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */ 49/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
49#define SEP_DRIVER_ARM_DEBUG_MODE 0 50#define SEP_DRIVER_ARM_DEBUG_MODE 0
50 51
52/* Critical message area contents for sanity checking */
53#define SEP_START_MSG_TOKEN 0x02558808
51/*------------------------------------------- 54/*-------------------------------------------
52 INTERNAL DATA CONFIGURATION 55 INTERNAL DATA CONFIGURATION
53 -------------------------------------------*/ 56 -------------------------------------------*/
@@ -65,21 +68,17 @@
65#define SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE 16 68#define SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE 16
66 69
67/* flag that signifies that the lock is 70/* flag that signifies that the lock is
68currently held by the process (struct file) */ 71currently held by the process (struct file) */
69#define SEP_DRIVER_OWN_LOCK_FLAG 1 72#define SEP_DRIVER_OWN_LOCK_FLAG 1
70 73
71/* flag that signifies that the lock is currently NOT 74/* flag that signifies that the lock is currently NOT
72held by the process (struct file) */ 75held by the process (struct file) */
73#define SEP_DRIVER_DISOWN_LOCK_FLAG 0 76#define SEP_DRIVER_DISOWN_LOCK_FLAG 0
74 77
75/* indicates whether driver has mapped/unmapped shared area */ 78/* indicates whether driver has mapped/unmapped shared area */
76#define SEP_REQUEST_DAEMON_MAPPED 1 79#define SEP_REQUEST_DAEMON_MAPPED 1
77#define SEP_REQUEST_DAEMON_UNMAPPED 0 80#define SEP_REQUEST_DAEMON_UNMAPPED 0
78 81
79#define SEP_DEV_NAME "sep_sec_driver"
80#define SEP_DEV_SINGLETON "sep_sec_singleton_driver"
81#define SEP_DEV_DAEMON "sep_req_daemon_driver"
82
83/*-------------------------------------------------------- 82/*--------------------------------------------------------
84 SHARED AREA memory total size is 36K 83 SHARED AREA memory total size is 36K
 85 it is divided as follows: 84 it is divided as follows:
@@ -90,7 +89,7 @@ held by the process (struct file) */
90 } 89 }
91 DATA_POOL_AREA 12K } 90 DATA_POOL_AREA 12K }
92 91
93 SYNCHRONIC_DMA_TABLES_AREA 5K 92 SYNCHRONIC_DMA_TABLES_AREA 29K
94 93
 95 placeholder until driver changes 94 placeholder until driver changes
96 FLOW_DMA_TABLES_AREA 4K 95 FLOW_DMA_TABLES_AREA 4K
@@ -109,6 +108,12 @@ held by the process (struct file) */
109 108
110 109
111/* 110/*
111 the minimum length of the message - includes 2 reserved fields
 112 at the start, then token, message size and opcode fields; all are dwords
113*/
114#define SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES (5*sizeof(u32))
115
116/*
112 the maximum length of the message - the rest of the message shared 117 the maximum length of the message - the rest of the message shared
113 area will be dedicated to the dma lli tables 118 area will be dedicated to the dma lli tables
114*/ 119*/
@@ -124,7 +129,7 @@ held by the process (struct file) */
124#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (16 * 1024) 129#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (16 * 1024)
125 130
 126/* the size of the synchronic dma tables area in bytes */ 131/* the size of the synchronic dma tables area in bytes */
127#define SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES (1024 * 5) 132#define SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES (1024 * 29)
128 133
129/* Placeholder until driver changes */ 134/* Placeholder until driver changes */
130#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4) 135#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4)
@@ -132,6 +137,9 @@ held by the process (struct file) */
132/* system data (time, caller id etc') pool */ 137/* system data (time, caller id etc') pool */
133#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES (1024 * 3) 138#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES (1024 * 3)
134 139
140/* Offset of the sep printf buffer in the message area */
141#define SEP_DRIVER_PRINTF_OFFSET_IN_BYTES (5888)
142
135/* the size in bytes of the time memory */ 143/* the size in bytes of the time memory */
136#define SEP_DRIVER_TIME_MEMORY_SIZE_IN_BYTES 8 144#define SEP_DRIVER_TIME_MEMORY_SIZE_IN_BYTES 8
137 145
@@ -223,10 +231,10 @@ held by the process (struct file) */
223#define SEP_ALREADY_INITIALIZED_ERR 12 231#define SEP_ALREADY_INITIALIZED_ERR 12
224 232
225/* bit that locks access to the shared area */ 233/* bit that locks access to the shared area */
226#define SEP_MMAP_LOCK_BIT 0 234#define SEP_TRANSACTION_STARTED_LOCK_BIT 0
227 235
228/* bit that lock access to the poll - after send_command */ 236/* bit that lock access to the poll - after send_command */
229#define SEP_SEND_MSG_LOCK_BIT 1 237#define SEP_WORKING_LOCK_BIT 1
230 238
 231/* the token that defines the static pool address */ 239/* the token that defines the static pool address */
232#define SEP_STATIC_POOL_VAL_TOKEN 0xABBAABBA 240#define SEP_STATIC_POOL_VAL_TOKEN 0xABBAABBA
@@ -240,4 +248,51 @@ held by the process (struct file) */
240/* Time limit for SEP to finish */ 248/* Time limit for SEP to finish */
241#define WAIT_TIME 10 249#define WAIT_TIME 10
242 250
 251/* Delay for pm runtime suspend (reduces pm thrashing with bursty traffic) */
252#define SUSPEND_DELAY 10
253
 254/* Maximum number of delay iterations to wait for scu boot after runtime resume */
255#define SCU_DELAY_MAX 50
256
 257/* Delay per iteration (usec) while waiting for scu boot after runtime resume */
258#define SCU_DELAY_ITERATION 10
259
260
261/*
262 * Bits used in struct sep_call_status to check that
263 * driver's APIs are called in valid order
264 */
265
266/* Bit offset which indicates status of sep_write() */
267#define SEP_FASTCALL_WRITE_DONE_OFFSET 0
268
269/* Bit offset which indicates status of sep_mmap() */
270#define SEP_LEGACY_MMAP_DONE_OFFSET 1
271
272/* Bit offset which indicates status of the SEP_IOCSENDSEPCOMMAND ioctl */
273#define SEP_LEGACY_SENDMSG_DONE_OFFSET 2
274
275/* Bit offset which indicates status of sep_poll() */
276#define SEP_LEGACY_POLL_DONE_OFFSET 3
277
278/* Bit offset which indicates status of the SEP_IOCENDTRANSACTION ioctl */
279#define SEP_LEGACY_ENDTRANSACTION_DONE_OFFSET 4
280
281/*
 282 * Used to limit the number of concurrent processes
 283 * allowed to allocate dynamic buffers in the fastcall
 284 * interface.
285 */
286#define SEP_DOUBLEBUF_USERS_LIMIT 3
287
288/* Identifier for valid fastcall header */
289#define SEP_FC_MAGIC 0xFFAACCAA
290
291/*
292 * Used for enabling driver runtime power management.
293 * Useful for enabling/disabling it during performance
294 * testing
295 */
296#define SEP_ENABLE_RUNTIME_PM
297
243#endif /* SEP DRIVER CONFIG */ 298#endif /* SEP DRIVER CONFIG */
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
index 300f90963de3..a6a448170382 100644
--- a/drivers/staging/sep/sep_driver_hw_defs.h
+++ b/drivers/staging/sep/sep_driver_hw_defs.h
@@ -2,8 +2,8 @@
2 * 2 *
3 * sep_driver_hw_defs.h - Security Processor Driver hardware definitions 3 * sep_driver_hw_defs.h - Security Processor Driver hardware definitions
4 * 4 *
5 * Copyright(c) 2009,2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009,2010 Discretix. All rights reserved. 6 * Contributions(c) 2009-2011 Discretix. All rights reserved.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free 9 * under the terms of the GNU General Public License as published by the Free
@@ -26,15 +26,13 @@
26 * CHANGES: 26 * CHANGES:
27 * 27 *
28 * 2010.09.20 Upgrade to Medfield 28 * 2010.09.20 Upgrade to Medfield
29 * 2011.02.22 Enable kernel crypto
29 * 30 *
30 */ 31 */
31 32
32#ifndef SEP_DRIVER_HW_DEFS__H 33#ifndef SEP_DRIVER_HW_DEFS__H
33#define SEP_DRIVER_HW_DEFS__H 34#define SEP_DRIVER_HW_DEFS__H
34 35
35/* PCI ID's */
36#define MFLD_PCI_DEVICE_ID 0x0826
37
38/*----------------------- */ 36/*----------------------- */
39/* HW Registers Defines. */ 37/* HW Registers Defines. */
40/* */ 38/* */
@@ -42,181 +40,9 @@
42 40
43 41
44/* cf registers */ 42/* cf registers */
45#define HW_R0B_ADDR_0_REG_ADDR 0x0000UL
46#define HW_R0B_ADDR_1_REG_ADDR 0x0004UL
47#define HW_R0B_ADDR_2_REG_ADDR 0x0008UL
48#define HW_R0B_ADDR_3_REG_ADDR 0x000cUL
49#define HW_R0B_ADDR_4_REG_ADDR 0x0010UL
50#define HW_R0B_ADDR_5_REG_ADDR 0x0014UL
51#define HW_R0B_ADDR_6_REG_ADDR 0x0018UL
52#define HW_R0B_ADDR_7_REG_ADDR 0x001cUL
53#define HW_R0B_ADDR_8_REG_ADDR 0x0020UL
54#define HW_R2B_ADDR_0_REG_ADDR 0x0080UL
55#define HW_R2B_ADDR_1_REG_ADDR 0x0084UL
56#define HW_R2B_ADDR_2_REG_ADDR 0x0088UL
57#define HW_R2B_ADDR_3_REG_ADDR 0x008cUL
58#define HW_R2B_ADDR_4_REG_ADDR 0x0090UL
59#define HW_R2B_ADDR_5_REG_ADDR 0x0094UL
60#define HW_R2B_ADDR_6_REG_ADDR 0x0098UL
61#define HW_R2B_ADDR_7_REG_ADDR 0x009cUL
62#define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL
63#define HW_R3B_REG_ADDR 0x00C0UL
64#define HW_R4B_REG_ADDR 0x0100UL
65#define HW_CSA_ADDR_0_REG_ADDR 0x0140UL
66#define HW_CSA_ADDR_1_REG_ADDR 0x0144UL
67#define HW_CSA_ADDR_2_REG_ADDR 0x0148UL
68#define HW_CSA_ADDR_3_REG_ADDR 0x014cUL
69#define HW_CSA_ADDR_4_REG_ADDR 0x0150UL
70#define HW_CSA_ADDR_5_REG_ADDR 0x0154UL
71#define HW_CSA_ADDR_6_REG_ADDR 0x0158UL
72#define HW_CSA_ADDR_7_REG_ADDR 0x015cUL
73#define HW_CSA_ADDR_8_REG_ADDR 0x0160UL
74#define HW_CSA_REG_ADDR 0x0140UL
75#define HW_SINB_REG_ADDR 0x0180UL
76#define HW_SOUTB_REG_ADDR 0x0184UL
77#define HW_PKI_CONTROL_REG_ADDR 0x01C0UL
78#define HW_PKI_STATUS_REG_ADDR 0x01C4UL
79#define HW_PKI_BUSY_REG_ADDR 0x01C8UL
80#define HW_PKI_A_1025_REG_ADDR 0x01CCUL
81#define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL
82#define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL
83#define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL
84#define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL
85#define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL
86#define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL
87#define HW_PKI_CLR_REG_ADDR 0x01E8UL
88#define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL
89#define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL
90#define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL
91#define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL
92#define HW_DES_KEY_0_REG_ADDR 0x0208UL
93#define HW_DES_KEY_1_REG_ADDR 0x020CUL
94#define HW_DES_KEY_2_REG_ADDR 0x0210UL
95#define HW_DES_KEY_3_REG_ADDR 0x0214UL
96#define HW_DES_KEY_4_REG_ADDR 0x0218UL
97#define HW_DES_KEY_5_REG_ADDR 0x021CUL
98#define HW_DES_CONTROL_0_REG_ADDR 0x0220UL
99#define HW_DES_CONTROL_1_REG_ADDR 0x0224UL
100#define HW_DES_IV_0_REG_ADDR 0x0228UL
101#define HW_DES_IV_1_REG_ADDR 0x022CUL
102#define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL
103#define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL
104#define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL
105#define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL
106#define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL
107#define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL
108#define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL
109#define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL
110#define HW_AES_KEY_0_REG_ADDR 0x0400UL
111#define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL
112#define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL
113#define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL
114#define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL
115#define HW_AES_IV_0_REG_ADDR 0x0440UL
116#define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL
117#define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL
118#define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL
119#define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL
120#define HW_AES_CTR1_REG_ADDR 0x0460UL
121#define HW_AES_SK_REG_ADDR 0x0478UL
122#define HW_AES_MAC_OK_REG_ADDR 0x0480UL
123#define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL
124#define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL
125#define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL
126#define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL
127#define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL
128#define HW_AES_CONTROL_REG_ADDR 0x04C0UL
129#define HW_HASH_H0_REG_ADDR 0x0640UL
130#define HW_HASH_H1_REG_ADDR 0x0644UL
131#define HW_HASH_H2_REG_ADDR 0x0648UL
132#define HW_HASH_H3_REG_ADDR 0x064CUL
133#define HW_HASH_H4_REG_ADDR 0x0650UL
134#define HW_HASH_H5_REG_ADDR 0x0654UL
135#define HW_HASH_H6_REG_ADDR 0x0658UL
136#define HW_HASH_H7_REG_ADDR 0x065CUL
137#define HW_HASH_H8_REG_ADDR 0x0660UL
138#define HW_HASH_H9_REG_ADDR 0x0664UL
139#define HW_HASH_H10_REG_ADDR 0x0668UL
140#define HW_HASH_H11_REG_ADDR 0x066CUL
141#define HW_HASH_H12_REG_ADDR 0x0670UL
142#define HW_HASH_H13_REG_ADDR 0x0674UL
143#define HW_HASH_H14_REG_ADDR 0x0678UL
144#define HW_HASH_H15_REG_ADDR 0x067CUL
145#define HW_HASH_CONTROL_REG_ADDR 0x07C0UL
146#define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL
147#define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL
148#define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL
149#define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL
150#define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL
151#define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL
152#define HW_HASH_PARAM_REG_ADDR 0x07DCUL
153#define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL
154#define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL
155#define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL
156#define HW_HASH_DATA_REG_ADDR 0x07ECUL
157#define HW_DRNG_CONTROL_REG_ADDR 0x0800UL
158#define HW_DRNG_VALID_REG_ADDR 0x0804UL
159#define HW_DRNG_DATA_REG_ADDR 0x0808UL
160#define HW_RND_SRC_EN_REG_ADDR 0x080CUL
161#define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL
162#define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL
163#define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL
164#define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL
165#define HW_CLK_STATUS_REG_ADDR 0x0824UL
166#define HW_CLK_ENABLE_REG_ADDR 0x0828UL
167#define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL
168#define HW_RND_SRC_CTL_REG_ADDR 0x0858UL
169#define HW_CRYPTO_CTL_REG_ADDR 0x0900UL
170#define HW_CRYPTO_STATUS_REG_ADDR 0x090CUL
171#define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL
172#define HW_AES_BUSY_REG_ADDR 0x0914UL
173#define HW_DES_BUSY_REG_ADDR 0x0918UL
174#define HW_HASH_BUSY_REG_ADDR 0x091CUL
175#define HW_CONTENT_REG_ADDR 0x0924UL
176#define HW_VERSION_REG_ADDR 0x0928UL
177#define HW_CONTEXT_ID_REG_ADDR 0x0930UL
178#define HW_DIN_BUFFER_REG_ADDR 0x0C00UL
179#define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL
180#define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL
181#define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL
182#define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL
183#define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL
184#define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL
185#define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL
186#define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL
187#define HW_OLD_DATA_REG_ADDR 0x0C48UL
188#define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL
189#define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL
190#define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL
191#define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL
192#define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL
193#define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL
194#define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL
195#define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL
196#define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL
197#define HW_READ_ALIGN_REG_ADDR 0x0D3CUL
198#define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL
199#define HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL
200#define HW_AHB_SINGLE_REG_ADDR 0x0E00UL
201#define HW_SRAM_DATA_REG_ADDR 0x0F00UL
202#define HW_SRAM_ADDR_REG_ADDR 0x0F04UL
203#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
204#define HW_HOST_IRR_REG_ADDR 0x0A00UL 43#define HW_HOST_IRR_REG_ADDR 0x0A00UL
205#define HW_HOST_IMR_REG_ADDR 0x0A04UL 44#define HW_HOST_IMR_REG_ADDR 0x0A04UL
206#define HW_HOST_ICR_REG_ADDR 0x0A08UL 45#define HW_HOST_ICR_REG_ADDR 0x0A08UL
207#define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL
208#define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL
209#define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL
210#define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL
211#define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL
212#define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL
213#define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL
214#define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL
215#define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL
216#define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL
217#define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL
218#define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL
219#define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL
220#define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL 46#define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL
221#define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL 47#define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL
222#define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL 48#define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL
@@ -225,9 +51,6 @@
225#define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL 51#define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL
226#define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL 52#define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL
227#define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL 53#define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL
228#define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL 54#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
229#define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL
230#define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL
231#define HW_CC_SRAM_BASE_ADDRESS 0x5800UL
232 55
233#endif /* ifndef HW_DEFS */ 56#endif /* ifndef HW_DEFS */
diff --git a/drivers/staging/sep/sep_main.c b/drivers/staging/sep/sep_main.c
new file mode 100644
index 000000000000..ad54c2e5c932
--- /dev/null
+++ b/drivers/staging/sep/sep_main.c
@@ -0,0 +1,4518 @@
1/*
2 *
3 * sep_main.c - Security Processor Driver main group of functions
4 *
5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009-2011 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * CONTACTS:
22 *
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 * 2011.01.21 Move to sep_main.c to allow for sep_crypto.c
31 * 2011.02.22 Enable kernel crypto operation
32 *
33 * Please note that this driver is based on information in the Discretix
34 * CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
35 * Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
36 * Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
37 * Overview and Integration Guide.
38 */
39/* #define DEBUG */
40/* #define SEP_PERF_DEBUG */
41
42#include <linux/init.h>
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/miscdevice.h>
46#include <linux/fs.h>
47#include <linux/cdev.h>
48#include <linux/kdev_t.h>
49#include <linux/mutex.h>
50#include <linux/sched.h>
51#include <linux/mm.h>
52#include <linux/poll.h>
53#include <linux/wait.h>
54#include <linux/pci.h>
55#include <linux/pm_runtime.h>
56#include <linux/slab.h>
57#include <linux/ioctl.h>
58#include <asm/current.h>
59#include <linux/ioport.h>
60#include <linux/io.h>
61#include <linux/interrupt.h>
62#include <linux/pagemap.h>
63#include <asm/cacheflush.h>
64#include <linux/sched.h>
65#include <linux/delay.h>
66#include <linux/jiffies.h>
67#include <linux/async.h>
68#include <linux/crypto.h>
69#include <crypto/internal/hash.h>
70#include <crypto/scatterwalk.h>
71#include <crypto/sha.h>
72#include <crypto/md5.h>
73#include <crypto/aes.h>
74#include <crypto/des.h>
75#include <crypto/hash.h>
76
77#include "sep_driver_hw_defs.h"
78#include "sep_driver_config.h"
79#include "sep_driver_api.h"
80#include "sep_dev.h"
81#include "sep_crypto.h"
82
83#define CREATE_TRACE_POINTS
84#include "sep_trace_events.h"
85
86/*
87 * Let's not spend cycles iterating over message
 88 * area contents if debugging is not enabled
89 */
90#ifdef DEBUG
91#define sep_dump_message(sep) _sep_dump_message(sep)
92#else
93#define sep_dump_message(sep)
94#endif
95
96/**
 97 * Currently, there is only one SEP device per platform;
 98 * in the event future platforms have more than one SEP
 99 * device, this will become a linked list
100 */
101
102struct sep_device *sep_dev;
103
104/**
105 * sep_queue_status_remove - Removes transaction from status queue
106 * @sep: SEP device
107 * @sep_queue_info: pointer to status queue
108 *
 109 * This function removes information about a transaction from the queue.
110 */
111void sep_queue_status_remove(struct sep_device *sep,
112 struct sep_queue_info **queue_elem)
113{
114 unsigned long lck_flags;
115
116 dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
117 current->pid);
118
119 if (!queue_elem || !(*queue_elem)) {
120 dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
121 current->pid, __func__);
122 return;
123 }
124
125 spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
126 list_del(&(*queue_elem)->list);
127 sep->sep_queue_num--;
128 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
129
130 kfree(*queue_elem);
131 *queue_elem = NULL;
132
133 dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
134 current->pid);
135 return;
136}
137
138/**
139 * sep_queue_status_add - Adds transaction to status queue
140 * @sep: SEP device
141 * @opcode: transaction opcode
142 * @size: input data size
143 * @pid: pid of current process
144 * @name: current process name
145 * @name_len: length of name (current process)
146 *
 147 * This function adds information about a started transaction to the
 148 * status queue.
149 */
150struct sep_queue_info *sep_queue_status_add(
151 struct sep_device *sep,
152 u32 opcode,
153 u32 size,
154 u32 pid,
155 u8 *name, size_t name_len)
156{
157 unsigned long lck_flags;
158 struct sep_queue_info *my_elem = NULL;
159
160 my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);
161
162 if (!my_elem)
163 return NULL;
164
165 dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);
166
167 my_elem->data.opcode = opcode;
168 my_elem->data.size = size;
169 my_elem->data.pid = pid;
170
171 if (name_len > TASK_COMM_LEN)
172 name_len = TASK_COMM_LEN;
173
174 memcpy(&my_elem->data.name, name, name_len);
175
176 spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
177
178 list_add_tail(&my_elem->list, &sep->sep_queue_status);
179 sep->sep_queue_num++;
180
181 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
182
183 return my_elem;
184}
185
186/**
187 * sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
188 * @sep: SEP device
189 * @dmatables_region: Destination pointer for the buffer
190 * @dma_ctx: DMA context for the transaction
191 * @table_count: Number of MLLI/DMA tables to create
 192 * The buffer created will not work as-is for DMA operations;
193 * it needs to be copied over to the appropriate place in the
194 * shared area.
195 */
196static int sep_allocate_dmatables_region(struct sep_device *sep,
197 void **dmatables_region,
198 struct sep_dma_context *dma_ctx,
199 const u32 table_count)
200{
201 const size_t new_len =
202 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
203
204 void *tmp_region = NULL;
205
206 dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
207 current->pid, dma_ctx);
208 dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
209 current->pid, dmatables_region);
210
211 if (!dma_ctx || !dmatables_region) {
212 dev_warn(&sep->pdev->dev,
213 "[PID%d] dma context/region uninitialized\n",
214 current->pid);
215 return -EINVAL;
216 }
217
218 dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
219 current->pid, new_len);
220 dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
221 dma_ctx->dmatables_len);
222 tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
223 if (!tmp_region) {
224 dev_warn(&sep->pdev->dev,
225 "[PID%d] no mem for dma tables region\n",
226 current->pid);
227 return -ENOMEM;
228 }
229
230 /* Were there any previous tables that need to be preserved ? */
231 if (*dmatables_region) {
232 memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
233 kfree(*dmatables_region);
234 *dmatables_region = NULL;
235 }
236
237 *dmatables_region = tmp_region;
238
239 dma_ctx->dmatables_len += new_len;
240
241 return 0;
242}
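
The kzalloc-copy-free sequence above is essentially a zero-filling reallocation. The open coding is deliberate: krealloc() preserves the old contents but leaves the grown tail uninitialized, so a drop-in replacement would still need an explicit memset. A sketch of the distinction; sep_grow_zeroed() is a hypothetical helper, not part of the driver:

	static void *sep_grow_zeroed(void *buf, size_t old_len,
				     size_t grow_len)
	{
		u8 *p = krealloc(buf, old_len + grow_len, GFP_KERNEL);

		/*
		 * On failure the original buffer is still valid (and still
		 * owned by the caller); on success the added tail must be
		 * zeroed by hand, since krealloc() does not do it.
		 */
		if (p)
			memset(p + old_len, 0, grow_len);
		return p;
	}
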
243
244/**
245 * sep_wait_transaction - Used for synchronizing transactions
246 * @sep: SEP device
247 */
248int sep_wait_transaction(struct sep_device *sep)
249{
250 int error = 0;
251 DEFINE_WAIT(wait);
252
253 if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
254 &sep->in_use_flags)) {
255 dev_dbg(&sep->pdev->dev,
256 "[PID%d] no transactions, returning\n",
257 current->pid);
258 goto end_function_setpid;
259 }
260
261 /*
262 * Looping needed even for exclusive waitq entries
263 * due to process wakeup latencies, previous process
264 * might have already created another transaction.
265 */
266 for (;;) {
267 /*
268 * Exclusive waitq entry, so that only one process is
269 * woken up from the queue at a time.
270 */
271 prepare_to_wait_exclusive(&sep->event_transactions,
272 &wait,
273 TASK_INTERRUPTIBLE);
274 if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
275 &sep->in_use_flags)) {
276 dev_dbg(&sep->pdev->dev,
277 "[PID%d] no transactions, breaking\n",
278 current->pid);
279 break;
280 }
281 dev_dbg(&sep->pdev->dev,
282 "[PID%d] transactions ongoing, sleeping\n",
283 current->pid);
284 schedule();
285 dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);
286
287 if (signal_pending(current)) {
288 dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
289 current->pid);
290 error = -EINTR;
291 goto end_function;
292 }
293 }
294end_function_setpid:
295 /*
296 * The pid_doing_transaction indicates that this process
 297 * now owns the facilities to perform a transaction with
298 * the SEP. While this process is performing a transaction,
299 * no other process who has the SEP device open can perform
300 * any transactions. This method allows more than one process
301 * to have the device open at any given time, which provides
302 * finer granularity for device utilization by multiple
303 * processes.
304 */
305 /* Only one process is able to progress here at a time */
306 sep->pid_doing_transaction = current->pid;
307
308end_function:
309 finish_wait(&sep->event_transactions, &wait);
310
311 return error;
312}
313
314/**
315 * sep_check_transaction_owner - Checks if current process owns transaction
316 * @sep: SEP device
317 */
318static inline int sep_check_transaction_owner(struct sep_device *sep)
319{
320 dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
321 current->pid,
322 sep->pid_doing_transaction);
323
324 if ((sep->pid_doing_transaction == 0) ||
325 (current->pid != sep->pid_doing_transaction)) {
326 return -EACCES;
327 }
328
329 /* We own the transaction */
330 return 0;
331}
332
333#ifdef DEBUG
334
335/**
336 * sep_dump_message - dump the message that is pending
337 * @sep: SEP device
 338 * This will only print the dump if DEBUG is set; it also
 339 * follows the kernel debug print enabling
340 */
341static void _sep_dump_message(struct sep_device *sep)
342{
343 int count;
344
345 u32 *p = sep->shared_addr;
346
347 for (count = 0; count < 10 * 4; count += 4)
348 dev_dbg(&sep->pdev->dev,
349 "[PID%d] Word %d of the message is %x\n",
350 current->pid, count/4, *p++);
351}
352
353#endif
354
355/**
 356 * sep_map_and_alloc_shared_area - allocate shared block
357 * @sep: security processor
358 * @size: size of shared area
359 */
360static int sep_map_and_alloc_shared_area(struct sep_device *sep)
361{
362 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
363 sep->shared_size,
364 &sep->shared_bus, GFP_KERNEL);
365
366 if (!sep->shared_addr) {
367 dev_dbg(&sep->pdev->dev,
368 "[PID%d] shared memory dma_alloc_coherent failed\n",
369 current->pid);
370 return -ENOMEM;
371 }
372 dev_dbg(&sep->pdev->dev,
373 "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
374 current->pid,
375 sep->shared_size, sep->shared_addr,
376 (unsigned long long)sep->shared_bus);
377 return 0;
378}
379
380/**
381 * sep_unmap_and_free_shared_area - free shared block
382 * @sep: security processor
383 */
384static void sep_unmap_and_free_shared_area(struct sep_device *sep)
385{
386 dma_free_coherent(&sep->pdev->dev, sep->shared_size,
387 sep->shared_addr, sep->shared_bus);
388}
389
390#ifdef DEBUG
391
392/**
393 * sep_shared_bus_to_virt - convert bus/virt addresses
394 * @sep: pointer to struct sep_device
395 * @bus_address: address to convert
396 *
397 * Returns virtual address inside the shared area according
398 * to the bus address.
399 */
400static void *sep_shared_bus_to_virt(struct sep_device *sep,
401 dma_addr_t bus_address)
402{
403 return sep->shared_addr + (bus_address - sep->shared_bus);
404}
405
406#endif
407
408/**
409 * sep_open - device open method
410 * @inode: inode of SEP device
411 * @filp: file handle to SEP device
412 *
413 * Open method for the SEP device. Called when userspace opens
414 * the SEP device node.
415 *
416 * Returns zero on success otherwise an error code.
417 */
418static int sep_open(struct inode *inode, struct file *filp)
419{
420 struct sep_device *sep;
421 struct sep_private_data *priv;
422
423 dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);
424
425 if (filp->f_flags & O_NONBLOCK)
426 return -ENOTSUPP;
427
428 /*
429 * Get the SEP device structure and use it for the
430 * private_data field in filp for other methods
431 */
432
433 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
434 if (!priv)
435 return -ENOMEM;
436
437 sep = sep_dev;
438 priv->device = sep;
439 filp->private_data = priv;
440
441 dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
442 current->pid, priv);
443
444 /* Anyone can open; locking takes place at transaction level */
445 return 0;
446}
447
448/**
449 * sep_free_dma_table_data_handler - free DMA table
 450 * @sep: pointer to struct sep_device
 451 * @dma_ctx: dma context
 452 *
 453 * Handles the request to free the DMA table for synchronous actions
454 */
455int sep_free_dma_table_data_handler(struct sep_device *sep,
456 struct sep_dma_context **dma_ctx)
457{
458 int count;
459 int dcb_counter;
460 /* Pointer to the current dma_resource struct */
461 struct sep_dma_resource *dma;
462
463 dev_dbg(&sep->pdev->dev,
464 "[PID%d] sep_free_dma_table_data_handler\n",
465 current->pid);
466
467 if (!dma_ctx || !(*dma_ctx)) {
468 /* No context or context already freed */
469 dev_dbg(&sep->pdev->dev,
470 "[PID%d] no DMA context or context already freed\n",
471 current->pid);
472
473 return 0;
474 }
475
476 dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
477 current->pid,
478 (*dma_ctx)->nr_dcb_creat);
479
480 for (dcb_counter = 0;
481 dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
482 dma = &(*dma_ctx)->dma_res_arr[dcb_counter];
483
484 /* Unmap and free input map array */
485 if (dma->in_map_array) {
486 for (count = 0; count < dma->in_num_pages; count++) {
487 dma_unmap_page(&sep->pdev->dev,
488 dma->in_map_array[count].dma_addr,
489 dma->in_map_array[count].size,
490 DMA_TO_DEVICE);
491 }
492 kfree(dma->in_map_array);
493 }
494
495 /**
 496 * Output is handled differently. If
497 * this was a secure dma into restricted memory,
498 * then we skip this step altogether as restricted
499 * memory is not available to the o/s at all.
500 */
501 if (((*dma_ctx)->secure_dma == false) &&
502 (dma->out_map_array)) {
503
504 for (count = 0; count < dma->out_num_pages; count++) {
505 dma_unmap_page(&sep->pdev->dev,
506 dma->out_map_array[count].dma_addr,
507 dma->out_map_array[count].size,
508 DMA_FROM_DEVICE);
509 }
510 kfree(dma->out_map_array);
511 }
512
513 /* Free page cache for output */
514 if (dma->in_page_array) {
515 for (count = 0; count < dma->in_num_pages; count++) {
516 flush_dcache_page(dma->in_page_array[count]);
517 page_cache_release(dma->in_page_array[count]);
518 }
519 kfree(dma->in_page_array);
520 }
521
522 /* Again, we do this only for non secure dma */
523 if (((*dma_ctx)->secure_dma == false) &&
524 (dma->out_page_array)) {
525
526 for (count = 0; count < dma->out_num_pages; count++) {
 527 if (!PageReserved(dma->out_page_array[count]))
 528 SetPageDirty(dma->out_page_array[count]);
531
532 flush_dcache_page(dma->out_page_array[count]);
533 page_cache_release(dma->out_page_array[count]);
534 }
535 kfree(dma->out_page_array);
536 }
537
538 /**
539 * Note that here we use in_map_num_entries because we
540 * don't have a page array; the page array is generated
 541 * only in lock_user_pages, which is not called
 542 * for kernel crypto; kernel crypto is the exclusive
 543 * user of the sg (scatter gather) list
544 */
545 if (dma->src_sg) {
546 dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
547 dma->in_map_num_entries, DMA_TO_DEVICE);
548 dma->src_sg = NULL;
549 }
550
551 if (dma->dst_sg) {
552 dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
553 dma->in_map_num_entries, DMA_FROM_DEVICE);
554 dma->dst_sg = NULL;
555 }
556
557 /* Reset all the values */
558 dma->in_page_array = NULL;
559 dma->out_page_array = NULL;
560 dma->in_num_pages = 0;
561 dma->out_num_pages = 0;
562 dma->in_map_array = NULL;
563 dma->out_map_array = NULL;
564 dma->in_map_num_entries = 0;
565 dma->out_map_num_entries = 0;
566 }
567
568 (*dma_ctx)->nr_dcb_creat = 0;
569 (*dma_ctx)->num_lli_tables_created = 0;
570
571 kfree(*dma_ctx);
572 *dma_ctx = NULL;
573
574 dev_dbg(&sep->pdev->dev,
575 "[PID%d] sep_free_dma_table_data_handler end\n",
576 current->pid);
577
578 return 0;
579}
580
581/**
582 * sep_end_transaction_handler - end transaction
583 * @sep: pointer to struct sep_device
584 * @dma_ctx: DMA context
585 * @call_status: Call status
586 *
587 * This API handles the end transaction request.
588 */
589static int sep_end_transaction_handler(struct sep_device *sep,
590 struct sep_dma_context **dma_ctx,
591 struct sep_call_status *call_status,
592 struct sep_queue_info **my_queue_elem)
593{
594 dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);
595
596 /*
597 * Extraneous transaction clearing would mess up PM
598 * device usage counters and SEP would get suspended
599 * just before we send a command to SEP in the next
600 * transaction
 601 */
602 if (sep_check_transaction_owner(sep)) {
603 dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
604 current->pid);
605 return 0;
606 }
607
608 /* Update queue status */
609 sep_queue_status_remove(sep, my_queue_elem);
610
611 /* Check that all the DMA resources were freed */
612 if (dma_ctx)
613 sep_free_dma_table_data_handler(sep, dma_ctx);
614
615 /* Reset call status for next transaction */
616 if (call_status)
617 call_status->status = 0;
618
619 /* Clear the message area to avoid next transaction reading
620 * sensitive results from previous transaction */
621 memset(sep->shared_addr, 0,
622 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
623
624 /* start suspend delay */
625#ifdef SEP_ENABLE_RUNTIME_PM
626 if (sep->in_use) {
627 sep->in_use = 0;
628 pm_runtime_mark_last_busy(&sep->pdev->dev);
629 pm_runtime_put_autosuspend(&sep->pdev->dev);
630 }
631#endif
632
633 clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
634 sep->pid_doing_transaction = 0;
635
636 /* Now it's safe for next process to proceed */
637 dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
638 current->pid);
639 clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
640 wake_up(&sep->event_transactions);
641
642 return 0;
643}
644
645
646/**
647 * sep_release - close a SEP device
648 * @inode: inode of SEP device
649 * @filp: file handle being closed
650 *
651 * Called on the final close of a SEP device.
652 */
653static int sep_release(struct inode *inode, struct file *filp)
654{
655 struct sep_private_data * const private_data = filp->private_data;
656 struct sep_call_status *call_status = &private_data->call_status;
657 struct sep_device *sep = private_data->device;
658 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
659 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
660
661 dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);
662
663 sep_end_transaction_handler(sep, dma_ctx, call_status,
664 my_queue_elem);
665
666 kfree(filp->private_data);
667
668 return 0;
669}
670
671/**
672 * sep_mmap - maps the shared area to user space
673 * @filp: pointer to struct file
674 * @vma: pointer to vm_area_struct
675 *
676 * Called on an mmap of our space via the normal SEP device
677 */
678static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
679{
680 struct sep_private_data * const private_data = filp->private_data;
681 struct sep_call_status *call_status = &private_data->call_status;
682 struct sep_device *sep = private_data->device;
683 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
684 dma_addr_t bus_addr;
685 unsigned long error = 0;
686
687 dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);
688
689 /* Set the transaction busy (own the device) */
690 /*
691 * Problem for multithreaded applications is that here we're
692 * possibly going to sleep while holding a write lock on
693 * current->mm->mmap_sem, which will cause deadlock for ongoing
694 * transaction trying to create DMA tables
695 */
696 error = sep_wait_transaction(sep);
697 if (error)
698 /* Interrupted by signal, don't clear transaction */
699 goto end_function;
700
701 /* Clear the message area to avoid next transaction reading
702 * sensitive results from previous transaction */
703 memset(sep->shared_addr, 0,
704 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
705
706 /*
 707 * Check that the size of the mapped range does not exceed the size
 708 * of the message shared area
709 */
710 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
711 error = -EINVAL;
712 goto end_function_with_error;
713 }
714
715 dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
716 current->pid, sep->shared_addr);
717
718 /* Get bus address */
719 bus_addr = sep->shared_bus;
720
721 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
722 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
 723 dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
724 current->pid);
725 error = -EAGAIN;
726 goto end_function_with_error;
727 }
728
729 /* Update call status */
730 set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);
731
732 goto end_function;
733
734end_function_with_error:
735 /* Clear our transaction */
736 sep_end_transaction_handler(sep, NULL, call_status,
737 my_queue_elem);
738
739end_function:
740 return error;
741}
742
743/**
744 * sep_poll - poll handler
745 * @filp: pointer to struct file
746 * @wait: pointer to poll_table
747 *
748 * Called by the OS when the kernel is asked to do a poll on
749 * a SEP file handle.
750 */
751static unsigned int sep_poll(struct file *filp, poll_table *wait)
752{
753 struct sep_private_data * const private_data = filp->private_data;
754 struct sep_call_status *call_status = &private_data->call_status;
755 struct sep_device *sep = private_data->device;
756 u32 mask = 0;
757 u32 retval = 0;
758 u32 retval2 = 0;
759 unsigned long lock_irq_flag;
760
761 /* Am I the process that owns the transaction? */
762 if (sep_check_transaction_owner(sep)) {
763 dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
764 current->pid);
765 mask = POLLERR;
766 goto end_function;
767 }
768
769 /* Check if send command or send_reply were activated previously */
770 if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
771 &call_status->status)) {
772 dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
773 current->pid);
774 mask = POLLERR;
775 goto end_function;
776 }
777
778
779 /* Add the event to the polling wait table */
780 dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
781 current->pid);
782
783 poll_wait(filp, &sep->event_interrupt, wait);
784
785 dev_dbg(&sep->pdev->dev,
786 "[PID%d] poll: send_ct is %lx reply ct is %lx\n",
787 current->pid, sep->send_ct, sep->reply_ct);
788
 789 /* Check if an error occurred during poll */
790 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
791 if ((retval2 != 0x0) && (retval2 != 0x8)) {
792 dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
793 current->pid, retval2);
794 mask |= POLLERR;
795 goto end_function;
796 }
797
798 spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
799
800 if (sep->send_ct == sep->reply_ct) {
801 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
802 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
803 dev_dbg(&sep->pdev->dev,
804 "[PID%d] poll: data ready check (GPR2) %x\n",
805 current->pid, retval);
806
807 /* Check if printf request */
808 if ((retval >> 30) & 0x1) {
809 dev_dbg(&sep->pdev->dev,
810 "[PID%d] poll: SEP printf request\n",
811 current->pid);
812 goto end_function;
813 }
814
 815 /* Check if this is a SEP reply or a request */
816 if (retval >> 31) {
817 dev_dbg(&sep->pdev->dev,
818 "[PID%d] poll: SEP request\n",
819 current->pid);
820 } else {
821 dev_dbg(&sep->pdev->dev,
822 "[PID%d] poll: normal return\n",
823 current->pid);
824 sep_dump_message(sep);
825 dev_dbg(&sep->pdev->dev,
826 "[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
827 current->pid);
828 mask |= POLLIN | POLLRDNORM;
829 }
830 set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
831 } else {
832 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
833 dev_dbg(&sep->pdev->dev,
834 "[PID%d] poll; no reply; returning mask of 0\n",
835 current->pid);
836 mask = 0;
837 }
838
839end_function:
840 return mask;
841}
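
sep_poll() packs its status decisions into two register reads: GPR3 carries the error status (only 0x0 and 0x8 are healthy values here), while in GPR2 bit 30 flags a SEP printf request and bit 31 distinguishes a SEP-originated request from a normal reply. Hypothetical helpers restating that decode, sketched directly from the checks above:

	/* Sketch: GPR2 status decode as interpreted by sep_poll() */
	static inline bool sep_gpr2_is_printf_request(u32 gpr2)
	{
		return (gpr2 >> 30) & 0x1;
	}

	static inline bool sep_gpr2_is_sep_request(u32 gpr2)
	{
		return (gpr2 >> 31) & 0x1;	/* clear bit 31: a reply */
	}
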
842
843/**
844 * sep_time_address - address in SEP memory of time
845 * @sep: SEP device we want the address from
846 *
847 * Return the address of the two dwords in memory used for time
848 * setting.
849 */
850static u32 *sep_time_address(struct sep_device *sep)
851{
852 return sep->shared_addr +
853 SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
854}
855
856/**
857 * sep_set_time - set the SEP time
858 * @sep: the SEP we are setting the time for
859 *
860 * Calculates time and sets it at the predefined address.
861 * Called with the SEP mutex held.
862 */
863static unsigned long sep_set_time(struct sep_device *sep)
864{
865 struct timeval time;
866 u32 *time_addr; /* Address of time as seen by the kernel */
867
868
869 do_gettimeofday(&time);
870
871 /* Set value in the SYSTEM MEMORY offset */
872 time_addr = sep_time_address(sep);
873
874 time_addr[0] = SEP_TIME_VAL_TOKEN;
875 time_addr[1] = time.tv_sec;
876
877 dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
878 current->pid, time.tv_sec);
879 dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
880 current->pid, time_addr);
881 dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
882 current->pid, sep->shared_addr);
883
884 return time.tv_sec;
885}
886
887/**
888 * sep_send_command_handler - kick off a command
889 * @sep: SEP being signalled
890 *
 891 * This function raises an interrupt to the SEP, signalling that it has
 892 * a new command from the host
893 *
894 * Note that this function does fall under the ioctl lock
895 */
896int sep_send_command_handler(struct sep_device *sep)
897{
898 unsigned long lock_irq_flag;
899 u32 *msg_pool;
900 int error = 0;
901
902 /* Basic sanity check; set msg pool to start of shared area */
903 msg_pool = (u32 *)sep->shared_addr;
904 msg_pool += 2;
905
906 /* Look for start msg token */
907 if (*msg_pool != SEP_START_MSG_TOKEN) {
908 dev_warn(&sep->pdev->dev, "start message token not present\n");
909 error = -EPROTO;
910 goto end_function;
911 }
912
913 /* Do we have a reasonable size? */
914 msg_pool += 1;
915 if ((*msg_pool < 2) ||
916 (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {
917
918 dev_warn(&sep->pdev->dev, "invalid message size\n");
919 error = -EPROTO;
920 goto end_function;
921 }
922
923 /* Does the command look reasonable? */
924 msg_pool += 1;
925 if (*msg_pool < 2) {
926 dev_warn(&sep->pdev->dev, "invalid message opcode\n");
927 error = -EPROTO;
928 goto end_function;
929 }
930
931#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
932 dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
933 current->pid,
934 sep->pdev->dev.power.runtime_status);
935 sep->in_use = 1; /* device is about to be used */
936 pm_runtime_get_sync(&sep->pdev->dev);
937#endif
938
939 if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
940 error = -EPROTO;
941 goto end_function;
942 }
943 sep->in_use = 1; /* device is about to be used */
944 sep_set_time(sep);
945
946 sep_dump_message(sep);
947
948 /* Update counter */
949 spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
950 sep->send_ct++;
951 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
952
953 dev_dbg(&sep->pdev->dev,
954 "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
955 current->pid, sep->send_ct, sep->reply_ct);
956
957 /* Send interrupt to SEP */
958 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
959
960end_function:
961 return error;
962}
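/*
 * Illustrative sketch (not driver code): the shared-area message header
 * that the sanity checks in sep_send_command_handler() imply. Word
 * indices are u32 offsets from sep->shared_addr; words 0 and 1 are
 * skipped by the "msg_pool += 2" above, so their meaning is not visible
 * in this function.
 *
 *   word 0..1: (not validated here)
 *   word 2:    SEP_START_MSG_TOKEN
 *   word 3:    message size, 2..SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES
 *   word 4:    opcode, must be >= 2
 */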
963
964/**
965 * sep_crypto_dma - DMA map a scatterlist
966 * @sep: pointer to struct sep_device
967 * @sg: pointer to struct scatterlist
968 * @direction: DMA direction of the transfer
969 * @dma_maps: pointer to place a pointer to array of dma maps
970 * This is filled in; anything previous there will be lost
971 * The structure for dma maps is sep_dma_map
972 * @returns number of dma maps on success; negative on error
973 *
974 * This creates the dma maps from the scatterlist
975 * It is used only for kernel crypto as it works with the scatterlist
976 * representation of data buffers
977 *
978 */
979static int sep_crypto_dma(
980 struct sep_device *sep,
981 struct scatterlist *sg,
982 struct sep_dma_map **dma_maps,
983 enum dma_data_direction direction)
984{
985 struct scatterlist *temp_sg;
986
987 u32 count_segment;
988 u32 count_mapped;
989 struct sep_dma_map *sep_dma;
990 int ct1;
991
992 if (sg->length == 0)
993 return 0;
994
995 /* Count the segments */
996 temp_sg = sg;
997 count_segment = 0;
998 while (temp_sg) {
999 count_segment += 1;
1000 temp_sg = scatterwalk_sg_next(temp_sg);
1001 }
1002 dev_dbg(&sep->pdev->dev,
1003 "There are (hex) %x segments in sg\n", count_segment);
1004
1005 /* DMA map segments */
1006 count_mapped = dma_map_sg(&sep->pdev->dev, sg,
1007 count_segment, direction);
1008
1009 dev_dbg(&sep->pdev->dev,
1010 "There are (hex) %x maps in sg\n", count_mapped);
1011
1012 if (count_mapped == 0) {
1013 dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
1014 return -ENOMEM;
1015 }
1016
1017 sep_dma = kmalloc(sizeof(struct sep_dma_map) *
1018 count_mapped, GFP_ATOMIC);
1019
1020 if (sep_dma == NULL) {
1021 dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
1022 return -ENOMEM;
1023 }
1024
1025 for_each_sg(sg, temp_sg, count_mapped, ct1) {
1026 sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
1027 sep_dma[ct1].size = sg_dma_len(temp_sg);
1028 dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
1029 ct1, (unsigned long)sep_dma[ct1].dma_addr,
1030 (unsigned long)sep_dma[ct1].size);
1031 }
1032
1033 *dma_maps = sep_dma;
1034 return count_mapped;
1035
1036}
1037
1038/**
1039 * sep_crypto_lli - build an lli array from a scatterlist
1040 * @sep: pointer to struct sep_device
1041 * @sg: pointer to struct scatterlist
1042 * @data_size: total data size
1043 * @direction: DMA direction of the transfer
1044 * @maps: pointer to place a pointer to array of dma maps
1045 * This is filled in; anything previous there will be lost
1046 * The structure for dma maps is sep_dma_map
1047 * @llis: pointer to place a pointer to array of lli entries
1048 * This is filled in; anything previous there will be lost
1049 * The structure for lli entries is sep_lli_entry
1050 * @returns number of dma maps on success; negative on error
1051 *
1052 * This creates the LLI table from the scatterlist
1053 * It is only used for kernel crypto as it works exclusively
1054 * with the scatterlist (struct scatterlist) representation of
1055 * data buffers
1056 */
1057static int sep_crypto_lli(
1058 struct sep_device *sep,
1059 struct scatterlist *sg,
1060 struct sep_dma_map **maps,
1061 struct sep_lli_entry **llis,
1062 u32 data_size,
1063 enum dma_data_direction direction)
1064{
1065
1066 int ct1;
1067 struct sep_lli_entry *sep_lli;
1068 struct sep_dma_map *sep_map;
1069
1070 int nbr_ents;
1071
1072 nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
1073 if (nbr_ents <= 0) {
1074 dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
1075 nbr_ents);
1076 return nbr_ents;
1077 }
1078
1079 sep_map = *maps;
1080
1081 sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);
1082
1083 if (sep_lli == NULL) {
1084 dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");
1085
1086 kfree(*maps);
1087 *maps = NULL;
1088 return -ENOMEM;
1089 }
1090
1091 for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
1092 sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;
1093
1094 /* Maximum for page is total data size */
1095 if (sep_map[ct1].size > data_size)
1096 sep_map[ct1].size = data_size;
1097
1098 sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
1099 }
1100
1101 *llis = sep_lli;
1102 return nbr_ents;
1103}
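/*
 * Illustrative worked example for sep_crypto_dma()/sep_crypto_lli(),
 * with hypothetical values: if dma_map_sg() coalesces a scatterlist into
 * two segments of 0x1000 and 0x234 bytes at bus addresses 0x80001000 and
 * 0x80050000, and data_size is 0x1234, the returned arrays would be:
 *
 *   maps[0] = { .dma_addr = 0x80001000, .size = 0x1000 }
 *   maps[1] = { .dma_addr = 0x80050000, .size = 0x0234 }
 *   llis[0] = { .bus_address = 0x80001000, .block_size = 0x1000 }
 *   llis[1] = { .bus_address = 0x80050000, .block_size = 0x0234 }
 *
 * Note that the clamp above compares each segment against the total
 * data_size, not against the data remaining, so it only takes effect
 * when a single segment is larger than the whole transfer.
 */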
1104
1105/**
1106 * sep_lock_kernel_pages - map kernel pages for DMA
1107 * @sep: pointer to struct sep_device
1108 * @kernel_virt_addr: address of data buffer in kernel
1109 * @data_size: size of data
1110 * @lli_array_ptr: lli array
1111 * @in_out_flag: input into device or output from device
1112 *
1113 * This function locks all the physical pages of the kernel virtual buffer
1114 * and constructs a basic lli array, where each entry holds the physical
1115 * page address and the size that application data holds in this page
1116 * This function is used only during kernel crypto module calls from within
1117 * the kernel (when ioctl is not used)
1118 *
1119 * This is used only for kernel crypto. Kernel pages
1120 * are handled differently as they are done via
1121 * scatter gather lists (struct scatterlist)
1122 */
1123static int sep_lock_kernel_pages(struct sep_device *sep,
1124 unsigned long kernel_virt_addr,
1125 u32 data_size,
1126 struct sep_lli_entry **lli_array_ptr,
1127 int in_out_flag,
1128 struct sep_dma_context *dma_ctx)
1129
1130{
1131 int num_pages; /* signed: sep_crypto_lli() returns negative on error */
1132 struct scatterlist *sg;
1133
1134 /* Array of lli */
1135 struct sep_lli_entry *lli_array;
1136 /* Map array */
1137 struct sep_dma_map *map_array;
1138
1139 enum dma_data_direction direction;
1140
1141 lli_array = NULL;
1142 map_array = NULL;
1143
1144 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1145 direction = DMA_TO_DEVICE;
1146 sg = dma_ctx->src_sg;
1147 } else {
1148 direction = DMA_FROM_DEVICE;
1149 sg = dma_ctx->dst_sg;
1150 }
1151
1152 num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
1153 data_size, direction);
1154
1155 if (num_pages <= 0) {
1156 dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
1157 num_pages);
1158 return -ENOMEM;
1159 }
1160
1161 /* Put mapped kernel sg into kernel resource array */
1162
1163 /* Set output params according to the in_out flag */
1164 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1165 *lli_array_ptr = lli_array;
1166 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1167 num_pages;
1168 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1169 NULL;
1170 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1171 map_array;
1172 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1173 num_pages;
1174 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
1175 dma_ctx->src_sg;
1176 } else {
1177 *lli_array_ptr = lli_array;
1178 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1179 num_pages;
1180 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1181 NULL;
1182 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1183 map_array;
1184 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1185 out_map_num_entries = num_pages;
1186 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
1187 dma_ctx->dst_sg;
1188 }
1189
1190 return 0;
1191}
1192
1193/**
1194 * sep_lock_user_pages - lock and map user pages for DMA
1195 * @sep: pointer to struct sep_device
1196 * @app_virt_addr: user memory data buffer
1197 * @data_size: size of data buffer
1198 * @lli_array_ptr: lli array
1199 * @in_out_flag: input or output to device
1200 *
1201 * This function locks all the physical pages of the application
1202 * virtual buffer and constructs a basic lli array, where each entry
1203 * holds the physical page address and the size that the application
1204 * data holds in these physical pages
1205 */
1206static int sep_lock_user_pages(struct sep_device *sep,
1207 u32 app_virt_addr,
1208 u32 data_size,
1209 struct sep_lli_entry **lli_array_ptr,
1210 int in_out_flag,
1211 struct sep_dma_context *dma_ctx)
1212
1213{
1214 int error = 0;
1215 u32 count;
1216 int result;
1217 /* The page of the end address of the user space buffer */
1218 u32 end_page;
1219 /* The page of the start address of the user space buffer */
1220 u32 start_page;
1221 /* The range in pages */
1222 u32 num_pages;
1223 /* Array of pointers to page */
1224 struct page **page_array;
1225 /* Array of lli */
1226 struct sep_lli_entry *lli_array;
1227 /* Map array */
1228 struct sep_dma_map *map_array;
1229
1230 /* Set start and end pages and num pages */
1231 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1232 start_page = app_virt_addr >> PAGE_SHIFT;
1233 num_pages = end_page - start_page + 1;
1234
1235 dev_dbg(&sep->pdev->dev,
1236 "[PID%d] lock user pages app_virt_addr is %x\n",
1237 current->pid, app_virt_addr);
1238
1239 dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1240 current->pid, data_size);
1241 dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1242 current->pid, start_page);
1243 dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1244 current->pid, end_page);
1245 dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1246 current->pid, num_pages);
1247
1248 /* Allocate array of pages structure pointers */
1249 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
1250 if (!page_array) {
1251 error = -ENOMEM;
1252 goto end_function;
1253 }
1254 map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
1255 if (!map_array) {
1256 dev_warn(&sep->pdev->dev,
1257 "[PID%d] kmalloc for map_array failed\n",
1258 current->pid);
1259 error = -ENOMEM;
1260 goto end_function_with_error1;
1261 }
1262
1263 lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
1264 GFP_ATOMIC);
1265
1266 if (!lli_array) {
1267 dev_warn(&sep->pdev->dev,
1268 "[PID%d] kmalloc for lli_array failed\n",
1269 current->pid);
1270 error = -ENOMEM;
1271 goto end_function_with_error2;
1272 }
1273
1274 /* Convert the application virtual address into a set of physical */
1275 down_read(&current->mm->mmap_sem);
1276 result = get_user_pages(current, current->mm, app_virt_addr,
1277 num_pages,
1278 ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
1279 0, page_array, NULL);
1280
1281 up_read(&current->mm->mmap_sem);
1282
1283 /* Check the number of pages locked - if not all then exit with error */
1284 if (result != num_pages) {
1285 dev_warn(&sep->pdev->dev,
1286 "[PID%d] not all pages locked by get_user_pages, "
1287 "result 0x%X, num_pages 0x%X\n",
1288 current->pid, result, num_pages);
1289 error = -ENOMEM;
1290 goto end_function_with_error3;
1291 }
1292
1293 dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
1294 current->pid);
1295
1296 /*
1297 * Fill the array using page array data and
1298 * map the pages - this action will also flush the cache as needed
1299 */
1300 for (count = 0; count < num_pages; count++) {
1301 /* Fill the map array */
1302 map_array[count].dma_addr =
1303 dma_map_page(&sep->pdev->dev, page_array[count],
1304 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
1305
1306 map_array[count].size = PAGE_SIZE;
1307
1308 /* Fill the lli array entry */
1309 lli_array[count].bus_address = (u32)map_array[count].dma_addr;
1310 lli_array[count].block_size = PAGE_SIZE;
1311
1312 dev_dbg(&sep->pdev->dev,
1313 "[PID%d] lli_array[%x].bus_address is %08lx, "
1314 "lli_array[%x].block_size is (hex) %x\n", current->pid,
1315 count, (unsigned long)lli_array[count].bus_address,
1316 count, lli_array[count].block_size);
1317 }
1318
1319 /* Check the offset for the first page */
1320 lli_array[0].bus_address =
1321 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1322
1323 /* Check that not all the data is in the first page only */
1324 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1325 lli_array[0].block_size = data_size;
1326 else
1327 lli_array[0].block_size =
1328 PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1329
1330 dev_dbg(&sep->pdev->dev,
1331 "[PID%d] After check if page 0 has all data\n",
1332 current->pid);
1333 dev_dbg(&sep->pdev->dev,
1334 "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
1335 "lli_array[0].block_size is (hex) %x\n",
1336 current->pid,
1337 (unsigned long)lli_array[0].bus_address,
1338 lli_array[0].block_size);
1339
1340
1341 /* Check the size of the last page */
1342 if (num_pages > 1) {
1343 lli_array[num_pages - 1].block_size =
1344 (app_virt_addr + data_size) & (~PAGE_MASK);
1345 if (lli_array[num_pages - 1].block_size == 0)
1346 lli_array[num_pages - 1].block_size = PAGE_SIZE;
1347
1348 dev_dbg(&sep->pdev->dev,
1349 "[PID%d] After last page size adjustment\n",
1350 current->pid);
1351 dev_dbg(&sep->pdev->dev,
1352 "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
1353 "lli_array[%x].block_size is (hex) %x\n",
1354 current->pid,
1355 num_pages - 1,
1356 (unsigned long)lli_array[num_pages - 1].bus_address,
1357 num_pages - 1,
1358 lli_array[num_pages - 1].block_size);
1359 }
1360
1361 /* Set output params according to the in_out flag */
1362 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1363 *lli_array_ptr = lli_array;
1364 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1365 num_pages;
1366 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1367 page_array;
1368 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1369 map_array;
1370 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1371 num_pages;
1372 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
1373 } else {
1374 *lli_array_ptr = lli_array;
1375 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1376 num_pages;
1377 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1378 page_array;
1379 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1380 map_array;
1381 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1382 out_map_num_entries = num_pages;
1383 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
1384 }
1385 goto end_function;
1386
1387end_function_with_error3:
1388 /* Free lli array */
1389 kfree(lli_array);
1390
1391end_function_with_error2:
1392 kfree(map_array);
1393
1394end_function_with_error1:
1395 /* Free page array */
1396 kfree(page_array);
1397
1398end_function:
1399 return error;
1400}
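/*
 * Illustrative worked example (hypothetical values) of the first/last
 * page adjustments in sep_lock_user_pages(): app_virt_addr = 0x10000234,
 * data_size = 0x3000, PAGE_SIZE = 0x1000.
 *
 *   start_page = 0x10000, end_page = 0x10003, num_pages = 4
 *   lli_array[0]: bus_address += 0x234, block_size = 0x1000 - 0x234 = 0xdcc
 *   lli_array[1], lli_array[2]: block_size = 0x1000 (full pages)
 *   lli_array[3]: block_size = (0x10000234 + 0x3000) & ~PAGE_MASK = 0x234
 *
 *   total = 0xdcc + 0x1000 + 0x1000 + 0x234 = 0x3000 = data_size
 */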
1401
1402/**
1403 * sep_lli_table_secure_dma - get lli array for IMR addresses
1404 * @sep: pointer to struct sep_device
1405 * @app_virt_addr: user memory data buffer
1406 * @data_size: size of data buffer
1407 * @lli_array_ptr: lli array
1408 * @in_out_flag: not used
1409 * @dma_ctx: pointer to struct sep_dma_context
1410 *
1411 * This function creates lli tables for outputting data to
1412 * IMR memory, which is memory that cannot be accessed by
1413 * the x86 processor.
1414 */
1415static int sep_lli_table_secure_dma(struct sep_device *sep,
1416 u32 app_virt_addr,
1417 u32 data_size,
1418 struct sep_lli_entry **lli_array_ptr,
1419 int in_out_flag,
1420 struct sep_dma_context *dma_ctx)
1421
1422{
1423 int error = 0;
1424 u32 count;
1425 /* The page of the end address of the user space buffer */
1426 u32 end_page;
1427 /* The page of the start address of the user space buffer */
1428 u32 start_page;
1429 /* The range in pages */
1430 u32 num_pages;
1431 /* Array of lli */
1432 struct sep_lli_entry *lli_array;
1433
1434 /* Set start and end pages and num pages */
1435 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1436 start_page = app_virt_addr >> PAGE_SHIFT;
1437 num_pages = end_page - start_page + 1;
1438
1439 dev_dbg(&sep->pdev->dev, "[PID%d] lock user pages"
1440 " app_virt_addr is %x\n", current->pid, app_virt_addr);
1441
1442 dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1443 current->pid, data_size);
1444 dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1445 current->pid, start_page);
1446 dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1447 current->pid, end_page);
1448 dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1449 current->pid, num_pages);
1450
1451 lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
1452 GFP_ATOMIC);
1453
1454 if (!lli_array) {
1455 dev_warn(&sep->pdev->dev,
1456 "[PID%d] kmalloc for lli_array failed\n",
1457 current->pid);
1458 return -ENOMEM;
1459 }
1460
1461 /*
1462 * Fill the lli_array
1463 */
1464 start_page = start_page << PAGE_SHIFT;
1465 for (count = 0; count < num_pages; count++) {
1466 /* Fill the lli array entry */
1467 lli_array[count].bus_address = start_page;
1468 lli_array[count].block_size = PAGE_SIZE;
1469
1470 start_page += PAGE_SIZE;
1471
1472 dev_dbg(&sep->pdev->dev,
1473 "[PID%d] lli_array[%x].bus_address is %08lx, "
1474 "lli_array[%x].block_size is (hex) %x\n",
1475 current->pid,
1476 count, (unsigned long)lli_array[count].bus_address,
1477 count, lli_array[count].block_size);
1478 }
1479
1480 /* Check the offset for the first page */
1481 lli_array[0].bus_address =
1482 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1483
1484 /* Check that not all the data is in the first page only */
1485 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1486 lli_array[0].block_size = data_size;
1487 else
1488 lli_array[0].block_size =
1489 PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1490
1491 dev_dbg(&sep->pdev->dev,
1492 "[PID%d] After check if page 0 has all data\n"
1493 "lli_array[0].bus_address is (hex) %08lx, "
1494 "lli_array[0].block_size is (hex) %x\n",
1495 current->pid,
1496 (unsigned long)lli_array[0].bus_address,
1497 lli_array[0].block_size);
1498
1499 /* Check the size of the last page */
1500 if (num_pages > 1) {
1501 lli_array[num_pages - 1].block_size =
1502 (app_virt_addr + data_size) & (~PAGE_MASK);
1503 if (lli_array[num_pages - 1].block_size == 0)
1504 lli_array[num_pages - 1].block_size = PAGE_SIZE;
1505
1506 dev_dbg(&sep->pdev->dev,
1507 "[PID%d] After last page size adjustment\n"
1508 "lli_array[%x].bus_address is (hex) %08lx, "
1509 "lli_array[%x].block_size is (hex) %x\n",
1510 current->pid, num_pages - 1,
1511 (unsigned long)lli_array[num_pages - 1].bus_address,
1512 num_pages - 1,
1513 lli_array[num_pages - 1].block_size);
1514 }
1515 *lli_array_ptr = lli_array;
1516 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
1517 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
1518 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
1519 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;
1520
1521 return error;
1522}
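/*
 * Note on the function above: unlike sep_lock_user_pages(), this
 * secure-DMA variant performs no get_user_pages() and no dma_map_page().
 * The address passed in is used directly as the bus address of the IMR
 * region, so only the lli array is built and the page/map slots of the
 * resource array are deliberately left empty.
 */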
1523
1524/**
1525 * sep_calculate_lli_table_max_size - size the LLI table
1526 * @sep: pointer to struct sep_device
1527 * @lli_in_array_ptr: pointer to the remaining lli entries
1528 * @num_array_entries: number of entries left in the lli array
1529 * @last_table_flag: set when the array is fully consumed by this table
1530 *
1531 * This function calculates the size of data that can be inserted into
1532 * the lli table from this array, such that either the table is full
1533 * (all entries are entered), or there are no more entries in the
1534 * lli array
1535 */
1536static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
1537 struct sep_lli_entry *lli_in_array_ptr,
1538 u32 num_array_entries,
1539 u32 *last_table_flag)
1540{
1541 u32 counter;
1542 /* Table data size */
1543 u32 table_data_size = 0;
1544 /* Data size for the next table */
1545 u32 next_table_data_size;
1546
1547 *last_table_flag = 0;
1548
1549 /*
1550 * Calculate the data in the out lli table till we fill the whole
1551 * table or till the data has ended
1552 */
1553 for (counter = 0;
1554 (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
1555 (counter < num_array_entries); counter++)
1556 table_data_size += lli_in_array_ptr[counter].block_size;
1557
1558 /*
1559 * Check if we reached the last entry,
1560 * meaning this is the last table to build,
1561 * and no need to check the block alignment
1562 */
1563 if (counter == num_array_entries) {
1564 /* Set the last table flag */
1565 *last_table_flag = 1;
1566 goto end_function;
1567 }
1568
1569 /*
1570 * Calculate the data size of the next table.
1571 * Stop if no entries left or if data size is more than the DMA restriction
1572 */
1573 next_table_data_size = 0;
1574 for (; counter < num_array_entries; counter++) {
1575 next_table_data_size += lli_in_array_ptr[counter].block_size;
1576 if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1577 break;
1578 }
1579
1580 /*
1581 * Check if the next table data size is less than the DMA restriction.
1582 * If it is - recalculate the current table size, so that the next
1583 * table data size will be adequate for DMA
1584 */
1585 if (next_table_data_size &&
1586 next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1587
1588 table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
1589 next_table_data_size);
1590
1591end_function:
1592 return table_data_size;
1593}
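/*
 * Illustrative worked example (hypothetical values, assuming
 * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE == 0x100): suppose the first loop
 * fills the table after consuming a 0x400 block, leaving one 0x80 block
 * in the array. 0x80 is below the DMA minimum, so the function returns
 * 0x400 - (0x100 - 0x80) = 0x380; the 0x100 left over then satisfies
 * the minimum for the next table. If the first loop instead consumes
 * every entry, last_table_flag is set and no adjustment is made.
 */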
1594
1595/**
1596 * sep_build_lli_table - build an lli array for the given table
1597 * @sep: pointer to struct sep_device
1598 * @lli_array_ptr: pointer to lli array
1599 * @lli_table_ptr: pointer to lli table
1600 * @num_processed_entries_ptr: pointer to number of entries
1601 * @num_table_entries_ptr: pointer to number of entries in the built table
1602 * @table_data_size: total data size
1603 *
1604 * Builds an lli table from the lli_array according to
1605 * the given size of data
1606 */
1607static void sep_build_lli_table(struct sep_device *sep,
1608 struct sep_lli_entry *lli_array_ptr,
1609 struct sep_lli_entry *lli_table_ptr,
1610 u32 *num_processed_entries_ptr,
1611 u32 *num_table_entries_ptr,
1612 u32 table_data_size)
1613{
1614 /* Current table data size */
1615 u32 curr_table_data_size;
1616 /* Counter of lli array entry */
1617 u32 array_counter;
1618
1619 /* Init current table data size and lli array entry counter */
1620 curr_table_data_size = 0;
1621 array_counter = 0;
1622 *num_table_entries_ptr = 1;
1623
1624 dev_dbg(&sep->pdev->dev,
1625 "[PID%d] build lli table table_data_size: (hex) %x\n",
1626 current->pid, table_data_size);
1627
1628 /* Fill the table till table size reaches the needed amount */
1629 while (curr_table_data_size < table_data_size) {
1630 /* Update the number of entries in table */
1631 (*num_table_entries_ptr)++;
1632
1633 lli_table_ptr->bus_address =
1634 cpu_to_le32(lli_array_ptr[array_counter].bus_address);
1635
1636 lli_table_ptr->block_size =
1637 cpu_to_le32(lli_array_ptr[array_counter].block_size);
1638
1639 curr_table_data_size += lli_array_ptr[array_counter].block_size;
1640
1641 dev_dbg(&sep->pdev->dev,
1642 "[PID%d] lli_table_ptr is %p\n",
1643 current->pid, lli_table_ptr);
1644 dev_dbg(&sep->pdev->dev,
1645 "[PID%d] lli_table_ptr->bus_address: %08lx\n",
1646 current->pid,
1647 (unsigned long)lli_table_ptr->bus_address);
1648
1649 dev_dbg(&sep->pdev->dev,
1650 "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1651 current->pid, lli_table_ptr->block_size);
1652
1653 /* Check for overflow of the table data */
1654 if (curr_table_data_size > table_data_size) {
1655 dev_dbg(&sep->pdev->dev,
1656 "[PID%d] curr_table_data_size too large\n",
1657 current->pid);
1658
1659 /* Update the size of block in the table */
1660 lli_table_ptr->block_size =
1661 cpu_to_le32(lli_table_ptr->block_size) -
1662 (curr_table_data_size - table_data_size);
1663
1664 /* Update the physical address in the lli array */
1665 lli_array_ptr[array_counter].bus_address +=
1666 cpu_to_le32(lli_table_ptr->block_size);
1667
1668 /* Update the block size left in the lli array */
1669 lli_array_ptr[array_counter].block_size =
1670 (curr_table_data_size - table_data_size);
1671 } else
1672 /* Advance to the next entry in the lli_array */
1673 array_counter++;
1674
1675 dev_dbg(&sep->pdev->dev,
1676 "[PID%d] lli_table_ptr->bus_address is %08lx\n",
1677 current->pid,
1678 (unsigned long)lli_table_ptr->bus_address);
1679 dev_dbg(&sep->pdev->dev,
1680 "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1681 current->pid,
1682 lli_table_ptr->block_size);
1683
1684 /* Move to the next entry in table */
1685 lli_table_ptr++;
1686 }
1687
1688 /* Set the info entry to default */
1689 lli_table_ptr->bus_address = 0xffffffff;
1690 lli_table_ptr->block_size = 0;
1691
1692 /* Set the output parameter */
1693 *num_processed_entries_ptr += array_counter;
1694
1695}
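/*
 * Illustrative helpers (hypothetical, not part of the driver): when
 * tables are chained by the callers below, the info entry's block_size
 * packs the entry count into the top byte and the table data size into
 * the low 24 bits ("(num_entries << 24) | table_data_size"), and
 * sep_debug_print_lli_tables() decodes it the same way. These inline
 * sketches spell out the encoding that the driver open-codes.
 */
static inline u32 sep_lli_info_pack(u32 num_entries, u32 data_size)
{
	return (num_entries << 24) | (data_size & 0x00ffffff);
}

static inline void sep_lli_info_unpack(u32 block_size, u32 *num_entries,
				       u32 *data_size)
{
	*num_entries = (block_size >> 24) & 0xff;
	*data_size = block_size & 0x00ffffff;
}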
1696
1697/**
1698 * sep_shared_area_virt_to_bus - map shared area to bus address
1699 * @sep: pointer to struct sep_device
1700 * @virt_address: virtual address to convert
1701 *
1702 * This function returns the physical address inside the shared area according
1703 * to the virtual address. It can be either on the external RAM device
1704 * (ioremapped), or on the system RAM
1705 * This implementation is for the external RAM
1706 */
1707static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
1708 void *virt_address)
1709{
1710 dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
1711 current->pid, virt_address);
1712 dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
1713 current->pid,
1714 (unsigned long)
1715 sep->shared_bus + (virt_address - sep->shared_addr));
1716
1717 return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
1718}
1719
1720/**
1721 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1722 * @sep: pointer to struct sep_device
1723 * @bus_address: bus address to convert
1724 *
1725 * This function returns the virtual address inside the shared area
1726 * according to the physical address. It can be either on the
1727 * external RAM device (ioremapped), or on the system RAM
1728 * This implementation is for the external RAM
1729 */
1730static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
1731 dma_addr_t bus_address)
1732{
1733 dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
1734 current->pid,
1735 (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
1736 (size_t)(bus_address - sep->shared_bus)));
1737
1738 return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
1739}
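/*
 * Illustrative worked example (hypothetical values) for the two
 * conversion helpers above: with sep->shared_addr == 0xf8000000 (kernel
 * virtual) and sep->shared_bus == 0x7ff00000, a table at virtual
 * 0xf8001c00 converts to bus 0x7ff01c00, and converting that bus address
 * back returns the original pointer. Both helpers are plain offset
 * arithmetic within the single contiguous shared area.
 */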
1740
1741/**
1742 * sep_debug_print_lli_tables - dump LLI table
1743 * @sep: pointer to struct sep_device
1744 * @lli_table_ptr: pointer to sep_lli_entry
1745 * @num_table_entries: number of entries
1746 * @table_data_size: total data size
1747 *
1748 * Walk the list of created tables and print all the data
1749 */
1750static void sep_debug_print_lli_tables(struct sep_device *sep,
1751 struct sep_lli_entry *lli_table_ptr,
1752 unsigned long num_table_entries,
1753 unsigned long table_data_size)
1754{
1755#ifdef DEBUG
1756 unsigned long table_count = 1;
1757 unsigned long entries_count = 0;
1758
1759 dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
1760 current->pid);
1761 if (num_table_entries == 0) {
1762 dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
1763 current->pid);
1764 return;
1765 }
1766
1767 while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
1768 dev_dbg(&sep->pdev->dev,
1769 "[PID%d] lli table %08lx, "
1770 "table_data_size is (hex) %lx\n",
1771 current->pid, table_count, table_data_size);
1772 dev_dbg(&sep->pdev->dev,
1773 "[PID%d] num_table_entries is (hex) %lx\n",
1774 current->pid, num_table_entries);
1775
1776 /* Print entries of the table (without info entry) */
1777 for (entries_count = 0; entries_count < num_table_entries;
1778 entries_count++, lli_table_ptr++) {
1779
1780 dev_dbg(&sep->pdev->dev,
1781 "[PID%d] lli_table_ptr address is %08lx\n",
1782 current->pid,
1783 (unsigned long) lli_table_ptr);
1784
1785 dev_dbg(&sep->pdev->dev,
1786 "[PID%d] phys address is %08lx "
1787 "block size is (hex) %x\n", current->pid,
1788 (unsigned long)lli_table_ptr->bus_address,
1789 lli_table_ptr->block_size);
1790 }
1791
1792 /* Point to the info entry */
1793 lli_table_ptr--;
1794
1795 dev_dbg(&sep->pdev->dev,
1796 "[PID%d] phys lli_table_ptr->block_size "
1797 "is (hex) %x\n",
1798 current->pid,
1799 lli_table_ptr->block_size);
1800
1801 dev_dbg(&sep->pdev->dev,
1802 "[PID%d] phys lli_table_ptr->physical_address "
1803 "is %08lx\n",
1804 current->pid,
1805 (unsigned long)lli_table_ptr->bus_address);
1806
1808 table_data_size = lli_table_ptr->block_size & 0xffffff;
1809 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1810
1811 dev_dbg(&sep->pdev->dev,
1812 "[PID%d] phys table_data_size is "
1813 "(hex) %lx num_table_entries is"
1814 " %lx bus_address is%lx\n",
1815 current->pid,
1816 table_data_size,
1817 num_table_entries,
1818 (unsigned long)lli_table_ptr->bus_address);
1819
1820 if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
1821 lli_table_ptr = (struct sep_lli_entry *)
1822 sep_shared_bus_to_virt(sep,
1823 (unsigned long)lli_table_ptr->bus_address);
1824
1825 table_count++;
1826 }
1827 dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
1828 current->pid);
1829#endif
1830}
1831
1832
1833/**
1834 * sep_prepare_empty_lli_table - create a blank LLI table
1835 * @sep: pointer to struct sep_device
1836 * @lli_table_addr_ptr: pointer to lli table
1837 * @num_entries_ptr: pointer to number of entries
1838 * @table_data_size_ptr: point to table data size
1839 * @dmatables_region: Optional buffer for DMA tables
1840 * @dma_ctx: DMA context
1841 *
1842 * This function creates empty lli tables when there is no data
1843 */
1844static void sep_prepare_empty_lli_table(struct sep_device *sep,
1845 dma_addr_t *lli_table_addr_ptr,
1846 u32 *num_entries_ptr,
1847 u32 *table_data_size_ptr,
1848 void **dmatables_region,
1849 struct sep_dma_context *dma_ctx)
1850{
1851 struct sep_lli_entry *lli_table_ptr;
1852
1853 /* Find the area for new table */
1854 lli_table_ptr =
1855 (struct sep_lli_entry *)(sep->shared_addr +
1856 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1857 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1858 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1859
1860 if (dmatables_region && *dmatables_region)
1861 lli_table_ptr = *dmatables_region;
1862
1863 lli_table_ptr->bus_address = 0;
1864 lli_table_ptr->block_size = 0;
1865
1866 lli_table_ptr++;
1867 lli_table_ptr->bus_address = 0xFFFFFFFF;
1868 lli_table_ptr->block_size = 0;
1869
1870 /* Set the output parameter value */
1871 *lli_table_addr_ptr = sep->shared_bus +
1872 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1873 dma_ctx->num_lli_tables_created *
1874 sizeof(struct sep_lli_entry) *
1875 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1876
1877 /* Set the num of entries and table data size for empty table */
1878 *num_entries_ptr = 2;
1879 *table_data_size_ptr = 0;
1880
1881 /* Update the number of created tables */
1882 dma_ctx->num_lli_tables_created++;
1883}
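/*
 * Illustrative sketch (not driver code): the two-entry "empty" table
 * built above, as seen by the SEP at *lli_table_addr_ptr:
 *
 *   entry 0: { .bus_address = 0x00000000, .block_size = 0 }   data entry
 *   entry 1: { .bus_address = 0xFFFFFFFF, .block_size = 0 }   info entry
 *
 * The 0xFFFFFFFF info address is the same end-of-chain marker that
 * sep_debug_print_lli_tables() stops on.
 */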
1884
1885/**
1886 * sep_prepare_input_dma_table - prepare input DMA mappings
1887 * @sep: pointer to struct sep_device
1888 * @data_size: size of the data to DMA
1889 * @block_size: cipher block size for alignment
1890 * @lli_table_ptr: returned bus address of the first input lli table
1891 * @num_entries_ptr: returned number of entries in the first table
1892 * @table_data_size_ptr: returned data size of the first table
1893 * @is_kva: set for kernel data (kernel crypto call)
1894 *
1895 * This function prepares only the input DMA table for synchronous symmetric
1896 * operations (HASH)
1897 * Note that all bus addresses that are passed to the SEP
1898 * are in 32 bit format; the SEP is a 32 bit device
1899 */
1900static int sep_prepare_input_dma_table(struct sep_device *sep,
1901 unsigned long app_virt_addr,
1902 u32 data_size,
1903 u32 block_size,
1904 dma_addr_t *lli_table_ptr,
1905 u32 *num_entries_ptr,
1906 u32 *table_data_size_ptr,
1907 bool is_kva,
1908 void **dmatables_region,
1909 struct sep_dma_context *dma_ctx
1910)
1911{
1912 int error = 0;
1913 /* Pointer to the info entry of the table - the last entry */
1914 struct sep_lli_entry *info_entry_ptr;
1915 /* Array of pointers to page */
1916 struct sep_lli_entry *lli_array_ptr;
1917 /* Points to the first entry to be processed in the lli_in_array */
1918 u32 current_entry = 0;
1919 /* Num entries in the virtual buffer */
1920 u32 sep_lli_entries = 0;
1921 /* Lli table pointer */
1922 struct sep_lli_entry *in_lli_table_ptr;
1923 /* The total data in one table */
1924 u32 table_data_size = 0;
1925 /* Flag for last table */
1926 u32 last_table_flag = 0;
1927 /* Number of entries in lli table */
1928 u32 num_entries_in_table = 0;
1929 /* Next table address */
1930 void *lli_table_alloc_addr = NULL;
1931 void *dma_lli_table_alloc_addr = NULL;
1932 void *dma_in_lli_table_ptr = NULL;
1933
1934 dev_dbg(&sep->pdev->dev, "[PID%d] prepare input dma "
1935 "tbl data size: (hex) %x\n",
1936 current->pid, data_size);
1937
1938 dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
1939 current->pid, block_size);
1940
1941 /* Initialize the pages pointers */
1942 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
1943 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;
1944
1945 /* Set the kernel address for first table to be allocated */
1946 lli_table_alloc_addr = (void *)(sep->shared_addr +
1947 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1948 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1949 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1950
1951 if (data_size == 0) {
1952 if (dmatables_region) {
1953 error = sep_allocate_dmatables_region(sep,
1954 dmatables_region,
1955 dma_ctx,
1956 1);
1957 if (error)
1958 return error;
1959 }
1960 /* Special case - create empty table - 2 entries, zero data */
1961 sep_prepare_empty_lli_table(sep, lli_table_ptr,
1962 num_entries_ptr, table_data_size_ptr,
1963 dmatables_region, dma_ctx);
1964 goto update_dcb_counter;
1965 }
1966
1967 /* Check if the pages are in Kernel Virtual Address layout */
1968 if (is_kva == true)
1969 error = sep_lock_kernel_pages(sep, app_virt_addr,
1970 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1971 dma_ctx);
1972 else
1973 /*
1974 * Lock the pages of the user buffer
1975 * and translate them to pages
1976 */
1977 error = sep_lock_user_pages(sep, app_virt_addr,
1978 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1979 dma_ctx);
1980
1981 if (error)
1982 goto end_function;
1983
1984 dev_dbg(&sep->pdev->dev,
1985 "[PID%d] output sep_in_num_pages is (hex) %x\n",
1986 current->pid,
1987 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
1988
1989 current_entry = 0;
1990 info_entry_ptr = NULL;
1991
1992 sep_lli_entries =
1993 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;
1994
1995 dma_lli_table_alloc_addr = lli_table_alloc_addr;
1996 if (dmatables_region) {
1997 error = sep_allocate_dmatables_region(sep,
1998 dmatables_region,
1999 dma_ctx,
2000 sep_lli_entries);
2001 if (error)
2002 return error;
2003 lli_table_alloc_addr = *dmatables_region;
2004 }
2005
2006 /* Loop till all the entries in the in array are processed */
2007 while (current_entry < sep_lli_entries) {
2008
2009 /* Set the new input and output tables */
2010 in_lli_table_ptr =
2011 (struct sep_lli_entry *)lli_table_alloc_addr;
2012 dma_in_lli_table_ptr =
2013 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2014
2015 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2016 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2017 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2018 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2019
2020 if (dma_lli_table_alloc_addr >
2021 ((void *)sep->shared_addr +
2022 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2023 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2024
2025 error = -ENOMEM;
2026 goto end_function_error;
2027
2028 }
2029
2030 /* Update the number of created tables */
2031 dma_ctx->num_lli_tables_created++;
2032
2033 /* Calculate the maximum size of data for input table */
2034 table_data_size = sep_calculate_lli_table_max_size(sep,
2035 &lli_array_ptr[current_entry],
2036 (sep_lli_entries - current_entry),
2037 &last_table_flag);
2038
2039 /*
2040 * If this is not the last table -
2041 * then align it to the block size
2042 */
2043 if (!last_table_flag)
2044 table_data_size =
2045 (table_data_size / block_size) * block_size;
2046
2047 dev_dbg(&sep->pdev->dev,
2048 "[PID%d] output table_data_size is (hex) %x\n",
2049 current->pid,
2050 table_data_size);
2051
2052 /* Construct input lli table */
2053 sep_build_lli_table(sep, &lli_array_ptr[current_entry],
2054 in_lli_table_ptr,
2055 &current_entry, &num_entries_in_table, table_data_size);
2056
2057 if (info_entry_ptr == NULL) {
2058
2059 /* Set the output parameters to physical addresses */
2060 *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
2061 dma_in_lli_table_ptr);
2062 *num_entries_ptr = num_entries_in_table;
2063 *table_data_size_ptr = table_data_size;
2064
2065 dev_dbg(&sep->pdev->dev,
2066 "[PID%d] output lli_table_in_ptr is %08lx\n",
2067 current->pid,
2068 (unsigned long)*lli_table_ptr);
2069
2070 } else {
2071 /* Update the info entry of the previous in table */
2072 info_entry_ptr->bus_address =
2073 sep_shared_area_virt_to_bus(sep,
2074 dma_in_lli_table_ptr);
2075 info_entry_ptr->block_size =
2076 ((num_entries_in_table) << 24) |
2077 (table_data_size);
2078 }
2079 /* Save the pointer to the info entry of the current tables */
2080 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
2081 }
2082 /* Print input tables */
2083 if (!dmatables_region) {
2084 sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
2085 sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
2086 *num_entries_ptr, *table_data_size_ptr);
2087 }
2088
2089 /* The array of the pages */
2090 kfree(lli_array_ptr);
2091
2092update_dcb_counter:
2093 /* Update DCB counter */
2094 dma_ctx->nr_dcb_creat++;
2095 goto end_function;
2096
2097end_function_error:
2098 /* Free all the allocated resources */
2099 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2100 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2101 kfree(lli_array_ptr);
2102 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2103 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2104
2105end_function:
2106 return error;
2107
2108}
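/*
 * Illustrative sketch (not driver code) of the chaining performed above:
 * only the first table's bus address is returned through lli_table_ptr;
 * every earlier table's info entry is patched to hold the bus address of
 * the next table, with block_size = (entries << 24) | data_size:
 *
 *   *lli_table_ptr -> [table 0 ... info] -> [table 1 ... info] -> ...
 *
 * The final table keeps the 0xffffffff terminator written by
 * sep_build_lli_table().
 */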
2109
2110/**
2111 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
2112 * @sep: pointer to struct sep_device
2113 * @lli_in_array: array of input lli entries
2114 * @sep_in_lli_entries: number of entries in lli_in_array
2115 * @lli_out_array: array of output lli entries
2116 * @sep_out_lli_entries: number of entries in lli_out_array
2117 * @block_size: cipher block size for alignment
2118 * @lli_table_in_ptr: returned bus address of the first input table
2119 * @lli_table_out_ptr: returned bus address of the first output table
2120 * @in_num_entries_ptr: returned number of entries in the first input table
2121 * @out_num_entries_ptr: returned number of entries in the first output table
2122 * @table_data_size_ptr: returned data size of the first table pair
2123 *
2124 * This function creates the input and output DMA tables for
2125 * symmetric operations (AES/DES) according to the block
2126 * size from LLI arrays
2127 * Note that all bus addresses that are passed to the SEP
2128 * are in 32 bit format; the SEP is a 32 bit device
2129 */
2130static int sep_construct_dma_tables_from_lli(
2131 struct sep_device *sep,
2132 struct sep_lli_entry *lli_in_array,
2133 u32 sep_in_lli_entries,
2134 struct sep_lli_entry *lli_out_array,
2135 u32 sep_out_lli_entries,
2136 u32 block_size,
2137 dma_addr_t *lli_table_in_ptr,
2138 dma_addr_t *lli_table_out_ptr,
2139 u32 *in_num_entries_ptr,
2140 u32 *out_num_entries_ptr,
2141 u32 *table_data_size_ptr,
2142 void **dmatables_region,
2143 struct sep_dma_context *dma_ctx)
2144{
2145 /* Points to the area where next lli table can be allocated */
2146 void *lli_table_alloc_addr = NULL;
2147 /*
2148 * Points to the area in shared region where next lli table
2149 * can be allocated
2150 */
2151 void *dma_lli_table_alloc_addr = NULL;
2152 /* Input lli table in dmatables_region or shared region */
2153 struct sep_lli_entry *in_lli_table_ptr = NULL;
2154 /* Input lli table location in the shared region */
2155 struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
2156 /* Output lli table in dmatables_region or shared region */
2157 struct sep_lli_entry *out_lli_table_ptr = NULL;
2158 /* Output lli table location in the shared region */
2159 struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
2160 /* Pointer to the info entry of the table - the last entry */
2161 struct sep_lli_entry *info_in_entry_ptr = NULL;
2162 /* Pointer to the info entry of the table - the last entry */
2163 struct sep_lli_entry *info_out_entry_ptr = NULL;
2164 /* Points to the first entry to be processed in the lli_in_array */
2165 u32 current_in_entry = 0;
2166 /* Points to the first entry to be processed in the lli_out_array */
2167 u32 current_out_entry = 0;
2168 /* Max size of the input table */
2169 u32 in_table_data_size = 0;
2170 /* Max size of the output table */
2171 u32 out_table_data_size = 0;
2172 /* Flag that signifies if this is the last table to build */
2173 u32 last_table_flag = 0;
2174 /* The data size that should be in table */
2175 u32 table_data_size = 0;
2176 /* Number of entries in the input table */
2177 u32 num_entries_in_table = 0;
2178 /* Number of entries in the output table */
2179 u32 num_entries_out_table = 0;
2180
2181 if (!dma_ctx) {
2182 dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2183 return -EINVAL;
2184 }
2185
2186 /* Initiate to point after the message area */
2187 lli_table_alloc_addr = (void *)(sep->shared_addr +
2188 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2189 (dma_ctx->num_lli_tables_created *
2190 (sizeof(struct sep_lli_entry) *
2191 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2192 dma_lli_table_alloc_addr = lli_table_alloc_addr;
2193
2194 if (dmatables_region) {
2195 /* 2 for both in+out table */
2196 if (sep_allocate_dmatables_region(sep,
2197 dmatables_region,
2198 dma_ctx,
2199 2*sep_in_lli_entries))
2200 return -ENOMEM;
2201 lli_table_alloc_addr = *dmatables_region;
2202 }
2203
2204 /* Loop till all the entries in the in array are processed */
2205 while (current_in_entry < sep_in_lli_entries) {
2206 /* Set the new input and output tables */
2207 in_lli_table_ptr =
2208 (struct sep_lli_entry *)lli_table_alloc_addr;
2209 dma_in_lli_table_ptr =
2210 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2211
2212 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2213 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2214 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2215 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2216
2217 /* Set the first output tables */
2218 out_lli_table_ptr =
2219 (struct sep_lli_entry *)lli_table_alloc_addr;
2220 dma_out_lli_table_ptr =
2221 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2222
2223 /* Check if the DMA table area limit was overrun */
2224 if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2225 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2226 ((void *)sep->shared_addr +
2227 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2228 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2229
2230 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2231 return -ENOMEM;
2232 }
2233
2234 /* Update the number of the lli tables created */
2235 dma_ctx->num_lli_tables_created += 2;
2236
2237 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2238 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2239 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2240 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2241
2242 /* Calculate the maximum size of data for input table */
2243 in_table_data_size =
2244 sep_calculate_lli_table_max_size(sep,
2245 &lli_in_array[current_in_entry],
2246 (sep_in_lli_entries - current_in_entry),
2247 &last_table_flag);
2248
2249 /* Calculate the maximum size of data for output table */
2250 out_table_data_size =
2251 sep_calculate_lli_table_max_size(sep,
2252 &lli_out_array[current_out_entry],
2253 (sep_out_lli_entries - current_out_entry),
2254 &last_table_flag);
2255
2256 if (!last_table_flag) {
2257 in_table_data_size = (in_table_data_size /
2258 block_size) * block_size;
2259 out_table_data_size = (out_table_data_size /
2260 block_size) * block_size;
2261 }
2262
2263 table_data_size = in_table_data_size;
2264 if (table_data_size > out_table_data_size)
2265 table_data_size = out_table_data_size;
2266
2267 dev_dbg(&sep->pdev->dev,
2268 "[PID%d] construct tables from lli"
2269 " in_table_data_size is (hex) %x\n", current->pid,
2270 in_table_data_size);
2271
2272 dev_dbg(&sep->pdev->dev,
2273 "[PID%d] construct tables from lli"
2274 "out_table_data_size is (hex) %x\n", current->pid,
2275 out_table_data_size);
2276
2277 /* Construct input lli table */
2278 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2279 in_lli_table_ptr,
2280 &current_in_entry,
2281 &num_entries_in_table,
2282 table_data_size);
2283
2284 /* Construct output lli table */
2285 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2286 out_lli_table_ptr,
2287 &current_out_entry,
2288 &num_entries_out_table,
2289 table_data_size);
2290
2291 /* If info entry is null - this is the first table built */
2292 if (info_in_entry_ptr == NULL) {
2293 /* Set the output parameters to physical addresses */
2294 *lli_table_in_ptr =
2295 sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2296
2297 *in_num_entries_ptr = num_entries_in_table;
2298
2299 *lli_table_out_ptr =
2300 sep_shared_area_virt_to_bus(sep,
2301 dma_out_lli_table_ptr);
2302
2303 *out_num_entries_ptr = num_entries_out_table;
2304 *table_data_size_ptr = table_data_size;
2305
2306 dev_dbg(&sep->pdev->dev,
2307 "[PID%d] output lli_table_in_ptr is %08lx\n",
2308 current->pid,
2309 (unsigned long)*lli_table_in_ptr);
2310 dev_dbg(&sep->pdev->dev,
2311 "[PID%d] output lli_table_out_ptr is %08lx\n",
2312 current->pid,
2313 (unsigned long)*lli_table_out_ptr);
2314 } else {
2315 /* Update the info entry of the previous in table */
2316 info_in_entry_ptr->bus_address =
2317 sep_shared_area_virt_to_bus(sep,
2318 dma_in_lli_table_ptr);
2319
2320 info_in_entry_ptr->block_size =
2321 ((num_entries_in_table) << 24) |
2322 (table_data_size);
2323
2324 /* Update the info entry of the previous out table */
2325 info_out_entry_ptr->bus_address =
2326 sep_shared_area_virt_to_bus(sep,
2327 dma_out_lli_table_ptr);
2328
2329 info_out_entry_ptr->block_size =
2330 ((num_entries_out_table) << 24) |
2331 (table_data_size);
2332
2333 dev_dbg(&sep->pdev->dev,
2334 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2335 current->pid,
2336 (unsigned long)info_in_entry_ptr->bus_address,
2337 info_in_entry_ptr->block_size);
2338
2339 dev_dbg(&sep->pdev->dev,
2340 "[PID%d] output lli_table_out_ptr:"
2341 "%08lx %08x\n",
2342 current->pid,
2343 (unsigned long)info_out_entry_ptr->bus_address,
2344 info_out_entry_ptr->block_size);
2345 }
2346
2347 /* Save the pointer to the info entry of the current tables */
2348 info_in_entry_ptr = in_lli_table_ptr +
2349 num_entries_in_table - 1;
2350 info_out_entry_ptr = out_lli_table_ptr +
2351 num_entries_out_table - 1;
2352
2353 dev_dbg(&sep->pdev->dev,
2354 "[PID%d] output num_entries_out_table is %x\n",
2355 current->pid,
2356 (u32)num_entries_out_table);
2357 dev_dbg(&sep->pdev->dev,
2358 "[PID%d] output info_in_entry_ptr is %lx\n",
2359 current->pid,
2360 (unsigned long)info_in_entry_ptr);
2361 dev_dbg(&sep->pdev->dev,
2362 "[PID%d] output info_out_entry_ptr is %lx\n",
2363 current->pid,
2364 (unsigned long)info_out_entry_ptr);
2365 }
2366
2367 /* Print input tables */
2368 if (!dmatables_region) {
2369 sep_debug_print_lli_tables(
2370 sep,
2371 (struct sep_lli_entry *)
2372 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2373 *in_num_entries_ptr,
2374 *table_data_size_ptr);
2375 }
2376
2377 /* Print output tables */
2378 if (!dmatables_region) {
2379 sep_debug_print_lli_tables(
2380 sep,
2381 (struct sep_lli_entry *)
2382 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2383 *out_num_entries_ptr,
2384 *table_data_size_ptr);
2385 }
2386
2387 return 0;
2388}
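/*
 * Note with an illustrative worked example (hypothetical values): each
 * loop iteration above sizes the input and output tables independently,
 * block-aligns both when more data follows, and then uses the smaller of
 * the two for the pair. With in_table_data_size = 0x1200,
 * out_table_data_size = 0x1000 and block_size = 0x200, both tables carry
 * 0x1000 bytes this round and the surplus input entries roll over into
 * the next pair of tables.
 */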
2389
2390/**
2391 * sep_prepare_input_output_dma_table - prepare DMA I/O table
2392 * @app_virt_in_addr: virtual address of the input buffer
2393 * @app_virt_out_addr: virtual address of the output buffer
2394 * @data_size: size of the data to DMA
2395 * @block_size: cipher block size for alignment
2396 * @lli_table_in_ptr: returned bus address of the first input table
2397 * @lli_table_out_ptr: returned bus address of the first output table
2398 * @in_num_entries_ptr: returned number of entries in the first input table
2399 * @out_num_entries_ptr: returned number of entries in the first output table
2400 * @table_data_size_ptr: returned data size of the first table pair
2401 * @is_kva: set for kernel data; used only for kernel crypto module
2402 *
2403 * This function builds input and output DMA tables for synchronous
2404 * symmetric operations (AES, DES, HASH). It also checks that each table
2405 * is of the modular block size
2406 * Note that all bus addresses that are passed to the SEP
2407 * are in 32 bit format; the SEP is a 32 bit device
2408 */
2409static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2410 unsigned long app_virt_in_addr,
2411 unsigned long app_virt_out_addr,
2412 u32 data_size,
2413 u32 block_size,
2414 dma_addr_t *lli_table_in_ptr,
2415 dma_addr_t *lli_table_out_ptr,
2416 u32 *in_num_entries_ptr,
2417 u32 *out_num_entries_ptr,
2418 u32 *table_data_size_ptr,
2419 bool is_kva,
2420 void **dmatables_region,
2421 struct sep_dma_context *dma_ctx)
2422
2423{
2424 int error = 0;
2425 /* Array of pointers of page */
2426 struct sep_lli_entry *lli_in_array;
2427 /* Array of pointers of page */
2428 struct sep_lli_entry *lli_out_array;
2429
2430 if (!dma_ctx) {
2431 error = -EINVAL;
2432 goto end_function;
2433 }
2434
2435 if (data_size == 0) {
2436 /* Prepare empty table for input and output */
2437 if (dmatables_region) {
2438 error = sep_allocate_dmatables_region(
2439 sep,
2440 dmatables_region,
2441 dma_ctx,
2442 2);
2443 if (error)
2444 goto end_function;
2445 }
2446 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2447 in_num_entries_ptr, table_data_size_ptr,
2448 dmatables_region, dma_ctx);
2449
2450 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2451 out_num_entries_ptr, table_data_size_ptr,
2452 dmatables_region, dma_ctx);
2453
2454 goto update_dcb_counter;
2455 }
2456
2457 /* Initialize the pages pointers */
2458 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2459 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2460
2461 /* Lock the pages of the buffer and translate them to pages */
2462 if (is_kva == true) {
2463 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
2464 current->pid);
2465 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2466 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2467 dma_ctx);
2468 if (error) {
2469 dev_warn(&sep->pdev->dev,
2470 "[PID%d] sep_lock_kernel_pages for input "
2471 "virtual buffer failed\n", current->pid);
2472
2473 goto end_function;
2474 }
2475
2476 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
2477 current->pid);
2478 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2479 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2480 dma_ctx);
2481
2482 if (error) {
2483 dev_warn(&sep->pdev->dev,
2484 "[PID%d] sep_lock_kernel_pages for output "
2485 "virtual buffer failed\n", current->pid);
2486
2487 goto end_function_free_lli_in;
2488 }
2489
2490 } else {
2493 dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
2494 current->pid);
2495 error = sep_lock_user_pages(sep, app_virt_in_addr,
2496 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2497 dma_ctx);
2498 if (error) {
2499 dev_warn(&sep->pdev->dev,
2500 "[PID%d] sep_lock_user_pages for input "
2501 "virtual buffer failed\n", current->pid);
2502
2503 goto end_function;
2504 }
2505
2506 if (dma_ctx->secure_dma == true) {
2507 /* secure_dma requires use of non accessible memory */
2508 dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
2509 current->pid);
2510 error = sep_lli_table_secure_dma(sep,
2511 app_virt_out_addr, data_size, &lli_out_array,
2512 SEP_DRIVER_OUT_FLAG, dma_ctx);
2513 if (error) {
2514 dev_warn(&sep->pdev->dev,
2515 "[PID%d] secure dma table setup "
2516 "for output virtual buffer failed\n",
2517 current->pid);
2518
2519 goto end_function_free_lli_in;
2520 }
2521 } else {
2522 /* For normal, non-secure dma */
2523 dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
2524 current->pid);
2525
2526 dev_dbg(&sep->pdev->dev,
2527 "[PID%d] Locking user output pages\n",
2528 current->pid);
2529
2530 error = sep_lock_user_pages(sep, app_virt_out_addr,
2531 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2532 dma_ctx);
2533
2534 if (error) {
2535 dev_warn(&sep->pdev->dev,
2536 "[PID%d] sep_lock_user_pages"
2537 " for output virtual buffer failed\n",
2538 current->pid);
2539
2540 goto end_function_free_lli_in;
2541 }
2542 }
2543 }
2544
2545 dev_dbg(&sep->pdev->dev, "[PID%d] After lock; prep input output dma "
2546 "table sep_in_num_pages is (hex) %x\n", current->pid,
2547 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
2548
2549 dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
2550 current->pid,
2551 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
2552
2553 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP"
2554 " is (hex) %x\n", current->pid,
2555 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2556
2557 /* Call the function that creates the tables from the lli arrays */
2558 dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
2559 current->pid);
2560 error = sep_construct_dma_tables_from_lli(
2561 sep, lli_in_array,
2562 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2563 in_num_pages,
2564 lli_out_array,
2565 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2566 out_num_pages,
2567 block_size, lli_table_in_ptr, lli_table_out_ptr,
2568 in_num_entries_ptr, out_num_entries_ptr,
2569 table_data_size_ptr, dmatables_region, dma_ctx);
2570
2571 if (error) {
2572 dev_warn(&sep->pdev->dev,
2573 "[PID%d] sep_construct_dma_tables_from_lli failed\n",
2574 current->pid);
2575 goto end_function_with_error;
2576 }
2577
2578 kfree(lli_out_array);
2579 kfree(lli_in_array);
2580
2581update_dcb_counter:
2582 /* Update DCB counter */
2583 dma_ctx->nr_dcb_creat++;
2584
2585 goto end_function;
2586
2587end_function_with_error:
2588 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
2589 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
2590 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
2591 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2592 kfree(lli_out_array);
2593
2595end_function_free_lli_in:
2596 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2597 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2598 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2599 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2600 kfree(lli_in_array);
2601
2602end_function:
2603
2604 return error;
2605
2606}
2607
2608/**
2609 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2610 * @app_in_address: unsigned long; for data buffer in (user space)
2611 * @app_out_address: unsigned long; for data buffer out (user space)
2612 * @data_in_size: u32; for size of data
2613 * @block_size: u32; for block size
2614 * @tail_block_size: u32; for size of tail block
2615 * @isapplet: bool; to indicate external app
2616 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2617 * @secure_dma: indicates whether this is secure_dma using IMR
2618 *
2619 * This function prepares the linked DMA tables and puts the
2620 * address for the linked list of tables into a DCB (data control
2621 * block), the address of which is known by the SEP hardware
2622 * Note that all bus addresses that are passed to the SEP
2623 * are in 32 bit format; the SEP is a 32 bit device
2624 */
2625int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2626 unsigned long app_in_address,
2627 unsigned long app_out_address,
2628 u32 data_in_size,
2629 u32 block_size,
2630 u32 tail_block_size,
2631 bool isapplet,
2632 bool is_kva,
2633 bool secure_dma,
2634 struct sep_dcblock *dcb_region,
2635 void **dmatables_region,
2636 struct sep_dma_context **dma_ctx,
2637 struct scatterlist *src_sg,
2638 struct scatterlist *dst_sg)
2639{
2640 int error = 0;
2641 /* Size of tail */
2642 u32 tail_size = 0;
2643 /* Address of the created DCB table */
2644 struct sep_dcblock *dcb_table_ptr = NULL;
2645 /* The physical address of the first input DMA table */
2646 dma_addr_t in_first_mlli_address = 0;
2647 /* Number of entries in the first input DMA table */
2648 u32 in_first_num_entries = 0;
2649 /* The physical address of the first output DMA table */
2650 dma_addr_t out_first_mlli_address = 0;
2651 /* Number of entries in the first output DMA table */
2652 u32 out_first_num_entries = 0;
2653 /* Data in the first input/output table */
2654 u32 first_data_size = 0;
2655
2656 dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
2657 current->pid, app_in_address);
2658
2659 dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
2660 current->pid, app_out_address);
2661
2662 dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
2663 current->pid, data_in_size);
2664
2665 dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
2666 current->pid, block_size);
2667
2668 dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
2669 current->pid, tail_block_size);
2670
2671 dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
2672 current->pid, isapplet);
2673
2674 dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
2675 current->pid, is_kva);
2676
2677 dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
2678 current->pid, src_sg);
2679
2680 dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
2681 current->pid, dst_sg);
2682
2683 if (!dma_ctx) {
2684 dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
2685 current->pid);
2686 error = -EINVAL;
2687 goto end_function;
2688 }
2689
2690 if (*dma_ctx) {
2691 /* In case there are multiple DCBs for this transaction */
2692 dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
2693 current->pid);
2694 } else {
2695 *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
2696 if (!(*dma_ctx)) {
2697 dev_dbg(&sep->pdev->dev,
2698 "[PID%d] Not enough memory for DMA context\n",
2699 current->pid);
2700 error = -ENOMEM;
2701 goto end_function;
2702 }
2703 dev_dbg(&sep->pdev->dev,
2704 "[PID%d] Created DMA context addr at 0x%p\n",
2705 current->pid, *dma_ctx);
2706 }
2707
2708 (*dma_ctx)->secure_dma = secure_dma;
2709
2710 /* these are for kernel crypto only */
2711 (*dma_ctx)->src_sg = src_sg;
2712 (*dma_ctx)->dst_sg = dst_sg;
2713
2714 if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2715 /* No more DCBs to allocate */
2716 dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
2717 current->pid);
2718 error = -ENOSPC;
2719 goto end_function_error;
2720 }
2721
2722 /* Allocate new DCB */
2723 if (dcb_region) {
2724 dcb_table_ptr = dcb_region;
2725 } else {
2726 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2727 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2728 ((*dma_ctx)->nr_dcb_creat *
2729 sizeof(struct sep_dcblock)));
2730 }
2731
2732 /* Set the default values in the DCB */
2733 dcb_table_ptr->input_mlli_address = 0;
2734 dcb_table_ptr->input_mlli_num_entries = 0;
2735 dcb_table_ptr->input_mlli_data_size = 0;
2736 dcb_table_ptr->output_mlli_address = 0;
2737 dcb_table_ptr->output_mlli_num_entries = 0;
2738 dcb_table_ptr->output_mlli_data_size = 0;
2739 dcb_table_ptr->tail_data_size = 0;
2740 dcb_table_ptr->out_vr_tail_pt = 0;
2741
2742 if (isapplet == true) {
2743
2744 /* Check if there is enough data for DMA operation */
2745 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2746 if (is_kva == true) {
2747 error = -ENODEV;
2748 goto end_function_error;
2749 } else {
2750 if (copy_from_user(dcb_table_ptr->tail_data,
2751 (void __user *)app_in_address,
2752 data_in_size)) {
2753 error = -EFAULT;
2754 goto end_function_error;
2755 }
2756 }
2757
2758 dcb_table_ptr->tail_data_size = data_in_size;
2759
2760 /* Set the output user-space address for mem2mem op */
2761 if (app_out_address)
2762 dcb_table_ptr->out_vr_tail_pt =
2763 (aligned_u64)app_out_address;
2764
2765 /*
2766 * Update both data length parameters in order to avoid
2767 * second data copy and allow building of empty mlli
2768 * tables
2769 */
2770 tail_size = 0x0;
2771 data_in_size = 0x0;
2772
2773 } else {
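			/*
			 * For input-only operations the tail is the
			 * remainder of data_in_size modulo block_size, or a
			 * whole block when the data is block aligned and
			 * tail_block_size equals block_size; mem2mem
			 * operations carry no tail.
			 */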
2774 if (!app_out_address) {
2775 tail_size = data_in_size % block_size;
2776 if (!tail_size) {
2777 if (tail_block_size == block_size)
2778 tail_size = block_size;
2779 }
2780 } else {
2781 tail_size = 0;
2782 }
2783 }
2784		if (tail_size) {
2785			if (tail_size > sizeof(dcb_table_ptr->tail_data)) {
2786				/* Do not leak the DMA context on this path */
				error = -EINVAL;
				goto end_function_error;
			}
2787 if (is_kva == true) {
2788 error = -ENODEV;
2789 goto end_function_error;
2790 } else {
2791 /* We have tail data - copy it to DCB */
2792 if (copy_from_user(dcb_table_ptr->tail_data,
2793 (void __user *)(app_in_address +
2794 data_in_size - tail_size), tail_size)) {
2795 error = -EFAULT;
2796 goto end_function_error;
2797 }
2798 }
2799 if (app_out_address)
2800 /*
2801 * Calculate the output address
2802 * according to tail data size
2803 */
2804 dcb_table_ptr->out_vr_tail_pt =
2805 (aligned_u64)app_out_address +
2806 data_in_size - tail_size;
2807
2808 /* Save the real tail data size */
2809 dcb_table_ptr->tail_data_size = tail_size;
2810 /*
2811 * Update the data size without the tail
2812 * data size AKA data for the dma
2813 */
2814 data_in_size = (data_in_size - tail_size);
2815 }
2816 }
2817 /* Check if we need to build only input table or input/output */
2818 if (app_out_address) {
2819 /* Prepare input/output tables */
2820 error = sep_prepare_input_output_dma_table(sep,
2821 app_in_address,
2822 app_out_address,
2823 data_in_size,
2824 block_size,
2825 &in_first_mlli_address,
2826 &out_first_mlli_address,
2827 &in_first_num_entries,
2828 &out_first_num_entries,
2829 &first_data_size,
2830 is_kva,
2831 dmatables_region,
2832 *dma_ctx);
2833 } else {
2834 /* Prepare input tables */
2835 error = sep_prepare_input_dma_table(sep,
2836 app_in_address,
2837 data_in_size,
2838 block_size,
2839 &in_first_mlli_address,
2840 &in_first_num_entries,
2841 &first_data_size,
2842 is_kva,
2843 dmatables_region,
2844 *dma_ctx);
2845 }
2846
2847 if (error) {
2848		dev_warn(&sep->pdev->dev,
2849			"prepare DMA table call failed from prepare DCB call\n");
2851 goto end_function_error;
2852 }
2853
2854 /* Set the DCB values */
2855 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2856 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2857 dcb_table_ptr->input_mlli_data_size = first_data_size;
2858 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2859 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2860 dcb_table_ptr->output_mlli_data_size = first_data_size;
2861
2862 goto end_function;
2863
2864end_function_error:
2865 kfree(*dma_ctx);
2866 *dma_ctx = NULL;
2867
2868end_function:
2869 return error;
2870
2871}
2872
2873
2874/**
2875 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2876 * @sep: pointer to struct sep_device
2877 * @isapplet: indicates external application (used for kernel access)
2878 * @is_kva: indicates kernel addresses (only used for kernel crypto)
 * @dma_ctx: DMA context for the transaction
2879 *
2880 * This function frees the DMA tables and DCB
2881 */
2882static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2883 bool is_kva, struct sep_dma_context **dma_ctx)
2884{
2885 struct sep_dcblock *dcb_table_ptr;
2886 unsigned long pt_hold;
2887 void *tail_pt;
2888
2889 int i = 0;
2890 int error = 0;
2891 int error_temp = 0;
2892
2893 dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
2894 current->pid);
2895
2896 if (((*dma_ctx)->secure_dma == false) && (isapplet == true)) {
2897 dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
2898 current->pid);
2899
2900 /* Tail stuff is only for non secure_dma */
2901 /* Set pointer to first DCB table */
2902 dcb_table_ptr = (struct sep_dcblock *)
2903 (sep->shared_addr +
2904 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2905
2906		/*
2907		 * Go over each DCB and see if the
2908		 * tail pointer must be updated
2909		 */
2910 for (i = 0; dma_ctx && *dma_ctx &&
2911 i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
2912 if (dcb_table_ptr->out_vr_tail_pt) {
2913 pt_hold = (unsigned long)dcb_table_ptr->
2914 out_vr_tail_pt;
2915 tail_pt = (void *)pt_hold;
2916 if (is_kva == true) {
2917 error = -ENODEV;
2918 break;
2919 } else {
2920 error_temp = copy_to_user(
2921 (void __user *)tail_pt,
2922 dcb_table_ptr->tail_data,
2923 dcb_table_ptr->tail_data_size);
2924 }
2925 if (error_temp) {
2926 /* Release the DMA resource */
2927 error = -EFAULT;
2928 break;
2929 }
2930 }
2931 }
2932 }
2933
2934 /* Free the output pages, if any */
2935 sep_free_dma_table_data_handler(sep, dma_ctx);
2936
2937 dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2938 current->pid);
2939
2940 return error;
2941}
2942
2943/**
2944 * sep_prepare_dcb_handler - prepare a control block
2945 * @sep: pointer to struct sep_device
2946 * @arg: pointer to user parameters
2947 * @secure_dma: indicate whether we are using secure_dma on IMR
 * @dma_ctx: DMA context for the transaction
2948 *
2949 * This function copies the DCB build arguments from user space and
2950 * prepares the DMA tables and the DCB for the transaction.
2951 */
2952static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
2953 bool secure_dma,
2954 struct sep_dma_context **dma_ctx)
2955{
2956 int error;
2957 /* Command arguments */
2958 static struct build_dcb_struct command_args;
2959
2960 /* Get the command arguments */
2961 if (copy_from_user(&command_args, (void __user *)arg,
2962 sizeof(struct build_dcb_struct))) {
2963 error = -EFAULT;
2964 goto end_function;
2965 }
2966
2967 dev_dbg(&sep->pdev->dev,
2968 "[PID%d] prep dcb handler app_in_address is %08llx\n",
2969 current->pid, command_args.app_in_address);
2970 dev_dbg(&sep->pdev->dev,
2971 "[PID%d] app_out_address is %08llx\n",
2972 current->pid, command_args.app_out_address);
2973 dev_dbg(&sep->pdev->dev,
2974 "[PID%d] data_size is %x\n",
2975 current->pid, command_args.data_in_size);
2976 dev_dbg(&sep->pdev->dev,
2977 "[PID%d] block_size is %x\n",
2978 current->pid, command_args.block_size);
2979 dev_dbg(&sep->pdev->dev,
2980 "[PID%d] tail block_size is %x\n",
2981 current->pid, command_args.tail_block_size);
2982 dev_dbg(&sep->pdev->dev,
2983 "[PID%d] is_applet is %x\n",
2984 current->pid, command_args.is_applet);
2985
2986 if (!command_args.app_in_address) {
2987 dev_warn(&sep->pdev->dev,
2988 "[PID%d] null app_in_address\n", current->pid);
2989 error = -EINVAL;
2990 goto end_function;
2991 }
2992
2993 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2994 (unsigned long)command_args.app_in_address,
2995 (unsigned long)command_args.app_out_address,
2996 command_args.data_in_size, command_args.block_size,
2997 command_args.tail_block_size,
2998 command_args.is_applet, false,
2999 secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
3000
3001end_function:
3002 return error;
3003
3004}
3005
3006/**
3007 * sep_free_dcb_handler - free control block resources
3008 * @sep: pointer to struct sep_device
3009 *
3010 * This function frees the DCB resources and updates the needed
3011 * user-space buffers.
3012 */
3013static int sep_free_dcb_handler(struct sep_device *sep,
3014 struct sep_dma_context **dma_ctx)
3015{
3016 if (!dma_ctx || !(*dma_ctx)) {
3017 dev_dbg(&sep->pdev->dev,
3018 "[PID%d] no dma context defined, nothing to free\n",
3019 current->pid);
3020 return -EINVAL;
3021 }
3022
3023 dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
3024 current->pid,
3025 (*dma_ctx)->nr_dcb_creat);
3026
3027 return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
3028}
3029
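/*
 * A hedged user-space usage sketch for the DCB preparation ioctl handled
 * below; the field names are those of struct build_dcb_struct as used
 * above, while the buffers, lengths and 16-byte block sizes are purely
 * illustrative:
 *
 *	struct build_dcb_struct args = {0};
 *
 *	args.app_in_address = (unsigned long)in_buf;
 *	args.app_out_address = (unsigned long)out_buf;
 *	args.data_in_size = in_len;
 *	args.block_size = 16;
 *	args.tail_block_size = 16;
 *	args.is_applet = 0;
 *	if (ioctl(fd, SEP_IOCPREPAREDCB, &args) < 0)
 *		perror("SEP_IOCPREPAREDCB");
 */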
3030/**
3031 * sep_ioctl - ioctl handler for sep device
3032 * @filp: pointer to struct file
3033 * @cmd: command
3034 * @arg: pointer to argument structure
3035 *
3036 * Implement the ioctl methods available on the SEP device.
3037 */
3038static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3039{
3040 struct sep_private_data * const private_data = filp->private_data;
3041 struct sep_call_status *call_status = &private_data->call_status;
3042 struct sep_device *sep = private_data->device;
3043 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3044 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3045 int error = 0;
3046
3047 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
3048 current->pid, cmd);
3049 dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
3050 current->pid, *dma_ctx);
3051
3052 /* Make sure we own this device */
3053 error = sep_check_transaction_owner(sep);
3054 if (error) {
3055 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
3056 current->pid);
3057 goto end_function;
3058 }
3059
3060 /* Check that sep_mmap has been called before */
3061 if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
3062 &call_status->status)) {
3063 dev_dbg(&sep->pdev->dev,
3064 "[PID%d] mmap not called\n", current->pid);
3065 error = -EPROTO;
3066 goto end_function;
3067 }
3068
3069 /* Check that the command is for SEP device */
3070 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3071 error = -ENOTTY;
3072 goto end_function;
3073 }
3074
3075 switch (cmd) {
3076 case SEP_IOCSENDSEPCOMMAND:
3077 dev_dbg(&sep->pdev->dev,
3078 "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
3079 current->pid);
3080 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3081 &call_status->status)) {
3082 dev_warn(&sep->pdev->dev,
3083 "[PID%d] send msg already done\n",
3084 current->pid);
3085 error = -EPROTO;
3086 goto end_function;
3087 }
3088 /* Send command to SEP */
3089 error = sep_send_command_handler(sep);
3090 if (!error)
3091 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3092 &call_status->status);
3093 dev_dbg(&sep->pdev->dev,
3094 "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
3095 current->pid);
3096 break;
3097 case SEP_IOCENDTRANSACTION:
3098 dev_dbg(&sep->pdev->dev,
3099 "[PID%d] SEP_IOCENDTRANSACTION start\n",
3100 current->pid);
3101 error = sep_end_transaction_handler(sep, dma_ctx, call_status,
3102 my_queue_elem);
3103 dev_dbg(&sep->pdev->dev,
3104 "[PID%d] SEP_IOCENDTRANSACTION end\n",
3105 current->pid);
3106 break;
3107 case SEP_IOCPREPAREDCB:
3108 dev_dbg(&sep->pdev->dev,
3109 "[PID%d] SEP_IOCPREPAREDCB start\n",
3110 current->pid);
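		/* fall through - common DCB preparation, keyed on cmd below */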
3111 case SEP_IOCPREPAREDCB_SECURE_DMA:
3112 dev_dbg(&sep->pdev->dev,
3113 "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
3114 current->pid);
3115 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3116 &call_status->status)) {
3117 dev_warn(&sep->pdev->dev,
3118 "[PID%d] dcb prep needed before send msg\n",
3119 current->pid);
3120 error = -EPROTO;
3121 goto end_function;
3122 }
3123
3124 if (!arg) {
3125 dev_warn(&sep->pdev->dev,
3126 "[PID%d] dcb null arg\n", current->pid);
3127			error = -EINVAL;
3128 goto end_function;
3129 }
3130
3131 if (cmd == SEP_IOCPREPAREDCB) {
3132 /* No secure dma */
3133 dev_dbg(&sep->pdev->dev,
3134 "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
3135 current->pid);
3136
3137 error = sep_prepare_dcb_handler(sep, arg, false,
3138 dma_ctx);
3139 } else {
3140 /* Secure dma */
3141 dev_dbg(&sep->pdev->dev,
3142 "[PID%d] SEP_IOC_POC (with secure_dma)\n",
3143 current->pid);
3144
3145 error = sep_prepare_dcb_handler(sep, arg, true,
3146 dma_ctx);
3147 }
3148 dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
3149 current->pid);
3150 break;
3151 case SEP_IOCFREEDCB:
3152 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
3153 current->pid);
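		/* fall through - both commands share the free handler */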
3154 case SEP_IOCFREEDCB_SECURE_DMA:
3155 dev_dbg(&sep->pdev->dev,
3156 "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
3157 current->pid);
3158 error = sep_free_dcb_handler(sep, dma_ctx);
3159 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
3160 current->pid);
3161 break;
3162 default:
3163 error = -ENOTTY;
3164 dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
3165 current->pid);
3166 break;
3167 }
3168
3169end_function:
3170 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
3171
3172 return error;
3173}
3174
3175/**
3176 * sep_inthandler - interrupt handler for sep device
3177 * @irq: interrupt
3178 * @dev_id: device id
3179 */
3180static irqreturn_t sep_inthandler(int irq, void *dev_id)
3181{
3182 unsigned long lock_irq_flag;
3183 u32 reg_val, reg_val2 = 0;
3184 struct sep_device *sep = dev_id;
3185 irqreturn_t int_error = IRQ_HANDLED;
3186
3187 /* Are we in power save? */
3188#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
3189 if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
3190 dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
3191 return IRQ_NONE;
3192 }
3193#endif
3194
3195 if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
3196 dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
3197 return IRQ_NONE;
3198 }
3199
3200 /* Read the IRR register to check if this is SEP interrupt */
3201 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
3202
3203 dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
3204
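	/* Bit 13 of the IRR signals a GPR2 interrupt from the SEP */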
3205 if (reg_val & (0x1 << 13)) {
3206
3207 /* Lock and update the counter of reply messages */
3208 spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
3209 sep->reply_ct++;
3210 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
3211
3212 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
3213 sep->send_ct, sep->reply_ct);
3214
3215 /* Is this a kernel client request */
3216 if (sep->in_kernel) {
3217 tasklet_schedule(&sep->finish_tasklet);
3218 goto finished_interrupt;
3219 }
3220
3221 /* Is this printf or daemon request? */
3222 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3223 dev_dbg(&sep->pdev->dev,
3224 "SEP Interrupt - GPR2 is %08x\n", reg_val2);
3225
3226 clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
3227
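		/*
		 * GPR2 bit 30 flags a SEP printf request and bit 31 a
		 * daemon request; anything else is a reply to the host.
		 */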
3228 if ((reg_val2 >> 30) & 0x1) {
3229 dev_dbg(&sep->pdev->dev, "int: printf request\n");
3230 } else if (reg_val2 >> 31) {
3231 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
3232 } else {
3233 dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
3234 wake_up(&sep->event_interrupt);
3235 }
3236 } else {
3237 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
3238 int_error = IRQ_NONE;
3239 }
3240
3241finished_interrupt:
3242
3243 if (int_error == IRQ_HANDLED)
3244 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
3245
3246 return int_error;
3247}
3248
3249/**
3250 * sep_reconfig_shared_area - reconfigure shared area
3251 * @sep: pointer to struct sep_device
3252 *
3253 * Reconfig the shared area between HOST and SEP - needed in case
3254 * the DX_CC_Init function was called before OS loading.
3255 */
3256static int sep_reconfig_shared_area(struct sep_device *sep)
3257{
3258 int ret_val;
3259
3260 /* use to limit waiting for SEP */
3261 unsigned long end_time;
3262
3263 /* Send the new SHARED MESSAGE AREA to the SEP */
3264 dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
3265 (unsigned long long)sep->shared_bus);
3266
3267 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3268
3269 /* Poll for SEP response */
3270 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3271
3272 end_time = jiffies + (WAIT_TIME * HZ);
3273
3274 while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
3275 (ret_val != sep->shared_bus))
3276 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3277
3278 /* Check the return value (register) */
3279 if (ret_val != sep->shared_bus) {
3280 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3281 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3282 ret_val = -ENOMEM;
3283 } else
3284 ret_val = 0;
3285
3286 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3287
3288 return ret_val;
3289}
3290
3291/**
3292 * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
3293 * contexts into use
3294 * @sep: SEP device
3295 * @dcb_region: DCB region copy
3296 * @dmatables_region: MLLI/DMA tables copy
3297 * @dma_ctx: DMA context for current transaction
3298 */
3299ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
3300 struct sep_dcblock **dcb_region,
3301 void **dmatables_region,
3302 struct sep_dma_context *dma_ctx)
3303{
3304 void *dmaregion_free_start = NULL;
3305 void *dmaregion_free_end = NULL;
3306 void *dcbregion_free_start = NULL;
3307 void *dcbregion_free_end = NULL;
3308 ssize_t error = 0;
3309
3310 dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
3311 current->pid);
3312
3313 if (1 > dma_ctx->nr_dcb_creat) {
3314 dev_warn(&sep->pdev->dev,
3315 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3316 current->pid, dma_ctx->nr_dcb_creat);
3317 error = -EINVAL;
3318 goto end_function;
3319 }
3320
3321 dmaregion_free_start = sep->shared_addr
3322 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
3323 dmaregion_free_end = dmaregion_free_start
3324 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
3325
3326 if (dmaregion_free_start
3327 + dma_ctx->dmatables_len > dmaregion_free_end) {
3328 error = -ENOMEM;
3329 goto end_function;
3330 }
3331 memcpy(dmaregion_free_start,
3332 *dmatables_region,
3333 dma_ctx->dmatables_len);
3334 /* Free MLLI table copy */
3335 kfree(*dmatables_region);
3336 *dmatables_region = NULL;
3337
3338 /* Copy thread's DCB table copy to DCB table region */
3339 dcbregion_free_start = sep->shared_addr +
3340 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
3341 dcbregion_free_end = dcbregion_free_start +
3342 (SEP_MAX_NUM_SYNC_DMA_OPS *
3343 sizeof(struct sep_dcblock)) - 1;
3344
3345 if (dcbregion_free_start
3346 + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
3347 > dcbregion_free_end) {
3348 error = -ENOMEM;
3349 goto end_function;
3350 }
3351
3352 memcpy(dcbregion_free_start,
3353 *dcb_region,
3354 dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
3355
3356 /* Print the tables */
3357 dev_dbg(&sep->pdev->dev, "activate: input table\n");
3358 sep_debug_print_lli_tables(sep,
3359 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3360 (*dcb_region)->input_mlli_address),
3361 (*dcb_region)->input_mlli_num_entries,
3362 (*dcb_region)->input_mlli_data_size);
3363
3364 dev_dbg(&sep->pdev->dev, "activate: output table\n");
3365 sep_debug_print_lli_tables(sep,
3366 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3367 (*dcb_region)->output_mlli_address),
3368 (*dcb_region)->output_mlli_num_entries,
3369 (*dcb_region)->output_mlli_data_size);
3370
3371 dev_dbg(&sep->pdev->dev,
3372 "[PID%d] printing activated tables\n", current->pid);
3373
3374end_function:
3375 kfree(*dmatables_region);
3376 *dmatables_region = NULL;
3377
3378 kfree(*dcb_region);
3379 *dcb_region = NULL;
3380
3381 return error;
3382}
3383
3384/**
3385 * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3386 * @sep: SEP device
3387 * @dcb_region: DCB region buf to create for current transaction
3388 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3389 * @dma_ctx: DMA context buf to create for current transaction
3390 * @user_dcb_args: User arguments for DCB/MLLI creation
3391 * @num_dcbs: Number of DCBs to create
3392 * @secure_dma: Indicate use of IMR restricted memory secure dma
3393 */
3394static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
3395 struct sep_dcblock **dcb_region,
3396 void **dmatables_region,
3397 struct sep_dma_context **dma_ctx,
3398 const struct build_dcb_struct __user *user_dcb_args,
3399 const u32 num_dcbs, bool secure_dma)
3400{
3401 int error = 0;
3402 int i = 0;
3403 struct build_dcb_struct *dcb_args = NULL;
3404
3405 dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3406 current->pid);
3407
3408 if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
3409 error = -EINVAL;
3410 goto end_function;
3411 }
3412
3413 if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3414 dev_warn(&sep->pdev->dev,
3415 "[PID%d] invalid number of dcbs 0x%08X\n",
3416 current->pid, num_dcbs);
3417 error = -EINVAL;
3418 goto end_function;
3419 }
3420
3421 dcb_args = kzalloc(num_dcbs * sizeof(struct build_dcb_struct),
3422 GFP_KERNEL);
3423 if (!dcb_args) {
3424 dev_warn(&sep->pdev->dev, "[PID%d] no memory for dcb args\n",
3425 current->pid);
3426 error = -ENOMEM;
3427 goto end_function;
3428 }
3429
3430 if (copy_from_user(dcb_args,
3431 user_dcb_args,
3432 num_dcbs * sizeof(struct build_dcb_struct))) {
3433 error = -EINVAL;
3434 goto end_function;
3435 }
3436
3437 /* Allocate thread-specific memory for DCB */
3438 *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3439 GFP_KERNEL);
3440 if (!(*dcb_region)) {
3441 error = -ENOMEM;
3442 goto end_function;
3443 }
3444
3445 /* Prepare DCB and MLLI table into the allocated regions */
3446 for (i = 0; i < num_dcbs; i++) {
3447 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3448 (unsigned long)dcb_args[i].app_in_address,
3449 (unsigned long)dcb_args[i].app_out_address,
3450 dcb_args[i].data_in_size,
3451 dcb_args[i].block_size,
3452 dcb_args[i].tail_block_size,
3453 dcb_args[i].is_applet,
3454 false, secure_dma,
3455 *dcb_region, dmatables_region,
3456 dma_ctx,
3457 NULL,
3458 NULL);
3459 if (error) {
3460 dev_warn(&sep->pdev->dev,
3461 "[PID%d] dma table creation failed\n",
3462 current->pid);
3463 goto end_function;
3464 }
3465
3466 if (dcb_args[i].app_in_address != 0)
3467 (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
3468 }
3469
3470end_function:
3471 kfree(dcb_args);
3472 return error;
3473
3474}
3475
3476/**
3477 * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
3478 * for kernel crypto
3479 * @sep: SEP device
3480 * @dcb_region: DCB region buf to create for current transaction
3481 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3482 * @dma_ctx: DMA context buf to create for current transaction
3483 * @dcb_data: arguments for DCB/MLLI creation (kernel crypto)
3484 * @num_dcbs: Number of DCBs to create
 *
3485 * This does the same thing as sep_create_dcb_dmatables_context
3486 * except that it is used only for the kernel crypto operation. It is
3487 * separate because there is no user data involved; the dcb data structure
3488 * is specific for kernel crypto (build_dcb_struct_kernel)
3489 */
3490int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
3491 struct sep_dcblock **dcb_region,
3492 void **dmatables_region,
3493 struct sep_dma_context **dma_ctx,
3494 const struct build_dcb_struct_kernel *dcb_data,
3495 const u32 num_dcbs)
3496{
3497 int error = 0;
3498 int i = 0;
3499
3500 dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3501 current->pid);
3502
3503 if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
3504 error = -EINVAL;
3505 goto end_function;
3506 }
3507
3508 if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3509 dev_warn(&sep->pdev->dev,
3510 "[PID%d] invalid number of dcbs 0x%08X\n",
3511 current->pid, num_dcbs);
3512 error = -EINVAL;
3513 goto end_function;
3514 }
3515
3516 dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
3517 current->pid, num_dcbs);
3518
3519 /* Allocate thread-specific memory for DCB */
3520 *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3521 GFP_KERNEL);
3522 if (!(*dcb_region)) {
3523 error = -ENOMEM;
3524 goto end_function;
3525 }
3526
3527 /* Prepare DCB and MLLI table into the allocated regions */
3528 for (i = 0; i < num_dcbs; i++) {
3529 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3530 (unsigned long)dcb_data->app_in_address,
3531 (unsigned long)dcb_data->app_out_address,
3532 dcb_data->data_in_size,
3533 dcb_data->block_size,
3534 dcb_data->tail_block_size,
3535 dcb_data->is_applet,
3536 true,
3537 false,
3538 *dcb_region, dmatables_region,
3539 dma_ctx,
3540 dcb_data->src_sg,
3541 dcb_data->dst_sg);
3542 if (error) {
3543 dev_warn(&sep->pdev->dev,
3544 "[PID%d] dma table creation failed\n",
3545 current->pid);
3546 goto end_function;
3547 }
3548 }
3549
3550end_function:
3551 return error;
3552
3553}
3554
3555/**
3556 * sep_activate_msgarea_context - Takes the message area context into use
3557 * @sep: SEP device
3558 * @msg_region: Message area context buf
3559 * @msg_len: Message area context buffer size
3560 */
3561static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
3562 void **msg_region,
3563 const size_t msg_len)
3564{
3565 dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
3566 current->pid);
3567
3568 if (!msg_region || !(*msg_region) ||
3569 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
3570 dev_warn(&sep->pdev->dev,
3571 "[PID%d] invalid act msgarea len 0x%08zX\n",
3572 current->pid, msg_len);
3573 return -EINVAL;
3574 }
3575
3576 memcpy(sep->shared_addr, *msg_region, msg_len);
3577
3578 return 0;
3579}
3580
3581/**
3582 * sep_create_msgarea_context - Creates message area context
3583 * @sep: SEP device
3584 * @msg_region: Msg area region buf to create for current transaction
3585 * @msg_user: Content for msg area region from user
3586 * @msg_len: Message area size
3587 */
3588static ssize_t sep_create_msgarea_context(struct sep_device *sep,
3589 void **msg_region,
3590 const void __user *msg_user,
3591 const size_t msg_len)
3592{
3593 int error = 0;
3594
3595 dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
3596 current->pid);
3597
3598 if (!msg_region ||
3599 !msg_user ||
3600 SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
3601 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
3602 dev_warn(&sep->pdev->dev,
3603 "[PID%d] invalid creat msgarea len 0x%08zX\n",
3604 current->pid, msg_len);
3605 error = -EINVAL;
3606 goto end_function;
3607 }
3608
3609 /* Allocate thread-specific memory for message buffer */
3610 *msg_region = kzalloc(msg_len, GFP_KERNEL);
3611 if (!(*msg_region)) {
3612 dev_warn(&sep->pdev->dev,
3613 "[PID%d] no mem for msgarea context\n",
3614 current->pid);
3615 error = -ENOMEM;
3616 goto end_function;
3617 }
3618
3619 /* Copy input data to write() to allocated message buffer */
3620 if (copy_from_user(*msg_region, msg_user, msg_len)) {
3621 error = -EINVAL;
3622 goto end_function;
3623 }
3624
3625end_function:
3626 if (error && msg_region) {
3627 kfree(*msg_region);
3628 *msg_region = NULL;
3629 }
3630
3631 return error;
3632}
3633
3634
3635/**
3636 * sep_read - Returns results of an operation for fastcall interface
3637 * @filp: File pointer
3638 * @buf_user: User buffer for storing results
3639 * @count_user: User buffer size
3640 * @offset: File offset, not supported
3641 *
3642 * The implementation does not support reading in chunks, all data must be
3643 * consumed during a single read system call.
3644 */
3645static ssize_t sep_read(struct file *filp,
3646 char __user *buf_user, size_t count_user,
3647 loff_t *offset)
3648{
3649 struct sep_private_data * const private_data = filp->private_data;
3650 struct sep_call_status *call_status = &private_data->call_status;
3651 struct sep_device *sep = private_data->device;
3652 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3653 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3654 ssize_t error = 0, error_tmp = 0;
3655
3656 /* Am I the process that owns the transaction? */
3657 error = sep_check_transaction_owner(sep);
3658 if (error) {
3659 dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
3660 current->pid);
3661 goto end_function;
3662 }
3663
3664	/* Check that the user has called the necessary APIs */
3665 if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
3666 &call_status->status)) {
3667 dev_warn(&sep->pdev->dev,
3668 "[PID%d] fastcall write not called\n",
3669 current->pid);
3670 error = -EPROTO;
3671 goto end_function_error;
3672 }
3673
3674 if (!buf_user) {
3675 dev_warn(&sep->pdev->dev,
3676 "[PID%d] null user buffer\n",
3677 current->pid);
3678 error = -EINVAL;
3679 goto end_function_error;
3680 }
3681
3682
3683 /* Wait for SEP to finish */
3684 wait_event(sep->event_interrupt,
3685 test_bit(SEP_WORKING_LOCK_BIT,
3686 &sep->in_use_flags) == 0);
3687
3688 sep_dump_message(sep);
3689
3690 dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
3691 current->pid, count_user);
3692
3693 /* In case user has allocated bigger buffer */
3694 if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
3695 count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;
3696
3697 if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
3698 error = -EFAULT;
3699 goto end_function_error;
3700 }
3701
3702 dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
3703 error = count_user;
3704
3705end_function_error:
3706 /* Copy possible tail data to user and free DCB and MLLIs */
3707 error_tmp = sep_free_dcb_handler(sep, dma_ctx);
3708 if (error_tmp)
3709 dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
3710 current->pid);
3711
3712 /* End the transaction, wakeup pending ones */
3713 error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
3714 my_queue_elem);
3715 if (error_tmp)
3716 dev_warn(&sep->pdev->dev,
3717 "[PID%d] ending transaction failed\n",
3718 current->pid);
3719
3720end_function:
3721 return error;
3722}
3723
3724/**
3725 * sep_fastcall_args_get - Gets fastcall params from user
3726 * @sep: SEP device
3727 * @args: Parameters buffer
3728 * @buf_user: User buffer for operation parameters
3729 * @count_user: User buffer size
3730 */
3731static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
3732 struct sep_fastcall_hdr *args,
3733 const char __user *buf_user,
3734 const size_t count_user)
3735{
3736 ssize_t error = 0;
3737 size_t actual_count = 0;
3738
3739 if (!buf_user) {
3740 dev_warn(&sep->pdev->dev,
3741 "[PID%d] null user buffer\n",
3742 current->pid);
3743 error = -EINVAL;
3744 goto end_function;
3745 }
3746
3747 if (count_user < sizeof(struct sep_fastcall_hdr)) {
3748 dev_warn(&sep->pdev->dev,
3749 "[PID%d] too small message size 0x%08zX\n",
3750 current->pid, count_user);
3751 error = -EINVAL;
3752 goto end_function;
3753 }
3754
3755
3756 if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
3757 error = -EFAULT;
3758 goto end_function;
3759 }
3760
3761 if (SEP_FC_MAGIC != args->magic) {
3762 dev_warn(&sep->pdev->dev,
3763 "[PID%d] invalid fastcall magic 0x%08X\n",
3764 current->pid, args->magic);
3765 error = -EINVAL;
3766 goto end_function;
3767 }
3768
3769 dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3770 current->pid, args->num_dcbs);
3771 dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
3772 current->pid, args->msg_len);
3773
3774 if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
3775 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
3776 dev_warn(&sep->pdev->dev,
3777 "[PID%d] invalid message length\n",
3778 current->pid);
3779 error = -EINVAL;
3780 goto end_function;
3781 }
3782
3783 actual_count = sizeof(struct sep_fastcall_hdr)
3784 + args->msg_len
3785 + (args->num_dcbs * sizeof(struct build_dcb_struct));
3786
3787 if (actual_count != count_user) {
3788 dev_warn(&sep->pdev->dev,
3789 "[PID%d] inconsistent message "
3790 "sizes 0x%08zX vs 0x%08zX\n",
3791 current->pid, actual_count, count_user);
3792 error = -EMSGSIZE;
3793 goto end_function;
3794 }
3795
3796end_function:
3797 return error;
3798}
3799
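/*
 * Layout of a fastcall write() buffer, as implied by the size check in
 * sep_fastcall_args_get() above and the parsing in sep_write() below:
 *
 *	struct sep_fastcall_hdr hdr;			   validated first
 *	struct build_dcb_struct dcb_args[hdr.num_dcbs];	   optional DCBs
 *	u8 msg[hdr.msg_len];				   message content
 *
 * count_user must match the sum of the three sizes exactly.
 */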
3800/**
3801 * sep_write - Starts an operation for fastcall interface
3802 * @filp: File pointer
3803 * @buf_user: User buffer for operation parameters
3804 * @count_user: User buffer size
3805 * @offset: File offset, not supported
3806 *
3807 * The implementation does not support writing in chunks,
3808 * all data must be given during a single write system call.
3809 */
3810static ssize_t sep_write(struct file *filp,
3811 const char __user *buf_user, size_t count_user,
3812 loff_t *offset)
3813{
3814 struct sep_private_data * const private_data = filp->private_data;
3815 struct sep_call_status *call_status = &private_data->call_status;
3816 struct sep_device *sep = private_data->device;
3817 struct sep_dma_context *dma_ctx = NULL;
3818 struct sep_fastcall_hdr call_hdr = {0};
3819 void *msg_region = NULL;
3820 void *dmatables_region = NULL;
3821 struct sep_dcblock *dcb_region = NULL;
3822 ssize_t error = 0;
3823 struct sep_queue_info *my_queue_elem = NULL;
3824 bool my_secure_dma; /* are we using secure_dma (IMR)? */
3825
3826 dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
3827 current->pid, sep);
3828 dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
3829 current->pid, private_data);
3830
3831 error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
3832 if (error)
3833 goto end_function;
3834
3835 buf_user += sizeof(struct sep_fastcall_hdr);
3836
3837 if (call_hdr.secure_dma == 0)
3838 my_secure_dma = false;
3839 else
3840 my_secure_dma = true;
3841
3842 /*
3843 * Controlling driver memory usage by limiting amount of
3844 * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
3845 * of threads can progress further at a time
3846 */
3847 dev_dbg(&sep->pdev->dev, "[PID%d] waiting for double buffering "
3848 "region access\n", current->pid);
3849 error = down_interruptible(&sep->sep_doublebuf);
3850 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
3851 current->pid);
3852 if (error) {
3853 /* Signal received */
3854 goto end_function_error;
3855 }
3856
3857
3858 /*
3859 * Prepare contents of the shared area regions for
3860 * the operation into temporary buffers
3861 */
3862 if (0 < call_hdr.num_dcbs) {
3863 error = sep_create_dcb_dmatables_context(sep,
3864 &dcb_region,
3865 &dmatables_region,
3866 &dma_ctx,
3867 (const struct build_dcb_struct __user *)
3868 buf_user,
3869 call_hdr.num_dcbs, my_secure_dma);
3870 if (error)
3871 goto end_function_error_doublebuf;
3872
3873 buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
3874 }
3875
3876 error = sep_create_msgarea_context(sep,
3877 &msg_region,
3878 buf_user,
3879 call_hdr.msg_len);
3880 if (error)
3881 goto end_function_error_doublebuf;
3882
3883 dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
3884 current->pid);
3885 my_queue_elem = sep_queue_status_add(sep,
3886 ((struct sep_msgarea_hdr *)msg_region)->opcode,
3887 (dma_ctx) ? dma_ctx->input_data_len : 0,
3888 current->pid,
3889 current->comm, sizeof(current->comm));
3890
3891 if (!my_queue_elem) {
3892		dev_dbg(&sep->pdev->dev,
3893			"[PID%d] updating queue status error\n", current->pid);
3894 error = -ENOMEM;
3895 goto end_function_error_doublebuf;
3896 }
3897
3898 /* Wait until current process gets the transaction */
3899 error = sep_wait_transaction(sep);
3900
3901 if (error) {
3902 /* Interrupted by signal, don't clear transaction */
3903 dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
3904 current->pid);
3905 sep_queue_status_remove(sep, &my_queue_elem);
3906 goto end_function_error_doublebuf;
3907 }
3908
3909 dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
3910 current->pid);
3911 private_data->my_queue_elem = my_queue_elem;
3912
3913 /* Activate shared area regions for the transaction */
3914 error = sep_activate_msgarea_context(sep, &msg_region,
3915 call_hdr.msg_len);
3916 if (error)
3917 goto end_function_error_clear_transact;
3918
3919 sep_dump_message(sep);
3920
3921 if (0 < call_hdr.num_dcbs) {
3922 error = sep_activate_dcb_dmatables_context(sep,
3923 &dcb_region,
3924 &dmatables_region,
3925 dma_ctx);
3926 if (error)
3927 goto end_function_error_clear_transact;
3928 }
3929
3930 /* Send command to SEP */
3931 error = sep_send_command_handler(sep);
3932 if (error)
3933 goto end_function_error_clear_transact;
3934
3935 /* Store DMA context for the transaction */
3936 private_data->dma_ctx = dma_ctx;
3937 /* Update call status */
3938 set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
3939 error = count_user;
3940
3941 up(&sep->sep_doublebuf);
3942 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3943 current->pid);
3944
3945 goto end_function;
3946
3947end_function_error_clear_transact:
3948 sep_end_transaction_handler(sep, &dma_ctx, call_status,
3949 &private_data->my_queue_elem);
3950
3951end_function_error_doublebuf:
3952 up(&sep->sep_doublebuf);
3953 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3954 current->pid);
3955
3956end_function_error:
3957 if (dma_ctx)
3958 sep_free_dma_table_data_handler(sep, &dma_ctx);
3959
3960end_function:
3961 kfree(dcb_region);
3962 kfree(dmatables_region);
3963 kfree(msg_region);
3964
3965 return error;
3966}
3967/**
3968 * sep_seek - Handler for seek system call
3969 * @filp: File pointer
3970 * @offset: File offset
3971 * @origin: Options for offset
3972 *
3973 * Fastcall interface does not support seeking, all reads
3974 * and writes are from/to offset zero
3975 */
3976static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
3977{
3978 return -ENOSYS;
3979}
3980
3981
3982
3983/**
3984 * sep_file_operations - file operation on sep device
3985 * @sep_ioctl: ioctl handler from user space call
3986 * @sep_poll: poll handler
3987 * @sep_open: handles sep device open request
3988 * @sep_release:handles sep device release request
3989 * @sep_mmap: handles memory mapping requests
3990 * @sep_read: handles read request on sep device
3991 * @sep_write: handles write request on sep device
3992 * @sep_seek: handles seek request on sep device
3993 */
3994static const struct file_operations sep_file_operations = {
3995 .owner = THIS_MODULE,
3996 .unlocked_ioctl = sep_ioctl,
3997 .poll = sep_poll,
3998 .open = sep_open,
3999 .release = sep_release,
4000 .mmap = sep_mmap,
4001 .read = sep_read,
4002 .write = sep_write,
4003 .llseek = sep_seek,
4004};
4005
4006/**
4007 * sep_sysfs_read - read a sysfs entry per the given arguments
4008 * @filp: file pointer
4009 * @kobj: kobject pointer
4010 * @attr: binary file attributes
4011 * @buf: read to this buffer
4012 * @pos: offset to read
4013 * @count: amount of data to read
4014 *
4015 * This function reads sysfs entries for the sep driver per the given arguments.
4016 */
4017static ssize_t
4018sep_sysfs_read(struct file *filp, struct kobject *kobj,
4019 struct bin_attribute *attr,
4020 char *buf, loff_t pos, size_t count)
4021{
4022 unsigned long lck_flags;
4023 size_t nleft = count;
4024 struct sep_device *sep = sep_dev;
4025 struct sep_queue_info *queue_elem = NULL;
4026 u32 queue_num = 0;
4027 u32 i = 1;
4028
4029 spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
4030
4031 queue_num = sep->sep_queue_num;
4032 if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
4033 queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
4034
4035
4036 if (count < sizeof(queue_num)
4037 + (queue_num * sizeof(struct sep_queue_data))) {
4038 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4039 return -EINVAL;
4040 }
4041
4042 memcpy(buf, &queue_num, sizeof(queue_num));
4043 buf += sizeof(queue_num);
4044 nleft -= sizeof(queue_num);
4045
4046 list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
4047 if (i++ > queue_num)
4048 break;
4049
4050 memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
4051 nleft -= sizeof(queue_elem->data);
4052 buf += sizeof(queue_elem->data);
4053 }
4054 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4055
4056 return count - nleft;
4057}
4058
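/*
 * The queue_status blob produced above is a u32 element count followed
 * by up to SEP_DOUBLEBUF_USERS_LIMIT fixed-size struct sep_queue_data
 * records; a read buffer that cannot hold them all is rejected with
 * -EINVAL rather than truncated.
 */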
4059/**
4060 * queue_status - binary attribute for the queue_status sysfs file
4061 * @attr: attributes (name & permissions)
4062 * @read: function pointer to read this file
4063 * @size: maximum size of binary attribute
4064 */
4065static const struct bin_attribute queue_status = {
4066 .attr = {.name = "queue_status", .mode = 0444},
4067 .read = sep_sysfs_read,
4068 .size = sizeof(u32)
4069 + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
4070};
4071
4072/**
4073 * sep_register_driver_with_fs - register misc devices
4074 * @sep: pointer to struct sep_device
4075 *
4076 * This function registers the driver with the file system
4077 */
4078static int sep_register_driver_with_fs(struct sep_device *sep)
4079{
4080 int ret_val;
4081
4082 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
4083 sep->miscdev_sep.name = SEP_DEV_NAME;
4084 sep->miscdev_sep.fops = &sep_file_operations;
4085
4086 ret_val = misc_register(&sep->miscdev_sep);
4087 if (ret_val) {
4088 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
4089 ret_val);
4090 return ret_val;
4091 }
4092
4093 ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
4094 &queue_status);
4095 if (ret_val) {
4096 dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
4097 ret_val);
4098 return ret_val;
4099 }
4100
4101 return ret_val;
4102}
4103
4104
4105/**
4106 * sep_probe - probe a matching PCI device
4107 * @pdev: pci_device
4108 * @ent: pci_device_id
4109 *
4110 * Attempt to set up and configure a SEP device that has been
4111 * discovered by the PCI layer. Allocates all required resources.
4112 */
4113static int __devinit sep_probe(struct pci_dev *pdev,
4114 const struct pci_device_id *ent)
4115{
4116 int error = 0;
4117 struct sep_device *sep = NULL;
4118
4119 if (sep_dev != NULL) {
4120 dev_dbg(&pdev->dev, "only one SEP supported.\n");
4121 return -EBUSY;
4122 }
4123
4124 /* Enable the device */
4125 error = pci_enable_device(pdev);
4126 if (error) {
4127 dev_warn(&pdev->dev, "error enabling pci device\n");
4128 goto end_function;
4129 }
4130
4131 /* Allocate the sep_device structure for this device */
4132 sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
4133 if (sep_dev == NULL) {
4134 dev_warn(&pdev->dev,
4135			"cannot allocate the sep_device structure\n");
4136 error = -ENOMEM;
4137 goto end_function_disable_device;
4138 }
4139
4140 /*
4141 * We're going to use another variable for actually
4142 * working with the device; this way, if we have
4143 * multiple devices in the future, it would be easier
4144 * to make appropriate changes
4145 */
4146 sep = sep_dev;
4147
4148 sep->pdev = pci_dev_get(pdev);
4149
4150 init_waitqueue_head(&sep->event_transactions);
4151 init_waitqueue_head(&sep->event_interrupt);
4152 spin_lock_init(&sep->snd_rply_lck);
4153 spin_lock_init(&sep->sep_queue_lock);
4154 sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
4155
4156 INIT_LIST_HEAD(&sep->sep_queue_status);
4157
4158 dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, "
4159 "device being prepared\n");
4160
4161 /* Set up our register area */
4162 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
4163 if (!sep->reg_physical_addr) {
4164 dev_warn(&sep->pdev->dev, "Error getting register start\n");
4165 error = -ENODEV;
4166 goto end_function_free_sep_dev;
4167 }
4168
4169 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
4170 if (!sep->reg_physical_end) {
4171 dev_warn(&sep->pdev->dev, "Error getting register end\n");
4172 error = -ENODEV;
4173 goto end_function_free_sep_dev;
4174 }
4175
4176 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
4177 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
4178 if (!sep->reg_addr) {
4179 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
4180 error = -ENODEV;
4181 goto end_function_free_sep_dev;
4182 }
4183
4184 dev_dbg(&sep->pdev->dev,
4185 "Register area start %llx end %llx virtual %p\n",
4186 (unsigned long long)sep->reg_physical_addr,
4187 (unsigned long long)sep->reg_physical_end,
4188 sep->reg_addr);
4189
4190 /* Allocate the shared area */
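	/*
	 * One contiguous buffer holds the message area, the synchronous
	 * DMA tables, the data pool, the static area and the system data
	 * region, sized as summed here.
	 */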
4191 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
4192 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
4193 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
4194 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
4195 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
4196
4197 if (sep_map_and_alloc_shared_area(sep)) {
4198 error = -ENOMEM;
4199 /* Allocation failed */
4200 goto end_function_error;
4201 }
4202
4203 /* Clear ICR register */
4204 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4205
4206 /* Set the IMR register - open only GPR 2 */
4207 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4208
4209 /* Read send/receive counters from SEP */
4210 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4211 sep->reply_ct &= 0x3FFFFFFF;
4212 sep->send_ct = sep->reply_ct;
4213
4214 /* Get the interrupt line */
4215 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
4216 "sep_driver", sep);
4217
4218 if (error)
4219 goto end_function_deallocate_sep_shared_area;
4220
4221 /* The new chip requires a shared area reconfigure */
4222 error = sep_reconfig_shared_area(sep);
4223 if (error)
4224 goto end_function_free_irq;
4225
4226 sep->in_use = 1;
4227
4228 /* Finally magic up the device nodes */
4229 /* Register driver with the fs */
4230 error = sep_register_driver_with_fs(sep);
4231
4232 if (error) {
4233 dev_err(&sep->pdev->dev, "error registering dev file\n");
4234 goto end_function_free_irq;
4235 }
4236
4237	sep->in_use = 0; /* done touching the device */
4238#ifdef SEP_ENABLE_RUNTIME_PM
4239 pm_runtime_put_noidle(&sep->pdev->dev);
4240 pm_runtime_allow(&sep->pdev->dev);
4241 pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
4242 SUSPEND_DELAY);
4243 pm_runtime_use_autosuspend(&sep->pdev->dev);
4244 pm_runtime_mark_last_busy(&sep->pdev->dev);
4245 sep->power_save_setup = 1;
4246#endif
4247 /* register kernel crypto driver */
4248#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4249 error = sep_crypto_setup();
4250 if (error) {
4251 dev_err(&sep->pdev->dev, "crypto setup failed\n");
4252 goto end_function_free_irq;
4253 }
4254#endif
4255 goto end_function;
4256
4257end_function_free_irq:
4258 free_irq(pdev->irq, sep);
4259
4260end_function_deallocate_sep_shared_area:
4261 /* De-allocate shared area */
4262 sep_unmap_and_free_shared_area(sep);
4263
4264end_function_error:
4265 iounmap(sep->reg_addr);
4266
4267end_function_free_sep_dev:
4268 pci_dev_put(sep_dev->pdev);
4269 kfree(sep_dev);
4270 sep_dev = NULL;
4271
4272end_function_disable_device:
4273 pci_disable_device(pdev);
4274
4275end_function:
4276 return error;
4277}
4278
4279/**
4280 * sep_remove - handles removing device from pci subsystem
4281 * @pdev: pointer to pci device
4282 *
4283 * This function will handle removing our sep device from pci subsystem on exit
4284 * or unloading this module. It should free up all used resources, and unmap if
4285 * any memory regions mapped.
4286 */
4287static void sep_remove(struct pci_dev *pdev)
4288{
4289 struct sep_device *sep = sep_dev;
4290
4291 /* Unregister from fs */
4292 misc_deregister(&sep->miscdev_sep);
4293
4294 /* Unregister from kernel crypto */
4295#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4296 sep_crypto_takedown();
4297#endif
4298 /* Free the irq */
4299 free_irq(sep->pdev->irq, sep);
4300
4301 /* Free the shared area */
4302 sep_unmap_and_free_shared_area(sep_dev);
4303 iounmap(sep_dev->reg_addr);
4304
4305#ifdef SEP_ENABLE_RUNTIME_PM
4306 if (sep->in_use) {
4307 sep->in_use = 0;
4308 pm_runtime_forbid(&sep->pdev->dev);
4309 pm_runtime_get_noresume(&sep->pdev->dev);
4310 }
4311#endif
4312 pci_dev_put(sep_dev->pdev);
4313 kfree(sep_dev);
4314 sep_dev = NULL;
4315}
4316
4317/* Initialize struct pci_device_id for our driver */
4318static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
4319 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
4320 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
4321 {0}
4322};
4323
4324/* Export our pci_device_id structure to user space */
4325MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
4326
4327#ifdef SEP_ENABLE_RUNTIME_PM
4328
4329/**
4330 * sep_pci_resume - resume routine while waking up from S3 state
4331 * @dev: pointer to sep device
4332 *
4333 * This function wakes up the sep driver while the system resumes from S3
4334 * state, i.e. suspend to RAM. The RAM is intact.
4335 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
4336 */
4337static int sep_pci_resume(struct device *dev)
4338{
4339 struct sep_device *sep = sep_dev;
4340
4341 dev_dbg(&sep->pdev->dev, "pci resume called\n");
4342
4343 if (sep->power_state == SEP_DRIVER_POWERON)
4344 return 0;
4345
4346 /* Clear ICR register */
4347 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4348
4349 /* Set the IMR register - open only GPR 2 */
4350 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4351
4352 /* Read send/receive counters from SEP */
4353 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4354 sep->reply_ct &= 0x3FFFFFFF;
4355 sep->send_ct = sep->reply_ct;
4356
4357 sep->power_state = SEP_DRIVER_POWERON;
4358
4359 return 0;
4360}
4361
4362/**
4363 * sep_pci_suspend - suspend routine while going to S3 state
4364 * @dev: pointer to sep device
4365 *
4366 * This function suspends the sep driver while the system goes to S3
4367 * state, i.e. suspend to RAM. The RAM is intact and ON during this suspend.
4368 * Notes - revisit with more understanding of pm, ICR/IMR
4369 */
4370static int sep_pci_suspend(struct device *dev)
4371{
4372 struct sep_device *sep = sep_dev;
4373
4374 dev_dbg(&sep->pdev->dev, "pci suspend called\n");
4375 if (sep->in_use == 1)
4376 return -EAGAIN;
4377
4378 sep->power_state = SEP_DRIVER_POWEROFF;
4379
4380 /* Clear ICR register */
4381 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4382
4383 /* Set the IMR to block all */
4384 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
4385
4386 return 0;
4387}
4388
4389/**
4390 * sep_pm_runtime_resume - runtime resume routine
4391 * @dev: pointer to sep device
4392 *
4393 * Notes - revisit with more understanding of pm, ICR/IMR & counters
4394 */
4395static int sep_pm_runtime_resume(struct device *dev)
4396{
4397
4398 u32 retval2;
4399 u32 delay_count;
4400 struct sep_device *sep = sep_dev;
4401
4402 dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
4403
4404	/*
4405 * Wait until the SCU boot is ready
4406 * This is done by iterating SCU_DELAY_ITERATION (10
4407 * microseconds each) up to SCU_DELAY_MAX (50) times.
4408 * This bit can be set in a random time that is less
4409 * than 500 microseconds after each power resume
4410 */
4411 retval2 = 0;
4412 delay_count = 0;
4413 while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
4414 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
4415 retval2 &= 0x00000008;
4416 if (!retval2) {
4417 udelay(SCU_DELAY_ITERATION);
4418 delay_count += 1;
4419 }
4420 }
4421
4422 if (!retval2) {
4423 dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
4424 return -EINVAL;
4425 }
4426
4427 /* Clear ICR register */
4428 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4429
4430 /* Set the IMR register - open only GPR 2 */
4431 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4432
4433 /* Read send/receive counters from SEP */
4434 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4435 sep->reply_ct &= 0x3FFFFFFF;
4436 sep->send_ct = sep->reply_ct;
4437
4438 return 0;
4439}
4440
4441/**
4442 * sep_pm_runtime_suspend - runtime suspend routine
4443 * @dev: pointer to sep device
4444 *
4445 * Notes - revisit with more understanding of pm
4446 */
4447static int sep_pm_runtime_suspend(struct device *dev)
4448{
4449 struct sep_device *sep = sep_dev;
4450
4451 dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
4452
4453 /* Clear ICR register */
4454 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4455 return 0;
4456}
4457
4458/**
4459 * sep_pm - power management for sep driver
4460 * @sep_pm_runtime_resume: resume - no communication with cpu & main memory
4461 * @sep_pm_runtime_suspend: suspend - no communication with cpu & main memory
4462 * @sep_pci_suspend: suspend - main memory is still ON
4463 * @sep_pci_resume: resume - main memory is still ON
4464 */
4465static const struct dev_pm_ops sep_pm = {
4466 .runtime_resume = sep_pm_runtime_resume,
4467 .runtime_suspend = sep_pm_runtime_suspend,
4468 .resume = sep_pci_resume,
4469 .suspend = sep_pci_suspend,
4470};
4471#endif /* SEP_ENABLE_RUNTIME_PM */
4472
4473/**
4474 * sep_pci_driver - registers this device with pci subsystem
4475 * @name: name identifier for this driver
4476 * @sep_pci_id_tbl: pointer to struct pci_device_id table
4477 * @sep_probe: pointer to probe function in PCI driver
4478 * @sep_remove: pointer to remove function in PCI driver
4479 */
4480static struct pci_driver sep_pci_driver = {
4481#ifdef SEP_ENABLE_RUNTIME_PM
4482 .driver = {
4483 .pm = &sep_pm,
4484 },
4485#endif
4486 .name = "sep_sec_driver",
4487 .id_table = sep_pci_id_tbl,
4488 .probe = sep_probe,
4489 .remove = sep_remove
4490};
4491
4492/**
4493 * sep_init - init function
4494 *
4495 * Module load time. Register the PCI device driver.
4496 */
4497
4498static int __init sep_init(void)
4499{
4500 return pci_register_driver(&sep_pci_driver);
4501}
4502
4503
4504/**
4505 * sep_exit - called to unload driver
4506 *
4507 * Unregister the driver. The device will perform all the cleanup required.
4508 */
4509static void __exit sep_exit(void)
4510{
4511 pci_unregister_driver(&sep_pci_driver);
4512}
4513
4514
4515module_init(sep_init);
4516module_exit(sep_exit);
4517
4518MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_trace_events.h b/drivers/staging/sep/sep_trace_events.h
new file mode 100644
index 000000000000..2b053a93afe6
--- /dev/null
+++ b/drivers/staging/sep/sep_trace_events.h
@@ -0,0 +1,188 @@
1/*
2 * If TRACE_SYSTEM is defined, that will be the directory created
3 * in the ftrace directory under /sys/kernel/debug/tracing/events/<system>
4 *
5 * The define_trace.h below will also look for a file name of
6 * TRACE_SYSTEM.h where TRACE_SYSTEM is what is defined here.
7 * In this case, it would look for sep.h
8 *
9 * If the header name will be different than the system name
10 * (as in this case), then you can override the header name that
11 * define_trace.h will look up by defining TRACE_INCLUDE_FILE
12 *
13 * This file is called sep_trace_events.h but we want the system
14 * to be called "sep". Therefore we must define the name of this
15 * file:
16 *
17 * #define TRACE_INCLUDE_FILE sep_trace_events
18 *
19 * As we do at the bottom of this file.
20 *
21 * Notice that TRACE_SYSTEM should be defined outside of #if
22 * protection, just like TRACE_INCLUDE_FILE.
23 */
24#undef TRACE_SYSTEM
25#define TRACE_SYSTEM sep
26
27/*
28 * Notice that this file is not protected like a normal header.
29 * We also must allow for rereading of this file. The
30 *
31 * || defined(TRACE_HEADER_MULTI_READ)
32 *
33 * serves this purpose.
34 */
35#if !defined(_TRACE_SEP_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
36#define _TRACE_SEP_EVENTS_H
37
38#ifdef SEP_PERF_DEBUG
39#define SEP_TRACE_FUNC_IN() trace_sep_func_start(__func__, 0)
40#define SEP_TRACE_FUNC_OUT(branch) trace_sep_func_end(__func__, branch)
41#define SEP_TRACE_EVENT(branch) trace_sep_misc_event(__func__, branch)
42#else
43#define SEP_TRACE_FUNC_IN()
44#define SEP_TRACE_FUNC_OUT(branch)
45#define SEP_TRACE_EVENT(branch)
46#endif
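/*
 * Illustrative sketch, not part of the file above: how these wrappers are
 * meant to be used inside the driver; with SEP_PERF_DEBUG unset they all
 * compile away to nothing (sep_example_op() is a hypothetical name).
 */
static void sep_example_op(void)
{
	SEP_TRACE_FUNC_IN();	/* expands to trace_sep_func_start(__func__, 0) */

	/* ... do the work, tagging an interesting branch with id 1 ... */
	SEP_TRACE_EVENT(1);	/* expands to trace_sep_misc_event(__func__, 1) */

	SEP_TRACE_FUNC_OUT(0);	/* expands to trace_sep_func_end(__func__, 0) */
}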
47
48
49/*
50 * All trace headers should include tracepoint.h, until we finally
51 * make it into a standard header.
52 */
53#include <linux/tracepoint.h>
54
55/*
56 * The TRACE_EVENT macro is broken up into 6 parts.
57 *
58 * name: name of the trace point. This is also how to enable the tracepoint.
59 * A function called trace_sep_func_start() will be created.
60 *
61 * proto: the prototype of the function trace_sep_func_start()
62 * Here it is trace_sep_func_start(const char *name, int branch).
63 *
64 * args: must match the arguments in the prototype.
65 * Here it is simply "name, branch".
66 *
67 * struct: This defines the way the data will be stored in the ring buffer.
68 * There are currently two types of elements: __field and __array.
69 * A __field is broken up into (type, name), where type can be any
70 * type but an array.
71 * For an array, there are three fields: (type, name, size) - the
72 * type of elements in the array, the name of the field and the size
73 * of the array.
74 *
75 * __array( char, foo, 10) is the same as saying char foo[10].
76 *
77 * fast_assign: This is a C-like function that is used to store the items
78 * into the ring buffer.
79 *
80 * printk: This is a way to print out the data in pretty print. This is
81 * useful if the system crashes: when you are logging via a serial line,
82 * the data can be printed to the console using this "printk" method.
83 *
84 * Note that for both the assign and the printk, __entry is the handle
85 * to the data structure in the ring buffer, and is defined by the
86 * TP_STRUCT__entry.
87 */
88TRACE_EVENT(sep_func_start,
89
90 TP_PROTO(const char *name, int branch),
91
92 TP_ARGS(name, branch),
93
94 TP_STRUCT__entry(
95 __array(char, name, 20)
96 __field(int, branch)
97 ),
98
99 TP_fast_assign(
100 strncpy(__entry->name, name, 20);
101 __entry->branch = branch;
102 ),
103
104 TP_printk("func_start %s %d", __entry->name, __entry->branch)
105);
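/*
 * Illustrative aside, not part of the file: once compiled in, this event is
 * controlled through the TRACE_SYSTEM directory named in the top-of-file
 * comment and rendered by the TP_printk() format above, e.g. (the traced
 * function name here is made up):
 *
 *   # echo 1 > /sys/kernel/debug/tracing/events/sep/sep_func_start/enable
 *   # cat /sys/kernel/debug/tracing/trace
 *   ...  func_start sep_example_op 0
 */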
106
107TRACE_EVENT(sep_func_end,
108
109 TP_PROTO(const char *name, int branch),
110
111 TP_ARGS(name, branch),
112
113 TP_STRUCT__entry(
114 __array(char, name, 20)
115 __field(int, branch)
116 ),
117
118 TP_fast_assign(
119 strncpy(__entry->name, name, 20);
120 __entry->branch = branch;
121 ),
122
123 TP_printk("func_end %s %d", __entry->name, __entry->branch)
124);
125
126TRACE_EVENT(sep_misc_event,
127
128 TP_PROTO(const char *name, int branch),
129
130 TP_ARGS(name, branch),
131
132 TP_STRUCT__entry(
133 __array(char, name, 20)
134 __field(int, branch)
135 ),
136
137 TP_fast_assign(
138 strncpy(__entry->name, name, 20);
139 __entry->branch = branch;
140 ),
141
142 TP_printk("misc_event %s %d", __entry->name, __entry->branch)
143);
144
145
146#endif
147
148/***** NOTICE! The #if protection ends here. *****/
149
150
151/*
152 * There are several ways I could have done this. If I left out the
153 * TRACE_INCLUDE_PATH, then it would default to the kernel source
154 * include/trace/events directory.
155 *
156 * I could specify a path from the define_trace.h file back to this
157 * file.
158 *
159 * #define TRACE_INCLUDE_PATH ../../drivers/staging/sep
160 *
161 * But the safest and easiest way is simply to make it use the
162 * directory the file is in, by adding in the Makefile:
163 *
164 * CFLAGS_<object-using-this-header>.o := -I$(src)
165 *
166 * This will make sure the current path is part of the include
167 * structure for our file so that define_trace.h can find it.
168 *
169 * I could have made only the top level directory the include:
170 *
171 * CFLAGS_<object-using-this-header>.o := -I$(PWD)
172 *
173 * And then let the path to this directory be the TRACE_INCLUDE_PATH:
174 *
175 * #define TRACE_INCLUDE_PATH drivers/staging/sep
176 *
177 * But then if something defines "drivers", "staging" or "sep" as a
178 * macro, we could risk that being converted too, giving us an
179 * unexpected result.
180 */
181#undef TRACE_INCLUDE_PATH
182#undef TRACE_INCLUDE_FILE
183#define TRACE_INCLUDE_PATH .
184/*
185 * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
186 */
187#define TRACE_INCLUDE_FILE sep_trace_events
188#include <trace/define_trace.h>
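For the three tracepoints above to be instantiated, exactly one .c file in the driver has to define CREATE_TRACE_POINTS before pulling this header in; a minimal sketch of the standard define_trace.h pattern (which sep compilation unit does this is an assumption, it is not shown in this series):

	/* in a single sep driver .c file only */
	#define CREATE_TRACE_POINTS
	#include "sep_trace_events.h"

combined with the -I$(src) Makefile flag discussed in the comment above so that define_trace.h can locate the header.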
diff --git a/drivers/staging/slicoss/README b/drivers/staging/slicoss/README
index b83bba19b7f0..cb04a87b2017 100644
--- a/drivers/staging/slicoss/README
+++ b/drivers/staging/slicoss/README
@@ -42,7 +42,7 @@ TODO:
42 42
43 43
44Please send patches to: 44Please send patches to:
45 Greg Kroah-Hartman <gregkh@suse.de> 45 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
46and Cc: Lior Dotan <liodot@gmail.com> and Christopher Harrer 46and Cc: Lior Dotan <liodot@gmail.com> and Christopher Harrer
47<charrer@alacritech.com> as well as they are also able to test out any 47<charrer@alacritech.com> as well as they are also able to test out any
48changes. 48changes.
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index ae0035f327e7..83c582ed12e5 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -41,7 +41,6 @@
41 41
42#ifdef CONFIG_PM 42#ifdef CONFIG_PM
43#include <linux/pm.h> 43#include <linux/pm.h>
44#include <linux/module.h>
45#endif 44#endif
46 45
47#include "smtcfb.h" 46#include "smtcfb.h"
@@ -443,7 +442,7 @@ static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
443} 442}
444 443
445#ifdef __BIG_ENDIAN 444#ifdef __BIG_ENDIAN
446static ssize_t smtcfb_read(struct fb_info *info, char __user * buf, size_t 445static ssize_t smtcfb_read(struct fb_info *info, char __user *buf, size_t
447 count, loff_t *ppos) 446 count, loff_t *ppos)
448{ 447{
449 unsigned long p = *ppos; 448 unsigned long p = *ppos;
diff --git a/drivers/staging/sm7xx/smtcfb.h b/drivers/staging/sm7xx/smtcfb.h
index c5e6989e65ab..ab95af2b9c07 100644
--- a/drivers/staging/sm7xx/smtcfb.h
+++ b/drivers/staging/sm7xx/smtcfb.h
@@ -38,7 +38,7 @@
38#define dac_reg (0x3c8) 38#define dac_reg (0x3c8)
39#define dac_val (0x3c9) 39#define dac_val (0x3c9)
40 40
41extern char *smtc_RegBaseAddress; 41extern char __iomem *smtc_RegBaseAddress;
42#define smtc_mmiowb(dat, reg) writeb(dat, smtc_RegBaseAddress + reg) 42#define smtc_mmiowb(dat, reg) writeb(dat, smtc_RegBaseAddress + reg)
43#define smtc_mmioww(dat, reg) writew(dat, smtc_RegBaseAddress + reg) 43#define smtc_mmioww(dat, reg) writew(dat, smtc_RegBaseAddress + reg)
44#define smtc_mmiowl(dat, reg) writel(dat, smtc_RegBaseAddress + reg) 44#define smtc_mmiowl(dat, reg) writel(dat, smtc_RegBaseAddress + reg)
diff --git a/drivers/telephony/Kconfig b/drivers/staging/telephony/Kconfig
index b5f78b6ed2bd..b5f78b6ed2bd 100644
--- a/drivers/telephony/Kconfig
+++ b/drivers/staging/telephony/Kconfig
diff --git a/drivers/telephony/Makefile b/drivers/staging/telephony/Makefile
index 1206615d69e4..1206615d69e4 100644
--- a/drivers/telephony/Makefile
+++ b/drivers/staging/telephony/Makefile
diff --git a/drivers/staging/telephony/TODO b/drivers/staging/telephony/TODO
new file mode 100644
index 000000000000..d47dec3508d7
--- /dev/null
+++ b/drivers/staging/telephony/TODO
@@ -0,0 +1,10 @@
1TODO
2. Determine if the boards are still in use
3 and move this module back to drivers/telephony if necessary
4. Coding style cleanups
5
6Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
7cc Joe Perches <joe@perches.com> if the module should be reactivated.
8
9If no module activity occurs before version 3.6 is released, this
10module should be removed.
diff --git a/drivers/telephony/ixj-ver.h b/drivers/staging/telephony/ixj-ver.h
index 2031ac6c888c..2031ac6c888c 100644
--- a/drivers/telephony/ixj-ver.h
+++ b/drivers/staging/telephony/ixj-ver.h
diff --git a/drivers/telephony/ixj.c b/drivers/staging/telephony/ixj.c
index d5f923bcdffe..d5f923bcdffe 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/staging/telephony/ixj.c
diff --git a/drivers/telephony/ixj.h b/drivers/staging/telephony/ixj.h
index 2c841134f61c..2c841134f61c 100644
--- a/drivers/telephony/ixj.h
+++ b/drivers/staging/telephony/ixj.h
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/staging/telephony/ixj_pcmcia.c
index 05032e2cc954..05032e2cc954 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/staging/telephony/ixj_pcmcia.c
diff --git a/drivers/telephony/phonedev.c b/drivers/staging/telephony/phonedev.c
index 1915af201175..1915af201175 100644
--- a/drivers/telephony/phonedev.c
+++ b/drivers/staging/telephony/phonedev.c
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 21a559ecbbb1..0dd479f5638d 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -31,12 +31,6 @@ config TIDSPBRIDGE_MEMPOOL_SIZE
31 Allocate specified size of memory at booting time to avoid allocation 31 Allocate specified size of memory at booting time to avoid allocation
32 failure under heavy memory fragmentation after some use time. 32 failure under heavy memory fragmentation after some use time.
33 33
34config TIDSPBRIDGE_DEBUG
35 bool "Debug Support"
36 depends on TIDSPBRIDGE
37 help
38 Say Y to enable Bridge debugging capabilities
39
40config TIDSPBRIDGE_RECOVERY 34config TIDSPBRIDGE_RECOVERY
41 bool "Recovery Support" 35 bool "Recovery Support"
42 depends on TIDSPBRIDGE 36 depends on TIDSPBRIDGE
@@ -58,22 +52,6 @@ config TIDSPBRIDGE_CACHE_LINE_CHECK
58 This can lead to heap corruption. Say Y, to enforce the check for 128 52 This can lead to heap corruption. Say Y, to enforce the check for 128
59 byte alignment, buffers failing this check will be rejected. 53 byte alignment, buffers failing this check will be rejected.
60 54
61config TIDSPBRIDGE_WDT3
62 bool "Enable watchdog timer"
63 depends on TIDSPBRIDGE
64 help
65 WTD3 is managed by DSP and once it is enabled, DSP side bridge is in
66 charge of refreshing the timer before overflow, if the DSP hangs MPU
67 will caught the interrupt and try to recover DSP.
68
69config TIDSPBRIDGE_WDT_TIMEOUT
70 int "Watchdog timer timeout (in secs)"
71 depends on TIDSPBRIDGE && TIDSPBRIDGE_WDT3
72 default 5
73 help
74 Watchdog timer timeout value, after that time if the watchdog timer
75 counter is not reset the wdt overflow interrupt will be triggered
76
77config TIDSPBRIDGE_NTFY_PWRERR 55config TIDSPBRIDGE_NTFY_PWRERR
78 bool "Notify power errors" 56 bool "Notify power errors"
79 depends on TIDSPBRIDGE 57 depends on TIDSPBRIDGE
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index fd6a2761cc3b..8c8c92a9083f 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o 1obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge.o
2 2
3libgen = gen/gh.o gen/uuidutil.o 3libgen = gen/gh.o gen/uuidutil.o
4libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \ 4libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
@@ -13,7 +13,7 @@ libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
13 dynload/tramp.o 13 dynload/tramp.o
14libhw = hw/hw_mmu.o 14libhw = hw/hw_mmu.o
15 15
16bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \ 16tidspbridge-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
17 $(libdload) $(libhw) 17 $(libdload) $(libhw)
18 18
19#Machine dependent 19#Machine dependent
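A practical consequence of the rename above, assuming CONFIG_TIDSPBRIDGE=m: the build now produces tidspbridge.ko instead of bridgedriver.ko, so loading the module becomes:

	modprobe tidspbridge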
diff --git a/drivers/staging/tidspbridge/core/chnl_sm.c b/drivers/staging/tidspbridge/core/chnl_sm.c
index 6d66e7d0fba8..e0c7e4c470c8 100644
--- a/drivers/staging/tidspbridge/core/chnl_sm.c
+++ b/drivers/staging/tidspbridge/core/chnl_sm.c
@@ -50,9 +50,6 @@
50/* ----------------------------------- DSP/BIOS Bridge */ 50/* ----------------------------------- DSP/BIOS Bridge */
51#include <dspbridge/dbdefs.h> 51#include <dspbridge/dbdefs.h>
52 52
53/* ----------------------------------- Trace & Debug */
54#include <dspbridge/dbc.h>
55
56/* ----------------------------------- OS Adaptation Layer */ 53/* ----------------------------------- OS Adaptation Layer */
57#include <dspbridge/sync.h> 54#include <dspbridge/sync.h>
58 55
@@ -123,7 +120,6 @@ int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
123 CHNL_IS_OUTPUT(pchnl->chnl_mode)) 120 CHNL_IS_OUTPUT(pchnl->chnl_mode))
124 return -EPIPE; 121 return -EPIPE;
125 /* No other possible states left */ 122 /* No other possible states left */
126 DBC_ASSERT(0);
127 } 123 }
128 124
129 dev_obj = dev_get_first(); 125 dev_obj = dev_get_first();
@@ -190,7 +186,6 @@ func_cont:
190 * Note: for dma chans dw_dsp_addr contains dsp address 186 * Note: for dma chans dw_dsp_addr contains dsp address
191 * of SM buffer. 187 * of SM buffer.
192 */ 188 */
193 DBC_ASSERT(chnl_mgr_obj->word_size != 0);
194 /* DSP address */ 189 /* DSP address */
195 chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size; 190 chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size;
196 chnl_packet_obj->byte_size = byte_size; 191 chnl_packet_obj->byte_size = byte_size;
@@ -201,7 +196,6 @@ func_cont:
201 CHNL_IOCSTATCOMPLETE); 196 CHNL_IOCSTATCOMPLETE);
202 list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests); 197 list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests);
203 pchnl->cio_reqs++; 198 pchnl->cio_reqs++;
204 DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
205 /* 199 /*
206 * If end of stream, update the channel state to prevent 200 * If end of stream, update the channel state to prevent
207 * more IOR's. 201 * more IOR's.
@@ -209,8 +203,6 @@ func_cont:
209 if (is_eos) 203 if (is_eos)
210 pchnl->state |= CHNL_STATEEOS; 204 pchnl->state |= CHNL_STATEEOS;
211 205
212 /* Legacy DSM Processor-Copy */
213 DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
214 /* Request IO from the DSP */ 206 /* Request IO from the DSP */
215 io_request_chnl(chnl_mgr_obj->iomgr, pchnl, 207 io_request_chnl(chnl_mgr_obj->iomgr, pchnl,
216 (CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT : 208 (CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
@@ -283,7 +275,6 @@ int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
283 list_add_tail(&chirp->link, &pchnl->io_completions); 275 list_add_tail(&chirp->link, &pchnl->io_completions);
284 pchnl->cio_cs++; 276 pchnl->cio_cs++;
285 pchnl->cio_reqs--; 277 pchnl->cio_reqs--;
286 DBC_ASSERT(pchnl->cio_reqs >= 0);
287 } 278 }
288 279
289 spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock); 280 spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
@@ -311,8 +302,6 @@ int bridge_chnl_close(struct chnl_object *chnl_obj)
311 status = bridge_chnl_cancel_io(chnl_obj); 302 status = bridge_chnl_cancel_io(chnl_obj);
312 if (status) 303 if (status)
313 return status; 304 return status;
314 /* Assert I/O on this channel is now cancelled: Protects from io_dpc */
315 DBC_ASSERT((pchnl->state & CHNL_STATECANCEL));
316 /* Invalidate channel object: Protects from CHNL_GetIOCompletion() */ 305 /* Invalidate channel object: Protects from CHNL_GetIOCompletion() */
317 /* Free the slot in the channel manager: */ 306 /* Free the slot in the channel manager: */
318 pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL; 307 pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL;
@@ -358,13 +347,6 @@ int bridge_chnl_create(struct chnl_mgr **channel_mgr,
358 struct chnl_mgr *chnl_mgr_obj = NULL; 347 struct chnl_mgr *chnl_mgr_obj = NULL;
359 u8 max_channels; 348 u8 max_channels;
360 349
361 /* Check DBC requirements: */
362 DBC_REQUIRE(channel_mgr != NULL);
363 DBC_REQUIRE(mgr_attrts != NULL);
364 DBC_REQUIRE(mgr_attrts->max_channels > 0);
365 DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
366 DBC_REQUIRE(mgr_attrts->word_size != 0);
367
368 /* Allocate channel manager object */ 350 /* Allocate channel manager object */
369 chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL); 351 chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
370 if (chnl_mgr_obj) { 352 if (chnl_mgr_obj) {
@@ -374,7 +356,6 @@ int bridge_chnl_create(struct chnl_mgr **channel_mgr,
374 * mgr_attrts->max_channels = CHNL_MAXCHANNELS = 356 * mgr_attrts->max_channels = CHNL_MAXCHANNELS =
375 * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS. 357 * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
376 */ 358 */
377 DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
378 max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY; 359 max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
379 /* Create array of channels */ 360 /* Create array of channels */
380 chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *) 361 chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *)
@@ -491,7 +472,6 @@ int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
491 pchnl->state &= ~CHNL_STATECANCEL; 472 pchnl->state &= ~CHNL_STATECANCEL;
492 } 473 }
493 } 474 }
494 DBC_ENSURE(status || list_empty(&pchnl->io_requests));
495 return status; 475 return status;
496} 476}
497 477
@@ -592,7 +572,6 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
592 omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX); 572 omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
593 if (dequeue_ioc) { 573 if (dequeue_ioc) {
594 /* Dequeue IOC and set chan_ioc; */ 574 /* Dequeue IOC and set chan_ioc; */
595 DBC_ASSERT(!list_empty(&pchnl->io_completions));
596 chnl_packet_obj = list_first_entry(&pchnl->io_completions, 575 chnl_packet_obj = list_first_entry(&pchnl->io_completions,
597 struct chnl_irp, link); 576 struct chnl_irp, link);
598 list_del(&chnl_packet_obj->link); 577 list_del(&chnl_packet_obj->link);
@@ -705,8 +684,6 @@ int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
705 struct chnl_mgr *chnl_mgr_obj; 684 struct chnl_mgr *chnl_mgr_obj;
706 int status = 0; 685 int status = 0;
707 686
708 DBC_REQUIRE(chnl_obj);
709
710 chnl_mode = chnl_obj->chnl_mode; 687 chnl_mode = chnl_obj->chnl_mode;
711 chnl_mgr_obj = chnl_obj->chnl_mgr_obj; 688 chnl_mgr_obj = chnl_obj->chnl_mgr_obj;
712 689
@@ -736,10 +713,7 @@ int bridge_chnl_open(struct chnl_object **chnl,
736 struct chnl_mgr *chnl_mgr_obj = hchnl_mgr; 713 struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
737 struct chnl_object *pchnl = NULL; 714 struct chnl_object *pchnl = NULL;
738 struct sync_object *sync_event = NULL; 715 struct sync_object *sync_event = NULL;
739 /* Ensure DBC requirements: */ 716
740 DBC_REQUIRE(chnl != NULL);
741 DBC_REQUIRE(pattrs != NULL);
742 DBC_REQUIRE(hchnl_mgr != NULL);
743 *chnl = NULL; 717 *chnl = NULL;
744 718
745 /* Validate Args: */ 719 /* Validate Args: */
@@ -761,7 +735,6 @@ int bridge_chnl_open(struct chnl_object **chnl,
761 return status; 735 return status;
762 } 736 }
763 737
764 DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);
765 738
766 /* Create channel object: */ 739 /* Create channel object: */
767 pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL); 740 pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
@@ -850,7 +823,6 @@ int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
850{ 823{
851 int status = 0; 824 int status = 0;
852 825
853 DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));
854 826
855 if (event_mask) 827 if (event_mask)
856 status = ntfy_register(chnl_obj->ntfy_obj, hnotification, 828 status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
@@ -906,8 +878,6 @@ static void free_chirp_list(struct list_head *chirp_list)
906{ 878{
907 struct chnl_irp *chirp, *tmp; 879 struct chnl_irp *chirp, *tmp;
908 880
909 DBC_REQUIRE(chirp_list != NULL);
910
911 list_for_each_entry_safe(chirp, tmp, chirp_list, link) { 881 list_for_each_entry_safe(chirp, tmp, chirp_list, link) {
912 list_del(&chirp->link); 882 list_del(&chirp->link);
913 kfree(chirp); 883 kfree(chirp);
@@ -924,8 +894,6 @@ static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
924 int status = -ENOSR; 894 int status = -ENOSR;
925 u32 i; 895 u32 i;
926 896
927 DBC_REQUIRE(chnl_mgr_obj);
928
929 for (i = 0; i < chnl_mgr_obj->max_channels; i++) { 897 for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
930 if (chnl_mgr_obj->channels[i] == NULL) { 898 if (chnl_mgr_obj->channels[i] == NULL) {
931 status = 0; 899 status = 0;
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 7eb56178fb64..c7df34e6b60b 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -29,9 +29,6 @@
29#include <dspbridge/dev.h> 29#include <dspbridge/dev.h>
30#include "_tiomap.h" 30#include "_tiomap.h"
31 31
32/* ----------------------------------- Trace & Debug */
33#include <dspbridge/dbc.h>
34
35/* ----------------------------------- This */ 32/* ----------------------------------- This */
36#include <dspbridge/clk.h> 33#include <dspbridge/clk.h>
37 34
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 694c0e5e55cc..9b50b5bd4edb 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -33,9 +33,6 @@
33/* ----------------------------------- DSP/BIOS Bridge */ 33/* ----------------------------------- DSP/BIOS Bridge */
34#include <dspbridge/dbdefs.h> 34#include <dspbridge/dbdefs.h>
35 35
36/* Trace & Debug */
37#include <dspbridge/dbc.h>
38
39/* Services Layer */ 36/* Services Layer */
40#include <dspbridge/ntfy.h> 37#include <dspbridge/ntfy.h>
41#include <dspbridge/sync.h> 38#include <dspbridge/sync.h>
@@ -114,7 +111,7 @@ struct io_mgr {
114 struct mgr_processorextinfo ext_proc_info; 111 struct mgr_processorextinfo ext_proc_info;
115 struct cmm_object *cmm_mgr; /* Shared Mem Mngr */ 112 struct cmm_object *cmm_mgr; /* Shared Mem Mngr */
116 struct work_struct io_workq; /* workqueue */ 113 struct work_struct io_workq; /* workqueue */
117#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) 114#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
118 u32 trace_buffer_begin; /* Trace message start address */ 115 u32 trace_buffer_begin; /* Trace message start address */
119 u32 trace_buffer_end; /* Trace message end address */ 116 u32 trace_buffer_end; /* Trace message end address */
120 u32 trace_buffer_current; /* Trace message current address */ 117 u32 trace_buffer_current; /* Trace message current address */
@@ -246,7 +243,7 @@ int bridge_io_destroy(struct io_mgr *hio_mgr)
246 /* Free IO DPC object */ 243 /* Free IO DPC object */
247 tasklet_kill(&hio_mgr->dpc_tasklet); 244 tasklet_kill(&hio_mgr->dpc_tasklet);
248 245
249#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) 246#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
250 kfree(hio_mgr->msg); 247 kfree(hio_mgr->msg);
251#endif 248#endif
252 dsp_wdt_exit(); 249 dsp_wdt_exit();
@@ -386,7 +383,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
386 status = -EFAULT; 383 status = -EFAULT;
387 } 384 }
388 if (!status) { 385 if (!status) {
389#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) 386#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
390 status = 387 status =
391 cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end); 388 cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
392#else 389#else
@@ -731,7 +728,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
731 hmsg_mgr->max_msgs); 728 hmsg_mgr->max_msgs);
732 memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm)); 729 memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
733 730
734#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) 731#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
735 /* Get the start address of trace buffer */ 732 /* Get the start address of trace buffer */
736 status = cod_get_sym_value(cod_man, SYS_PUTCBEG, 733 status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
737 &hio_mgr->trace_buffer_begin); 734 &hio_mgr->trace_buffer_begin);
@@ -910,7 +907,7 @@ void io_dpc(unsigned long ref_data)
910 } 907 }
911 908
912#endif 909#endif
913#ifdef CONFIG_TIDSPBRIDGE_DEBUG 910#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
914 if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) { 911 if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
915 /* Notify DSP Trace message */ 912 /* Notify DSP Trace message */
916 print_dsp_debug_trace(pio_mgr); 913 print_dsp_debug_trace(pio_mgr);
@@ -973,29 +970,16 @@ void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
973 chnl_mgr_obj = io_manager->chnl_mgr; 970 chnl_mgr_obj = io_manager->chnl_mgr;
974 sm = io_manager->shared_mem; 971 sm = io_manager->shared_mem;
975 if (io_mode == IO_INPUT) { 972 if (io_mode == IO_INPUT) {
976 /*
977 * Assertion fires if CHNL_AddIOReq() called on a stream
978 * which was cancelled, or attached to a dead board.
979 */
980 DBC_ASSERT((pchnl->state == CHNL_STATEREADY) ||
981 (pchnl->state == CHNL_STATEEOS));
982 /* Indicate to the DSP we have a buffer available for input */ 973 /* Indicate to the DSP we have a buffer available for input */
983 set_chnl_busy(sm, pchnl->chnl_id); 974 set_chnl_busy(sm, pchnl->chnl_id);
984 *mbx_val = MBX_PCPY_CLASS; 975 *mbx_val = MBX_PCPY_CLASS;
985 } else if (io_mode == IO_OUTPUT) { 976 } else if (io_mode == IO_OUTPUT) {
986 /* 977 /*
987 * This assertion fails if CHNL_AddIOReq() was called on a
988 * stream which was cancelled, or attached to a dead board.
989 */
990 DBC_ASSERT((pchnl->state & ~CHNL_STATEEOS) ==
991 CHNL_STATEREADY);
992 /*
993 * Record the fact that we have a buffer available for 978 * Record the fact that we have a buffer available for
994 * output. 979 * output.
995 */ 980 */
996 chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id); 981 chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id);
997 } else { 982 } else {
998 DBC_ASSERT(io_mode); /* Shouldn't get here. */
999 } 983 }
1000func_end: 984func_end:
1001 return; 985 return;
@@ -1087,7 +1071,6 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1087 dw_arg = sm->arg; 1071 dw_arg = sm->arg;
1088 if (chnl_id >= CHNL_MAXCHANNELS) { 1072 if (chnl_id >= CHNL_MAXCHANNELS) {
1089 /* Shouldn't be here: would indicate corrupted shm. */ 1073 /* Shouldn't be here: would indicate corrupted shm. */
1090 DBC_ASSERT(chnl_id);
1091 goto func_end; 1074 goto func_end;
1092 } 1075 }
1093 pchnl = chnl_mgr_obj->channels[chnl_id]; 1076 pchnl = chnl_mgr_obj->channels[chnl_id];
@@ -1683,7 +1666,7 @@ int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
1683} 1666}
1684 1667
1685 1668
1686#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) 1669#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
1687void print_dsp_debug_trace(struct io_mgr *hio_mgr) 1670void print_dsp_debug_trace(struct io_mgr *hio_mgr)
1688{ 1671{
1689 u32 ul_new_message_length = 0, ul_gpp_cur_pointer; 1672 u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
diff --git a/drivers/staging/tidspbridge/core/msg_sm.c b/drivers/staging/tidspbridge/core/msg_sm.c
index 94d9e04a22fa..ce9557e16eb0 100644
--- a/drivers/staging/tidspbridge/core/msg_sm.c
+++ b/drivers/staging/tidspbridge/core/msg_sm.c
@@ -20,9 +20,6 @@
20/* ----------------------------------- DSP/BIOS Bridge */ 20/* ----------------------------------- DSP/BIOS Bridge */
21#include <dspbridge/dbdefs.h> 21#include <dspbridge/dbdefs.h>
22 22
23/* ----------------------------------- Trace & Debug */
24#include <dspbridge/dbc.h>
25
26/* ----------------------------------- OS Adaptation Layer */ 23/* ----------------------------------- OS Adaptation Layer */
27#include <dspbridge/sync.h> 24#include <dspbridge/sync.h>
28 25
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index dde559d06c43..7862513cc295 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -27,9 +27,6 @@
27/* ----------------------------------- DSP/BIOS Bridge */ 27/* ----------------------------------- DSP/BIOS Bridge */
28#include <dspbridge/dbdefs.h> 28#include <dspbridge/dbdefs.h>
29 29
30/* ----------------------------------- Trace & Debug */
31#include <dspbridge/dbc.h>
32
33/* ----------------------------------- OS Adaptation Layer */ 30/* ----------------------------------- OS Adaptation Layer */
34#include <dspbridge/drv.h> 31#include <dspbridge/drv.h>
35#include <dspbridge/sync.h> 32#include <dspbridge/sync.h>
@@ -256,9 +253,6 @@ static void bad_page_dump(u32 pa, struct page *pg)
256void bridge_drv_entry(struct bridge_drv_interface **drv_intf, 253void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
257 const char *driver_file_name) 254 const char *driver_file_name)
258{ 255{
259
260 DBC_REQUIRE(driver_file_name != NULL);
261
262 if (strcmp(driver_file_name, "UMA") == 0) 256 if (strcmp(driver_file_name, "UMA") == 0)
263 *drv_intf = &drv_interface_fxns; 257 *drv_intf = &drv_interface_fxns;
264 else 258 else
@@ -389,6 +383,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
389 u32 clk_cmd; 383 u32 clk_cmd;
390 struct io_mgr *hio_mgr; 384 struct io_mgr *hio_mgr;
391 u32 ul_load_monitor_timer; 385 u32 ul_load_monitor_timer;
386 u32 wdt_en = 0;
392 struct omap_dsp_platform_data *pdata = 387 struct omap_dsp_platform_data *pdata =
393 omap_dspbridge_dev->dev.platform_data; 388 omap_dspbridge_dev->dev.platform_data;
394 389
@@ -399,16 +394,13 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
399 (void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME, 394 (void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME,
400 &ul_shm_base_virt); 395 &ul_shm_base_virt);
401 ul_shm_base_virt *= DSPWORDSIZE; 396 ul_shm_base_virt *= DSPWORDSIZE;
402 DBC_ASSERT(ul_shm_base_virt != 0);
403 /* DSP Virtual address */ 397 /* DSP Virtual address */
404 ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va; 398 ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va;
405 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
406 ul_shm_offset_virt = 399 ul_shm_offset_virt =
407 ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); 400 ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
408 /* Kernel logical address */ 401 /* Kernel logical address */
409 ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt; 402 ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;
410 403
411 DBC_ASSERT(ul_shm_base != 0);
412 /* 2nd wd is used as sync field */ 404 /* 2nd wd is used as sync field */
413 dw_sync_addr = ul_shm_base + SHMSYNCOFFSET; 405 dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
414 /* Write a signature into the shm base + offset; this will 406 /* Write a signature into the shm base + offset; this will
@@ -603,9 +595,12 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
603 if (!wait_for_start(dev_context, dw_sync_addr)) 595 if (!wait_for_start(dev_context, dw_sync_addr))
604 status = -ETIMEDOUT; 596 status = -ETIMEDOUT;
605 597
606 /* Start wdt */ 598 dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en);
607 dsp_wdt_sm_set((void *)ul_shm_base); 599 if (wdt_en) {
608 dsp_wdt_enable(true); 600 /* Start wdt */
601 dsp_wdt_sm_set((void *)ul_shm_base);
602 dsp_wdt_enable(true);
603 }
609 604
610 status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr); 605 status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
611 if (hio_mgr) { 606 if (hio_mgr) {
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index 02dd4391309a..16a4aafa86ae 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -303,7 +303,6 @@ int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
303 } 303 }
304 /* TODO -- Assert may be a too hard restriction here.. May be we should 304 /* TODO -- Assert may be a too hard restriction here.. May be we should
305 * just return with failure when the CLK ID does not match */ 305 * just return with failure when the CLK ID does not match */
306 /* DBC_ASSERT(clk_id_index < MBX_PM_MAX_RESOURCES); */
307 if (clk_id_index == MBX_PM_MAX_RESOURCES) { 306 if (clk_id_index == MBX_PM_MAX_RESOURCES) {
308 /* return with a more meaningfull error code */ 307 /* return with a more meaningfull error code */
309 return -EPERM; 308 return -EPERM;
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index dfb356eb6723..7fda10c36862 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -21,9 +21,6 @@
21/* ----------------------------------- DSP/BIOS Bridge */ 21/* ----------------------------------- DSP/BIOS Bridge */
22#include <dspbridge/dbdefs.h> 22#include <dspbridge/dbdefs.h>
23 23
24/* ----------------------------------- Trace & Debug */
25#include <dspbridge/dbc.h>
26
27/* ----------------------------------- Platform Manager */ 24/* ----------------------------------- Platform Manager */
28#include <dspbridge/dev.h> 25#include <dspbridge/dev.h>
29#include <dspbridge/drv.h> 26#include <dspbridge/drv.h>
@@ -68,20 +65,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
68 status = dev_get_symbol(dev_context->dev_obj, 65 status = dev_get_symbol(dev_context->dev_obj,
69 SHMBASENAME, &ul_shm_base_virt); 66 SHMBASENAME, &ul_shm_base_virt);
70 } 67 }
71 DBC_ASSERT(ul_shm_base_virt != 0);
72 68
73 /* Check if it is a read of Trace section */ 69 /* Check if it is a read of Trace section */
74 if (!status && !ul_trace_sec_beg) { 70 if (!status && !ul_trace_sec_beg) {
75 status = dev_get_symbol(dev_context->dev_obj, 71 status = dev_get_symbol(dev_context->dev_obj,
76 DSP_TRACESEC_BEG, &ul_trace_sec_beg); 72 DSP_TRACESEC_BEG, &ul_trace_sec_beg);
77 } 73 }
78 DBC_ASSERT(ul_trace_sec_beg != 0);
79 74
80 if (!status && !ul_trace_sec_end) { 75 if (!status && !ul_trace_sec_end) {
81 status = dev_get_symbol(dev_context->dev_obj, 76 status = dev_get_symbol(dev_context->dev_obj,
82 DSP_TRACESEC_END, &ul_trace_sec_end); 77 DSP_TRACESEC_END, &ul_trace_sec_end);
83 } 78 }
84 DBC_ASSERT(ul_trace_sec_end != 0);
85 79
86 if (!status) { 80 if (!status) {
87 if ((dsp_addr <= ul_trace_sec_end) && 81 if ((dsp_addr <= ul_trace_sec_end) &&
@@ -105,19 +99,16 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
105 status = dev_get_symbol(dev_context->dev_obj, 99 status = dev_get_symbol(dev_context->dev_obj,
106 DYNEXTBASE, &ul_dyn_ext_base); 100 DYNEXTBASE, &ul_dyn_ext_base);
107 } 101 }
108 DBC_ASSERT(ul_dyn_ext_base != 0);
109 102
110 if (!status) { 103 if (!status) {
111 status = dev_get_symbol(dev_context->dev_obj, 104 status = dev_get_symbol(dev_context->dev_obj,
112 EXTBASE, &ul_ext_base); 105 EXTBASE, &ul_ext_base);
113 } 106 }
114 DBC_ASSERT(ul_ext_base != 0);
115 107
116 if (!status) { 108 if (!status) {
117 status = dev_get_symbol(dev_context->dev_obj, 109 status = dev_get_symbol(dev_context->dev_obj,
118 EXTEND, &ul_ext_end); 110 EXTEND, &ul_ext_end);
119 } 111 }
120 DBC_ASSERT(ul_ext_end != 0);
121 112
122 /* Trace buffer is right after the shm SEG0, 113 /* Trace buffer is right after the shm SEG0,
123 * so set the base address to SHMBASE */ 114 * so set the base address to SHMBASE */
@@ -126,8 +117,6 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
126 ul_ext_end = ul_trace_sec_end; 117 ul_ext_end = ul_trace_sec_end;
127 } 118 }
128 119
129 DBC_ASSERT(ul_ext_end != 0);
130 DBC_ASSERT(ul_ext_end > ul_ext_base);
131 120
132 if (ul_ext_end < ul_ext_base) 121 if (ul_ext_end < ul_ext_base)
133 status = -EPERM; 122 status = -EPERM;
@@ -135,7 +124,6 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
135 if (!status) { 124 if (!status) {
136 ul_tlb_base_virt = 125 ul_tlb_base_virt =
137 dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE; 126 dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
138 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
139 dw_ext_prog_virt_mem = 127 dw_ext_prog_virt_mem =
140 dev_context->atlb_entry[0].gpp_va; 128 dev_context->atlb_entry[0].gpp_va;
141 129
@@ -271,7 +259,6 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
271 /* Get SHM_BEG EXT_BEG and EXT_END. */ 259 /* Get SHM_BEG EXT_BEG and EXT_END. */
272 ret = dev_get_symbol(dev_context->dev_obj, 260 ret = dev_get_symbol(dev_context->dev_obj,
273 SHMBASENAME, &ul_shm_base_virt); 261 SHMBASENAME, &ul_shm_base_virt);
274 DBC_ASSERT(ul_shm_base_virt != 0);
275 if (dynamic_load) { 262 if (dynamic_load) {
276 if (!ret) { 263 if (!ret) {
277 if (symbols_reloaded) 264 if (symbols_reloaded)
@@ -280,7 +267,6 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
280 (dev_context->dev_obj, DYNEXTBASE, 267 (dev_context->dev_obj, DYNEXTBASE,
281 &ul_ext_base); 268 &ul_ext_base);
282 } 269 }
283 DBC_ASSERT(ul_ext_base != 0);
284 if (!ret) { 270 if (!ret) {
285 /* DR OMAPS00013235 : DLModules array may be 271 /* DR OMAPS00013235 : DLModules array may be
286 * in EXTMEM. It is expected that DYNEXTMEM and 272 * in EXTMEM. It is expected that DYNEXTMEM and
@@ -299,7 +285,6 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
299 dev_get_symbol 285 dev_get_symbol
300 (dev_context->dev_obj, EXTBASE, 286 (dev_context->dev_obj, EXTBASE,
301 &ul_ext_base); 287 &ul_ext_base);
302 DBC_ASSERT(ul_ext_base != 0);
303 if (!ret) 288 if (!ret)
304 ret = 289 ret =
305 dev_get_symbol 290 dev_get_symbol
@@ -312,15 +297,12 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
312 if (trace_load) 297 if (trace_load)
313 ul_ext_base = ul_shm_base_virt; 298 ul_ext_base = ul_shm_base_virt;
314 299
315 DBC_ASSERT(ul_ext_end != 0);
316 DBC_ASSERT(ul_ext_end > ul_ext_base);
317 if (ul_ext_end < ul_ext_base) 300 if (ul_ext_end < ul_ext_base)
318 ret = -EPERM; 301 ret = -EPERM;
319 302
320 if (!ret) { 303 if (!ret) {
321 ul_tlb_base_virt = 304 ul_tlb_base_virt =
322 dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE; 305 dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
323 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
324 306
325 if (symbols_reloaded) { 307 if (symbols_reloaded) {
326 ret = dev_get_symbol 308 ret = dev_get_symbol
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
index 2126f5977530..70055c8111ed 100644
--- a/drivers/staging/tidspbridge/core/wdt.c
+++ b/drivers/staging/tidspbridge/core/wdt.c
@@ -25,8 +25,6 @@
25#include <dspbridge/host_os.h> 25#include <dspbridge/host_os.h>
26 26
27 27
28#ifdef CONFIG_TIDSPBRIDGE_WDT3
29
30#define OMAP34XX_WDT3_BASE (L4_PER_34XX_BASE + 0x30000) 28#define OMAP34XX_WDT3_BASE (L4_PER_34XX_BASE + 0x30000)
31 29
32static struct dsp_wdt_setting dsp_wdt; 30static struct dsp_wdt_setting dsp_wdt;
@@ -84,7 +82,7 @@ int dsp_wdt_init(void)
84void dsp_wdt_sm_set(void *data) 82void dsp_wdt_sm_set(void *data)
85{ 83{
86 dsp_wdt.sm_wdt = data; 84 dsp_wdt.sm_wdt = data;
87 dsp_wdt.sm_wdt->wdt_overflow = CONFIG_TIDSPBRIDGE_WDT_TIMEOUT; 85 dsp_wdt.sm_wdt->wdt_overflow = 5; /* in seconds */
88} 86}
89 87
90 88
@@ -128,23 +126,3 @@ void dsp_wdt_enable(bool enable)
128 clk_disable(dsp_wdt.fclk); 126 clk_disable(dsp_wdt.fclk);
129 } 127 }
130} 128}
131
132#else
133void dsp_wdt_enable(bool enable)
134{
135}
136
137void dsp_wdt_sm_set(void *data)
138{
139}
140
141int dsp_wdt_init(void)
142{
143 return 0;
144}
145
146void dsp_wdt_exit(void)
147{
148}
149#endif
150
diff --git a/drivers/staging/tidspbridge/gen/gh.c b/drivers/staging/tidspbridge/gen/gh.c
index 60aa7b063c91..25eaef782aaa 100644
--- a/drivers/staging/tidspbridge/gen/gh.c
+++ b/drivers/staging/tidspbridge/gen/gh.c
@@ -95,15 +95,6 @@ void gh_delete(struct gh_t_hash_tab *hash_tab)
95} 95}
96 96
97/* 97/*
98 * ======== gh_exit ========
99 */
100
101void gh_exit(void)
102{
103 /* Do nothing */
104}
105
106/*
107 * ======== gh_find ======== 98 * ======== gh_find ========
108 */ 99 */
109 100
@@ -122,15 +113,6 @@ void *gh_find(struct gh_t_hash_tab *hash_tab, void *key)
122} 113}
123 114
124/* 115/*
125 * ======== gh_init ========
126 */
127
128void gh_init(void)
129{
130 /* Do nothing */
131}
132
133/*
134 * ======== gh_insert ======== 116 * ======== gh_insert ========
135 */ 117 */
136 118
diff --git a/drivers/staging/tidspbridge/gen/uuidutil.c b/drivers/staging/tidspbridge/gen/uuidutil.c
index ff6ebadf98f4..b44656cf7858 100644
--- a/drivers/staging/tidspbridge/gen/uuidutil.c
+++ b/drivers/staging/tidspbridge/gen/uuidutil.c
@@ -23,9 +23,6 @@
23/* ----------------------------------- DSP/BIOS Bridge */ 23/* ----------------------------------- DSP/BIOS Bridge */
24#include <dspbridge/dbdefs.h> 24#include <dspbridge/dbdefs.h>
25 25
26/* ----------------------------------- Trace & Debug */
27#include <dspbridge/dbc.h>
28
29/* ----------------------------------- This */ 26/* ----------------------------------- This */
30#include <dspbridge/uuidutil.h> 27#include <dspbridge/uuidutil.h>
31 28
@@ -41,8 +38,6 @@ void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid,
41{ 38{
42 s32 i; /* return result from snprintf. */ 39 s32 i; /* return result from snprintf. */
43 40
44 DBC_REQUIRE(uuid_obj && sz_uuid);
45
46 i = snprintf(sz_uuid, size, 41 i = snprintf(sz_uuid, size,
47 "%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X", 42 "%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X",
48 uuid_obj->data1, uuid_obj->data2, uuid_obj->data3, 43 uuid_obj->data1, uuid_obj->data2, uuid_obj->data3,
@@ -50,8 +45,6 @@ void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid,
50 uuid_obj->data6[0], uuid_obj->data6[1], 45 uuid_obj->data6[0], uuid_obj->data6[1],
51 uuid_obj->data6[2], uuid_obj->data6[3], 46 uuid_obj->data6[2], uuid_obj->data6[3],
52 uuid_obj->data6[4], uuid_obj->data6[5]); 47 uuid_obj->data6[4], uuid_obj->data6[5]);
53
54 DBC_ENSURE(i != -1);
55} 48}
56 49
57static s32 uuid_hex_to_bin(char *buf, s32 len) 50static s32 uuid_hex_to_bin(char *buf, s32 len)
diff --git a/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h b/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
index 6e7ab4fd8c39..cc95a18f1db9 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
@@ -99,14 +99,10 @@ struct shm {
99 struct opp_rqst_struct opp_request; 99 struct opp_rqst_struct opp_request;
100 /* load monitor information structure */ 100 /* load monitor information structure */
101 struct load_mon_struct load_mon_info; 101 struct load_mon_struct load_mon_info;
102#ifdef CONFIG_TIDSPBRIDGE_WDT3
103 /* Flag for WDT enable/disable F/I clocks */ 102 /* Flag for WDT enable/disable F/I clocks */
104 u32 wdt_setclocks; 103 u32 wdt_setclocks;
105 u32 wdt_overflow; /* WDT overflow time */ 104 u32 wdt_overflow; /* WDT overflow time */
106 char dummy[176]; /* padding to 256 byte boundary */ 105 char dummy[176]; /* padding to 256 byte boundary */
107#else
108 char dummy[184]; /* padding to 256 byte boundary */
109#endif
110 u32 shm_dbg_var[64]; /* shared memory debug variables */ 106 u32 shm_dbg_var[64]; /* shared memory debug variables */
111}; 107};
112 108
diff --git a/drivers/staging/tidspbridge/include/dspbridge/chnl.h b/drivers/staging/tidspbridge/include/dspbridge/chnl.h
index 92f6a13424f2..9b018b1f9bf3 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/chnl.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/chnl.h
@@ -48,7 +48,6 @@
48 * -ECHRNG: This manager cannot handle this many channels. 48 * -ECHRNG: This manager cannot handle this many channels.
49 * -EEXIST: Channel manager already exists for this device. 49 * -EEXIST: Channel manager already exists for this device.
50 * Requires: 50 * Requires:
51 * chnl_init(void) called.
52 * channel_mgr != NULL. 51 * channel_mgr != NULL.
53 * mgr_attrts != NULL. 52 * mgr_attrts != NULL.
54 * Ensures: 53 * Ensures:
@@ -70,7 +69,6 @@ extern int chnl_create(struct chnl_mgr **channel_mgr,
70 * 0: Success. 69 * 0: Success.
71 * -EFAULT: hchnl_mgr was invalid. 70 * -EFAULT: hchnl_mgr was invalid.
72 * Requires: 71 * Requires:
73 * chnl_init(void) called.
74 * Ensures: 72 * Ensures:
75 * 0: Cancels I/O on each open channel. 73 * 0: Cancels I/O on each open channel.
76 * Closes each open channel. 74 * Closes each open channel.
@@ -79,31 +77,4 @@ extern int chnl_create(struct chnl_mgr **channel_mgr,
79 */ 77 */
80extern int chnl_destroy(struct chnl_mgr *hchnl_mgr); 78extern int chnl_destroy(struct chnl_mgr *hchnl_mgr);
81 79
82/*
83 * ======== chnl_exit ========
84 * Purpose:
85 * Discontinue usage of the CHNL module.
86 * Parameters:
87 * Returns:
88 * Requires:
89 * chnl_init(void) previously called.
90 * Ensures:
91 * Resources, if any acquired in chnl_init(void), are freed when the last
92 * client of CHNL calls chnl_exit(void).
93 */
94extern void chnl_exit(void);
95
96/*
97 * ======== chnl_init ========
98 * Purpose:
99 * Initialize the CHNL module's private state.
100 * Parameters:
101 * Returns:
102 * TRUE if initialized; FALSE if error occurred.
103 * Requires:
104 * Ensures:
105 * A requirement for each of the other public CHNL functions.
106 */
107extern bool chnl_init(void);
108
109#endif /* CHNL_ */ 80#endif /* CHNL_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cmm.h b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
index aff22051cf57..c66bcf7ea90c 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cmm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
@@ -79,7 +79,6 @@ extern void *cmm_calloc_buf(struct cmm_object *hcmm_mgr,
79 * -EPERM: Failed to initialize critical sect sync object. 79 * -EPERM: Failed to initialize critical sect sync object.
80 * 80 *
81 * Requires: 81 * Requires:
82 * cmm_init(void) called.
83 * ph_cmm_mgr != NULL. 82 * ph_cmm_mgr != NULL.
84 * mgr_attrts->min_block_size >= 4 bytes. 83 * mgr_attrts->min_block_size >= 4 bytes.
85 * Ensures: 84 * Ensures:
@@ -111,20 +110,6 @@ extern int cmm_create(struct cmm_object **ph_cmm_mgr,
111extern int cmm_destroy(struct cmm_object *hcmm_mgr, bool force); 110extern int cmm_destroy(struct cmm_object *hcmm_mgr, bool force);
112 111
113/* 112/*
114 * ======== cmm_exit ========
115 * Purpose:
116 * Discontinue usage of module. Cleanup CMM module if CMM cRef reaches zero.
117 * Parameters:
118 * n/a
119 * Returns:
120 * n/a
121 * Requires:
122 * CMM is initialized.
123 * Ensures:
124 */
125extern void cmm_exit(void);
126
127/*
128 * ======== cmm_free_buf ======== 113 * ======== cmm_free_buf ========
129 * Purpose: 114 * Purpose:
130 * Free the given buffer. 115 * Free the given buffer.
@@ -185,19 +170,6 @@ extern int cmm_get_info(struct cmm_object *hcmm_mgr,
185 struct cmm_info *cmm_info_obj); 170 struct cmm_info *cmm_info_obj);
186 171
187/* 172/*
188 * ======== cmm_init ========
189 * Purpose:
190 * Initializes private state of CMM module.
191 * Parameters:
192 * Returns:
193 * TRUE if initialized; FALSE if error occurred.
194 * Requires:
195 * Ensures:
196 * CMM initialized.
197 */
198extern bool cmm_init(void);
199
200/*
201 * ======== cmm_register_gppsm_seg ======== 173 * ======== cmm_register_gppsm_seg ========
202 * Purpose: 174 * Purpose:
203 * Register a block of SM with the CMM. 175 * Register a block of SM with the CMM.
@@ -333,7 +305,6 @@ extern int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator,
333 * 0: Success. 305 * 0: Success.
334 * -EFAULT: Bad translator handle. 306 * -EFAULT: Bad translator handle.
335 * Requires: 307 * Requires:
336 * (refs > 0)
337 * (paddr != NULL) 308 * (paddr != NULL)
338 * (ul_size > 0) 309 * (ul_size > 0)
339 * Ensures: 310 * Ensures:
@@ -355,7 +326,6 @@ extern int cmm_xlator_info(struct cmm_xlatorobject *xlator,
355 * Returns: 326 * Returns:
356 * Valid address on success, else NULL. 327 * Valid address on success, else NULL.
357 * Requires: 328 * Requires:
358 * refs > 0
359 * paddr != NULL 329 * paddr != NULL
360 * xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA) 330 * xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA)
361 * Ensures: 331 * Ensures:
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cod.h b/drivers/staging/tidspbridge/include/dspbridge/cod.h
index cb684c11b302..ba2005d02422 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cod.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cod.h
@@ -100,21 +100,6 @@ extern int cod_create(struct cod_manager **mgr,
100extern void cod_delete(struct cod_manager *cod_mgr_obj); 100extern void cod_delete(struct cod_manager *cod_mgr_obj);
101 101
102/* 102/*
103 * ======== cod_exit ========
104 * Purpose:
105 * Discontinue usage of the COD module.
106 * Parameters:
107 * None.
108 * Returns:
109 * None.
110 * Requires:
111 * COD initialized.
112 * Ensures:
113 * Resources acquired in cod_init(void) are freed.
114 */
115extern void cod_exit(void);
116
117/*
118 * ======== cod_get_base_lib ======== 103 * ======== cod_get_base_lib ========
119 * Purpose: 104 * Purpose:
120 * Get handle to the base image DBL library. 105 * Get handle to the base image DBL library.
@@ -243,20 +228,6 @@ extern int cod_get_sym_value(struct cod_manager *cod_mgr_obj,
243 char *str_sym, u32 * pul_value); 228 char *str_sym, u32 * pul_value);
244 229
245/* 230/*
246 * ======== cod_init ========
247 * Purpose:
248 * Initialize the COD module's private state.
249 * Parameters:
250 * None.
251 * Returns:
252 * TRUE if initialized; FALSE if error occurred.
253 * Requires:
254 * Ensures:
255 * A requirement for each of the other public COD functions.
256 */
257extern bool cod_init(void);
258
259/*
260 * ======== cod_load_base ======== 231 * ======== cod_load_base ========
261 * Purpose: 232 * Purpose:
262 * Load the initial program image, optionally with command-line arguments, 233 * Load the initial program image, optionally with command-line arguments,
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbc.h b/drivers/staging/tidspbridge/include/dspbridge/dbc.h
deleted file mode 100644
index 463760f499a4..000000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/dbc.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * dbc.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * "Design by Contract" programming macros.
7 *
8 * Notes:
9 * Requires that the GT->ERROR function has been defaulted to a valid
10 * error handler for the given execution environment.
11 *
12 * Does not require that GT_init() be called.
13 *
14 * Copyright (C) 2008 Texas Instruments, Inc.
15 *
16 * This package is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as
18 * published by the Free Software Foundation.
19 *
20 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
22 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
23 */
24
25#ifndef DBC_
26#define DBC_
27
28/* Assertion Macros: */
29#ifdef CONFIG_TIDSPBRIDGE_DEBUG
30
31#define DBC_ASSERT(exp) \
32 if (!(exp)) \
33 pr_err("%s, line %d: Assertion (" #exp ") failed.\n", \
34 __FILE__, __LINE__)
35#define DBC_REQUIRE DBC_ASSERT /* Function Precondition. */
36#define DBC_ENSURE DBC_ASSERT /* Function Postcondition. */
37
38#else
39
40#define DBC_ASSERT(exp) {}
41#define DBC_REQUIRE(exp) {}
42#define DBC_ENSURE(exp) {}
43
44#endif /* DEBUG */
45
46#endif /* DBC_ */
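The macros above only ever checked anything under CONFIG_TIDSPBRIDGE_DEBUG, which this series also removes, so the call sites throughout the driver are simply dropped. Where such a precondition is still worth keeping, the usual mainline idiom is an open-coded WARN_ON(); a hedged sketch against the free_chirp_list() precondition deleted earlier (not something this series actually adds):

	if (WARN_ON(!chirp_list))
		return;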
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h
index f92b4be0b413..fa2d79ef6cc8 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dev.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h
@@ -478,33 +478,6 @@ extern int dev_get_bridge_context(struct dev_object *hdev_obj,
478 **phbridge_context); 478 **phbridge_context);
479 479
480/* 480/*
481 * ======== dev_exit ========
482 * Purpose:
483 * Decrement reference count, and free resources when reference count is
484 * 0.
485 * Parameters:
486 * Returns:
487 * Requires:
488 * DEV is initialized.
489 * Ensures:
490 * When reference count == 0, DEV's private resources are freed.
491 */
492extern void dev_exit(void);
493
494/*
495 * ======== dev_init ========
496 * Purpose:
497 * Initialize DEV's private state, keeping a reference count on each call.
498 * Parameters:
499 * Returns:
500 * TRUE if initialized; FALSE if error occurred.
501 * Requires:
502 * Ensures:
503 * TRUE: A requirement for the other public DEV functions.
504 */
505extern bool dev_init(void);
506
507/*
508 * ======== dev_insert_proc_object ======== 481 * ======== dev_insert_proc_object ========
509 * Purpose: 482 * Purpose:
510 * Inserts the Processor Object into the List of PROC Objects 483 * Inserts the Processor Object into the List of PROC Objects
diff --git a/drivers/staging/tidspbridge/include/dspbridge/disp.h b/drivers/staging/tidspbridge/include/dspbridge/disp.h
index 5dfdc8cfb937..39d3cea9ca8b 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/disp.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/disp.h
@@ -53,7 +53,6 @@ struct disp_attr {
53 * -ENOMEM: Insufficient memory for requested resources. 53 * -ENOMEM: Insufficient memory for requested resources.
54 * -EPERM: Unable to create dispatcher. 54 * -EPERM: Unable to create dispatcher.
55 * Requires: 55 * Requires:
56 * disp_init(void) called.
57 * disp_attrs != NULL. 56 * disp_attrs != NULL.
58 * hdev_obj != NULL. 57 * hdev_obj != NULL.
59 * dispatch_obj != NULL. 58 * dispatch_obj != NULL.
@@ -73,7 +72,6 @@ extern int disp_create(struct disp_object **dispatch_obj,
73 * disp_obj: Node Dispatcher object. 72 * disp_obj: Node Dispatcher object.
74 * Returns: 73 * Returns:
75 * Requires: 74 * Requires:
76 * disp_init(void) called.
77 * Valid disp_obj. 75 * Valid disp_obj.
78 * Ensures: 76 * Ensures:
79 * disp_obj is invalid. 77 * disp_obj is invalid.
@@ -81,31 +79,6 @@ extern int disp_create(struct disp_object **dispatch_obj,
81extern void disp_delete(struct disp_object *disp_obj); 79extern void disp_delete(struct disp_object *disp_obj);
82 80
83/* 81/*
84 * ======== disp_exit ========
85 * Discontinue usage of DISP module.
86 *
87 * Parameters:
88 * Returns:
89 * Requires:
90 * disp_init(void) previously called.
91 * Ensures:
92 * Any resources acquired in disp_init(void) will be freed when last DISP
93 * client calls disp_exit(void).
94 */
95extern void disp_exit(void);
96
97/*
98 * ======== disp_init ========
99 * Initialize the DISP module.
100 *
101 * Parameters:
102 * Returns:
103 * TRUE if initialization succeeded, FALSE otherwise.
104 * Ensures:
105 */
106extern bool disp_init(void);
107
108/*
109 * ======== disp_node_change_priority ======== 82 * ======== disp_node_change_priority ========
110 * Change the priority of a node currently running on the target. 83 * Change the priority of a node currently running on the target.
111 * 84 *
@@ -120,7 +93,6 @@ extern bool disp_init(void);
120 * 0: Success. 93 * 0: Success.
121 * -ETIME: A timeout occurred before the DSP responded. 94 * -ETIME: A timeout occurred before the DSP responded.
122 * Requires: 95 * Requires:
123 * disp_init(void) called.
124 * Valid disp_obj. 96 * Valid disp_obj.
125 * hnode != NULL. 97 * hnode != NULL.
126 * Ensures: 98 * Ensures:
@@ -148,7 +120,6 @@ extern int disp_node_change_priority(struct disp_object
148 * -ETIME: A timeout occurred before the DSP responded. 120 * -ETIME: A timeout occurred before the DSP responded.
149 * -EPERM: A failure occurred, unable to create node. 121 * -EPERM: A failure occurred, unable to create node.
150 * Requires: 122 * Requires:
151 * disp_init(void) called.
152 * Valid disp_obj. 123 * Valid disp_obj.
153 * pargs != NULL. 124 * pargs != NULL.
154 * hnode != NULL. 125 * hnode != NULL.
@@ -178,7 +149,6 @@ extern int disp_node_create(struct disp_object *disp_obj,
178 * 0: Success. 149 * 0: Success.
179 * -ETIME: A timeout occurred before the DSP responded. 150 * -ETIME: A timeout occurred before the DSP responded.
180 * Requires: 151 * Requires:
181 * disp_init(void) called.
182 * Valid disp_obj. 152 * Valid disp_obj.
183 * hnode != NULL. 153 * hnode != NULL.
184 * Ensures: 154 * Ensures:
@@ -204,7 +174,6 @@ extern int disp_node_delete(struct disp_object *disp_obj,
204 * 0: Success. 174 * 0: Success.
205 * -ETIME: A timeout occurred before the DSP responded. 175 * -ETIME: A timeout occurred before the DSP responded.
206 * Requires: 176 * Requires:
207 * disp_init(void) called.
208 * Valid disp_obj. 177 * Valid disp_obj.
209 * hnode != NULL. 178 * hnode != NULL.
210 * Ensures: 179 * Ensures:
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
index 6c58335c5f60..c3487be8fcf5 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dmm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
@@ -61,10 +61,6 @@ extern int dmm_create(struct dmm_object **dmm_manager,
                       struct dev_object *hdev_obj,
                       const struct dmm_mgrattrs *mgr_attrts);
 
-extern bool dmm_init(void);
-
-extern void dmm_exit(void);
-
 extern int dmm_create_tables(struct dmm_object *dmm_mgr,
                              u32 addr, u32 size);
 
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h
index 9cdbd955dce9..b0c7708321b2 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/drv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h
@@ -199,17 +199,6 @@ extern int drv_create(struct drv_object **drv_obj);
 extern int drv_destroy(struct drv_object *driver_obj);
 
 /*
- * ======== drv_exit ========
- * Purpose:
- * Exit the DRV module, freeing any modules initialized in drv_init.
- * Parameters:
- * Returns:
- * Requires:
- * Ensures:
- */
-extern void drv_exit(void);
-
-/*
  * ======== drv_get_first_dev_object ========
  * Purpose:
  * Returns the Ptr to the FirstDev Object in the List
@@ -294,18 +283,6 @@ extern u32 drv_get_next_dev_object(u32 hdev_obj);
 extern u32 drv_get_next_dev_extension(u32 dev_extension);
 
 /*
- * ======== drv_init ========
- * Purpose:
- * Initialize the DRV module.
- * Parameters:
- * Returns:
- * TRUE if success; FALSE otherwise.
- * Requires:
- * Ensures:
- */
-extern int drv_init(void);
-
-/*
  * ======== drv_insert_dev_object ========
  * Purpose:
  * Insert a DeviceObject into the list of Driver object.
diff --git a/drivers/staging/tidspbridge/include/dspbridge/gh.h b/drivers/staging/tidspbridge/include/dspbridge/gh.h
index 9de291d1f566..da85079dbfb6 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/gh.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/gh.h
@@ -23,9 +23,7 @@ extern struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
                                        bool(*match) (void *, void *),
                                        void (*delete) (void *));
 extern void gh_delete(struct gh_t_hash_tab *hash_tab);
-extern void gh_exit(void);
 extern void *gh_find(struct gh_t_hash_tab *hash_tab, void *key);
-extern void gh_init(void);
 extern void *gh_insert(struct gh_t_hash_tab *hash_tab, void *key, void *value);
 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
 void gh_iterate(struct gh_t_hash_tab *hash_tab,
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io.h b/drivers/staging/tidspbridge/include/dspbridge/io.h
index 500bbd71684d..750571856908 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/io.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/io.h
@@ -55,7 +55,6 @@ struct io_attrs {
  * -EINVAL: Invalid DSP word size (must be > 0).
  * Invalid base address for DSP communications.
  * Requires:
- * io_init(void) called.
  * io_man != NULL.
  * mgr_attrts != NULL.
  * Ensures:
@@ -74,36 +73,8 @@ extern int io_create(struct io_mgr **io_man,
  * 0: Success.
  * -EFAULT: hio_mgr was invalid.
  * Requires:
- * io_init(void) called.
  * Ensures:
  */
 extern int io_destroy(struct io_mgr *hio_mgr);
 
-/*
- * ======== io_exit ========
- * Purpose:
- * Discontinue usage of the IO module.
- * Parameters:
- * Returns:
- * Requires:
- * io_init(void) previously called.
- * Ensures:
- * Resources, if any acquired in io_init(void), are freed when the last
- * client of IO calls io_exit(void).
- */
-extern void io_exit(void);
-
-/*
- * ======== io_init ========
- * Purpose:
- * Initialize the IO module's private state.
- * Parameters:
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * A requirement for each of the other public CHNL functions.
- */
-extern bool io_init(void);
-
 #endif /* CHNL_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
index a054dad21333..903ff12b14de 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
@@ -154,8 +154,6 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context);
 
 void dump_dl_modules(struct bridge_dev_context *bridge_context);
 
-#endif
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
 void print_dsp_debug_trace(struct io_mgr *hio_mgr);
 #endif
 
diff --git a/drivers/staging/tidspbridge/include/dspbridge/msg.h b/drivers/staging/tidspbridge/include/dspbridge/msg.h
index 95778bcb5aae..2c8712c933fc 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/msg.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/msg.h
@@ -34,7 +34,6 @@
  * msg_callback: Called whenever an RMS_EXIT message is received.
  * Returns:
  * Requires:
- * msg_mod_init(void) called.
  * msg_man != NULL.
  * hdev_obj != NULL.
  * msg_callback != NULL.
@@ -52,35 +51,9 @@ extern int msg_create(struct msg_mgr **msg_man,
  * hmsg_mgr: Handle returned from msg_create().
  * Returns:
  * Requires:
- * msg_mod_init(void) called.
  * Valid hmsg_mgr.
  * Ensures:
  */
 extern void msg_delete(struct msg_mgr *hmsg_mgr);
 
-/*
- * ======== msg_exit ========
- * Purpose:
- * Discontinue usage of msg_ctrl module.
- * Parameters:
- * Returns:
- * Requires:
- * msg_mod_init(void) successfully called before.
- * Ensures:
- * Any resources acquired in msg_mod_init(void) will be freed when last
- * msg_ctrl client calls msg_exit(void).
- */
-extern void msg_exit(void);
-
-/*
- * ======== msg_mod_init ========
- * Purpose:
- * Initialize the msg_ctrl module.
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Ensures:
- */
-extern bool msg_mod_init(void);
-
 #endif /* MSG_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nldr.h b/drivers/staging/tidspbridge/include/dspbridge/nldr.h
index d9653ee667e1..c5e48ca6c548 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/nldr.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/nldr.h
@@ -36,7 +36,6 @@ extern int nldr_create(struct nldr_object **nldr,
                        const struct nldr_attrs *pattrs);
 
 extern void nldr_delete(struct nldr_object *nldr_obj);
-extern void nldr_exit(void);
 
 extern int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
                              char *str_fxn, u32 * addr);
@@ -44,7 +43,6 @@ extern int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
 extern int nldr_get_rmm_manager(struct nldr_object *nldr,
                                 struct rmm_target_obj **rmm_mgr);
 
-extern bool nldr_init(void);
 extern int nldr_load(struct nldr_nodeobject *nldr_node_obj,
                      enum nldr_phase phase);
 extern int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h b/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
index ee3a85f08fc3..7e3c7f58b496 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
@@ -119,7 +119,6 @@ enum nldr_phase {
  * 0: Success.
  * -ENOMEM: Insufficient memory on GPP.
  * Requires:
- * nldr_init(void) called.
  * Valid nldr_obj.
  * node_props != NULL.
  * nldr_nodeobj != NULL.
@@ -148,7 +147,6 @@ typedef int(*nldr_allocatefxn) (struct nldr_object *nldr_obj,
  * 0: Success;
  * -ENOMEM: Insufficient memory for requested resources.
  * Requires:
- * nldr_init(void) called.
  * nldr != NULL.
  * hdev_obj != NULL.
  * pattrs != NULL.
@@ -168,7 +166,6 @@ typedef int(*nldr_createfxn) (struct nldr_object **nldr,
  * nldr_obj: Node manager object.
  * Returns:
  * Requires:
- * nldr_init(void) called.
  * Valid nldr_obj.
  * Ensures:
  * nldr_obj invalid
@@ -176,20 +173,6 @@ typedef int(*nldr_createfxn) (struct nldr_object **nldr,
 typedef void (*nldr_deletefxn) (struct nldr_object *nldr_obj);
 
 /*
- * ======== nldr_exit ========
- * Discontinue usage of NLDR module.
- *
- * Parameters:
- * Returns:
- * Requires:
- * nldr_init(void) successfully called before.
- * Ensures:
- * Any resources acquired in nldr_init(void) will be freed when last NLDR
- * client calls nldr_exit(void).
- */
-typedef void (*nldr_exitfxn) (void);
-
-/*
  * ======== NLDR_Free ========
  * Free resources allocated in nldr_allocate.
  *
@@ -197,7 +180,6 @@ typedef void (*nldr_exitfxn) (void);
  * nldr_node_obj: Handle returned from nldr_allocate().
  * Returns:
  * Requires:
- * nldr_init(void) called.
  * Valid nldr_node_obj.
  * Ensures:
  */
@@ -216,7 +198,6 @@ typedef void (*nldr_freefxn) (struct nldr_nodeobject *nldr_node_obj);
  * 0: Success.
  * -ESPIPE: Address of function not found.
  * Requires:
- * nldr_init(void) called.
  * Valid nldr_node_obj.
  * addr != NULL;
  * str_fxn != NULL;
@@ -227,17 +208,6 @@ typedef int(*nldr_getfxnaddrfxn) (struct nldr_nodeobject
                                   char *str_fxn, u32 * addr);
 
 /*
- * ======== nldr_init ========
- * Initialize the NLDR module.
- *
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Ensures:
- */
-typedef bool(*nldr_initfxn) (void);
-
-/*
  * ======== nldr_load ========
  * Load create, delete, or execute phase function of a node on the DSP.
  *
@@ -251,7 +221,6 @@ typedef bool(*nldr_initfxn) (void);
  * is already in use.
  * -EILSEQ: Failure in dynamic loader library.
  * Requires:
- * nldr_init(void) called.
  * Valid nldr_node_obj.
  * Ensures:
  */
@@ -269,7 +238,6 @@ typedef int(*nldr_loadfxn) (struct nldr_nodeobject *nldr_node_obj,
  * 0: Success.
  * -ENOMEM: Insufficient memory on GPP.
  * Requires:
- * nldr_init(void) called.
  * Valid nldr_node_obj.
  * Ensures:
  */
@@ -283,9 +251,7 @@ struct node_ldr_fxns {
         nldr_allocatefxn allocate;
         nldr_createfxn create;
         nldr_deletefxn delete;
-        nldr_exitfxn exit;
         nldr_getfxnaddrfxn get_fxn_addr;
-        nldr_initfxn init;
         nldr_loadfxn load;
         nldr_unloadfxn unload;
 };
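
With the init and exit members gone, struct node_ldr_fxns shrinks to the six operations that act on a loader instance. A hypothetical initializer for the slimmed table (member order follows the struct above; the instance and function names are illustrative, not taken from this patch):

        /* Illustrative initializer; names assumed, not quoted from the diff. */
        static struct node_ldr_fxns nldr_fxns = {
                nldr_allocate,          /* allocate */
                nldr_create,            /* create */
                nldr_delete,            /* delete */
                nldr_get_fxn_addr,      /* get_fxn_addr */
                nldr_load,              /* load */
                nldr_unload,            /* unload */
        };

Callers that previously had to go through fxns.init() before fxns.create() can now call create() directly.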
diff --git a/drivers/staging/tidspbridge/include/dspbridge/node.h b/drivers/staging/tidspbridge/include/dspbridge/node.h
index 16371d818e3d..7397b7a12f7a 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/node.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/node.h
@@ -47,7 +47,6 @@
  * -EPERM: A failure occurred, unable to allocate node.
  * -EBADR: Proccessor is not in the running state.
  * Requires:
- * node_init(void) called.
  * hprocessor != NULL.
  * node_uuid != NULL.
  * noderes != NULL.
@@ -81,7 +80,6 @@ extern int node_allocate(struct proc_object *hprocessor,
  * -EPERM: General Failure.
  * -EINVAL: Invalid Size.
  * Requires:
- * node_init(void) called.
  * pbuffer != NULL.
  * Ensures:
  */
@@ -106,7 +104,6 @@ extern int node_alloc_msg_buf(struct node_object *hnode,
  * or NODE_RUNNING state.
  * -ETIME: A timeout occurred before the DSP responded.
  * Requires:
- * node_init(void) called.
  * Ensures:
  * 0 && (Node's current priority == prio)
  */
@@ -157,7 +154,6 @@ extern int node_change_priority(struct node_object *hnode, s32 prio);
  * Device node to device node, or device node to
  * GPP), the two nodes are on different DSPs.
  * Requires:
- * node_init(void) called.
  * Ensures:
  */
 extern int node_connect(struct node_object *node1,
@@ -185,7 +181,6 @@ extern int node_connect(struct node_object *node1,
  * -ETIME: A timeout occurred before the DSP responded.
  * -EPERM: A failure occurred, unable to create node.
  * Requires:
- * node_init(void) called.
  * Ensures:
  */
 extern int node_create(struct node_object *hnode);
@@ -206,7 +201,6 @@ extern int node_create(struct node_object *hnode);
  * -ENOMEM: Insufficient memory for requested resources.
  * -EPERM: General failure.
  * Requires:
- * node_init(void) called.
  * node_man != NULL.
  * hdev_obj != NULL.
  * Ensures:
@@ -234,7 +228,6 @@ extern int node_create_mgr(struct node_mgr **node_man,
  * -EPERM: A failure occurred in deleting the node.
  * -ESPIPE: Delete function not found in the COFF file.
  * Requires:
- * node_init(void) called.
  * Ensures:
  * 0: hnode is invalid.
  */
@@ -250,7 +243,6 @@ extern int node_delete(struct node_res_object *noderes,
  * Returns:
  * 0: Success.
  * Requires:
- * node_init(void) called.
  * Valid hnode_mgr.
  * Ensures:
  */
@@ -287,20 +279,6 @@ extern int node_enum_nodes(struct node_mgr *hnode_mgr,
                            u32 *pu_allocated);
 
 /*
- * ======== node_exit ========
- * Purpose:
- * Discontinue usage of NODE module.
- * Parameters:
- * Returns:
- * Requires:
- * node_init(void) successfully called before.
- * Ensures:
- * Any resources acquired in node_init(void) will be freed when last NODE
- * client calls node_exit(void).
- */
-extern void node_exit(void);
-
-/*
  * ======== node_free_msg_buf ========
  * Purpose:
  * Free a message buffer previously allocated with node_alloc_msg_buf.
@@ -313,7 +291,6 @@ extern void node_exit(void);
  * -EFAULT: Invalid node handle.
  * -EPERM: Failure to free the buffer.
  * Requires:
- * node_init(void) called.
  * pbuffer != NULL.
  * Ensures:
  */
@@ -336,7 +313,6 @@ extern int node_free_msg_buf(struct node_object *hnode,
  * 0: Success.
  * -EFAULT: Invalid hnode.
  * Requires:
- * node_init(void) called.
  * pattr != NULL.
  * Ensures:
  * 0: *pattrs contains the node's current attributes.
@@ -363,7 +339,6 @@ extern int node_get_attr(struct node_object *hnode,
  * Error occurred while trying to retrieve a message.
  * -ETIME: Timeout occurred and no message is available.
  * Requires:
- * node_init(void) called.
  * message != NULL.
  * Ensures:
  */
@@ -386,17 +361,6 @@ extern int node_get_nldr_obj(struct node_mgr *hnode_mgr,
                              struct nldr_object **nldr_ovlyobj);
 
 /*
- * ======== node_init ========
- * Purpose:
- * Initialize the NODE module.
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Ensures:
- */
-extern bool node_init(void);
-
-/*
  * ======== node_on_exit ========
  * Purpose:
  * Gets called when RMS_EXIT is received for a node. PROC needs to pass
@@ -425,7 +389,6 @@ void node_on_exit(struct node_object *hnode, s32 node_status);
  * -ETIME: A timeout occurred before the DSP responded.
  * DSP_EWRONGSTSATE: Node is not in NODE_RUNNING state.
  * Requires:
- * node_init(void) called.
  * Ensures:
  */
 extern int node_pause(struct node_object *hnode);
@@ -449,7 +412,6 @@ extern int node_pause(struct node_object *hnode);
  * -ETIME: Timeout occurred before message could be set.
  * -EBADR: Node is in invalid state for sending messages.
  * Requires:
- * node_init(void) called.
  * pmsg != NULL.
  * Ensures:
  */
@@ -473,7 +435,6 @@ extern int node_put_message(struct node_object *hnode,
  * -ENOSYS: Notification type specified by notify_type is not
  * supported.
  * Requires:
- * node_init(void) called.
  * hnotification != NULL.
  * Ensures:
  */
@@ -500,7 +461,6 @@ extern int node_register_notify(struct node_object *hnode,
  * DSP_EWRONGSTSATE: Node is not in NODE_PAUSED or NODE_CREATED state.
  * -ESPIPE: Execute function not found in the COFF file.
  * Requires:
- * node_init(void) called.
  * Ensures:
  */
 extern int node_run(struct node_object *hnode);
@@ -523,7 +483,6 @@ extern int node_run(struct node_object *hnode);
  * Unable to terminate the node.
  * -EBADR: Operation not valid for the current node state.
  * Requires:
- * node_init(void) called.
  * pstatus != NULL.
  * Ensures:
  */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h b/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
index 9c1e06758c89..d5b54bb81e8e 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
@@ -96,7 +96,6 @@ struct node_createargs {
  * -EINVAL: The node's stream corresponding to index and dir
  * is not a stream to or from the host.
  * Requires:
- * node_init(void) called.
  * Valid dir.
  * chan_id != NULL.
  * Ensures:
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h
index f00dffd51989..a82380ebc041 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/proc.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h
@@ -189,20 +189,6 @@ extern int proc_get_resource_info(void *hprocessor,
                                   u32 resource_info_size);
 
 /*
- * ======== proc_exit ========
- * Purpose:
- * Decrement reference count, and free resources when reference count is
- * 0.
- * Parameters:
- * Returns:
- * Requires:
- * PROC is initialized.
- * Ensures:
- * When reference count == 0, PROC's private resources are freed.
- */
-extern void proc_exit(void);
-
-/*
  * ======== proc_get_dev_object =========
  * Purpose:
  * Returns the DEV Hanlde for a given Processor handle
@@ -223,20 +209,6 @@ extern int proc_get_dev_object(void *hprocessor,
                                struct dev_object **device_obj);
 
 /*
- * ======== proc_init ========
- * Purpose:
- * Initialize PROC's private state, keeping a reference count on each
- * call.
- * Parameters:
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * TRUE: A requirement for the other public PROC functions.
- */
-extern bool proc_init(void);
-
-/*
  * ======== proc_get_state ========
  * Purpose:
  * Report the state of the specified DSP processor.
diff --git a/drivers/staging/tidspbridge/include/dspbridge/rmm.h b/drivers/staging/tidspbridge/include/dspbridge/rmm.h
index baea536681e9..f7a4dc8ecb4f 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/rmm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/rmm.h
@@ -115,18 +115,6 @@ extern int rmm_create(struct rmm_target_obj **target_obj,
 extern void rmm_delete(struct rmm_target_obj *target);
 
 /*
- * ======== rmm_exit ========
- * Exit the RMM module
- *
- * Parameters:
- * Returns:
- * Requires:
- * rmm_init successfully called.
- * Ensures:
- */
-extern void rmm_exit(void);
-
-/*
  * ======== rmm_free ========
  * Free or unreserve memory allocated through rmm_alloc().
  *
@@ -148,19 +136,6 @@ extern bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr,
                      u32 size, bool reserved);
 
 /*
- * ======== rmm_init ========
- * Initialize the RMM module
- *
- * Parameters:
- * Returns:
- * TRUE: Success.
- * FALSE: Failure.
- * Requires:
- * Ensures:
- */
-extern bool rmm_init(void);
-
-/*
  * ======== rmm_stat ========
  * Obtain memory segment status
  *
diff --git a/drivers/staging/tidspbridge/include/dspbridge/strm.h b/drivers/staging/tidspbridge/include/dspbridge/strm.h
index 613fe53dd239..dacf0c234fd1 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/strm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/strm.h
@@ -40,7 +40,6 @@
  * -EPERM: Failure occurred, unable to allocate buffers.
  * -EINVAL: usize must be > 0 bytes.
  * Requires:
- * strm_init(void) called.
  * ap_buffer != NULL.
  * Ensures:
  */
@@ -63,7 +62,6 @@ extern int strm_allocate_buffer(struct strm_res_object *strmres,
  * been reclaimed.
  * -EPERM: Failure to close stream.
  * Requires:
- * strm_init(void) called.
  * Ensures:
  */
 extern int strm_close(struct strm_res_object *strmres,
@@ -83,7 +81,6 @@ extern int strm_close(struct strm_res_object *strmres,
  * -ENOMEM: Insufficient memory for requested resources.
  * -EPERM: General failure.
  * Requires:
- * strm_init(void) called.
  * strm_man != NULL.
  * dev_obj != NULL.
  * Ensures:
@@ -101,7 +98,6 @@ extern int strm_create(struct strm_mgr **strm_man,
  * strm_mgr_obj: Handle to STRM manager object from strm_create.
  * Returns:
  * Requires:
- * strm_init(void) called.
  * Valid strm_mgr_obj.
  * Ensures:
  * strm_mgr_obj is not valid.
@@ -109,18 +105,6 @@ extern int strm_create(struct strm_mgr **strm_man,
 extern void strm_delete(struct strm_mgr *strm_mgr_obj);
 
 /*
- * ======== strm_exit ========
- * Purpose:
- * Discontinue usage of STRM module.
- * Parameters:
- * Returns:
- * Requires:
- * strm_init(void) successfully called before.
- * Ensures:
- */
-extern void strm_exit(void);
-
-/*
  * ======== strm_free_buffer ========
  * Purpose:
  * Free buffer(s) allocated with strm_allocate_buffer.
@@ -133,7 +117,6 @@ extern void strm_exit(void);
  * -EFAULT: Invalid stream handle.
  * -EPERM: Failure occurred, unable to free buffers.
  * Requires:
- * strm_init(void) called.
  * ap_buffer != NULL.
  * Ensures:
  */
@@ -156,7 +139,6 @@ extern int strm_free_buffer(struct strm_res_object *strmres,
  * -EINVAL: stream_info_size < sizeof(dsp_streaminfo).
  * -EPERM: Unable to get stream info.
  * Requires:
- * strm_init(void) called.
  * stream_info != NULL.
  * Ensures:
  */
@@ -184,24 +166,11 @@ extern int strm_get_info(struct strm_object *stream_obj,
  * -ETIME: A timeout occurred before the stream could be idled.
  * -EPERM: Unable to idle stream.
  * Requires:
- * strm_init(void) called.
  * Ensures:
  */
 extern int strm_idle(struct strm_object *stream_obj, bool flush_data);
 
 /*
- * ======== strm_init ========
- * Purpose:
- * Initialize the STRM module.
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Requires:
- * Ensures:
- */
-extern bool strm_init(void);
-
-/*
  * ======== strm_issue ========
  * Purpose:
  * Send a buffer of data to a stream.
@@ -217,8 +186,7 @@ extern bool strm_init(void);
  * -ENOSR: The stream is full.
  * -EPERM: Failure occurred, unable to issue buffer.
  * Requires:
- * strm_init(void) called.
  * pbuf != NULL.
  * Ensures:
  */
 extern int strm_issue(struct strm_object *stream_obj, u8 * pbuf,
@@ -244,7 +212,6 @@ extern int strm_issue(struct strm_object *stream_obj, u8 * pbuf,
  * Unable to open stream.
  * -EINVAL: Invalid index.
  * Requires:
- * strm_init(void) called.
  * strmres != NULL.
  * pattr != NULL.
  * Ensures:
@@ -275,7 +242,6 @@ extern int strm_open(struct node_object *hnode, u32 dir,
  * retrieved.
  * -EPERM: Failure occurred, unable to reclaim buffer.
  * Requires:
- * strm_init(void) called.
  * buf_ptr != NULL.
  * nbytes != NULL.
  * pdw_arg != NULL.
@@ -302,7 +268,6 @@ extern int strm_reclaim(struct strm_object *stream_obj,
  * -ENOSYS: Notification type specified by notify_type is not
  * supported.
  * Requires:
- * strm_init(void) called.
  * hnotification != NULL.
  * Ensures:
  */
@@ -328,7 +293,6 @@ extern int strm_register_notify(struct strm_object *stream_obj,
  * -ETIME: A timeout occurred before a stream became ready.
  * -EPERM: Failure occurred, unable to select a stream.
  * Requires:
- * strm_init(void) called.
  * strm_tab != NULL.
  * strms > 0.
  * pmask != NULL.
diff --git a/drivers/staging/tidspbridge/pmgr/chnl.c b/drivers/staging/tidspbridge/pmgr/chnl.c
index 245de82e2d67..4bd8686f2355 100644
--- a/drivers/staging/tidspbridge/pmgr/chnl.c
+++ b/drivers/staging/tidspbridge/pmgr/chnl.c
@@ -24,9 +24,6 @@
 /* ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
 
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
 /* ----------------------------------- OS Adaptation Layer */
 #include <dspbridge/sync.h>
 
@@ -41,9 +38,6 @@
 /* ----------------------------------- This */
 #include <dspbridge/chnl.h>
 
-/* ----------------------------------- Globals */
-static u32 refs;
-
 /*
  * ======== chnl_create ========
  * Purpose:
@@ -58,10 +52,6 @@ int chnl_create(struct chnl_mgr **channel_mgr,
         struct chnl_mgr *hchnl_mgr;
         struct chnl_mgr_ *chnl_mgr_obj = NULL;
 
-        DBC_REQUIRE(refs > 0);
-        DBC_REQUIRE(channel_mgr != NULL);
-        DBC_REQUIRE(mgr_attrts != NULL);
-
         *channel_mgr = NULL;
 
         /* Validate args: */
@@ -99,8 +89,6 @@ int chnl_create(struct chnl_mgr **channel_mgr,
                 }
         }
 
-        DBC_ENSURE(status || chnl_mgr_obj);
-
         return status;
 }
 
@@ -115,8 +103,6 @@ int chnl_destroy(struct chnl_mgr *hchnl_mgr)
         struct bridge_drv_interface *intf_fxns;
         int status;
 
-        DBC_REQUIRE(refs > 0);
-
         if (chnl_mgr_obj) {
                 intf_fxns = chnl_mgr_obj->intf_fxns;
                 /* Let Bridge channel module destroy the chnl_mgr: */
@@ -127,36 +113,3 @@ int chnl_destroy(struct chnl_mgr *hchnl_mgr)
 
         return status;
 }
-
-/*
- * ======== chnl_exit ========
- * Purpose:
- * Discontinue usage of the CHNL module.
- */
-void chnl_exit(void)
-{
-        DBC_REQUIRE(refs > 0);
-
-        refs--;
-
-        DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== chnl_init ========
- * Purpose:
- * Initialize the CHNL module's private state.
- */
-bool chnl_init(void)
-{
-        bool ret = true;
-
-        DBC_REQUIRE(refs >= 0);
-
-        if (ret)
-                refs++;
-
-        DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
-        return ret;
-}
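
The DBC_REQUIRE/DBC_ENSURE calls stripped from chnl_create() and chnl_destroy() above (and from cmm.c, cod.c, and dbll.c below) come from dspbridge's design-by-contract header, dbc.h. Assuming definitions along the usual lines — this sketch is inferred from the pr_err() replacement used in cmm.c below, not quoted from dbc.h — the macros compile away entirely on non-debug builds, which is why their removal cannot change behavior there:

        /* Assumed shape of the retired dbc.h contract macros. */
        #ifdef CONFIG_TIDSPBRIDGE_DEBUG
        #define DBC_ASSERT(exp) \
                do { \
                        if (!(exp)) \
                                pr_err("%s, line %d: Assertion (" #exp ") failed.\n", \
                                       __FILE__, __LINE__); \
                } while (0)
        #define DBC_REQUIRE DBC_ASSERT
        #define DBC_ENSURE DBC_ASSERT
        #else
        #define DBC_ASSERT(exp)
        #define DBC_REQUIRE(exp)
        #define DBC_ENSURE(exp)
        #endif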
diff --git a/drivers/staging/tidspbridge/pmgr/cmm.c b/drivers/staging/tidspbridge/pmgr/cmm.c
index e6b2c8962f81..4a800dadd703 100644
--- a/drivers/staging/tidspbridge/pmgr/cmm.c
+++ b/drivers/staging/tidspbridge/pmgr/cmm.c
@@ -35,9 +35,6 @@
 /* ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
 
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
 /* ----------------------------------- OS Adaptation Layer */
 #include <dspbridge/sync.h>
 
@@ -134,9 +131,6 @@ struct cmm_mnode {
         u32 client_proc; /* Process that allocated this mem block */
 };
 
-/* ----------------------------------- Globals */
-static u32 refs; /* module reference count */
-
 /* ----------------------------------- Function Prototypes */
 static void add_to_free_list(struct cmm_allocator *allocator,
                              struct cmm_mnode *pnode);
@@ -244,9 +238,6 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
         struct cmm_object *cmm_obj = NULL;
         int status = 0;
 
-        DBC_REQUIRE(refs > 0);
-        DBC_REQUIRE(ph_cmm_mgr != NULL);
-
         *ph_cmm_mgr = NULL;
         /* create, zero, and tag a cmm mgr object */
         cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
@@ -256,8 +247,6 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
         if (mgr_attrts == NULL)
                 mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */
 
-        /* 4 bytes minimum */
-        DBC_ASSERT(mgr_attrts->min_block_size >= 4);
         /* save away smallest block allocation for this cmm mgr */
         cmm_obj->min_block_size = mgr_attrts->min_block_size;
         cmm_obj->page_size = PAGE_SIZE;
@@ -283,7 +272,6 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
         s32 slot_seg;
         struct cmm_mnode *node, *tmp;
 
-        DBC_REQUIRE(refs > 0);
         if (!hcmm_mgr) {
                 status = -EFAULT;
                 return status;
@@ -326,19 +314,6 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
 }
 
 /*
- * ======== cmm_exit ========
- * Purpose:
- * Discontinue usage of module; free resources when reference count
- * reaches 0.
- */
-void cmm_exit(void)
-{
-        DBC_REQUIRE(refs > 0);
-
-        refs--;
-}
-
-/*
  * ======== cmm_free_buf ========
  * Purpose:
  * Free the given buffer.
@@ -351,9 +326,6 @@ int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
         struct cmm_allocator *allocator;
         struct cmm_attrs *pattrs;
 
-        DBC_REQUIRE(refs > 0);
-        DBC_REQUIRE(buf_pa != NULL);
-
         if (ul_seg_id == 0) {
                 pattrs = &cmm_dfltalctattrs;
                 ul_seg_id = pattrs->seg_id;
@@ -392,8 +364,6 @@ int cmm_get_handle(void *hprocessor, struct cmm_object ** ph_cmm_mgr)
         int status = 0;
         struct dev_object *hdev_obj;
 
-        DBC_REQUIRE(refs > 0);
-        DBC_REQUIRE(ph_cmm_mgr != NULL);
         if (hprocessor != NULL)
                 status = proc_get_dev_object(hprocessor, &hdev_obj);
         else
@@ -419,8 +389,6 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
         struct cmm_allocator *altr;
         struct cmm_mnode *curr;
 
-        DBC_REQUIRE(cmm_info_obj != NULL);
-
         if (!hcmm_mgr) {
                 status = -EFAULT;
                 return status;
@@ -464,24 +432,6 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
 }
 
 /*
- * ======== cmm_init ========
- * Purpose:
- * Initializes private state of CMM module.
- */
-bool cmm_init(void)
-{
-        bool ret = true;
-
-        DBC_REQUIRE(refs >= 0);
-        if (ret)
-                refs++;
-
-        DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
-        return ret;
-}
-
-/*
  * ======== cmm_register_gppsm_seg ========
  * Purpose:
  * Register a block of SM with the CMM to be used for later GPP SM
@@ -499,13 +449,6 @@ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
         struct cmm_mnode *new_node;
         s32 slot_seg;
 
-        DBC_REQUIRE(ul_size > 0);
-        DBC_REQUIRE(sgmt_id != NULL);
-        DBC_REQUIRE(dw_gpp_base_pa != 0);
-        DBC_REQUIRE(gpp_base_va != 0);
-        DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
-                    (c_factor >= CMM_SUBFROMDSPPA));
-
         dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
                 "dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
                 __func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
@@ -589,7 +532,6 @@ int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
         struct cmm_allocator *psma;
         u32 ul_id = ul_seg_id;
 
-        DBC_REQUIRE(ul_seg_id > 0);
         if (!hcmm_mgr)
                 return -EFAULT;
 
@@ -635,8 +577,6 @@ static void un_register_gppsm_seg(struct cmm_allocator *psma)
 {
         struct cmm_mnode *curr, *tmp;
 
-        DBC_REQUIRE(psma != NULL);
-
         /* free nodes on free list */
         list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
                 list_del(&curr->link);
@@ -664,7 +604,6 @@ static void un_register_gppsm_seg(struct cmm_allocator *psma)
 static s32 get_slot(struct cmm_object *cmm_mgr_obj)
 {
         s32 slot_seg = -1; /* neg on failure */
-        DBC_REQUIRE(cmm_mgr_obj != NULL);
         /* get first available slot in cmm mgr SMSegTab[] */
         for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
                 if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
@@ -687,11 +626,6 @@ static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
 {
         struct cmm_mnode *pnode;
 
-        DBC_REQUIRE(cmm_mgr_obj != NULL);
-        DBC_REQUIRE(dw_pa != 0);
-        DBC_REQUIRE(dw_va != 0);
-        DBC_REQUIRE(ul_size != 0);
-
         /* Check cmm mgr's node freelist */
         if (list_empty(&cmm_mgr_obj->node_free_list)) {
                 pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
@@ -719,7 +653,6 @@ static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
  */
 static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
 {
-        DBC_REQUIRE(pnode != NULL);
         list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
 }
 
@@ -794,9 +727,6 @@ static void add_to_free_list(struct cmm_allocator *allocator,
 static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
                                            u32 ul_seg_id)
 {
-        DBC_REQUIRE(cmm_mgr_obj != NULL);
-        DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));
-
         return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
 }
 
@@ -818,10 +748,6 @@ int cmm_xlator_create(struct cmm_xlatorobject **xlator,
         struct cmm_xlator *xlator_object = NULL;
         int status = 0;
 
-        DBC_REQUIRE(refs > 0);
-        DBC_REQUIRE(xlator != NULL);
-        DBC_REQUIRE(hcmm_mgr != NULL);
-
         *xlator = NULL;
         if (xlator_attrs == NULL)
                 xlator_attrs = &cmm_dfltxlatorattrs; /* set defaults */
@@ -851,13 +777,6 @@ void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
         void *tmp_va_buff;
         struct cmm_attrs attrs;
 
-        DBC_REQUIRE(refs > 0);
-        DBC_REQUIRE(xlator != NULL);
-        DBC_REQUIRE(xlator_obj->cmm_mgr != NULL);
-        DBC_REQUIRE(va_buf != NULL);
-        DBC_REQUIRE(pa_size > 0);
-        DBC_REQUIRE(xlator_obj->seg_id > 0);
-
         if (xlator_obj) {
                 attrs.seg_id = xlator_obj->seg_id;
                 __raw_writel(0, va_buf);
@@ -887,10 +806,6 @@ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
         int status = -EPERM;
         void *buf_pa = NULL;
 
-        DBC_REQUIRE(refs > 0);
-        DBC_REQUIRE(buf_va != NULL);
-        DBC_REQUIRE(xlator_obj->seg_id > 0);
-
         if (xlator_obj) {
                 /* convert Va to Pa so we can free it. */
                 buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
@@ -900,7 +815,8 @@ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
                 if (status) {
                         /* Uh oh, this shouldn't happen. Descriptor
                          * gone! */
-                        DBC_ASSERT(false); /* CMM is leaking mem */
+                        pr_err("%s, line %d: Assertion failed\n",
+                               __FILE__, __LINE__);
                 }
         }
 }
@@ -918,10 +834,6 @@ int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 ** paddr,
         struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
         int status = 0;
 
-        DBC_REQUIRE(refs > 0);
-        DBC_REQUIRE(paddr != NULL);
-        DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));
-
         if (xlator_obj) {
                 if (set_info) {
                         /* set translators virtual address range */
@@ -948,16 +860,11 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
         struct cmm_allocator *allocator = NULL;
         u32 dw_offset = 0;
 
-        DBC_REQUIRE(refs > 0);
-        DBC_REQUIRE(paddr != NULL);
-        DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));
-
         if (!xlator_obj)
                 goto loop_cont;
 
         cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
         /* get this translator's default SM allocator */
-        DBC_ASSERT(xlator_obj->seg_id > 0);
         allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
         if (!allocator)
                 goto loop_cont;
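
One assertion in cmm_xlator_free_buf() was judged worth keeping as a diagnostic, so the patch open-codes it as the pr_err() seen above rather than deleting it. For a one-off "should never happen" check like this, an equivalent idiom (hypothetical — not what the patch does) would be:

        /* Hypothetical alternative to the open-coded pr_err() above. */
        WARN(1, "cmm: buffer descriptor lost, shared memory is leaking\n");

WARN() logs a backtrace along with the message, at the cost of being noisier than a plain pr_err().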
diff --git a/drivers/staging/tidspbridge/pmgr/cod.c b/drivers/staging/tidspbridge/pmgr/cod.c
index 1a29264b5853..4007826f7abc 100644
--- a/drivers/staging/tidspbridge/pmgr/cod.c
+++ b/drivers/staging/tidspbridge/pmgr/cod.c
@@ -30,9 +30,6 @@
30/* ----------------------------------- DSP/BIOS Bridge */ 30/* ----------------------------------- DSP/BIOS Bridge */
31#include <dspbridge/dbdefs.h> 31#include <dspbridge/dbdefs.h>
32 32
33/* ----------------------------------- Trace & Debug */
34#include <dspbridge/dbc.h>
35
36/* ----------------------------------- Platform Manager */ 33/* ----------------------------------- Platform Manager */
37/* Include appropriate loader header file */ 34/* Include appropriate loader header file */
38#include <dspbridge/dbll.h> 35#include <dspbridge/dbll.h>
@@ -61,8 +58,6 @@ struct cod_libraryobj {
61 struct cod_manager *cod_mgr; 58 struct cod_manager *cod_mgr;
62}; 59};
63 60
64static u32 refs = 0L;
65
66static struct dbll_fxns ldr_fxns = { 61static struct dbll_fxns ldr_fxns = {
67 (dbll_close_fxn) dbll_close, 62 (dbll_close_fxn) dbll_close,
68 (dbll_create_fxn) dbll_create, 63 (dbll_create_fxn) dbll_create,
@@ -183,10 +178,6 @@ void cod_close(struct cod_libraryobj *lib)
183{ 178{
184 struct cod_manager *hmgr; 179 struct cod_manager *hmgr;
185 180
186 DBC_REQUIRE(refs > 0);
187 DBC_REQUIRE(lib != NULL);
188 DBC_REQUIRE(lib->cod_mgr);
189
190 hmgr = lib->cod_mgr; 181 hmgr = lib->cod_mgr;
191 hmgr->fxns.close_fxn(lib->dbll_lib); 182 hmgr->fxns.close_fxn(lib->dbll_lib);
192 183
@@ -208,9 +199,6 @@ int cod_create(struct cod_manager **mgr, char *str_zl_file)
208 struct dbll_attrs zl_attrs; 199 struct dbll_attrs zl_attrs;
209 int status = 0; 200 int status = 0;
210 201
211 DBC_REQUIRE(refs > 0);
212 DBC_REQUIRE(mgr != NULL);
213
214 /* assume failure */ 202 /* assume failure */
215 *mgr = NULL; 203 *mgr = NULL;
216 204
@@ -263,9 +251,6 @@ int cod_create(struct cod_manager **mgr, char *str_zl_file)
263 */ 251 */
264void cod_delete(struct cod_manager *cod_mgr_obj) 252void cod_delete(struct cod_manager *cod_mgr_obj)
265{ 253{
266 DBC_REQUIRE(refs > 0);
267 DBC_REQUIRE(cod_mgr_obj);
268
269 if (cod_mgr_obj->base_lib) { 254 if (cod_mgr_obj->base_lib) {
270 if (cod_mgr_obj->loaded) 255 if (cod_mgr_obj->loaded)
271 cod_mgr_obj->fxns.unload_fxn(cod_mgr_obj->base_lib, 256 cod_mgr_obj->fxns.unload_fxn(cod_mgr_obj->base_lib,
@@ -281,21 +266,6 @@ void cod_delete(struct cod_manager *cod_mgr_obj)
281} 266}
282 267
283/* 268/*
284 * ======== cod_exit ========
285 * Purpose:
286 * Discontinue usage of the COD module.
287 *
288 */
289void cod_exit(void)
290{
291 DBC_REQUIRE(refs > 0);
292
293 refs--;
294
295 DBC_ENSURE(refs >= 0);
296}
297
298/*
299 * ======== cod_get_base_lib ======== 269 * ======== cod_get_base_lib ========
300 * Purpose: 270 * Purpose:
301 * Get handle to the base image DBL library. 271 * Get handle to the base image DBL library.
@@ -305,10 +275,6 @@ int cod_get_base_lib(struct cod_manager *cod_mgr_obj,
305{ 275{
306 int status = 0; 276 int status = 0;
307 277
308 DBC_REQUIRE(refs > 0);
309 DBC_REQUIRE(cod_mgr_obj);
310 DBC_REQUIRE(plib != NULL);
311
312 *plib = (struct dbll_library_obj *)cod_mgr_obj->base_lib; 278 *plib = (struct dbll_library_obj *)cod_mgr_obj->base_lib;
313 279
314 return status; 280 return status;
@@ -322,10 +288,6 @@ int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *sz_name,
322{ 288{
323 int status = 0; 289 int status = 0;
324 290
325 DBC_REQUIRE(refs > 0);
326 DBC_REQUIRE(cod_mgr_obj);
327 DBC_REQUIRE(sz_name != NULL);
328
329 if (usize <= COD_MAXPATHLENGTH) 291 if (usize <= COD_MAXPATHLENGTH)
330 strncpy(sz_name, cod_mgr_obj->sz_zl_file, usize); 292 strncpy(sz_name, cod_mgr_obj->sz_zl_file, usize);
331 else 293 else
@@ -342,10 +304,6 @@ int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *sz_name,
342 */ 304 */
343int cod_get_entry(struct cod_manager *cod_mgr_obj, u32 *entry_pt) 305int cod_get_entry(struct cod_manager *cod_mgr_obj, u32 *entry_pt)
344{ 306{
345 DBC_REQUIRE(refs > 0);
346 DBC_REQUIRE(cod_mgr_obj);
347 DBC_REQUIRE(entry_pt != NULL);
348
349 *entry_pt = cod_mgr_obj->entry; 307 *entry_pt = cod_mgr_obj->entry;
350 308
351 return 0; 309 return 0;
@@ -361,10 +319,6 @@ int cod_get_loader(struct cod_manager *cod_mgr_obj,
361{ 319{
362 int status = 0; 320 int status = 0;
363 321
364 DBC_REQUIRE(refs > 0);
365 DBC_REQUIRE(cod_mgr_obj);
366 DBC_REQUIRE(loader != NULL);
367
368 *loader = (struct dbll_tar_obj *)cod_mgr_obj->target; 322 *loader = (struct dbll_tar_obj *)cod_mgr_obj->target;
369 323
370 return status; 324 return status;
@@ -382,13 +336,6 @@ int cod_get_section(struct cod_libraryobj *lib, char *str_sect,
 	struct cod_manager *cod_mgr_obj;
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(lib != NULL);
-	DBC_REQUIRE(lib->cod_mgr);
-	DBC_REQUIRE(str_sect != NULL);
-	DBC_REQUIRE(addr != NULL);
-	DBC_REQUIRE(len != NULL);
-
 	*addr = 0;
 	*len = 0;
 	if (lib != NULL) {
@@ -399,8 +346,6 @@ int cod_get_section(struct cod_libraryobj *lib, char *str_sect,
 		status = -ESPIPE;
 	}
 
-	DBC_ENSURE(!status || ((*addr == 0) && (*len == 0)));
-
 	return status;
 }
 
@@ -417,11 +362,6 @@ int cod_get_sym_value(struct cod_manager *cod_mgr_obj, char *str_sym,
 {
 	struct dbll_sym_val *dbll_sym;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(cod_mgr_obj);
-	DBC_REQUIRE(str_sym != NULL);
-	DBC_REQUIRE(pul_value != NULL);
-
 	dev_dbg(bridge, "%s: cod_mgr_obj: %p str_sym: %s pul_value: %p\n",
 		__func__, cod_mgr_obj, str_sym, pul_value);
 	if (cod_mgr_obj->base_lib) {
@@ -442,25 +382,6 @@ int cod_get_sym_value(struct cod_manager *cod_mgr_obj, char *str_sym,
 }
 
 /*
- * ======== cod_init ========
- * Purpose:
- *      Initialize the COD module's private state.
- *
- */
-bool cod_init(void)
-{
-	bool ret = true;
-
-	DBC_REQUIRE(refs >= 0);
-
-	if (ret)
-		refs++;
-
-	DBC_ENSURE((ret && refs > 0) || (!ret && refs >= 0));
-	return ret;
-}
-
-/*
  * ======== cod_load_base ========
  * Purpose:
  *      Load the initial program image, optionally with command-line arguments,
@@ -482,14 +403,6 @@ int cod_load_base(struct cod_manager *cod_mgr_obj, u32 num_argc, char *args[],
 	int status;
 	u32 i;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(cod_mgr_obj);
-	DBC_REQUIRE(num_argc > 0);
-	DBC_REQUIRE(args != NULL);
-	DBC_REQUIRE(args[0] != NULL);
-	DBC_REQUIRE(pfn_write != NULL);
-	DBC_REQUIRE(cod_mgr_obj->base_lib != NULL);
-
 	/*
 	 * Make sure every argv[] stated in argc has a value, or change argc to
 	 * reflect true number in NULL terminated argv array.
@@ -538,12 +451,6 @@ int cod_open(struct cod_manager *hmgr, char *sz_coff_path,
 	int status = 0;
 	struct cod_libraryobj *lib = NULL;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(hmgr);
-	DBC_REQUIRE(sz_coff_path != NULL);
-	DBC_REQUIRE(flags == COD_NOLOAD || flags == COD_SYMB);
-	DBC_REQUIRE(lib_obj != NULL);
-
 	*lib_obj = NULL;
 
 	lib = kzalloc(sizeof(struct cod_libraryobj), GFP_KERNEL);
@@ -575,10 +482,6 @@ int cod_open_base(struct cod_manager *hmgr, char *sz_coff_path,
 	int status = 0;
 	struct dbll_library_obj *lib;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(hmgr);
-	DBC_REQUIRE(sz_coff_path != NULL);
-
 	/* if we previously opened a base image, close it now */
 	if (hmgr->base_lib) {
 		if (hmgr->loaded) {
@@ -612,12 +515,6 @@ int cod_read_section(struct cod_libraryobj *lib, char *str_sect,
 {
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(lib != NULL);
-	DBC_REQUIRE(lib->cod_mgr);
-	DBC_REQUIRE(str_sect != NULL);
-	DBC_REQUIRE(str_content != NULL);
-
 	if (lib != NULL)
 		status =
 		    lib->cod_mgr->fxns.read_sect_fxn(lib->dbll_lib, str_sect,
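The cod.c hunks above set the pattern for the whole series: every DBC_REQUIRE() precondition, DBC_ENSURE() postcondition and DBC_ASSERT() invariant is dropped, along with the cod_init()/cod_exit() pair whose only job was to maintain the refs counter those checks inspected; the runtime NULL guards the functions already carry (if (lib != NULL) and friends) are kept. For orientation, a sketch of the contract macro family being deleted, reconstructed from how the call sites use it rather than quoted from the removed <dspbridge/dbc.h>:

/*
 * Sketch of the removed <dspbridge/dbc.h> contract macros, inferred from
 * the call sites rather than copied from the header: with the bridge's
 * debug option set, a violated condition was logged; without it, all
 * three expanded to nothing, so deleting the call sites does not change
 * release behavior.
 */
#ifdef CONFIG_TIDSPBRIDGE_DEBUG
#define DBC_ASSERT(exp) \
	do { \
		if (!(exp)) \
			pr_err("%s, line %d: assertion (" #exp ") failed\n", \
			       __FILE__, __LINE__); \
	} while (0)
#define DBC_REQUIRE(exp)	DBC_ASSERT(exp)	/* precondition on entry */
#define DBC_ENSURE(exp)		DBC_ASSERT(exp)	/* postcondition on exit */
#else
#define DBC_ASSERT(exp)		do { } while (0)
#define DBC_REQUIRE(exp)	do { } while (0)
#define DBC_ENSURE(exp)		do { } while (0)
#endif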
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
index 31da62b14bc9..9f07036cd411 100644
--- a/drivers/staging/tidspbridge/pmgr/dbll.c
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -21,8 +21,6 @@
 /*  ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
 
-/*  ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
 #include <dspbridge/gh.h>
 
 /*  ----------------------------------- OS Adaptation Layer */
@@ -189,8 +187,6 @@ static u16 name_hash(void *key, u16 max_bucket);
 static bool name_match(void *key, void *sp);
 static void sym_delete(void *value);
 
-static u32 refs;		/* module reference count */
-
 /* Symbol Redefinition */
 static int redefined_symbol;
 static int gbl_search = 1;
@@ -202,9 +198,6 @@ void dbll_close(struct dbll_library_obj *zl_lib)
 {
 	struct dbll_tar_obj *zl_target;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(zl_lib);
-	DBC_REQUIRE(zl_lib->open_ref > 0);
 	zl_target = zl_lib->target_obj;
 	zl_lib->open_ref--;
 	if (zl_lib->open_ref == 0) {
@@ -241,10 +234,6 @@ int dbll_create(struct dbll_tar_obj **target_obj,
 	struct dbll_tar_obj *pzl_target;
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(pattrs != NULL);
-	DBC_REQUIRE(target_obj != NULL);
-
 	/* Allocate DBL target object */
 	pzl_target = kzalloc(sizeof(struct dbll_tar_obj), GFP_KERNEL);
 	if (target_obj != NULL) {
@@ -255,8 +244,6 @@ int dbll_create(struct dbll_tar_obj **target_obj,
 			pzl_target->attrs = *pattrs;
 			*target_obj = (struct dbll_tar_obj *)pzl_target;
 		}
-		DBC_ENSURE((!status && *target_obj) ||
-				(status && *target_obj == NULL));
 	}
 
 	return status;
@@ -269,9 +256,6 @@ void dbll_delete(struct dbll_tar_obj *target)
 {
 	struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(zl_target);
-
 	kfree(zl_target);
 
 }
@@ -282,14 +266,7 @@ void dbll_delete(struct dbll_tar_obj *target)
  */
 void dbll_exit(void)
 {
-	DBC_REQUIRE(refs > 0);
-
-	refs--;
-
-	if (refs == 0)
-		gh_exit();
-
-	DBC_ENSURE(refs >= 0);
+	/* do nothing */
 }
 
 /*
@@ -302,12 +279,6 @@ bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name,
 	struct dbll_symbol *sym;
 	bool status = false;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(zl_lib);
-	DBC_REQUIRE(name != NULL);
-	DBC_REQUIRE(sym_val != NULL);
-	DBC_REQUIRE(zl_lib->sym_tab != NULL);
-
 	sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, name);
 	if (sym != NULL) {
 		*sym_val = &sym->value;
@@ -327,10 +298,6 @@ void dbll_get_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs)
 {
 	struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(zl_target);
-	DBC_REQUIRE(pattrs != NULL);
-
 	if ((pattrs != NULL) && (zl_target != NULL))
 		*pattrs = zl_target->attrs;
 
@@ -347,12 +314,6 @@ bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name,
 	char cname[MAXEXPR + 1];
 	bool status = false;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(zl_lib);
-	DBC_REQUIRE(sym_val != NULL);
-	DBC_REQUIRE(zl_lib->sym_tab != NULL);
-	DBC_REQUIRE(name != NULL);
-
 	cname[0] = '_';
 
 	strncpy(cname + 1, name, sizeof(cname) - 2);
@@ -382,12 +343,6 @@ int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
 	struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(name != NULL);
-	DBC_REQUIRE(paddr != NULL);
-	DBC_REQUIRE(psize != NULL);
-	DBC_REQUIRE(zl_lib);
-
 	/* If DOFF file is not open, we open it. */
 	if (zl_lib != NULL) {
 		if (zl_lib->fp == NULL) {
@@ -434,12 +389,7 @@ int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
  */
 bool dbll_init(void)
 {
-	DBC_REQUIRE(refs >= 0);
-
-	if (refs == 0)
-		gh_init();
-
-	refs++;
+	/* do nothing */
 
 	return true;
 }
@@ -456,10 +406,6 @@ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
 	s32 err;
 	int status = 0;
 	bool opened_doff = false;
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(zl_lib);
-	DBC_REQUIRE(entry != NULL);
-	DBC_REQUIRE(attrs != NULL);
 
 	/*
 	 * Load if not already loaded.
@@ -558,8 +504,6 @@ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
 	if (opened_doff)
 		dof_close(zl_lib);
 
-	DBC_ENSURE(status || zl_lib->load_ref > 0);
-
 	dev_dbg(bridge, "%s: lib: %p flags: 0x%x entry: %p, status 0x%x\n",
 		__func__, lib, flags, entry, status);
 
@@ -577,12 +521,6 @@ int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
 	s32 err;
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(zl_target);
-	DBC_REQUIRE(zl_target->attrs.fopen != NULL);
-	DBC_REQUIRE(file != NULL);
-	DBC_REQUIRE(lib_obj != NULL);
-
 	zl_lib = zl_target->head;
 	while (zl_lib != NULL) {
 		if (strcmp(zl_lib->file_name, file) == 0) {
@@ -699,8 +637,6 @@ func_cont:
 		dbll_close((struct dbll_library_obj *)zl_lib);
 
 	}
-	DBC_ENSURE((!status && (zl_lib->open_ref > 0) && *lib_obj)
-			|| (status && *lib_obj == NULL));
 
 	dev_dbg(bridge, "%s: target: %p file: %s lib_obj: %p, status 0x%x\n",
 		__func__, target, file, lib_obj, status);
@@ -722,12 +658,6 @@ int dbll_read_sect(struct dbll_library_obj *lib, char *name,
 	const struct ldr_section_info *sect = NULL;
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(zl_lib);
-	DBC_REQUIRE(name != NULL);
-	DBC_REQUIRE(buf != NULL);
-	DBC_REQUIRE(size != 0);
-
 	/* If DOFF file is not open, we open it. */
 	if (zl_lib != NULL) {
 		if (zl_lib->fp == NULL) {
@@ -788,14 +718,11 @@ void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs)
 	struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
 	s32 err = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(zl_lib);
-	DBC_REQUIRE(zl_lib->load_ref > 0);
 	dev_dbg(bridge, "%s: lib: %p\n", __func__, lib);
 	zl_lib->load_ref--;
 	/* Unload only if reference count is 0 */
 	if (zl_lib->load_ref != 0)
-		goto func_end;
+		return;
 
 	zl_lib->target_obj->attrs = *attrs;
 	if (zl_lib->dload_mod_obj) {
@@ -814,8 +741,6 @@ void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs)
 	/* delete DOFF desc since it holds *lots* of host OS
 	 * resources */
 	dof_close(zl_lib);
-func_end:
-	DBC_ENSURE(zl_lib->load_ref >= 0);
 }
 
 /*
@@ -874,8 +799,6 @@ static u16 name_hash(void *key, u16 max_bucket)
 	u16 hash;
 	char *name = (char *)key;
 
-	DBC_REQUIRE(name != NULL);
-
 	hash = 0;
 
 	while (*name) {
@@ -893,9 +816,6 @@ static u16 name_hash(void *key, u16 max_bucket)
  */
 static bool name_match(void *key, void *sp)
 {
-	DBC_REQUIRE(key != NULL);
-	DBC_REQUIRE(sp != NULL);
-
 	if ((key != NULL) && (sp != NULL)) {
 		if (strcmp((char *)key, ((struct dbll_symbol *)sp)->name) ==
 		    0)
@@ -938,10 +858,7 @@ static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer,
 	struct dbll_library_obj *lib;
 	int bytes_read = 0;
 
-	DBC_REQUIRE(this != NULL);
 	lib = pstream->lib;
-	DBC_REQUIRE(lib);
-
 	if (lib != NULL) {
 		bytes_read =
 		    (*(lib->target_obj->attrs.fread)) (buffer, 1, bufsize,
@@ -960,10 +877,7 @@ static int dbll_set_file_posn(struct dynamic_loader_stream *this,
 	struct dbll_library_obj *lib;
 	int status = 0;		/* Success */
 
-	DBC_REQUIRE(this != NULL);
 	lib = pstream->lib;
-	DBC_REQUIRE(lib);
-
 	if (lib != NULL) {
 		status = (*(lib->target_obj->attrs.fseek)) (lib->fp, (long)pos,
 							    SEEK_SET);
@@ -986,10 +900,7 @@ static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
 	struct dbll_sym_val *dbll_sym = NULL;
 	bool status = false;	/* Symbol not found yet */
 
-	DBC_REQUIRE(this != NULL);
 	lib = ldr_sym->lib;
-	DBC_REQUIRE(lib);
-
 	if (lib != NULL) {
 		if (lib->target_obj->attrs.sym_lookup) {
 			/* Check current lib + base lib + dep lib +
@@ -1015,9 +926,6 @@ static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
 	if (!status && gbl_search)
 		dev_dbg(bridge, "%s: Symbol not found: %s\n", __func__, name);
 
-	DBC_ASSERT((status && (dbll_sym != NULL))
-		   || (!status && (dbll_sym == NULL)));
-
 	ret_sym = (struct dynload_symbol *)dbll_sym;
 	return ret_sym;
 }
@@ -1034,11 +942,7 @@ static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
 	struct dbll_library_obj *lib;
 	struct dbll_symbol *sym;
 
-	DBC_REQUIRE(this != NULL);
 	lib = ldr_sym->lib;
-	DBC_REQUIRE(lib);
-	DBC_REQUIRE(lib->sym_tab != NULL);
-
 	sym = (struct dbll_symbol *)gh_find(lib->sym_tab, (char *)name);
 
 	ret_sym = (struct dynload_symbol *)&sym->value;
@@ -1059,10 +963,7 @@ static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym
 	struct dbll_library_obj *lib;
 	struct dynload_symbol *ret;
 
-	DBC_REQUIRE(this != NULL);
-	DBC_REQUIRE(name);
 	lib = ldr_sym->lib;
-	DBC_REQUIRE(lib);
 
 	/* Check to see if symbol is already defined in symbol table */
 	if (!(lib->target_obj->attrs.base_image)) {
@@ -1111,10 +1012,7 @@ static void dbll_purge_symbol_table(struct dynamic_loader_sym *this,
 	struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
 	struct dbll_library_obj *lib;
 
-	DBC_REQUIRE(this != NULL);
 	lib = ldr_sym->lib;
-	DBC_REQUIRE(lib);
-
 	/* May not need to do anything */
 }
 
@@ -1127,9 +1025,7 @@ static void *allocate(struct dynamic_loader_sym *this, unsigned memsize)
 	struct dbll_library_obj *lib;
 	void *buf;
 
-	DBC_REQUIRE(this != NULL);
 	lib = ldr_sym->lib;
-	DBC_REQUIRE(lib);
 
 	buf = kzalloc(memsize, GFP_KERNEL);
 
@@ -1144,9 +1040,7 @@ static void deallocate(struct dynamic_loader_sym *this, void *mem_ptr)
 	struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
 	struct dbll_library_obj *lib;
 
-	DBC_REQUIRE(this != NULL);
 	lib = ldr_sym->lib;
-	DBC_REQUIRE(lib);
 
 	kfree(mem_ptr);
 }
@@ -1161,9 +1055,7 @@ static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr,
 	struct dbll_library_obj *lib;
 	char temp_buf[MAXEXPR];
 
-	DBC_REQUIRE(this != NULL);
 	lib = ldr_sym->lib;
-	DBC_REQUIRE(lib);
 	vsnprintf((char *)temp_buf, MAXEXPR, (char *)errstr, args);
 	dev_dbg(bridge, "%s\n", temp_buf);
 }
@@ -1195,9 +1087,7 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
 	u32 alloc_size = 0;
 	u32 run_addr_flag = 0;
 
-	DBC_REQUIRE(this != NULL);
 	lib = dbll_alloc_obj->lib;
-	DBC_REQUIRE(lib);
 
 	mem_sect_type =
 	    (stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
@@ -1206,7 +1096,6 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
 
 	/* Attempt to extract the segment ID and requirement information from
 	   the name of the section */
-	DBC_REQUIRE(info->name);
 	token_len = strlen((char *)(info->name)) + 1;
 
 	sz_sect_name = kzalloc(token_len, GFP_KERNEL);
@@ -1307,9 +1196,7 @@ static void rmm_dealloc(struct dynamic_loader_allocate *this,
 	    (stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
 						 DLOAD_BSS) ? DBLL_BSS :
 	    DBLL_DATA;
-	DBC_REQUIRE(this != NULL);
 	lib = dbll_alloc_obj->lib;
-	DBC_REQUIRE(lib);
 	/* segid was set by alloc function */
 	segid = (u32) info->context;
 	if (mem_sect_type == DBLL_CODE)
@@ -1347,9 +1234,7 @@ static int read_mem(struct dynamic_loader_initialize *this, void *buf,
 	struct dbll_library_obj *lib;
 	int bytes_read = 0;
 
-	DBC_REQUIRE(this != NULL);
 	lib = init_obj->lib;
-	DBC_REQUIRE(lib);
 	/* Need bridge_brd_read function */
 	return bytes_read;
 }
@@ -1368,7 +1253,6 @@ static int write_mem(struct dynamic_loader_initialize *this, void *buf,
 	u32 mem_sect_type;
 	bool ret = true;
 
-	DBC_REQUIRE(this != NULL);
 	lib = init_obj->lib;
 	if (!lib)
 		return false;
@@ -1415,7 +1299,6 @@ static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr,
 	struct dbll_library_obj *lib;
 	struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
 
-	DBC_REQUIRE(this != NULL);
 	lib = init_obj->lib;
 	pbuf = NULL;
 	/* Pass the NULL pointer to write_mem to get the start address of Shared
@@ -1439,9 +1322,7 @@ static int execute(struct dynamic_loader_initialize *this, ldr_addr start)
 	struct dbll_library_obj *lib;
 	bool ret = true;
 
-	DBC_REQUIRE(this != NULL);
 	lib = init_obj->lib;
-	DBC_REQUIRE(lib);
 	/* Save entry point */
 	if (lib != NULL)
 		lib->entry = (u32) start;
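Note where dbll.c ends up: with the refs counter gone, dbll_init() and dbll_exit() can no longer decide when to run gh_init() and gh_exit(), so those calls are dropped entirely and both functions are left as stubs, exactly as the hunks above leave them (consolidated below); they survive only so existing callers keep compiling until a follow-up removes them:

/* dbll.c after this change: the reference-counted wrappers around the
 * gh hash-table setup reduce to stubs; gh_init()/gh_exit() are no
 * longer called from here at all.
 */
bool dbll_init(void)
{
	/* do nothing */

	return true;
}

void dbll_exit(void)
{
	/* do nothing */
}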
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 522810bc7427..6234ffb5e8a3 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -24,9 +24,6 @@
 /*  ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
 
-/*  ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
 /*  ----------------------------------- Platform Manager */
 #include <dspbridge/cod.h>
 #include <dspbridge/drv.h>
@@ -84,9 +81,6 @@ struct drv_ext {
 	char sz_string[MAXREGPATHLENGTH];
 };
 
-/*  ----------------------------------- Globals */
-static u32 refs;		/* Module reference count */
-
 /*  ----------------------------------- Function Prototypes */
 static int fxn_not_implemented(int arg, ...);
 static int init_cod_mgr(struct dev_object *dev_obj);
@@ -106,11 +100,8 @@ u32 dev_brd_write_fxn(void *arb, u32 dsp_add, void *host_buf,
 	u32 ul_written = 0;
 	int status;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(host_buf != NULL);	/* Required of BrdWrite(). */
 	if (dev_obj) {
 		/* Require of BrdWrite() */
-		DBC_ASSERT(dev_obj->bridge_context != NULL);
 		status = (*dev_obj->bridge_interface.brd_write) (
 					dev_obj->bridge_context, host_buf,
 					dsp_add, ul_num_bytes, mem_space);
@@ -143,9 +134,6 @@ int dev_create_device(struct dev_object **device_obj,
 	struct drv_object *hdrv_obj = NULL;
 	struct drv_data *drv_datap = dev_get_drvdata(bridge);
 	int status = 0;
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(device_obj != NULL);
-	DBC_REQUIRE(driver_file_name != NULL);
 
 	status = drv_request_bridge_res_dsp((void *)&host_res);
 
@@ -169,7 +157,6 @@ int dev_create_device(struct dev_object **device_obj,
 	/* Create the device object, and pass a handle to the Bridge driver for
 	 * storage. */
 	if (!status) {
-		DBC_ASSERT(drv_fxns);
 		dev_obj = kzalloc(sizeof(struct dev_object), GFP_KERNEL);
 		if (dev_obj) {
 			/* Fill out the rest of the Dev Object structure: */
@@ -191,9 +178,6 @@ int dev_create_device(struct dev_object **device_obj,
 				status = (dev_obj->bridge_interface.dev_create)
 				    (&dev_obj->bridge_context, dev_obj,
 				     host_res);
-				/* Assert bridge_dev_create()'s ensure clause: */
-				DBC_ASSERT(status
-					   || (dev_obj->bridge_context != NULL));
 			} else {
 				status = -ENOMEM;
 			}
@@ -271,7 +255,6 @@ leave:
 		*device_obj = NULL;
 	}
 
-	DBC_ENSURE((!status && *device_obj) || (status && !*device_obj));
 	return status;
 }
 
@@ -287,17 +270,11 @@ int dev_create2(struct dev_object *hdev_obj)
 	int status = 0;
 	struct dev_object *dev_obj = hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(hdev_obj);
-
 	/* There can be only one Node Manager per DEV object */
-	DBC_ASSERT(!dev_obj->node_mgr);
 	status = node_create_mgr(&dev_obj->node_mgr, hdev_obj);
 	if (status)
 		dev_obj->node_mgr = NULL;
 
-	DBC_ENSURE((!status && dev_obj->node_mgr != NULL)
-		   || (status && dev_obj->node_mgr == NULL));
 	return status;
 }
 
@@ -311,9 +288,6 @@ int dev_destroy2(struct dev_object *hdev_obj)
 	int status = 0;
 	struct dev_object *dev_obj = hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(hdev_obj);
-
 	if (dev_obj->node_mgr) {
 		if (node_delete_mgr(dev_obj->node_mgr))
 			status = -EPERM;
@@ -322,7 +296,6 @@ int dev_destroy2(struct dev_object *hdev_obj)
 
 	}
 
-	DBC_ENSURE((!status && dev_obj->node_mgr == NULL) || status);
 	return status;
 }
 
@@ -337,8 +310,6 @@ int dev_destroy_device(struct dev_object *hdev_obj)
 	int status = 0;
 	struct dev_object *dev_obj = hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (hdev_obj) {
 		if (dev_obj->cod_mgr) {
 			cod_delete(dev_obj->cod_mgr);
@@ -415,9 +386,6 @@ int dev_get_chnl_mgr(struct dev_object *hdev_obj,
 	int status = 0;
 	struct dev_object *dev_obj = hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(mgr != NULL);
-
 	if (hdev_obj) {
 		*mgr = dev_obj->chnl_mgr;
 	} else {
@@ -425,7 +393,6 @@ int dev_get_chnl_mgr(struct dev_object *hdev_obj,
 		status = -EFAULT;
 	}
 
-	DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
 	return status;
 }
 
@@ -441,9 +408,6 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
 	int status = 0;
 	struct dev_object *dev_obj = hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(mgr != NULL);
-
 	if (hdev_obj) {
 		*mgr = dev_obj->cmm_mgr;
 	} else {
@@ -451,7 +415,6 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
 		status = -EFAULT;
 	}
 
-	DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
 	return status;
 }
 
@@ -467,9 +430,6 @@ int dev_get_dmm_mgr(struct dev_object *hdev_obj,
 	int status = 0;
 	struct dev_object *dev_obj = hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(mgr != NULL);
-
 	if (hdev_obj) {
 		*mgr = dev_obj->dmm_mgr;
 	} else {
@@ -477,7 +437,6 @@ int dev_get_dmm_mgr(struct dev_object *hdev_obj,
 		status = -EFAULT;
 	}
 
-	DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
 	return status;
 }
 
@@ -492,9 +451,6 @@ int dev_get_cod_mgr(struct dev_object *hdev_obj,
 	int status = 0;
 	struct dev_object *dev_obj = hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(cod_mgr != NULL);
-
 	if (hdev_obj) {
 		*cod_mgr = dev_obj->cod_mgr;
 	} else {
@@ -502,7 +458,6 @@ int dev_get_cod_mgr(struct dev_object *hdev_obj,
 		status = -EFAULT;
 	}
 
-	DBC_ENSURE(!status || (cod_mgr != NULL && *cod_mgr == NULL));
 	return status;
 }
 
@@ -514,9 +469,6 @@ int dev_get_deh_mgr(struct dev_object *hdev_obj,
 {
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(deh_manager != NULL);
-	DBC_REQUIRE(hdev_obj);
 	if (hdev_obj) {
 		*deh_manager = hdev_obj->deh_mgr;
 	} else {
@@ -537,9 +489,6 @@ int dev_get_dev_node(struct dev_object *hdev_obj,
 	int status = 0;
 	struct dev_object *dev_obj = hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(dev_nde != NULL);
-
 	if (hdev_obj) {
 		*dev_nde = dev_obj->dev_node_obj;
 	} else {
@@ -547,7 +496,6 @@ int dev_get_dev_node(struct dev_object *hdev_obj,
 		status = -EFAULT;
 	}
 
-	DBC_ENSURE(!status || (dev_nde != NULL && *dev_nde == NULL));
 	return status;
 }
 
@@ -578,9 +526,6 @@ int dev_get_intf_fxns(struct dev_object *hdev_obj,
 	int status = 0;
 	struct dev_object *dev_obj = hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(if_fxns != NULL);
-
 	if (hdev_obj) {
 		*if_fxns = &dev_obj->bridge_interface;
 	} else {
@@ -588,7 +533,6 @@ int dev_get_intf_fxns(struct dev_object *hdev_obj,
 		status = -EFAULT;
 	}
 
-	DBC_ENSURE(!status || ((if_fxns != NULL) && (*if_fxns == NULL)));
 	return status;
 }
 
@@ -600,10 +544,6 @@ int dev_get_io_mgr(struct dev_object *hdev_obj,
 {
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(io_man != NULL);
-	DBC_REQUIRE(hdev_obj);
-
 	if (hdev_obj) {
 		*io_man = hdev_obj->iomgr;
 	} else {
@@ -638,10 +578,6 @@ struct dev_object *dev_get_next(struct dev_object *hdev_obj)
  */
 void dev_get_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr **msg_man)
 {
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(msg_man != NULL);
-	DBC_REQUIRE(hdev_obj);
-
 	*msg_man = hdev_obj->msg_mgr;
 }
 
@@ -656,9 +592,6 @@ int dev_get_node_manager(struct dev_object *hdev_obj,
 	int status = 0;
 	struct dev_object *dev_obj = hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(node_man != NULL);
-
 	if (hdev_obj) {
 		*node_man = dev_obj->node_mgr;
 	} else {
@@ -666,7 +599,6 @@ int dev_get_node_manager(struct dev_object *hdev_obj,
 		status = -EFAULT;
 	}
 
-	DBC_ENSURE(!status || (node_man != NULL && *node_man == NULL));
 	return status;
 }
 
@@ -679,9 +611,6 @@ int dev_get_symbol(struct dev_object *hdev_obj,
 	int status = 0;
 	struct cod_manager *cod_mgr;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(str_sym != NULL && pul_value != NULL);
-
 	if (hdev_obj) {
 		status = dev_get_cod_mgr(hdev_obj, &cod_mgr);
 		if (cod_mgr)
@@ -706,9 +635,6 @@ int dev_get_bridge_context(struct dev_object *hdev_obj,
 	int status = 0;
 	struct dev_object *dev_obj = hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(phbridge_context != NULL);
-
 	if (hdev_obj) {
 		*phbridge_context = dev_obj->bridge_context;
 	} else {
@@ -716,67 +642,10 @@ int dev_get_bridge_context(struct dev_object *hdev_obj,
 		status = -EFAULT;
 	}
 
-	DBC_ENSURE(!status || ((phbridge_context != NULL) &&
-			       (*phbridge_context == NULL)));
 	return status;
 }
 
 /*
- * ======== dev_exit ========
- * Purpose:
- *      Decrement reference count, and free resources when reference count is
- *      0.
- */
-void dev_exit(void)
-{
-	DBC_REQUIRE(refs > 0);
-
-	refs--;
-
-	if (refs == 0) {
-		cmm_exit();
-		dmm_exit();
-	}
-
-	DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== dev_init ========
- * Purpose:
- *      Initialize DEV's private state, keeping a reference count on each call.
- */
-bool dev_init(void)
-{
-	bool cmm_ret, dmm_ret, ret = true;
-
-	DBC_REQUIRE(refs >= 0);
-
-	if (refs == 0) {
-		cmm_ret = cmm_init();
-		dmm_ret = dmm_init();
-
-		ret = cmm_ret && dmm_ret;
-
-		if (!ret) {
-			if (cmm_ret)
-				cmm_exit();
-
-			if (dmm_ret)
-				dmm_exit();
-
-		}
-	}
-
-	if (ret)
-		refs++;
-
-	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
-	return ret;
-}
-
-/*
  * ======== dev_notify_clients ========
  * Purpose:
  *      Notify all clients of this device of a change in device status.
@@ -841,14 +710,11 @@ int dev_set_chnl_mgr(struct dev_object *hdev_obj,
 	int status = 0;
 	struct dev_object *dev_obj = hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (hdev_obj)
 		dev_obj->chnl_mgr = hmgr;
 	else
 		status = -EFAULT;
 
-	DBC_ENSURE(status || (dev_obj->chnl_mgr == hmgr));
 	return status;
 }
 
@@ -859,9 +725,6 @@ int dev_set_chnl_mgr(struct dev_object *hdev_obj,
  */
 void dev_set_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr *hmgr)
 {
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(hdev_obj);
-
 	hdev_obj->msg_mgr = hmgr;
 }
 
@@ -879,8 +742,6 @@ int dev_start_device(struct cfg_devnode *dev_node_obj)
 	struct mgr_object *hmgr_obj = NULL;
 	struct drv_data *drv_datap = dev_get_drvdata(bridge);
 
-	DBC_REQUIRE(refs > 0);
-
 	/* Given all resources, create a device object. */
 	status = dev_create_device(&hdev_obj, bridge_file_name,
 				   dev_node_obj);
@@ -944,9 +805,6 @@ static int init_cod_mgr(struct dev_object *dev_obj)
 	int status = 0;
 	char *sz_dummy_file = "dummy";
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(!dev_obj || (dev_obj->cod_mgr == NULL));
-
 	status = cod_create(&dev_obj->cod_mgr, sz_dummy_file);
 
 	return status;
@@ -976,10 +834,6 @@ int dev_insert_proc_object(struct dev_object *hdev_obj,
 {
 	struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(dev_obj);
-	DBC_REQUIRE(proc_obj != 0);
-	DBC_REQUIRE(already_attached != NULL);
 	if (!list_empty(&dev_obj->proc_list))
 		*already_attached = true;
 
@@ -1017,10 +871,6 @@ int dev_remove_proc_object(struct dev_object *hdev_obj, u32 proc_obj)
 	struct list_head *cur_elem;
 	struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
 
-	DBC_REQUIRE(dev_obj);
-	DBC_REQUIRE(proc_obj != 0);
-	DBC_REQUIRE(!list_empty(&dev_obj->proc_list));
-
 	/* Search list for dev_obj: */
 	list_for_each(cur_elem, &dev_obj->proc_list) {
 		if ((u32) cur_elem == proc_obj) {
@@ -1069,10 +919,6 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
 	    (intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? drv_fxns->pfn : \
 	    (cast)fxn_not_implemented))
 
-	DBC_REQUIRE(intf_fxns != NULL);
-	DBC_REQUIRE(drv_fxns != NULL);
-	DBC_REQUIRE(MAKEVERSION(drv_fxns->brd_api_major_version,
-			drv_fxns->brd_api_minor_version) <= BRD_API_VERSION);
 	bridge_version = MAKEVERSION(drv_fxns->brd_api_major_version,
 				     drv_fxns->brd_api_minor_version);
 	intf_fxns->brd_api_major_version = drv_fxns->brd_api_major_version;
@@ -1119,33 +965,5 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
 		STORE_FXN(fxn_msg_setqueueid, msg_set_queue_id);
 	}
 	/* Add code for any additional functions in newerBridge versions here */
-	/* Ensure postcondition: */
-	DBC_ENSURE(intf_fxns->dev_create != NULL);
-	DBC_ENSURE(intf_fxns->dev_destroy != NULL);
-	DBC_ENSURE(intf_fxns->dev_cntrl != NULL);
-	DBC_ENSURE(intf_fxns->brd_monitor != NULL);
-	DBC_ENSURE(intf_fxns->brd_start != NULL);
-	DBC_ENSURE(intf_fxns->brd_stop != NULL);
-	DBC_ENSURE(intf_fxns->brd_status != NULL);
-	DBC_ENSURE(intf_fxns->brd_read != NULL);
-	DBC_ENSURE(intf_fxns->brd_write != NULL);
-	DBC_ENSURE(intf_fxns->chnl_create != NULL);
-	DBC_ENSURE(intf_fxns->chnl_destroy != NULL);
-	DBC_ENSURE(intf_fxns->chnl_open != NULL);
-	DBC_ENSURE(intf_fxns->chnl_close != NULL);
-	DBC_ENSURE(intf_fxns->chnl_add_io_req != NULL);
-	DBC_ENSURE(intf_fxns->chnl_get_ioc != NULL);
-	DBC_ENSURE(intf_fxns->chnl_cancel_io != NULL);
-	DBC_ENSURE(intf_fxns->chnl_flush_io != NULL);
-	DBC_ENSURE(intf_fxns->chnl_get_info != NULL);
-	DBC_ENSURE(intf_fxns->chnl_get_mgr_info != NULL);
-	DBC_ENSURE(intf_fxns->chnl_idle != NULL);
-	DBC_ENSURE(intf_fxns->chnl_register_notify != NULL);
-	DBC_ENSURE(intf_fxns->io_create != NULL);
-	DBC_ENSURE(intf_fxns->io_destroy != NULL);
-	DBC_ENSURE(intf_fxns->io_on_loaded != NULL);
-	DBC_ENSURE(intf_fxns->io_get_proc_load != NULL);
-	DBC_ENSURE(intf_fxns->msg_set_queue_id != NULL);
-
 #undef STORE_FXN
 }
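dev.c loses more than the macros: the block of 26 DBC_ENSURE postconditions at the end of store_interface_fxns() disappears, and so does the dev_init()/dev_exit() pair that reference-counted the cmm and dmm sub-modules (dmm's own init/exit pair is deleted in the dmm.c hunks below, and cmm's is expected to go the same way in this series, so there is nothing left to gate). Where a dropped precondition still seems worth a runtime check, the usual upstream substitute is an explicit, always-compiled test; a hypothetical example, not part of this commit:

/*
 * Hypothetical replacement pattern (not in this commit): an explicit,
 * always-compiled check instead of a debug-only DBC_REQUIRE(mgr != NULL).
 * The rest of the body mirrors the dev_get_chnl_mgr() shown above.
 */
int dev_get_chnl_mgr(struct dev_object *hdev_obj, struct chnl_mgr **mgr)
{
	if (WARN_ON(!mgr))		/* former DBC_REQUIRE(mgr != NULL) */
		return -EINVAL;

	if (!hdev_obj) {
		*mgr = NULL;
		return -EFAULT;
	}

	*mgr = hdev_obj->chnl_mgr;
	return 0;
}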
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
index 8685233d7627..7c9f83916068 100644
--- a/drivers/staging/tidspbridge/pmgr/dmm.c
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -28,9 +28,6 @@
 /*  ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
 
-/*  ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
 /*  ----------------------------------- OS Adaptation Layer */
 #include <dspbridge/sync.h>
 
@@ -54,8 +51,6 @@ struct dmm_object {
 	spinlock_t dmm_lock;	/* Lock to access dmm mgr */
 };
 
-/*  ----------------------------------- Globals */
-static u32 refs;		/* module reference count */
 struct map_page {
 	u32 region_size:15;
 	u32 mapped_size:15;
@@ -123,8 +118,6 @@ int dmm_create(struct dmm_object **dmm_manager,
 {
 	struct dmm_object *dmm_obj = NULL;
 	int status = 0;
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(dmm_manager != NULL);
 
 	*dmm_manager = NULL;
 	/* create, zero, and tag a cmm mgr object */
@@ -149,7 +142,6 @@ int dmm_destroy(struct dmm_object *dmm_mgr)
 	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
 	if (dmm_mgr) {
 		status = dmm_delete_tables(dmm_obj);
 		if (!status)
@@ -169,7 +161,6 @@ int dmm_delete_tables(struct dmm_object *dmm_mgr)
 {
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
 	/* Delete all DMM tables */
 	if (dmm_mgr)
 		vfree(virtual_mapping_table);
@@ -179,19 +170,6 @@ int dmm_delete_tables(struct dmm_object *dmm_mgr)
 }
 
 /*
- * ======== dmm_exit ========
- * Purpose:
- *      Discontinue usage of module; free resources when reference count
- *      reaches 0.
- */
-void dmm_exit(void)
-{
-	DBC_REQUIRE(refs > 0);
-
-	refs--;
-}
-
-/*
  * ======== dmm_get_handle ========
 * Purpose:
 *      Return the dynamic memory manager object for this device.
@@ -202,8 +180,6 @@ int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
 	int status = 0;
 	struct dev_object *hdev_obj;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(dmm_manager != NULL);
 	if (hprocessor != NULL)
 		status = proc_get_dev_object(hprocessor, &hdev_obj);
 	else
@@ -216,28 +192,6 @@ int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
 }
 
 /*
- * ======== dmm_init ========
- * Purpose:
- *      Initializes private state of DMM module.
- */
-bool dmm_init(void)
-{
-	bool ret = true;
-
-	DBC_REQUIRE(refs >= 0);
-
-	if (ret)
-		refs++;
-
-	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
-	virtual_mapping_table = NULL;
-	table_size = 0;
-
-	return ret;
-}
-
-/*
  * ======== dmm_map_memory ========
 * Purpose:
 *      Add a mapping block to the reserved chunk. DMM assumes that this block
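One subtlety in deleting dmm_init() above: it was the only place that explicitly reset virtual_mapping_table and table_size. Assuming both remain file-scope statics (their declarations fall outside these hunks), C's zero-initialization of static storage covers the first use, so the explicit reset was redundant:

/* dmm.c globals once dmm_init() is gone -- assuming both stay file-scope
 * statics, the guaranteed zero-initialization of static storage makes
 * the deleted explicit reset unnecessary on first use.
 */
static struct map_page *virtual_mapping_table;	/* implicitly NULL */
static u32 table_size;				/* implicitly 0 */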
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 767ffe270ed6..9ef1ad9527af 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -24,9 +24,6 @@
 /*  ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
 
-/*  ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
 /*  ----------------------------------- OS Adaptation Layer */
 #include <dspbridge/ntfy.h>
 
@@ -266,25 +263,10 @@ err:
  */
 void api_exit(void)
 {
-	DBC_REQUIRE(api_c_refs > 0);
 	api_c_refs--;
 
-	if (api_c_refs == 0) {
-		/* Release all modules initialized in api_init(). */
-		cod_exit();
-		dev_exit();
-		chnl_exit();
-		msg_exit();
-		io_exit();
-		strm_exit();
-		disp_exit();
-		node_exit();
-		proc_exit();
+	if (api_c_refs == 0)
 		mgr_exit();
-		rmm_exit();
-		drv_exit();
-	}
-	DBC_ENSURE(api_c_refs >= 0);
 }
 
 /*
@@ -295,64 +277,10 @@ void api_exit(void)
 bool api_init(void)
 {
 	bool ret = true;
-	bool fdrv, fdev, fcod, fchnl, fmsg, fio;
-	bool fmgr, fproc, fnode, fdisp, fstrm, frmm;
-
-	if (api_c_refs == 0) {
-		/* initialize driver and other modules */
-		fdrv = drv_init();
-		fmgr = mgr_init();
-		fproc = proc_init();
-		fnode = node_init();
-		fdisp = disp_init();
-		fstrm = strm_init();
-		frmm = rmm_init();
-		fchnl = chnl_init();
-		fmsg = msg_mod_init();
-		fio = io_init();
-		fdev = dev_init();
-		fcod = cod_init();
-		ret = fdrv && fdev && fchnl && fcod && fmsg && fio;
-		ret = ret && fmgr && fproc && frmm;
-		if (!ret) {
-			if (fdrv)
-				drv_exit();
-
-			if (fmgr)
-				mgr_exit();
-
-			if (fstrm)
-				strm_exit();
-
-			if (fproc)
-				proc_exit();
-
-			if (fnode)
-				node_exit();
-
-			if (fdisp)
-				disp_exit();
-
-			if (fchnl)
-				chnl_exit();
-
-			if (fmsg)
-				msg_exit();
-
-			if (fio)
-				io_exit();
-
-			if (fdev)
-				dev_exit();
-
-			if (fcod)
-				cod_exit();
-
-			if (frmm)
-				rmm_exit();
 
-		}
-	}
+	if (api_c_refs == 0)
+		ret = mgr_init();
+
 	if (ret)
 		api_c_refs++;
 
@@ -382,8 +310,6 @@ int api_init_complete2(void)
 	struct drv_data *drv_datap;
 	u8 dev_type;
 
-	DBC_REQUIRE(api_c_refs > 0);
-
 	/* Walk the list of DevObjects, get each devnode, and attempting to
 	 * autostart the board. Note that this requires COF loading, which
 	 * requires KFILE. */
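The dspapi.c hunks carry the payload of the series: api_init() used to start a dozen sub-modules and unwind each one on partial failure, yet nearly all of those *_init() routines only bumped a reference count and returned true, so the unwind path was effectively dead code. With the pairs deleted, only the mgr module keeps an init/exit, and the two functions collapse to the shape below (assembled from the lines the hunks above add and keep):

/* Resulting dspapi.c functions, assembled from the hunks above. */
bool api_init(void)
{
	bool ret = true;

	if (api_c_refs == 0)
		ret = mgr_init();

	if (ret)
		api_c_refs++;

	return ret;
}

void api_exit(void)
{
	api_c_refs--;

	if (api_c_refs == 0)
		mgr_exit();
}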
diff --git a/drivers/staging/tidspbridge/pmgr/io.c b/drivers/staging/tidspbridge/pmgr/io.c
index 65245f310f89..4073c9c672fd 100644
--- a/drivers/staging/tidspbridge/pmgr/io.c
+++ b/drivers/staging/tidspbridge/pmgr/io.c
@@ -23,9 +23,6 @@
 /*  ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
 
-/*  ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
 /*  ----------------------------------- Platform Manager */
 #include <dspbridge/dev.h>
 
@@ -33,9 +30,6 @@
 #include <ioobj.h>
 #include <dspbridge/io.h>
 
-/*  ----------------------------------- Globals */
-static u32 refs;
-
 /*
 * ======== io_create ========
 * Purpose:
@@ -50,10 +44,6 @@ int io_create(struct io_mgr **io_man, struct dev_object *hdev_obj,
 	struct io_mgr_ *pio_mgr = NULL;
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(io_man != NULL);
-	DBC_REQUIRE(mgr_attrts != NULL);
-
 	*io_man = NULL;
 
 	/* A memory base of 0 implies no memory base: */
@@ -94,8 +84,6 @@ int io_destroy(struct io_mgr *hio_mgr)
 	struct io_mgr_ *pio_mgr = (struct io_mgr_ *)hio_mgr;
 	int status;
 
-	DBC_REQUIRE(refs > 0);
-
 	intf_fxns = pio_mgr->intf_fxns;
 
 	/* Let Bridge channel module destroy the io_mgr: */
@@ -103,36 +91,3 @@ int io_destroy(struct io_mgr *hio_mgr)
 
 	return status;
 }
-
-/*
- * ======== io_exit ========
- * Purpose:
- *      Discontinue usage of the IO module.
- */
-void io_exit(void)
-{
-	DBC_REQUIRE(refs > 0);
-
-	refs--;
-
-	DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== io_init ========
- * Purpose:
- *      Initialize the IO module's private state.
- */
-bool io_init(void)
-{
-	bool ret = true;
-
-	DBC_REQUIRE(refs >= 0);
-
-	if (ret)
-		refs++;
-
-	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
-	return ret;
-}
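io.c shows the cleanest end state: the entire tail of the file (io_exit() and io_init()) is deleted, and the refs global with it, leaving no cross-call state at all. Its public surface shrinks to the two functions still visible above (prototypes paraphrased from the hunks; the exact spelling of the attributes parameter type is an assumption):

/* Remaining io.c entry points. The io_attrs type name here is assumed
 * for illustration; everything else follows the hunks above. */
int io_create(struct io_mgr **io_man, struct dev_object *hdev_obj,
	      struct io_attrs *mgr_attrts);
int io_destroy(struct io_mgr *hio_mgr);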
diff --git a/drivers/staging/tidspbridge/pmgr/msg.c b/drivers/staging/tidspbridge/pmgr/msg.c
index a6916039eed6..f093cfb51c00 100644
--- a/drivers/staging/tidspbridge/pmgr/msg.c
+++ b/drivers/staging/tidspbridge/pmgr/msg.c
@@ -23,9 +23,6 @@
 /*  ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
 
-/*  ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
 /*  ----------------------------------- Bridge Driver */
 #include <dspbridge/dspdefs.h>
 
@@ -36,9 +33,6 @@
 #include <msgobj.h>
 #include <dspbridge/msg.h>
 
-/*  ----------------------------------- Globals */
-static u32 refs;		/* module reference count */
-
 /*
 * ======== msg_create ========
 * Purpose:
@@ -53,11 +47,6 @@ int msg_create(struct msg_mgr **msg_man,
 	struct msg_mgr *hmsg_mgr;
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(msg_man != NULL);
-	DBC_REQUIRE(msg_callback != NULL);
-	DBC_REQUIRE(hdev_obj != NULL);
-
 	*msg_man = NULL;
 
 	dev_get_intf_fxns(hdev_obj, &intf_fxns);
@@ -90,8 +79,6 @@ void msg_delete(struct msg_mgr *hmsg_mgr)
 	struct msg_mgr_ *msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr;
 	struct bridge_drv_interface *intf_fxns;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (msg_mgr_obj) {
 		intf_fxns = msg_mgr_obj->intf_fxns;
 
@@ -102,28 +89,3 @@ void msg_delete(struct msg_mgr *hmsg_mgr)
 			__func__, hmsg_mgr);
 	}
 }
-
-/*
- * ======== msg_exit ========
- */
-void msg_exit(void)
-{
-	DBC_REQUIRE(refs > 0);
-	refs--;
-
-	DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== msg_mod_init ========
- */
-bool msg_mod_init(void)
-{
-	DBC_REQUIRE(refs >= 0);
-
-	refs++;
-
-	DBC_ENSURE(refs >= 0);
-
-	return true;
-}
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index fda240214cd6..12a1d34b3954 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -29,8 +29,6 @@
 
 /*  ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
-/*  ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
 
 /*  ----------------------------------- Platform Manager */
 #include <dspbridge/cod.h>
@@ -85,8 +83,6 @@ int dcd_auto_register(struct dcd_manager *hdcd_mgr,
 {
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (hdcd_mgr)
 		status = dcd_get_objects(hdcd_mgr, sz_coff_path,
 					 (dcd_registerfxn) dcd_register_object,
@@ -107,8 +103,6 @@ int dcd_auto_unregister(struct dcd_manager *hdcd_mgr,
 {
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (hdcd_mgr)
 		status = dcd_get_objects(hdcd_mgr, sz_coff_path,
 					 (dcd_registerfxn) dcd_register_object,
@@ -131,9 +125,6 @@ int dcd_create_manager(char *sz_zl_dll_name,
 	struct dcd_manager *dcd_mgr_obj = NULL;	/* DCD Manager pointer */
 	int status = 0;
 
-	DBC_REQUIRE(refs >= 0);
-	DBC_REQUIRE(dcd_mgr);
-
 	status = cod_create(&cod_mgr, sz_zl_dll_name);
 	if (status)
 		goto func_end;
@@ -156,9 +147,6 @@ int dcd_create_manager(char *sz_zl_dll_name,
 		cod_delete(cod_mgr);
 	}
 
-	DBC_ENSURE((!status) ||
-			((dcd_mgr_obj == NULL) && (status == -ENOMEM)));
-
 func_end:
 	return status;
 }
@@ -173,8 +161,6 @@ int dcd_destroy_manager(struct dcd_manager *hdcd_mgr)
 	struct dcd_manager *dcd_mgr_obj = hdcd_mgr;
 	int status = -EFAULT;
 
-	DBC_REQUIRE(refs >= 0);
-
 	if (hdcd_mgr) {
 		/* Delete the COD manager. */
 		cod_delete(dcd_mgr_obj->cod_mgr);
@@ -205,10 +191,6 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
 	struct dcd_key_elem *dcd_key;
 	int len;
 
-	DBC_REQUIRE(refs >= 0);
-	DBC_REQUIRE(index >= 0);
-	DBC_REQUIRE(uuid_obj != NULL);
-
 	if ((index != 0) && (enum_refs == 0)) {
 		/*
 		 * If an enumeration is being performed on an index greater
@@ -222,7 +204,6 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
 		 * "_\0" + length of sz_obj_type string + terminating NULL.
 		 */
 		dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
-		DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
 
 		/* Create proper REG key; concatenate DCD_REGKEY with
 		 * obj_type. */
@@ -294,8 +275,6 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
 		}
 	}
 
-	DBC_ENSURE(uuid_obj || (status == -EPERM));
-
 	return status;
 }
 
@@ -307,11 +286,9 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
 void dcd_exit(void)
 {
 	struct dcd_key_elem *rv, *rv_tmp;
-	DBC_REQUIRE(refs > 0);
 
 	refs--;
 	if (refs == 0) {
-		cod_exit();
 		list_for_each_entry_safe(rv, rv_tmp, &reg_key_list, link) {
 			list_del(&rv->link);
 			kfree(rv->path);
@@ -319,7 +296,6 @@ void dcd_exit(void)
 		}
 	}
 
-	DBC_ENSURE(refs >= 0);
 }
 
 /*
@@ -333,12 +309,6 @@ int dcd_get_dep_libs(struct dcd_manager *hdcd_mgr,
 {
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(hdcd_mgr);
-	DBC_REQUIRE(uuid_obj != NULL);
-	DBC_REQUIRE(dep_lib_uuids != NULL);
-	DBC_REQUIRE(prstnt_dep_libs != NULL);
-
 	status =
 	    get_dep_lib_info(hdcd_mgr, uuid_obj, &num_libs, NULL, dep_lib_uuids,
 			     prstnt_dep_libs, phase);
@@ -356,12 +326,6 @@ int dcd_get_num_dep_libs(struct dcd_manager *hdcd_mgr,
 {
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(hdcd_mgr);
-	DBC_REQUIRE(num_libs != NULL);
-	DBC_REQUIRE(num_pers_libs != NULL);
-	DBC_REQUIRE(uuid_obj != NULL);
-
 	status = get_dep_lib_info(hdcd_mgr, uuid_obj, num_libs, num_pers_libs,
 				  NULL, NULL, phase);
 
@@ -393,10 +357,6 @@ int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
 	u32 dw_key_len;		/* Len of REG key. */
 	char sz_obj_type[MAX_INT2CHAR_LENGTH];	/* str. rep. of obj_type. */
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(obj_def != NULL);
-	DBC_REQUIRE(obj_uuid != NULL);
-
 	sz_uuid = kzalloc(MAXUUIDLEN, GFP_KERNEL);
 	if (!sz_uuid) {
 		status = -ENOMEM;
@@ -411,7 +371,6 @@ int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
 	/* Pre-determine final key length. It's length of DCD_REGKEY +
 	 *  "_\0" + length of sz_obj_type string + terminating NULL */
 	dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
-	DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
 
 	/* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
 	strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
@@ -470,7 +429,6 @@ int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
 	}
 
 	/* Ensure sz_uuid + 1 is not greater than sizeof sz_sect_name. */
-	DBC_ASSERT((strlen(sz_uuid) + 1) < sizeof(sz_sect_name));
 
 	/* Create section name based on node UUID. A period is
 	 * pre-pended to the UUID string to form the section name.
@@ -553,7 +511,6 @@ int dcd_get_objects(struct dcd_manager *hdcd_mgr,
 	struct dsp_uuid dsp_uuid_obj;
 	s32 object_type;
 
-	DBC_REQUIRE(refs > 0);
 	if (!hdcd_mgr) {
 		status = -EFAULT;
 		goto func_end;
@@ -663,11 +620,6 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
 	int status = 0;
 	struct dcd_key_elem *dcd_key = NULL;
 
-	DBC_REQUIRE(uuid_obj != NULL);
-	DBC_REQUIRE(str_lib_name != NULL);
-	DBC_REQUIRE(buff_size != NULL);
-	DBC_REQUIRE(hdcd_mgr);
-
 	dev_dbg(bridge, "%s: hdcd_mgr %p, uuid_obj %p, str_lib_name %p,"
 		" buff_size %p\n", __func__, hdcd_mgr, uuid_obj, str_lib_name,
 		buff_size);
@@ -677,7 +629,6 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
 	 *  "_\0" + length of sz_obj_type string + terminating NULL.
 	 */
 	dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
680 DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
681 632
682 /* Create proper REG key; concatenate DCD_REGKEY with obj_type. */ 633 /* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
683 strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1); 634 strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
@@ -705,7 +656,6 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
705 break; 656 break;
706 default: 657 default:
707 status = -EINVAL; 658 status = -EINVAL;
708 DBC_ASSERT(false);
709 } 659 }
710 if (!status) { 660 if (!status) {
711 if ((strlen(sz_reg_key) + strlen(sz_obj_type)) < 661 if ((strlen(sz_reg_key) + strlen(sz_obj_type)) <
@@ -787,30 +737,14 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
787 */ 737 */
788bool dcd_init(void) 738bool dcd_init(void)
789{ 739{
790 bool init_cod;
791 bool ret = true; 740 bool ret = true;
792 741
793 DBC_REQUIRE(refs >= 0); 742 if (refs == 0)
794
795 if (refs == 0) {
796 /* Initialize required modules. */
797 init_cod = cod_init();
798
799 if (!init_cod) {
800 ret = false;
801 /* Exit initialized modules. */
802 if (init_cod)
803 cod_exit();
804 }
805
806 INIT_LIST_HEAD(&reg_key_list); 743 INIT_LIST_HEAD(&reg_key_list);
807 }
808 744
809 if (ret) 745 if (ret)
810 refs++; 746 refs++;
811 747
812 DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs == 0)));
813
814 return ret; 748 return ret;
815} 749}
816 750
@@ -832,15 +766,6 @@ int dcd_register_object(struct dsp_uuid *uuid_obj,
832 char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */ 766 char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
833 struct dcd_key_elem *dcd_key = NULL; 767 struct dcd_key_elem *dcd_key = NULL;
834 768
835 DBC_REQUIRE(refs > 0);
836 DBC_REQUIRE(uuid_obj != NULL);
837 DBC_REQUIRE((obj_type == DSP_DCDNODETYPE) ||
838 (obj_type == DSP_DCDPROCESSORTYPE) ||
839 (obj_type == DSP_DCDLIBRARYTYPE) ||
840 (obj_type == DSP_DCDCREATELIBTYPE) ||
841 (obj_type == DSP_DCDEXECUTELIBTYPE) ||
842 (obj_type == DSP_DCDDELETELIBTYPE));
843
844 dev_dbg(bridge, "%s: object UUID %p, obj_type %d, szPathName %s\n", 769 dev_dbg(bridge, "%s: object UUID %p, obj_type %d, szPathName %s\n",
845 __func__, uuid_obj, obj_type, psz_path_name); 770 __func__, uuid_obj, obj_type, psz_path_name);
846 771
@@ -849,7 +774,6 @@ int dcd_register_object(struct dsp_uuid *uuid_obj,
849 * "_\0" + length of sz_obj_type string + terminating NULL. 774 * "_\0" + length of sz_obj_type string + terminating NULL.
850 */ 775 */
851 dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1; 776 dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
852 DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
853 777
854 /* Create proper REG key; concatenate DCD_REGKEY with obj_type. */ 778 /* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
855 strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1); 779 strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
@@ -987,15 +911,6 @@ int dcd_unregister_object(struct dsp_uuid *uuid_obj,
987{ 911{
988 int status = 0; 912 int status = 0;
989 913
990 DBC_REQUIRE(refs > 0);
991 DBC_REQUIRE(uuid_obj != NULL);
992 DBC_REQUIRE((obj_type == DSP_DCDNODETYPE) ||
993 (obj_type == DSP_DCDPROCESSORTYPE) ||
994 (obj_type == DSP_DCDLIBRARYTYPE) ||
995 (obj_type == DSP_DCDCREATELIBTYPE) ||
996 (obj_type == DSP_DCDEXECUTELIBTYPE) ||
997 (obj_type == DSP_DCDDELETELIBTYPE));
998
999 /* 914 /*
1000 * When dcd_register_object is called with NULL as pathname, 915 * When dcd_register_object is called with NULL as pathname,
1001 * it indicates an unregister object operation. 916 * it indicates an unregister object operation.
@@ -1055,12 +970,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
1055 s32 entry_id; 970 s32 entry_id;
1056#endif 971#endif
1057 972
1058 DBC_REQUIRE(psz_buf != NULL);
1059 DBC_REQUIRE(ul_buf_size != 0);
1060 DBC_REQUIRE((obj_type == DSP_DCDNODETYPE)
1061 || (obj_type == DSP_DCDPROCESSORTYPE));
1062 DBC_REQUIRE(gen_obj != NULL);
1063
1064 switch (obj_type) { 973 switch (obj_type) {
1065 case DSP_DCDNODETYPE: 974 case DSP_DCDNODETYPE:
1066 /* 975 /*
@@ -1082,7 +991,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
1082 token = strsep(&psz_cur, seps); 991 token = strsep(&psz_cur, seps);
1083 992
1084 /* ac_name */ 993 /* ac_name */
1085 DBC_REQUIRE(token);
1086 token_len = strlen(token); 994 token_len = strlen(token);
1087 if (token_len > DSP_MAXNAMELEN - 1) 995 if (token_len > DSP_MAXNAMELEN - 1)
1088 token_len = DSP_MAXNAMELEN - 1; 996 token_len = DSP_MAXNAMELEN - 1;
@@ -1167,7 +1075,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
1167 token = strsep(&psz_cur, seps); 1075 token = strsep(&psz_cur, seps);
1168 1076
1169 /* char *str_create_phase_fxn */ 1077 /* char *str_create_phase_fxn */
1170 DBC_REQUIRE(token);
1171 token_len = strlen(token); 1078 token_len = strlen(token);
1172 gen_obj->obj_data.node_obj.str_create_phase_fxn = 1079 gen_obj->obj_data.node_obj.str_create_phase_fxn =
1173 kzalloc(token_len + 1, GFP_KERNEL); 1080 kzalloc(token_len + 1, GFP_KERNEL);
@@ -1178,7 +1085,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
1178 token = strsep(&psz_cur, seps); 1085 token = strsep(&psz_cur, seps);
1179 1086
1180 /* char *str_execute_phase_fxn */ 1087 /* char *str_execute_phase_fxn */
1181 DBC_REQUIRE(token);
1182 token_len = strlen(token); 1088 token_len = strlen(token);
1183 gen_obj->obj_data.node_obj.str_execute_phase_fxn = 1089 gen_obj->obj_data.node_obj.str_execute_phase_fxn =
1184 kzalloc(token_len + 1, GFP_KERNEL); 1090 kzalloc(token_len + 1, GFP_KERNEL);
@@ -1189,7 +1095,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
1189 token = strsep(&psz_cur, seps); 1095 token = strsep(&psz_cur, seps);
1190 1096
1191 /* char *str_delete_phase_fxn */ 1097 /* char *str_delete_phase_fxn */
1192 DBC_REQUIRE(token);
1193 token_len = strlen(token); 1098 token_len = strlen(token);
1194 gen_obj->obj_data.node_obj.str_delete_phase_fxn = 1099 gen_obj->obj_data.node_obj.str_delete_phase_fxn =
1195 kzalloc(token_len + 1, GFP_KERNEL); 1100 kzalloc(token_len + 1, GFP_KERNEL);
@@ -1421,12 +1326,6 @@ static int get_dep_lib_info(struct dcd_manager *hdcd_mgr,
1421 u16 dep_libs = 0; 1326 u16 dep_libs = 0;
1422 int status = 0; 1327 int status = 0;
1423 1328
1424 DBC_REQUIRE(refs > 0);
1425
1426 DBC_REQUIRE(hdcd_mgr);
1427 DBC_REQUIRE(num_libs != NULL);
1428 DBC_REQUIRE(uuid_obj != NULL);
1429
1430 /* Initialize to 0 dependent libraries, if only counting number of 1329 /* Initialize to 0 dependent libraries, if only counting number of
1431 * dependent libraries */ 1330 * dependent libraries */
1432 if (!get_uuids) { 1331 if (!get_uuids) {
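
The DBC_REQUIRE/DBC_ENSURE/DBC_ASSERT lines removed throughout this series are dspbridge's design-by-contract macros from <dspbridge/dbc.h>. They checked their condition only on CONFIG_TIDSPBRIDGE_DEBUG builds and compiled to nothing otherwise, so deleting them relaxes nothing on production kernels. A minimal sketch of the macro shape being dropped (the exact dbc.h definitions differ in wording):

#ifdef CONFIG_TIDSPBRIDGE_DEBUG
#define DBC_ASSERT(exp) \
	do { \
		if (!(exp)) \
			pr_err("%s, line %d: assertion (" #exp ") failed\n", \
			       __FILE__, __LINE__); \
	} while (0)
#define DBC_REQUIRE DBC_ASSERT	/* precondition, checked on entry */
#define DBC_ENSURE DBC_ASSERT	/* postcondition, checked on exit */
#else
#define DBC_ASSERT(exp) do { } while (0)
#define DBC_REQUIRE(exp) do { } while (0)
#define DBC_ENSURE(exp) do { } while (0)
#endif

What the removals cost is mostly documentation: each DBC_REQUIRE spelled out a function's assumptions at the top of its body.
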
diff --git a/drivers/staging/tidspbridge/rmgr/disp.c b/drivers/staging/tidspbridge/rmgr/disp.c
index a9aa22f3b4f6..4af51b75aeab 100644
--- a/drivers/staging/tidspbridge/rmgr/disp.c
+++ b/drivers/staging/tidspbridge/rmgr/disp.c
@@ -24,9 +24,6 @@
24/* ----------------------------------- DSP/BIOS Bridge */ 24/* ----------------------------------- DSP/BIOS Bridge */
25#include <dspbridge/dbdefs.h> 25#include <dspbridge/dbdefs.h>
26 26
27/* ----------------------------------- Trace & Debug */
28#include <dspbridge/dbc.h>
29
30/* ----------------------------------- OS Adaptation Layer */ 27/* ----------------------------------- OS Adaptation Layer */
31#include <dspbridge/sync.h> 28#include <dspbridge/sync.h>
32 29
@@ -72,8 +69,6 @@ struct disp_object {
72 u32 data_mau_size; /* Size of DSP Data MAU */ 69 u32 data_mau_size; /* Size of DSP Data MAU */
73}; 70};
74 71
75static u32 refs;
76
77static void delete_disp(struct disp_object *disp_obj); 72static void delete_disp(struct disp_object *disp_obj);
78static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset, 73static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
79 struct node_strmdef strm_def, u32 max, 74 struct node_strmdef strm_def, u32 max,
@@ -96,11 +91,6 @@ int disp_create(struct disp_object **dispatch_obj,
96 int status = 0; 91 int status = 0;
97 u8 dev_type; 92 u8 dev_type;
98 93
99 DBC_REQUIRE(refs > 0);
100 DBC_REQUIRE(dispatch_obj != NULL);
101 DBC_REQUIRE(disp_attrs != NULL);
102 DBC_REQUIRE(hdev_obj != NULL);
103
104 *dispatch_obj = NULL; 94 *dispatch_obj = NULL;
105 95
106 /* Allocate Node Dispatcher object */ 96 /* Allocate Node Dispatcher object */
@@ -168,8 +158,6 @@ func_cont:
168 else 158 else
169 delete_disp(disp_obj); 159 delete_disp(disp_obj);
170 160
171 DBC_ENSURE((status && *dispatch_obj == NULL) ||
172 (!status && *dispatch_obj));
173 return status; 161 return status;
174} 162}
175 163
@@ -179,43 +167,10 @@ func_cont:
179 */ 167 */
180void disp_delete(struct disp_object *disp_obj) 168void disp_delete(struct disp_object *disp_obj)
181{ 169{
182 DBC_REQUIRE(refs > 0);
183 DBC_REQUIRE(disp_obj);
184
185 delete_disp(disp_obj); 170 delete_disp(disp_obj);
186} 171}
187 172
188/* 173/*
189 * ======== disp_exit ========
190 * Discontinue usage of DISP module.
191 */
192void disp_exit(void)
193{
194 DBC_REQUIRE(refs > 0);
195
196 refs--;
197
198 DBC_ENSURE(refs >= 0);
199}
200
201/*
202 * ======== disp_init ========
203 * Initialize the DISP module.
204 */
205bool disp_init(void)
206{
207 bool ret = true;
208
209 DBC_REQUIRE(refs >= 0);
210
211 if (ret)
212 refs++;
213
214 DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
215 return ret;
216}
217
218/*
219 * ======== disp_node_change_priority ======== 174 * ======== disp_node_change_priority ========
220 * Change the priority of a node currently running on the target. 175 * Change the priority of a node currently running on the target.
221 */ 176 */
@@ -227,10 +182,6 @@ int disp_node_change_priority(struct disp_object *disp_obj,
227 struct rms_command *rms_cmd; 182 struct rms_command *rms_cmd;
228 int status = 0; 183 int status = 0;
229 184
230 DBC_REQUIRE(refs > 0);
231 DBC_REQUIRE(disp_obj);
232 DBC_REQUIRE(hnode != NULL);
233
234 /* Send message to RMS to change priority */ 185 /* Send message to RMS to change priority */
235 rms_cmd = (struct rms_command *)(disp_obj->buf); 186 rms_cmd = (struct rms_command *)(disp_obj->buf);
236 rms_cmd->fxn = (rms_word) (rms_fxn); 187 rms_cmd->fxn = (rms_word) (rms_fxn);
@@ -276,12 +227,6 @@ int disp_node_create(struct disp_object *disp_obj,
276 struct dsp_nodeinfo node_info; 227 struct dsp_nodeinfo node_info;
277 u8 dev_type; 228 u8 dev_type;
278 229
279 DBC_REQUIRE(refs > 0);
280 DBC_REQUIRE(disp_obj);
281 DBC_REQUIRE(hnode != NULL);
282 DBC_REQUIRE(node_get_type(hnode) != NODE_DEVICE);
283 DBC_REQUIRE(node_env != NULL);
284
285 status = dev_get_dev_type(disp_obj->dev_obj, &dev_type); 230 status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
286 231
287 if (status) 232 if (status)
@@ -292,11 +237,9 @@ int disp_node_create(struct disp_object *disp_obj,
292 __func__, dev_type); 237 __func__, dev_type);
293 goto func_end; 238 goto func_end;
294 } 239 }
295 DBC_REQUIRE(pargs != NULL);
296 node_type = node_get_type(hnode); 240 node_type = node_get_type(hnode);
297 node_msg_args = pargs->asa.node_msg_args; 241 node_msg_args = pargs->asa.node_msg_args;
298 max = disp_obj->bufsize_rms; /*Max # of RMS words that can be sent */ 242 max = disp_obj->bufsize_rms; /*Max # of RMS words that can be sent */
299 DBC_ASSERT(max == RMS_COMMANDBUFSIZE);
300 chars_in_rms_word = sizeof(rms_word) / disp_obj->char_size; 243 chars_in_rms_word = sizeof(rms_word) / disp_obj->char_size;
301 /* Number of RMS words needed to hold arg data */ 244 /* Number of RMS words needed to hold arg data */
302 dw_length = 245 dw_length =
@@ -457,7 +400,6 @@ int disp_node_create(struct disp_object *disp_obj,
457 } 400 }
458 if (!status) { 401 if (!status) {
459 ul_bytes = total * sizeof(rms_word); 402 ul_bytes = total * sizeof(rms_word);
460 DBC_ASSERT(ul_bytes < (RMS_COMMANDBUFSIZE * sizeof(rms_word)));
461 status = send_message(disp_obj, node_get_timeout(hnode), 403 status = send_message(disp_obj, node_get_timeout(hnode),
462 ul_bytes, node_env); 404 ul_bytes, node_env);
463 } 405 }
@@ -480,10 +422,6 @@ int disp_node_delete(struct disp_object *disp_obj,
480 int status = 0; 422 int status = 0;
481 u8 dev_type; 423 u8 dev_type;
482 424
483 DBC_REQUIRE(refs > 0);
484 DBC_REQUIRE(disp_obj);
485 DBC_REQUIRE(hnode != NULL);
486
487 status = dev_get_dev_type(disp_obj->dev_obj, &dev_type); 425 status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
488 426
489 if (!status) { 427 if (!status) {
@@ -521,9 +459,6 @@ int disp_node_run(struct disp_object *disp_obj,
521 struct rms_command *rms_cmd; 459 struct rms_command *rms_cmd;
522 int status = 0; 460 int status = 0;
523 u8 dev_type; 461 u8 dev_type;
524 DBC_REQUIRE(refs > 0);
525 DBC_REQUIRE(disp_obj);
526 DBC_REQUIRE(hnode != NULL);
527 462
528 status = dev_get_dev_type(disp_obj->dev_obj, &dev_type); 463 status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
529 464
@@ -620,7 +555,6 @@ static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
620 * 1 from total. 555 * 1 from total.
621 */ 556 */
622 total += sizeof(struct rms_strm_def) / sizeof(rms_word) - 1; 557 total += sizeof(struct rms_strm_def) / sizeof(rms_word) - 1;
623 DBC_REQUIRE(strm_def.sz_device);
624 dw_length = strlen(strm_def.sz_device) + 1; 558 dw_length = strlen(strm_def.sz_device) + 1;
625 559
626 /* Number of RMS_WORDS needed to hold device name */ 560 /* Number of RMS_WORDS needed to hold device name */
@@ -659,8 +593,6 @@ static int send_message(struct disp_object *disp_obj, u32 timeout,
659 struct chnl_ioc chnl_ioc_obj; 593 struct chnl_ioc chnl_ioc_obj;
660 int status = 0; 594 int status = 0;
661 595
662 DBC_REQUIRE(pdw_arg != NULL);
663
664 *pdw_arg = (u32) NULL; 596 *pdw_arg = (u32) NULL;
665 intf_fxns = disp_obj->intf_fxns; 597 intf_fxns = disp_obj->intf_fxns;
666 chnl_obj = disp_obj->chnl_to_dsp; 598 chnl_obj = disp_obj->chnl_to_dsp;
@@ -703,7 +635,6 @@ static int send_message(struct disp_object *disp_obj, u32 timeout,
703 status = -EPERM; 635 status = -EPERM;
704 } else { 636 } else {
705 if (CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) { 637 if (CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
706 DBC_ASSERT(chnl_ioc_obj.buf == pbuf);
707 if (*((int *)chnl_ioc_obj.buf) < 0) { 638 if (*((int *)chnl_ioc_obj.buf) < 0) {
708 /* Translate DSP's to kernel error */ 639 /* Translate DSP's to kernel error */
709 status = -EREMOTEIO; 640 status = -EREMOTEIO;
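
disp_init() and disp_exit() could be deleted outright because they were pure reference-count stubs: neither performed any setup or teardown, and the static refs counter they maintained was read only by the DBC assertions this series also removes. Stripped of those assertions, the deleted pair amounted to:

/* the removed pattern, reduced to its effect: a counter nothing else read */
static u32 refs;

bool disp_init(void)
{
	refs++;		/* no other initialization ever happened here */
	return true;	/* could not fail */
}

void disp_exit(void)
{
	refs--;		/* and no teardown here */
}

Any one-time setup that matters happens on the module init path, so callers lose nothing.
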
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index db8215f540d8..6795205b0155 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -24,9 +24,6 @@
24/* ----------------------------------- DSP/BIOS Bridge */ 24/* ----------------------------------- DSP/BIOS Bridge */
25#include <dspbridge/dbdefs.h> 25#include <dspbridge/dbdefs.h>
26 26
27/* ----------------------------------- Trace & Debug */
28#include <dspbridge/dbc.h>
29
30/* ----------------------------------- This */ 27/* ----------------------------------- This */
31#include <dspbridge/drv.h> 28#include <dspbridge/drv.h>
32#include <dspbridge/dev.h> 29#include <dspbridge/dev.h>
@@ -54,7 +51,6 @@ struct drv_ext {
54}; 51};
55 52
56/* ----------------------------------- Globals */ 53/* ----------------------------------- Globals */
57static s32 refs;
58static bool ext_phys_mem_pool_enabled; 54static bool ext_phys_mem_pool_enabled;
59struct ext_phys_mem_pool { 55struct ext_phys_mem_pool {
60 u32 phys_mem_base; 56 u32 phys_mem_base;
@@ -172,7 +168,6 @@ void drv_proc_node_update_status(void *node_resource, s32 status)
172{ 168{
173 struct node_res_object *node_res_obj = 169 struct node_res_object *node_res_obj =
174 (struct node_res_object *)node_resource; 170 (struct node_res_object *)node_resource;
175 DBC_ASSERT(node_resource != NULL);
176 node_res_obj->node_allocated = status; 171 node_res_obj->node_allocated = status;
177} 172}
178 173
@@ -181,7 +176,6 @@ void drv_proc_node_update_heap_status(void *node_resource, s32 status)
181{ 176{
182 struct node_res_object *node_res_obj = 177 struct node_res_object *node_res_obj =
183 (struct node_res_object *)node_resource; 178 (struct node_res_object *)node_resource;
184 DBC_ASSERT(node_resource != NULL);
185 node_res_obj->heap_allocated = status; 179 node_res_obj->heap_allocated = status;
186} 180}
187 181
@@ -308,9 +302,6 @@ int drv_create(struct drv_object **drv_obj)
308 struct drv_object *pdrv_object = NULL; 302 struct drv_object *pdrv_object = NULL;
309 struct drv_data *drv_datap = dev_get_drvdata(bridge); 303 struct drv_data *drv_datap = dev_get_drvdata(bridge);
310 304
311 DBC_REQUIRE(drv_obj != NULL);
312 DBC_REQUIRE(refs > 0);
313
314 pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL); 305 pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
315 if (pdrv_object) { 306 if (pdrv_object) {
316 /* Create and Initialize List of device objects */ 307 /* Create and Initialize List of device objects */
@@ -336,25 +327,10 @@ int drv_create(struct drv_object **drv_obj)
336 kfree(pdrv_object); 327 kfree(pdrv_object);
337 } 328 }
338 329
339 DBC_ENSURE(status || pdrv_object);
340 return status; 330 return status;
341} 331}
342 332
343/* 333/*
344 * ======== drv_exit ========
345 * Purpose:
346 * Discontinue usage of the DRV module.
347 */
348void drv_exit(void)
349{
350 DBC_REQUIRE(refs > 0);
351
352 refs--;
353
354 DBC_ENSURE(refs >= 0);
355}
356
357/*
358 * ======== = drv_destroy ======== = 334 * ======== = drv_destroy ======== =
359 * purpose: 335 * purpose:
360 * Invoked during bridge de-initialization 336 * Invoked during bridge de-initialization
@@ -365,9 +341,6 @@ int drv_destroy(struct drv_object *driver_obj)
365 struct drv_object *pdrv_object = (struct drv_object *)driver_obj; 341 struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
366 struct drv_data *drv_datap = dev_get_drvdata(bridge); 342 struct drv_data *drv_datap = dev_get_drvdata(bridge);
367 343
368 DBC_REQUIRE(refs > 0);
369 DBC_REQUIRE(pdrv_object);
370
371 kfree(pdrv_object); 344 kfree(pdrv_object);
372 /* Update the DRV Object in the driver data */ 345 /* Update the DRV Object in the driver data */
373 if (drv_datap) { 346 if (drv_datap) {
@@ -389,17 +362,8 @@ int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
389 struct dev_object **device_obj) 362 struct dev_object **device_obj)
390{ 363{
391 int status = 0; 364 int status = 0;
392#ifdef CONFIG_TIDSPBRIDGE_DEBUG
393 /* used only for Assertions and debug messages */
394 struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
395#endif
396 struct dev_object *dev_obj; 365 struct dev_object *dev_obj;
397 u32 i; 366 u32 i;
398 DBC_REQUIRE(pdrv_obj);
399 DBC_REQUIRE(device_obj != NULL);
400 DBC_REQUIRE(index >= 0);
401 DBC_REQUIRE(refs > 0);
402 DBC_ASSERT(!(list_empty(&pdrv_obj->dev_list)));
403 367
404 dev_obj = (struct dev_object *)drv_get_first_dev_object(); 368 dev_obj = (struct dev_object *)drv_get_first_dev_object();
405 for (i = 0; i < index; i++) { 369 for (i = 0; i < index; i++) {
@@ -524,25 +488,6 @@ u32 drv_get_next_dev_extension(u32 dev_extension)
524} 488}
525 489
526/* 490/*
527 * ======== drv_init ========
528 * Purpose:
529 * Initialize DRV module private state.
530 */
531int drv_init(void)
532{
533 s32 ret = 1; /* function return value */
534
535 DBC_REQUIRE(refs >= 0);
536
537 if (ret)
538 refs++;
539
540 DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
541
542 return ret;
543}
544
545/*
546 * ======== drv_insert_dev_object ======== 491 * ======== drv_insert_dev_object ========
547 * Purpose: 492 * Purpose:
548 * Insert a DevObject into the list of Manager object. 493 * Insert a DevObject into the list of Manager object.
@@ -552,10 +497,6 @@ int drv_insert_dev_object(struct drv_object *driver_obj,
552{ 497{
553 struct drv_object *pdrv_object = (struct drv_object *)driver_obj; 498 struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
554 499
555 DBC_REQUIRE(refs > 0);
556 DBC_REQUIRE(hdev_obj != NULL);
557 DBC_REQUIRE(pdrv_object);
558
559 list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list); 500 list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list);
560 501
561 return 0; 502 return 0;
@@ -574,12 +515,6 @@ int drv_remove_dev_object(struct drv_object *driver_obj,
574 struct drv_object *pdrv_object = (struct drv_object *)driver_obj; 515 struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
575 struct list_head *cur_elem; 516 struct list_head *cur_elem;
576 517
577 DBC_REQUIRE(refs > 0);
578 DBC_REQUIRE(pdrv_object);
579 DBC_REQUIRE(hdev_obj != NULL);
580
581 DBC_REQUIRE(!list_empty(&pdrv_object->dev_list));
582
583 /* Search list for p_proc_object: */ 518 /* Search list for p_proc_object: */
584 list_for_each(cur_elem, &pdrv_object->dev_list) { 519 list_for_each(cur_elem, &pdrv_object->dev_list) {
585 /* If found, remove it. */ 520 /* If found, remove it. */
@@ -605,9 +540,6 @@ int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
605 struct drv_ext *pszdev_node; 540 struct drv_ext *pszdev_node;
606 struct drv_data *drv_datap = dev_get_drvdata(bridge); 541 struct drv_data *drv_datap = dev_get_drvdata(bridge);
607 542
608 DBC_REQUIRE(dw_context != 0);
609 DBC_REQUIRE(dev_node_strg != NULL);
610
611 /* 543 /*
612 * Allocate memory to hold the string. This will live until 544 * Allocate memory to hold the string. This will live until
613 * it is freed in the Release resources. Update the driver object 545 * it is freed in the Release resources. Update the driver object
@@ -639,10 +571,6 @@ int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
639 *dev_node_strg = 0; 571 *dev_node_strg = 0;
640 } 572 }
641 573
642 DBC_ENSURE((!status && dev_node_strg != NULL &&
643 !list_empty(&pdrv_object->dev_node_string)) ||
644 (status && *dev_node_strg == 0));
645
646 return status; 574 return status;
647} 575}
648 576
@@ -900,8 +828,6 @@ void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
900void mem_free_phys_mem(void *virtual_address, u32 physical_address, 828void mem_free_phys_mem(void *virtual_address, u32 physical_address,
901 u32 byte_size) 829 u32 byte_size)
902{ 830{
903 DBC_REQUIRE(virtual_address != NULL);
904
905 if (!ext_phys_mem_pool_enabled) 831 if (!ext_phys_mem_pool_enabled)
906 dma_free_coherent(NULL, byte_size, virtual_address, 832 dma_free_coherent(NULL, byte_size, virtual_address,
907 physical_address); 833 physical_address);
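
A side note on drv_insert_dev_object(), which this diff keeps: it links the device object with list_add_tail((struct list_head *)hdev_obj, ...). That cast is only valid while the list link is the first member of the object, so a pointer to the object and a pointer to its link coincide. A sketch of the layout assumption and of the cast-free idiom the rest of the kernel prefers (dev_object_like and its fields are illustrative, not the real tidspbridge structures):

#include <linux/list.h>

struct dev_object_like {
	struct list_head link;	/* must stay first for the cast to be legal */
	int state;		/* illustrative payload */
};

/* conventional form: take the member's address, no cast required */
static void insert_dev(struct list_head *list, struct dev_object_like *dev)
{
	list_add_tail(&dev->link, list);
}

Retrieval then uses list_entry()/container_of() rather than casting the link back to the object.
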
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 385740bad0de..3cac01492063 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -16,11 +16,8 @@
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */ 17 */
18 18
19/* ----------------------------------- Host OS */
20
21#include <plat/dsp.h> 19#include <plat/dsp.h>
22 20
23#include <dspbridge/host_os.h>
24#include <linux/types.h> 21#include <linux/types.h>
25#include <linux/platform_device.h> 22#include <linux/platform_device.h>
26#include <linux/pm.h> 23#include <linux/pm.h>
@@ -33,36 +30,25 @@
33/* ----------------------------------- DSP/BIOS Bridge */ 30/* ----------------------------------- DSP/BIOS Bridge */
34#include <dspbridge/dbdefs.h> 31#include <dspbridge/dbdefs.h>
35 32
36/* ----------------------------------- Trace & Debug */
37#include <dspbridge/dbc.h>
38
39/* ----------------------------------- OS Adaptation Layer */ 33/* ----------------------------------- OS Adaptation Layer */
40#include <dspbridge/clk.h> 34#include <dspbridge/clk.h>
41#include <dspbridge/sync.h>
42 35
43/* ----------------------------------- Platform Manager */ 36/* ----------------------------------- Platform Manager */
44#include <dspbridge/dspapi-ioctl.h>
45#include <dspbridge/dspapi.h> 37#include <dspbridge/dspapi.h>
46#include <dspbridge/dspdrv.h> 38#include <dspbridge/dspdrv.h>
47 39
48/* ----------------------------------- Resource Manager */ 40/* ----------------------------------- Resource Manager */
49#include <dspbridge/pwr.h> 41#include <dspbridge/pwr.h>
50 42
51/* ----------------------------------- This */
52#include <drv_interface.h>
53
54#include <dspbridge/resourcecleanup.h> 43#include <dspbridge/resourcecleanup.h>
55#include <dspbridge/chnl.h>
56#include <dspbridge/proc.h> 44#include <dspbridge/proc.h>
57#include <dspbridge/dev.h> 45#include <dspbridge/dev.h>
58#include <dspbridge/drv.h>
59 46
60#ifdef CONFIG_TIDSPBRIDGE_DVFS 47#ifdef CONFIG_TIDSPBRIDGE_DVFS
61#include <mach-omap2/omap3-opp.h> 48#include <mach-omap2/omap3-opp.h>
62#endif 49#endif
63 50
64/* ----------------------------------- Globals */ 51/* ----------------------------------- Globals */
65#define DRIVER_NAME "DspBridge"
66#define DSPBRIDGE_VERSION "0.3" 52#define DSPBRIDGE_VERSION "0.3"
67s32 dsp_debug; 53s32 dsp_debug;
68 54
@@ -131,7 +117,166 @@ MODULE_AUTHOR("Texas Instruments");
131MODULE_LICENSE("GPL"); 117MODULE_LICENSE("GPL");
132MODULE_VERSION(DSPBRIDGE_VERSION); 118MODULE_VERSION(DSPBRIDGE_VERSION);
133 119
134static char *driver_name = DRIVER_NAME; 120/*
121 * This function is called when an application opens handle to the
122 * bridge driver.
123 */
124static int bridge_open(struct inode *ip, struct file *filp)
125{
126 int status = 0;
127 struct process_context *pr_ctxt = NULL;
128
129 /*
130 * Allocate a new process context and insert it into global
131 * process context list.
132 */
133
134#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
135 if (recover) {
136 if (filp->f_flags & O_NONBLOCK ||
137 wait_for_completion_interruptible(&bridge_open_comp))
138 return -EBUSY;
139 }
140#endif
141 pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL);
142 if (!pr_ctxt)
143 return -ENOMEM;
144
145 pr_ctxt->res_state = PROC_RES_ALLOCATED;
146 spin_lock_init(&pr_ctxt->dmm_map_lock);
147 INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
148 spin_lock_init(&pr_ctxt->dmm_rsv_lock);
149 INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
150
151 pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
152 if (!pr_ctxt->node_id) {
153 status = -ENOMEM;
154 goto err1;
155 }
156
157 idr_init(pr_ctxt->node_id);
158
159 pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
160 if (!pr_ctxt->stream_id) {
161 status = -ENOMEM;
162 goto err2;
163 }
164
165 idr_init(pr_ctxt->stream_id);
166
167 filp->private_data = pr_ctxt;
168
169#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
170 atomic_inc(&bridge_cref);
171#endif
172 return 0;
173
174err2:
175 kfree(pr_ctxt->node_id);
176err1:
177 kfree(pr_ctxt);
178 return status;
179}
180
181/*
182 * This function is called when an application closes handle to the bridge
183 * driver.
184 */
185static int bridge_release(struct inode *ip, struct file *filp)
186{
187 int status = 0;
188 struct process_context *pr_ctxt;
189
190 if (!filp->private_data) {
191 status = -EIO;
192 goto err;
193 }
194
195 pr_ctxt = filp->private_data;
196 flush_signals(current);
197 drv_remove_all_resources(pr_ctxt);
198 proc_detach(pr_ctxt);
199 kfree(pr_ctxt->node_id);
200 kfree(pr_ctxt->stream_id);
201 kfree(pr_ctxt);
202
203 filp->private_data = NULL;
204
205err:
206#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
207 if (!atomic_dec_return(&bridge_cref))
208 complete(&bridge_comp);
209#endif
210 return status;
211}
212
213/* This function provides IO interface to the bridge driver. */
214static long bridge_ioctl(struct file *filp, unsigned int code,
215 unsigned long args)
216{
217 int status;
218 u32 retval = 0;
219 union trapped_args buf_in;
220
221#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
222 if (recover) {
223 status = -EIO;
224 goto err;
225 }
226#endif
227#ifdef CONFIG_PM
228 status = omap34_xxbridge_suspend_lockout(&bridge_suspend_data, filp);
229 if (status != 0)
230 return status;
231#endif
232
233 if (!filp->private_data) {
234 status = -EIO;
235 goto err;
236 }
237
238 status = copy_from_user(&buf_in, (union trapped_args *)args,
239 sizeof(union trapped_args));
240
241 if (!status) {
242 status = api_call_dev_ioctl(code, &buf_in, &retval,
243 filp->private_data);
244
245 if (!status) {
246 status = retval;
247 } else {
248 dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x "
249 "status 0x%x\n", __func__, code, status);
250 status = -1;
251 }
252
253 }
254
255err:
256 return status;
257}
258
259/* This function maps kernel space memory to user space memory. */
260static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
261{
262 u32 status;
263
264 vma->vm_flags |= VM_RESERVED | VM_IO;
265 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
266
267 dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %ulx "
268 "flags %lx\n", __func__, filp,
269 vma->vm_start, vma->vm_end, vma->vm_page_prot,
270 vma->vm_flags);
271
272 status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
273 vma->vm_end - vma->vm_start,
274 vma->vm_page_prot);
275 if (status != 0)
276 status = -EAGAIN;
277
278 return status;
279}
135 280
136static const struct file_operations bridge_fops = { 281static const struct file_operations bridge_fops = {
137 .open = bridge_open, 282 .open = bridge_open,
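
One behavior of the relocated bridge_ioctl() is worth spelling out: copy_from_user() returns the number of bytes it could not copy, not an errno, and on a partial copy the function stores that raw count in status and returns it to the caller as a positive value. The conventional idiom maps any nonzero remainder to -EFAULT; a sketch of that usual pattern, with a stand-in struct since union trapped_args is driver-internal (this commit does not make this change):

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static long example_ioctl(struct file *filp, unsigned int code,
			  unsigned long args)
{
	struct { u32 cmd; u32 arg; } buf_in;	/* stand-in for union trapped_args */

	if (copy_from_user(&buf_in, (void __user *)args, sizeof(buf_in)))
		return -EFAULT;	/* nonzero means bytes were left uncopied */

	/* ... dispatch on code using buf_in ... */
	return 0;
}
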
@@ -211,10 +356,10 @@ void bridge_recover_schedule(void)
211#endif 356#endif
212#ifdef CONFIG_TIDSPBRIDGE_DVFS 357#ifdef CONFIG_TIDSPBRIDGE_DVFS
213static int dspbridge_scale_notification(struct notifier_block *op, 358static int dspbridge_scale_notification(struct notifier_block *op,
214 unsigned long val, void *ptr) 359 unsigned long val, void *ptr)
215{ 360{
216 struct omap_dsp_platform_data *pdata = 361 struct omap_dsp_platform_data *pdata =
217 omap_dspbridge_dev->dev.platform_data; 362 omap_dspbridge_dev->dev.platform_data;
218 363
219 if (CPUFREQ_POSTCHANGE == val && pdata->dsp_get_opp) 364 if (CPUFREQ_POSTCHANGE == val && pdata->dsp_get_opp)
220 pwr_pm_post_scale(PRCM_VDD1, pdata->dsp_get_opp()); 365 pwr_pm_post_scale(PRCM_VDD1, pdata->dsp_get_opp());
@@ -319,7 +464,7 @@ err2:
319err1: 464err1:
320#ifdef CONFIG_TIDSPBRIDGE_DVFS 465#ifdef CONFIG_TIDSPBRIDGE_DVFS
321 cpufreq_unregister_notifier(&iva_clk_notifier, 466 cpufreq_unregister_notifier(&iva_clk_notifier,
322 CPUFREQ_TRANSITION_NOTIFIER); 467 CPUFREQ_TRANSITION_NOTIFIER);
323#endif 468#endif
324 dsp_clk_exit(); 469 dsp_clk_exit();
325 470
@@ -345,7 +490,7 @@ static int __devinit omap34_xx_bridge_probe(struct platform_device *pdev)
345 goto err1; 490 goto err1;
346 491
347 /* use 2.6 device model */ 492 /* use 2.6 device model */
348 err = alloc_chrdev_region(&dev, 0, 1, driver_name); 493 err = alloc_chrdev_region(&dev, 0, 1, "DspBridge");
349 if (err) { 494 if (err) {
350 pr_err("%s: Can't get major %d\n", __func__, driver_major); 495 pr_err("%s: Can't get major %d\n", __func__, driver_major);
351 goto err1; 496 goto err1;
@@ -385,7 +530,6 @@ err1:
385static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev) 530static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev)
386{ 531{
387 dev_t devno; 532 dev_t devno;
388 bool ret;
389 int status = 0; 533 int status = 0;
390 struct drv_data *drv_datap = dev_get_drvdata(bridge); 534 struct drv_data *drv_datap = dev_get_drvdata(bridge);
391 535
@@ -398,16 +542,15 @@ static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev)
398 542
399#ifdef CONFIG_TIDSPBRIDGE_DVFS 543#ifdef CONFIG_TIDSPBRIDGE_DVFS
400 if (cpufreq_unregister_notifier(&iva_clk_notifier, 544 if (cpufreq_unregister_notifier(&iva_clk_notifier,
401 CPUFREQ_TRANSITION_NOTIFIER)) 545 CPUFREQ_TRANSITION_NOTIFIER))
402 pr_err("%s: cpufreq_unregister_notifier failed for iva2_ck\n", 546 pr_err("%s: cpufreq_unregister_notifier failed for iva2_ck\n",
403 __func__); 547 __func__);
404#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */ 548#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
405 549
406 if (driver_context) { 550 if (driver_context) {
407 /* Put the DSP in reset state */ 551 /* Put the DSP in reset state */
408 ret = dsp_deinit(driver_context); 552 dsp_deinit(driver_context);
409 driver_context = 0; 553 driver_context = 0;
410 DBC_ASSERT(ret == true);
411 } 554 }
412 555
413 kfree(drv_datap); 556 kfree(drv_datap);
@@ -431,7 +574,7 @@ func_cont:
431} 574}
432 575
433#ifdef CONFIG_PM 576#ifdef CONFIG_PM
434static int BRIDGE_SUSPEND(struct platform_device *pdev, pm_message_t state) 577static int bridge_suspend(struct platform_device *pdev, pm_message_t state)
435{ 578{
436 u32 status; 579 u32 status;
437 u32 command = PWR_EMERGENCYDEEPSLEEP; 580 u32 command = PWR_EMERGENCYDEEPSLEEP;
@@ -444,7 +587,7 @@ static int BRIDGE_SUSPEND(struct platform_device *pdev, pm_message_t state)
444 return 0; 587 return 0;
445} 588}
446 589
447static int BRIDGE_RESUME(struct platform_device *pdev) 590static int bridge_resume(struct platform_device *pdev)
448{ 591{
449 u32 status; 592 u32 status;
450 593
@@ -456,9 +599,6 @@ static int BRIDGE_RESUME(struct platform_device *pdev)
456 wake_up(&bridge_suspend_data.suspend_wq); 599 wake_up(&bridge_suspend_data.suspend_wq);
457 return 0; 600 return 0;
458} 601}
459#else
460#define BRIDGE_SUSPEND NULL
461#define BRIDGE_RESUME NULL
462#endif 602#endif
463 603
464static struct platform_driver bridge_driver = { 604static struct platform_driver bridge_driver = {
@@ -467,8 +607,10 @@ static struct platform_driver bridge_driver = {
467 }, 607 },
468 .probe = omap34_xx_bridge_probe, 608 .probe = omap34_xx_bridge_probe,
469 .remove = __devexit_p(omap34_xx_bridge_remove), 609 .remove = __devexit_p(omap34_xx_bridge_remove),
470 .suspend = BRIDGE_SUSPEND, 610#ifdef CONFIG_PM
471 .resume = BRIDGE_RESUME, 611 .suspend = bridge_suspend,
612 .resume = bridge_resume,
613#endif
472}; 614};
473 615
474static int __init bridge_init(void) 616static int __init bridge_init(void)
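
The suspend/resume hunk above trades one optional-callback idiom for another. The old code named the handlers BRIDGE_SUSPEND/BRIDGE_RESUME and defined those macros to NULL on !CONFIG_PM builds so the always-present initializers still compiled; the new code names the handlers normally and guards the initializers themselves. A self-contained sketch of the new shape (the example_* names are illustrative):

#include <linux/platform_device.h>

#ifdef CONFIG_PM
static int example_suspend(struct platform_device *pdev, pm_message_t state)
{
	return 0;	/* stub for illustration */
}

static int example_resume(struct platform_device *pdev)
{
	return 0;	/* stub for illustration */
}
#endif

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
	},
#ifdef CONFIG_PM
	.suspend = example_suspend,	/* compiled out entirely on !CONFIG_PM */
	.resume = example_resume,
#endif
};

Members left out of a designated initializer are zeroed, so the guarded form needs no NULL fallbacks.
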
@@ -481,170 +623,6 @@ static void __exit bridge_exit(void)
481 platform_driver_unregister(&bridge_driver); 623 platform_driver_unregister(&bridge_driver);
482} 624}
483 625
484/*
485 * This function is called when an application opens handle to the
486 * bridge driver.
487 */
488static int bridge_open(struct inode *ip, struct file *filp)
489{
490 int status = 0;
491 struct process_context *pr_ctxt = NULL;
492
493 /*
494 * Allocate a new process context and insert it into global
495 * process context list.
496 */
497
498#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
499 if (recover) {
500 if (filp->f_flags & O_NONBLOCK ||
501 wait_for_completion_interruptible(&bridge_open_comp))
502 return -EBUSY;
503 }
504#endif
505 pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL);
506 if (!pr_ctxt)
507 return -ENOMEM;
508
509 pr_ctxt->res_state = PROC_RES_ALLOCATED;
510 spin_lock_init(&pr_ctxt->dmm_map_lock);
511 INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
512 spin_lock_init(&pr_ctxt->dmm_rsv_lock);
513 INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
514
515 pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
516 if (!pr_ctxt->node_id) {
517 status = -ENOMEM;
518 goto err1;
519 }
520
521 idr_init(pr_ctxt->node_id);
522
523 pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
524 if (!pr_ctxt->stream_id) {
525 status = -ENOMEM;
526 goto err2;
527 }
528
529 idr_init(pr_ctxt->stream_id);
530
531 filp->private_data = pr_ctxt;
532
533#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
534 atomic_inc(&bridge_cref);
535#endif
536 return 0;
537
538err2:
539 kfree(pr_ctxt->node_id);
540err1:
541 kfree(pr_ctxt);
542 return status;
543}
544
545/*
546 * This function is called when an application closes handle to the bridge
547 * driver.
548 */
549static int bridge_release(struct inode *ip, struct file *filp)
550{
551 int status = 0;
552 struct process_context *pr_ctxt;
553
554 if (!filp->private_data) {
555 status = -EIO;
556 goto err;
557 }
558
559 pr_ctxt = filp->private_data;
560 flush_signals(current);
561 drv_remove_all_resources(pr_ctxt);
562 proc_detach(pr_ctxt);
563 kfree(pr_ctxt->node_id);
564 kfree(pr_ctxt->stream_id);
565 kfree(pr_ctxt);
566
567 filp->private_data = NULL;
568
569err:
570#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
571 if (!atomic_dec_return(&bridge_cref))
572 complete(&bridge_comp);
573#endif
574 return status;
575}
576
577/* This function provides IO interface to the bridge driver. */
578static long bridge_ioctl(struct file *filp, unsigned int code,
579 unsigned long args)
580{
581 int status;
582 u32 retval = 0;
583 union trapped_args buf_in;
584
585 DBC_REQUIRE(filp != NULL);
586#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
587 if (recover) {
588 status = -EIO;
589 goto err;
590 }
591#endif
592#ifdef CONFIG_PM
593 status = omap34_xxbridge_suspend_lockout(&bridge_suspend_data, filp);
594 if (status != 0)
595 return status;
596#endif
597
598 if (!filp->private_data) {
599 status = -EIO;
600 goto err;
601 }
602
603 status = copy_from_user(&buf_in, (union trapped_args *)args,
604 sizeof(union trapped_args));
605
606 if (!status) {
607 status = api_call_dev_ioctl(code, &buf_in, &retval,
608 filp->private_data);
609
610 if (!status) {
611 status = retval;
612 } else {
613 dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x "
614 "status 0x%x\n", __func__, code, status);
615 status = -1;
616 }
617
618 }
619
620err:
621 return status;
622}
623
624/* This function maps kernel space memory to user space memory. */
625static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
626{
627 u32 offset = vma->vm_pgoff << PAGE_SHIFT;
628 u32 status;
629
630 DBC_ASSERT(vma->vm_start < vma->vm_end);
631
632 vma->vm_flags |= VM_RESERVED | VM_IO;
633 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
634
635 dev_dbg(bridge, "%s: vm filp %p offset %x start %lx end %lx page_prot "
636 "%lx flags %lx\n", __func__, filp, offset,
637 vma->vm_start, vma->vm_end, vma->vm_page_prot, vma->vm_flags);
638
639 status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
640 vma->vm_end - vma->vm_start,
641 vma->vm_page_prot);
642 if (status != 0)
643 status = -EAGAIN;
644
645 return status;
646}
647
648/* To remove all process resources before removing the process from the 626/* To remove all process resources before removing the process from the
649 * process context list */ 627 * process context list */
650int drv_remove_all_resources(void *process_ctxt) 628int drv_remove_all_resources(void *process_ctxt)
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.h b/drivers/staging/tidspbridge/rmgr/drv_interface.h
deleted file mode 100644
index ab070602adc2..000000000000
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * drv_interface.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2005-2006 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef _DRV_INTERFACE_H_
18#define _DRV_INTERFACE_H_
19
20/* Prototypes for all functions in this bridge */
21static int __init bridge_init(void); /* Initialize bridge */
22static void __exit bridge_exit(void); /* Opposite of initialize */
23static int bridge_open(struct inode *ip, struct file *filp); /* Open */
24static int bridge_release(struct inode *ip, struct file *filp); /* Release */
25static long bridge_ioctl(struct file *filp, unsigned int code,
26 unsigned long args);
27static int bridge_mmap(struct file *filp, struct vm_area_struct *vma);
28#endif /* ifndef _DRV_INTERFACE_H_ */
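
drv_interface.h could be deleted because its only content was forward declarations of drv_interface.c's own static functions. With bridge_open() and the other handlers now defined above bridge_fops, every name is in scope at its first use and the prototypes are redundant. The ordering rule, as a sketch:

#include <linux/fs.h>

/* defined before its first use, so no separate prototype is needed */
static int example_open(struct inode *ip, struct file *filp)
{
	return 0;	/* stub for illustration */
}

static const struct file_operations example_fops = {
	.open = example_open,	/* already in scope here */
};
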
diff --git a/drivers/staging/tidspbridge/rmgr/dspdrv.c b/drivers/staging/tidspbridge/rmgr/dspdrv.c
index 7a6fc737872c..dc767b183cdf 100644
--- a/drivers/staging/tidspbridge/rmgr/dspdrv.c
+++ b/drivers/staging/tidspbridge/rmgr/dspdrv.c
@@ -23,9 +23,6 @@
23/* ----------------------------------- DSP/BIOS Bridge */ 23/* ----------------------------------- DSP/BIOS Bridge */
24#include <dspbridge/dbdefs.h> 24#include <dspbridge/dbdefs.h>
25 25
26/* ----------------------------------- Trace & Debug */
27#include <dspbridge/dbc.h>
28
29/* ----------------------------------- Platform Manager */ 26/* ----------------------------------- Platform Manager */
30#include <dspbridge/drv.h> 27#include <dspbridge/drv.h>
31#include <dspbridge/dev.h> 28#include <dspbridge/dev.h>
@@ -102,8 +99,6 @@ func_cont:
102 } else { 99 } else {
103 dev_dbg(bridge, "%s: Failed\n", __func__); 100 dev_dbg(bridge, "%s: Failed\n", __func__);
104 } /* End api_init_complete2 */ 101 } /* End api_init_complete2 */
105 DBC_ENSURE((!status && drv_obj != NULL) ||
106 (status && drv_obj == NULL));
107 *init_status = status; 102 *init_status = status;
108 /* Return the Driver Object */ 103 /* Return the Driver Object */
109 return (u32) drv_obj; 104 return (u32) drv_obj;
diff --git a/drivers/staging/tidspbridge/rmgr/mgr.c b/drivers/staging/tidspbridge/rmgr/mgr.c
index d635c01c015e..8a1e9287cff6 100644
--- a/drivers/staging/tidspbridge/rmgr/mgr.c
+++ b/drivers/staging/tidspbridge/rmgr/mgr.c
@@ -26,9 +26,6 @@
26/* ----------------------------------- DSP/BIOS Bridge */ 26/* ----------------------------------- DSP/BIOS Bridge */
27#include <dspbridge/dbdefs.h> 27#include <dspbridge/dbdefs.h>
28 28
29/* ----------------------------------- Trace & Debug */
30#include <dspbridge/dbc.h>
31
32/* ----------------------------------- OS Adaptation Layer */ 29/* ----------------------------------- OS Adaptation Layer */
33#include <dspbridge/sync.h> 30#include <dspbridge/sync.h>
34 31
@@ -62,9 +59,6 @@ int mgr_create(struct mgr_object **mgr_obj,
62 struct mgr_object *pmgr_obj = NULL; 59 struct mgr_object *pmgr_obj = NULL;
63 struct drv_data *drv_datap = dev_get_drvdata(bridge); 60 struct drv_data *drv_datap = dev_get_drvdata(bridge);
64 61
65 DBC_REQUIRE(mgr_obj != NULL);
66 DBC_REQUIRE(refs > 0);
67
68 pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL); 62 pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL);
69 if (pmgr_obj) { 63 if (pmgr_obj) {
70 status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->dcd_mgr); 64 status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->dcd_mgr);
@@ -92,7 +86,6 @@ int mgr_create(struct mgr_object **mgr_obj,
92 status = -ENOMEM; 86 status = -ENOMEM;
93 } 87 }
94 88
95 DBC_ENSURE(status || pmgr_obj);
96 return status; 89 return status;
97} 90}
98 91
@@ -106,9 +99,6 @@ int mgr_destroy(struct mgr_object *hmgr_obj)
106 struct mgr_object *pmgr_obj = (struct mgr_object *)hmgr_obj; 99 struct mgr_object *pmgr_obj = (struct mgr_object *)hmgr_obj;
107 struct drv_data *drv_datap = dev_get_drvdata(bridge); 100 struct drv_data *drv_datap = dev_get_drvdata(bridge);
108 101
109 DBC_REQUIRE(refs > 0);
110 DBC_REQUIRE(hmgr_obj);
111
112 /* Free resources */ 102 /* Free resources */
113 if (hmgr_obj->dcd_mgr) 103 if (hmgr_obj->dcd_mgr)
114 dcd_destroy_manager(hmgr_obj->dcd_mgr); 104 dcd_destroy_manager(hmgr_obj->dcd_mgr);
@@ -140,11 +130,6 @@ int mgr_enum_node_info(u32 node_id, struct dsp_ndbprops *pndb_props,
140 struct mgr_object *pmgr_obj = NULL; 130 struct mgr_object *pmgr_obj = NULL;
141 struct drv_data *drv_datap = dev_get_drvdata(bridge); 131 struct drv_data *drv_datap = dev_get_drvdata(bridge);
142 132
143 DBC_REQUIRE(pndb_props != NULL);
144 DBC_REQUIRE(pu_num_nodes != NULL);
145 DBC_REQUIRE(undb_props_size >= sizeof(struct dsp_ndbprops));
146 DBC_REQUIRE(refs > 0);
147
148 *pu_num_nodes = 0; 133 *pu_num_nodes = 0;
149 /* Get the Manager Object from the driver data */ 134 /* Get the Manager Object from the driver data */
150 if (!drv_datap || !drv_datap->mgr_object) { 135 if (!drv_datap || !drv_datap->mgr_object) {
@@ -153,7 +138,6 @@ int mgr_enum_node_info(u32 node_id, struct dsp_ndbprops *pndb_props,
153 } 138 }
154 pmgr_obj = drv_datap->mgr_object; 139 pmgr_obj = drv_datap->mgr_object;
155 140
156 DBC_ASSERT(pmgr_obj);
157 /* Forever loop till we hit failed or no more items in the 141 /* Forever loop till we hit failed or no more items in the
158 * Enumeration. We will exit the loop other than 0; */ 142 * Enumeration. We will exit the loop other than 0; */
159 while (!status) { 143 while (!status) {
@@ -205,11 +189,6 @@ int mgr_enum_processor_info(u32 processor_id,
205 struct drv_data *drv_datap = dev_get_drvdata(bridge); 189 struct drv_data *drv_datap = dev_get_drvdata(bridge);
206 bool proc_detect = false; 190 bool proc_detect = false;
207 191
208 DBC_REQUIRE(processor_info != NULL);
209 DBC_REQUIRE(pu_num_procs != NULL);
210 DBC_REQUIRE(processor_info_size >= sizeof(struct dsp_processorinfo));
211 DBC_REQUIRE(refs > 0);
212
213 *pu_num_procs = 0; 192 *pu_num_procs = 0;
214 193
215 /* Retrieve the Object handle from the driver data */ 194 /* Retrieve the Object handle from the driver data */
@@ -242,7 +221,6 @@ int mgr_enum_processor_info(u32 processor_id,
242 dev_dbg(bridge, "%s: Failed to get MGR Object\n", __func__); 221 dev_dbg(bridge, "%s: Failed to get MGR Object\n", __func__);
243 goto func_end; 222 goto func_end;
244 } 223 }
245 DBC_ASSERT(pmgr_obj);
246 /* Forever loop till we hit no more items in the 224 /* Forever loop till we hit no more items in the
247 * Enumeration. We will exit the loop other than 0; */ 225 * Enumeration. We will exit the loop other than 0; */
248 while (status1 == 0) { 226 while (status1 == 0) {
@@ -310,12 +288,9 @@ func_end:
310 */ 288 */
311void mgr_exit(void) 289void mgr_exit(void)
312{ 290{
313 DBC_REQUIRE(refs > 0);
314 refs--; 291 refs--;
315 if (refs == 0) 292 if (refs == 0)
316 dcd_exit(); 293 dcd_exit();
317
318 DBC_ENSURE(refs >= 0);
319} 294}
320 295
321/* 296/*
@@ -328,16 +303,11 @@ int mgr_get_dcd_handle(struct mgr_object *mgr_handle,
328 int status = -EPERM; 303 int status = -EPERM;
329 struct mgr_object *pmgr_obj = (struct mgr_object *)mgr_handle; 304 struct mgr_object *pmgr_obj = (struct mgr_object *)mgr_handle;
330 305
331 DBC_REQUIRE(refs > 0);
332 DBC_REQUIRE(dcd_handle != NULL);
333
334 *dcd_handle = (u32) NULL; 306 *dcd_handle = (u32) NULL;
335 if (pmgr_obj) { 307 if (pmgr_obj) {
336 *dcd_handle = (u32) pmgr_obj->dcd_mgr; 308 *dcd_handle = (u32) pmgr_obj->dcd_mgr;
337 status = 0; 309 status = 0;
338 } 310 }
339 DBC_ENSURE((!status && *dcd_handle != (u32) NULL) ||
340 (status && *dcd_handle == (u32) NULL));
341 311
342 return status; 312 return status;
343} 313}
@@ -349,22 +319,13 @@ int mgr_get_dcd_handle(struct mgr_object *mgr_handle,
349bool mgr_init(void) 319bool mgr_init(void)
350{ 320{
351 bool ret = true; 321 bool ret = true;
352 bool init_dcd = false;
353 322
354 DBC_REQUIRE(refs >= 0); 323 if (refs == 0)
355 324 ret = dcd_init(); /* DCD Module */
356 if (refs == 0) {
357 init_dcd = dcd_init(); /* DCD Module */
358
359 if (!init_dcd)
360 ret = false;
361 }
362 325
363 if (ret) 326 if (ret)
364 refs++; 327 refs++;
365 328
366 DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
367
368 return ret; 329 return ret;
369} 330}
370 331
@@ -380,8 +341,6 @@ int mgr_wait_for_bridge_events(struct dsp_notification **anotifications,
380 struct sync_object *sync_events[MAX_EVENTS]; 341 struct sync_object *sync_events[MAX_EVENTS];
381 u32 i; 342 u32 i;
382 343
383 DBC_REQUIRE(count < MAX_EVENTS);
384
385 for (i = 0; i < count; i++) 344 for (i = 0; i < count; i++)
386 sync_events[i] = anotifications[i]->handle; 345 sync_events[i] = anotifications[i]->handle;
387 346
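
One removal in mgr.c is more than cosmetic: DBC_REQUIRE(count < MAX_EVENTS) was the only check visible in mgr_wait_for_bridge_events() before it fills the fixed sync_events[MAX_EVENTS] array, and even that check fired only on debug builds. After this series the bound is purely the callers' contract. A hypothetical hardening, not something the commit adds, would be an unconditional guard at the top of the function:

/* hypothetical explicit bound before filling the fixed-size array */
if (count >= MAX_EVENTS)
	return -EINVAL;

for (i = 0; i < count; i++)
	sync_events[i] = anotifications[i]->handle;
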
diff --git a/drivers/staging/tidspbridge/rmgr/nldr.c b/drivers/staging/tidspbridge/rmgr/nldr.c
index 0e70cba15ebc..30d5480fcdcc 100644
--- a/drivers/staging/tidspbridge/rmgr/nldr.c
+++ b/drivers/staging/tidspbridge/rmgr/nldr.c
@@ -22,8 +22,6 @@
22 22
23#include <dspbridge/dbdefs.h> 23#include <dspbridge/dbdefs.h>
24 24
25#include <dspbridge/dbc.h>
26
27/* Platform manager */ 25/* Platform manager */
28#include <dspbridge/cod.h> 26#include <dspbridge/cod.h>
29#include <dspbridge/dev.h> 27#include <dspbridge/dev.h>
@@ -265,8 +263,6 @@ static struct dbll_fxns ldr_fxns = {
265 (dbll_unload_fxn) dbll_unload, 263 (dbll_unload_fxn) dbll_unload,
266}; 264};
267 265
268static u32 refs; /* module reference count */
269
270static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info, 266static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
271 u32 addr, u32 bytes); 267 u32 addr, u32 bytes);
272static int add_ovly_node(struct dsp_uuid *uuid_obj, 268static int add_ovly_node(struct dsp_uuid *uuid_obj,
@@ -313,11 +309,6 @@ int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
313 struct nldr_nodeobject *nldr_node_obj = NULL; 309 struct nldr_nodeobject *nldr_node_obj = NULL;
314 int status = 0; 310 int status = 0;
315 311
316 DBC_REQUIRE(refs > 0);
317 DBC_REQUIRE(node_props != NULL);
318 DBC_REQUIRE(nldr_nodeobj != NULL);
319 DBC_REQUIRE(nldr_obj);
320
321 /* Initialize handle in case of failure */ 312 /* Initialize handle in case of failure */
322 *nldr_nodeobj = NULL; 313 *nldr_nodeobj = NULL;
323 /* Allocate node object */ 314 /* Allocate node object */
@@ -398,8 +389,6 @@ int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
398 if (status && nldr_node_obj) 389 if (status && nldr_node_obj)
399 kfree(nldr_node_obj); 390 kfree(nldr_node_obj);
400 391
401 DBC_ENSURE((!status && *nldr_nodeobj)
402 || (status && *nldr_nodeobj == NULL));
403 return status; 392 return status;
404} 393}
405 394
@@ -425,12 +414,6 @@ int nldr_create(struct nldr_object **nldr,
425 struct rmm_segment *rmm_segs = NULL; 414 struct rmm_segment *rmm_segs = NULL;
426 u16 i; 415 u16 i;
427 int status = 0; 416 int status = 0;
428 DBC_REQUIRE(refs > 0);
429 DBC_REQUIRE(nldr != NULL);
430 DBC_REQUIRE(hdev_obj != NULL);
431 DBC_REQUIRE(pattrs != NULL);
432 DBC_REQUIRE(pattrs->ovly != NULL);
433 DBC_REQUIRE(pattrs->write != NULL);
434 417
 	/* Allocate dynamic loader object */
 	nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
@@ -440,13 +423,10 @@ int nldr_create(struct nldr_object **nldr,
 	dev_get_cod_mgr(hdev_obj, &cod_mgr);
 	if (cod_mgr) {
 		status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
-		DBC_ASSERT(!status);
 		status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
-		DBC_ASSERT(!status);
 		status =
 		    cod_get_base_name(cod_mgr, sz_zl_file,
 				      COD_MAXPATHLENGTH);
-		DBC_ASSERT(!status);
 	}
 	status = 0;
 	/* end lazy status checking */
@@ -547,7 +527,6 @@ int nldr_create(struct nldr_object **nldr,
 		status =
 		    cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
 		/* lazy check */
-		DBC_ASSERT(!status);
 		/* First count number of overlay nodes */
 		status =
 		    dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file,
@@ -583,7 +562,6 @@ int nldr_create(struct nldr_object **nldr,
 		*nldr = NULL;
 	}
 	/* FIXME:Temp. Fix. Must be removed */
-	DBC_ENSURE((!status && *nldr) || (status && *nldr == NULL));
 	return status;
 }
 
@@ -595,8 +573,6 @@ void nldr_delete(struct nldr_object *nldr_obj)
 	struct ovly_sect *ovly_section;
 	struct ovly_sect *next;
 	u16 i;
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(nldr_obj);
 
 	nldr_obj->ldr_fxns.exit_fxn();
 	if (nldr_obj->rmm)
@@ -644,22 +620,6 @@ void nldr_delete(struct nldr_object *nldr_obj)
 }
 
 /*
- * ======== nldr_exit ========
- * Discontinue usage of NLDR module.
- */
-void nldr_exit(void)
-{
-	DBC_REQUIRE(refs > 0);
-
-	refs--;
-
-	if (refs == 0)
-		rmm_exit();
-
-	DBC_ENSURE(refs >= 0);
-}
-
-/*
  * ======== nldr_get_fxn_addr ========
  */
 int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
@@ -671,10 +631,6 @@ int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
 	bool status1 = false;
 	s32 i = 0;
 	struct lib_node root = { NULL, 0, NULL };
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(nldr_node_obj);
-	DBC_REQUIRE(addr != NULL);
-	DBC_REQUIRE(str_fxn != NULL);
 
 	nldr_obj = nldr_node_obj->nldr_obj;
 	/* Called from node_create(), node_delete(), or node_run(). */
@@ -690,7 +646,6 @@ int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
 			root = nldr_node_obj->delete_lib;
 			break;
 		default:
-			DBC_ASSERT(false);
 			break;
 		}
 	} else {
@@ -760,7 +715,6 @@ int nldr_get_rmm_manager(struct nldr_object *nldr,
 {
 	int status = 0;
 	struct nldr_object *nldr_obj = nldr;
-	DBC_REQUIRE(rmm_mgr != NULL);
 
 	if (nldr) {
 		*rmm_mgr = nldr_obj->rmm;
@@ -769,29 +723,10 @@ int nldr_get_rmm_manager(struct nldr_object *nldr,
 		status = -EFAULT;
 	}
 
-	DBC_ENSURE(!status || (rmm_mgr != NULL && *rmm_mgr == NULL));
-
 	return status;
 }
 
 /*
- * ======== nldr_init ========
- * Initialize the NLDR module.
- */
-bool nldr_init(void)
-{
-	DBC_REQUIRE(refs >= 0);
-
-	if (refs == 0)
-		rmm_init();
-
-	refs++;
-
-	DBC_ENSURE(refs > 0);
-	return true;
-}
-
-/*
  * ======== nldr_load ========
  */
 int nldr_load(struct nldr_nodeobject *nldr_node_obj,
@@ -801,9 +736,6 @@ int nldr_load(struct nldr_nodeobject *nldr_node_obj,
 	struct dsp_uuid lib_uuid;
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(nldr_node_obj);
-
 	nldr_obj = nldr_node_obj->nldr_obj;
 
 	if (nldr_node_obj->dynamic) {
@@ -839,7 +771,6 @@ int nldr_load(struct nldr_nodeobject *nldr_node_obj,
 			break;
 
 		default:
-			DBC_ASSERT(false);
 			break;
 		}
 	}
@@ -863,9 +794,6 @@ int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
 	struct lib_node *root_lib = NULL;
 	s32 i = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(nldr_node_obj);
-
 	if (nldr_node_obj != NULL) {
 		if (nldr_node_obj->dynamic) {
 			if (*nldr_node_obj->phase_split) {
@@ -889,7 +817,6 @@ int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
 				nldr_node_obj->pers_libs = 0;
 				break;
 			default:
-				DBC_ASSERT(false);
 				break;
 			}
 		} else {
@@ -929,7 +856,6 @@ static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
 	/* Find the node it belongs to */
 	for (i = 0; i < nldr_obj->ovly_nodes; i++) {
 		node_name = nldr_obj->ovly_table[i].node_name;
-		DBC_REQUIRE(node_name);
 		if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) {
 			/* Found the node */
 			break;
@@ -1018,8 +944,6 @@ static int add_ovly_node(struct dsp_uuid *uuid_obj,
 			/* Add node to table */
 			nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid =
 			    *uuid_obj;
-			DBC_REQUIRE(obj_def.obj_data.node_obj.ndb_props.
-				    ac_name);
 			len =
 			    strlen(obj_def.obj_data.node_obj.ndb_props.ac_name);
 			node_name = obj_def.obj_data.node_obj.ndb_props.ac_name;
@@ -1129,7 +1053,6 @@ static void free_sects(struct nldr_object *nldr_obj,
 		ret =
 		    rmm_free(nldr_obj->rmm, 0, ovly_section->sect_run_addr,
 			     ovly_section->size, true);
-		DBC_ASSERT(ret);
 		ovly_section = ovly_section->next_sect;
 		i++;
 	}
@@ -1249,7 +1172,6 @@ static int load_lib(struct nldr_nodeobject *nldr_node_obj,
 
 	if (depth > MAXDEPTH) {
 		/* Error */
-		DBC_ASSERT(false);
 	}
 	root->lib = NULL;
 	/* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
@@ -1312,7 +1234,6 @@ static int load_lib(struct nldr_nodeobject *nldr_node_obj,
 		    dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->dcd_mgr,
 					 &uuid, &nd_libs, &np_libs, phase);
 	}
-	DBC_ASSERT(nd_libs >= np_libs);
 	if (!status) {
 		if (!(*nldr_node_obj->phase_split))
 			np_libs = 0;
@@ -1474,7 +1395,6 @@ static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
 		}
 	}
 
-	DBC_ASSERT(i < nldr_obj->ovly_nodes);
 
 	if (!po_node) {
 		status = -ENOENT;
@@ -1500,7 +1420,6 @@ static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
 		break;
 
 	default:
-		DBC_ASSERT(false);
 		break;
 	}
 
@@ -1623,9 +1542,6 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
 	struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
 	bool mem_load_req = false;
 	int status = -ENOMEM;	/* Set to fail */
-	DBC_REQUIRE(hnode);
-	DBC_REQUIRE(mem_sect == DBLL_CODE || mem_sect == DBLL_DATA ||
-		    mem_sect == DBLL_BSS);
 	nldr_obj = hnode->nldr_obj;
 	rmm = nldr_obj->rmm;
 	/* Convert size to DSP words */
@@ -1651,7 +1567,6 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
 		mem_phase_bit = EXECUTEDATAFLAGBIT;
 		break;
 	default:
-		DBC_ASSERT(false);
 		break;
 	}
 	if (mem_sect == DBLL_CODE)
@@ -1670,11 +1585,9 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
 	/* Find an appropriate segment based on mem_sect */
 	if (segid == NULLID) {
 		/* No memory requirements of preferences */
-		DBC_ASSERT(!mem_load_req);
 		goto func_cont;
 	}
 	if (segid <= MAXSEGID) {
-		DBC_ASSERT(segid < nldr_obj->dload_segs);
 		/* Attempt to allocate from segid first. */
 		rmm_addr_obj->segid = segid;
 		status =
@@ -1685,7 +1598,6 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
 		}
 	} else {
 		/* segid > MAXSEGID ==> Internal or external memory */
-		DBC_ASSERT(segid == MEMINTERNALID || segid == MEMEXTERNALID);
 		/* Check for any internal or external memory segment,
 		 * depending on segid. */
 		mem_sect_type |= segid == MEMINTERNALID ?
@@ -1736,8 +1648,6 @@ static int remote_free(void **ref, u16 space, u32 dsp_address,
 	u32 word_size;
 	int status = -ENOMEM;	/* Set to fail */
 
-	DBC_REQUIRE(nldr_obj);
-
 	rmm = nldr_obj->rmm;
 
 	/* Convert size to DSP words */
@@ -1761,7 +1671,6 @@ static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
 	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
 	u16 i;
 
-	DBC_ASSERT(root != NULL);
 
 	/* Unload dependent libraries */
 	for (i = 0; i < root->dep_libs; i++)
@@ -1812,7 +1721,6 @@ static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
 		}
 	}
 
-	DBC_ASSERT(i < nldr_obj->ovly_nodes);
 
 	if (!po_node)
 		/* TODO: Should we print warning here? */
@@ -1839,14 +1747,11 @@ static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
 		other_alloc = po_node->other_sects;
 		break;
 	default:
-		DBC_ASSERT(false);
 		break;
 	}
-	DBC_ASSERT(ref_count && (*ref_count > 0));
 	if (ref_count && (*ref_count > 0)) {
 		*ref_count -= 1;
 		if (other_ref) {
-			DBC_ASSERT(*other_ref > 0);
 			*other_ref -= 1;
 		}
 	}
@@ -1897,9 +1802,6 @@ int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
 	bool status1 = false;
 	s32 i = 0;
 	struct lib_node root = { NULL, 0, NULL };
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(offset_output != NULL);
-	DBC_REQUIRE(sym_name != NULL);
 	pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node,
 		 sym_addr, offset_range, (u32) offset_output, sym_name);
 
@@ -1915,7 +1817,6 @@ int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
 			root = nldr_node->delete_lib;
 			break;
 		default:
-			DBC_ASSERT(false);
 			break;
 		}
 	} else {
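A note on the macros being removed throughout these hunks: DBC_REQUIRE, DBC_ENSURE and DBC_ASSERT come from the staging tree's <dspbridge/dbc.h>. A condensed sketch of that header (an assumption reconstructed from the pre-removal tree, not reproduced verbatim) shows why deleting the call sites is behavior-neutral for normal builds: unless CONFIG_TIDSPBRIDGE_BACKTRACE is enabled, all three macros expand to nothing.

/* Sketch of the removed Design-by-Contract macros (assumed, condensed
 * from include/dspbridge/dbc.h as it stood before this series). */
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
#define DBC_ASSERT(exp) \
	do { \
		if (!(exp)) \
			pr_err("%s, line %d: Assertion (" #exp ") failed.\n", \
			       __FILE__, __LINE__); \
	} while (0)
#define DBC_REQUIRE DBC_ASSERT	/* precondition check */
#define DBC_ENSURE  DBC_ASSERT	/* postcondition check */
#else
#define DBC_ASSERT(exp)
#define DBC_REQUIRE(exp)
#define DBC_ENSURE(exp)
#endif

In a production build the checks vanish at preprocessing time, so the only functional change in these files is the removal of the refs counters and the *_init()/*_exit() bookkeeping that existed solely to feed them.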
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index 5dadaa445ad9..7fb426c5251c 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -26,9 +26,6 @@
 /* ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
 
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
 /* ----------------------------------- OS Adaptation Layer */
 #include <dspbridge/memdefs.h>
 #include <dspbridge/proc.h>
@@ -162,7 +159,6 @@ struct node_mgr {
 	/* Loader properties */
 	struct nldr_object *nldr_obj;	/* Handle to loader */
 	struct node_ldr_fxns nldr_fxns;	/* Handle to loader functions */
-	bool loader_init;	/* Loader Init function succeeded? */
 };
 
 /*
@@ -264,16 +260,12 @@ static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
 static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
 		     u32 ul_num_bytes, u32 mem_space);
 
-static u32 refs;		/* module reference count */
-
 /* Dynamic loader functions. */
 static struct node_ldr_fxns nldr_fxns = {
 	nldr_allocate,
 	nldr_create,
 	nldr_delete,
-	nldr_exit,
 	nldr_get_fxn_addr,
-	nldr_init,
 	nldr_load,
 	nldr_unload,
 };
@@ -326,11 +318,6 @@ int node_allocate(struct proc_object *hprocessor,
 
 	void *node_res;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(hprocessor != NULL);
-	DBC_REQUIRE(noderes != NULL);
-	DBC_REQUIRE(node_uuid != NULL);
-
 	*noderes = NULL;
 
 	status = proc_get_processor_id(hprocessor, &proc_id);
@@ -673,7 +660,6 @@ func_cont:
 		drv_proc_node_update_heap_status(node_res, true);
 		drv_proc_node_update_status(node_res, true);
 	}
-	DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
 func_end:
 	dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
 		"node_res: %p status: 0x%x\n", __func__, hprocessor,
@@ -696,11 +682,6 @@ DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
 	bool set_info;
 	u32 proc_id;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(pbuffer != NULL);
-
-	DBC_REQUIRE(usize > 0);
-
 	if (!pnode)
 		status = -EFAULT;
 	else if (node_get_type(pnode) == NODE_DEVICE)
@@ -714,7 +695,6 @@ DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
 
 	status = proc_get_processor_id(pnode->processor, &proc_id);
 	if (proc_id != DSP_UNIT) {
-		DBC_ASSERT(NULL);
 		goto func_end;
 	}
 	/* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
@@ -782,8 +762,6 @@ int node_change_priority(struct node_object *hnode, s32 prio)
 	int status = 0;
 	u32 proc_id;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (!hnode || !hnode->node_mgr) {
 		status = -EFAULT;
 	} else {
@@ -854,7 +832,6 @@ int node_connect(struct node_object *node1, u32 stream1,
 	s8 chnl_mode;
 	u32 dw_length;
 	int status = 0;
-	DBC_REQUIRE(refs > 0);
 
 	if (!node1 || !node2)
 		return -EFAULT;
@@ -903,7 +880,6 @@ int node_connect(struct node_object *node1, u32 stream1,
 	if (node1_type != NODE_GPP) {
 		hnode_mgr = node1->node_mgr;
 	} else {
-		DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
 		hnode_mgr = node2->node_mgr;
 	}
 
@@ -982,9 +958,6 @@ int node_connect(struct node_object *node1, u32 stream1,
 			goto out_unlock;
 		}
 
-		DBC_ASSERT((node1_type == NODE_GPP) ||
-			   (node2_type == NODE_GPP));
-
 		chnl_mode = (node1_type == NODE_GPP) ?
 		    CHNL_MODETODSP : CHNL_MODEFROMDSP;
 
@@ -1139,7 +1112,6 @@ int node_create(struct node_object *hnode)
 	    omap_dspbridge_dev->dev.platform_data;
 #endif
 
-	DBC_REQUIRE(refs > 0);
 	if (!pnode) {
 		status = -EFAULT;
 		goto func_end;
@@ -1291,10 +1263,6 @@ int node_create_mgr(struct node_mgr **node_man,
 	int status = 0;
 	u8 dev_type;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(node_man != NULL);
-	DBC_REQUIRE(hdev_obj != NULL);
-
 	*node_man = NULL;
 	/* Allocate Node manager object */
 	node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
@@ -1366,7 +1334,6 @@ int node_create_mgr(struct node_mgr **node_man,
 	nldr_attrs_obj.write = mem_write;
 	nldr_attrs_obj.dsp_word_size = node_mgr_obj->dsp_word_size;
 	nldr_attrs_obj.dsp_mau_size = node_mgr_obj->dsp_mau_size;
-	node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.init();
 	status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj,
 						hdev_obj,
 						&nldr_attrs_obj);
@@ -1375,8 +1342,6 @@ int node_create_mgr(struct node_mgr **node_man,
 
 	*node_man = node_mgr_obj;
 
-	DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man));
-
 	return status;
 out_err:
 	delete_node_mgr(node_mgr_obj);
@@ -1409,7 +1374,6 @@ int node_delete(struct node_res_object *noderes,
 	void *node_res = noderes;
 
 	struct dsp_processorstate proc_state;
-	DBC_REQUIRE(refs > 0);
 
 	if (!pnode) {
 		status = -EFAULT;
@@ -1554,8 +1518,6 @@ func_end:
  */
 int node_delete_mgr(struct node_mgr *hnode_mgr)
 {
-	DBC_REQUIRE(refs > 0);
-
 	if (!hnode_mgr)
 		return -EFAULT;
 
@@ -1576,10 +1538,6 @@ int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
 	struct node_object *hnode;
 	u32 i = 0;
 	int status = 0;
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
-	DBC_REQUIRE(pu_num_nodes != NULL);
-	DBC_REQUIRE(pu_allocated != NULL);
 
 	if (!hnode_mgr) {
 		status = -EFAULT;
@@ -1605,20 +1563,6 @@ func_end:
 }
 
 /*
- * ======== node_exit ========
- * Purpose:
- *      Discontinue usage of NODE module.
- */
-void node_exit(void)
-{
-	DBC_REQUIRE(refs > 0);
-
-	refs--;
-
-	DBC_ENSURE(refs >= 0);
-}
-
-/*
  * ======== node_free_msg_buf ========
  * Purpose:
  *      Frees the message buffer.
@@ -1629,10 +1573,6 @@ int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
 	struct node_object *pnode = (struct node_object *)hnode;
 	int status = 0;
 	u32 proc_id;
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(pbuffer != NULL);
-	DBC_REQUIRE(pnode != NULL);
-	DBC_REQUIRE(pnode->xlator != NULL);
 
 	if (!hnode) {
 		status = -EFAULT;
@@ -1653,7 +1593,6 @@ int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
 			status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
 		}
 	} else {
-		DBC_ASSERT(NULL);	/* BUG */
 	}
 func_end:
 	return status;
@@ -1669,9 +1608,6 @@ int node_get_attr(struct node_object *hnode,
 		  struct dsp_nodeattr *pattr, u32 attr_size)
 {
 	struct node_mgr *hnode_mgr;
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(pattr != NULL);
-	DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));
 
 	if (!hnode)
 		return -EFAULT;
@@ -1713,9 +1649,6 @@ int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
 {
 	enum node_type node_type;
 	int status = -EINVAL;
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(dir == DSP_TONODE || dir == DSP_FROMNODE);
-	DBC_REQUIRE(chan_id != NULL);
 
 	if (!hnode) {
 		status = -EFAULT;
@@ -1734,7 +1667,6 @@ int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
 			}
 		}
 	} else {
-		DBC_ASSERT(dir == DSP_FROMNODE);
 		if (index < MAX_OUTPUTS(hnode)) {
 			if (hnode->outputs[index].type == HOSTCONNECT) {
 				*chan_id = hnode->outputs[index].dev_id;
@@ -1761,9 +1693,6 @@ int node_get_message(struct node_object *hnode,
 	struct dsp_processorstate proc_state;
 	struct proc_object *hprocessor;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(message != NULL);
-
 	if (!hnode) {
 		status = -EFAULT;
 		goto func_end;
@@ -1831,14 +1760,12 @@ int node_get_nldr_obj(struct node_mgr *hnode_mgr,
 {
 	int status = 0;
 	struct node_mgr *node_mgr_obj = hnode_mgr;
-	DBC_REQUIRE(nldr_ovlyobj != NULL);
 
 	if (!hnode_mgr)
 		status = -EFAULT;
 	else
 		*nldr_ovlyobj = node_mgr_obj->nldr_obj;
 
-	DBC_ENSURE(!status || (nldr_ovlyobj != NULL && *nldr_ovlyobj == NULL));
 	return status;
 }
 
@@ -1852,8 +1779,6 @@ int node_get_strm_mgr(struct node_object *hnode,
 {
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (!hnode)
 		status = -EFAULT;
 	else
@@ -1867,8 +1792,6 @@ int node_get_strm_mgr(struct node_object *hnode,
  */
 enum nldr_loadtype node_get_load_type(struct node_object *hnode)
 {
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(hnode);
 	if (!hnode) {
 		dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
 		return -1;
@@ -1884,8 +1807,6 @@ enum nldr_loadtype node_get_load_type(struct node_object *hnode)
  */
 u32 node_get_timeout(struct node_object *hnode)
 {
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(hnode);
 	if (!hnode) {
 		dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
 		return 0;
@@ -1915,20 +1836,6 @@ enum node_type node_get_type(struct node_object *hnode)
 }
 
 /*
- * ======== node_init ========
- * Purpose:
- *      Initialize the NODE module.
- */
-bool node_init(void)
-{
-	DBC_REQUIRE(refs >= 0);
-
-	refs++;
-
-	return true;
-}
-
-/*
  * ======== node_on_exit ========
  * Purpose:
  *      Gets called when RMS_EXIT is received for a node.
@@ -1970,8 +1877,6 @@ int node_pause(struct node_object *hnode)
 	struct dsp_processorstate proc_state;
 	struct proc_object *hprocessor;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (!hnode) {
 		status = -EFAULT;
 	} else {
@@ -2054,9 +1959,6 @@ int node_put_message(struct node_object *hnode,
 	struct dsp_processorstate proc_state;
 	struct proc_object *hprocessor;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(pmsg != NULL);
-
 	if (!hnode) {
 		status = -EFAULT;
 		goto func_end;
@@ -2146,9 +2048,6 @@ int node_register_notify(struct node_object *hnode, u32 event_mask,
 	struct bridge_drv_interface *intf_fxns;
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(hnotification != NULL);
-
 	if (!hnode) {
 		status = -EFAULT;
 	} else {
@@ -2207,8 +2106,6 @@ int node_run(struct node_object *hnode)
 	struct dsp_processorstate proc_state;
 	struct proc_object *hprocessor;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (!hnode) {
 		status = -EFAULT;
 		goto func_end;
@@ -2287,7 +2184,6 @@ int node_run(struct node_object *hnode)
 				     NODE_GET_PRIORITY(hnode));
 	} else {
 		/* We should never get here */
-		DBC_ASSERT(false);
 	}
 func_cont1:
 	/* Update node state. */
@@ -2326,9 +2222,6 @@ int node_terminate(struct node_object *hnode, int *pstatus)
 	struct deh_mgr *hdeh_mgr;
 	struct dsp_processorstate proc_state;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(pstatus != NULL);
-
 	if (!hnode || !hnode->node_mgr) {
 		status = -EFAULT;
 		goto func_end;
@@ -2610,9 +2503,6 @@ static void delete_node_mgr(struct node_mgr *hnode_mgr)
 		if (hnode_mgr->nldr_obj)
 			hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj);
 
-		if (hnode_mgr->loader_init)
-			hnode_mgr->nldr_fxns.exit();
-
 		kfree(hnode_mgr);
 	}
 }
@@ -2668,7 +2558,6 @@ static void fill_stream_connect(struct node_object *node1,
 		strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
 	} else {
 		/* GPP == > NODE */
-		DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
 		strm_index = node2->num_inputs + node2->num_outputs - 1;
 		strm2 = &(node2->stream_connect[strm_index]);
 		strm2->cb_struct = sizeof(struct dsp_streamconnect);
@@ -2748,9 +2637,6 @@ static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
 	char *pstr_fxn_name = NULL;
 	struct node_mgr *hnode_mgr = hnode->node_mgr;
 	int status = 0;
-	DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
-		    node_get_type(hnode) == NODE_DAISSOCKET ||
-		    node_get_type(hnode) == NODE_MESSAGE);
 
 	switch (phase) {
 	case CREATEPHASE:
@@ -2767,7 +2653,6 @@ static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
 		break;
 	default:
 		/* Should never get here */
-		DBC_ASSERT(false);
 		break;
 	}
 
@@ -2787,9 +2672,6 @@ void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
 {
 	u32 i;
 
-	DBC_REQUIRE(hnode);
-	DBC_REQUIRE(node_info != NULL);
-
 	node_info->cb_struct = sizeof(struct dsp_nodeinfo);
 	node_info->nb_node_database_props =
 	    hnode->dcd_props.obj_data.node_obj.ndb_props;
@@ -2848,9 +2730,7 @@ static int get_node_props(struct dcd_manager *hdcd_mgr,
 			       pmsg_args->max_msgs);
 	} else {
 		/* Copy device name */
-		DBC_REQUIRE(pndb_props->ac_name);
 		len = strlen(pndb_props->ac_name);
-		DBC_ASSERT(len < MAXDEVNAMELEN);
 		hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL);
 		if (hnode->str_dev_name == NULL) {
 			status = -ENOMEM;
@@ -2938,10 +2818,6 @@ int node_get_uuid_props(void *hprocessor,
 	struct dcd_nodeprops dcd_node_props;
 	struct dsp_processorstate proc_state;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(hprocessor != NULL);
-	DBC_REQUIRE(node_uuid != NULL);
-
 	if (hprocessor == NULL || node_uuid == NULL) {
 		status = -EFAULT;
 		goto func_end;
@@ -3063,8 +2939,6 @@ static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
 	/* Function interface to Bridge driver*/
 	struct bridge_drv_interface *intf_fxns;
 
-	DBC_REQUIRE(hnode);
-
 	hnode_mgr = hnode->node_mgr;
 
 	ul_size = ul_num_bytes / hnode_mgr->dsp_word_size;
@@ -3106,9 +2980,6 @@ static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
 	/* Function interface to Bridge driver */
 	struct bridge_drv_interface *intf_fxns;
 
-	DBC_REQUIRE(hnode);
-	DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
-
 	hnode_mgr = hnode->node_mgr;
 
 	ul_timeout = hnode->timeout;
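One detail worth calling out in the node.c hunks above: the nldr_fxns table is initialized positionally, so deleting the exit and init members from struct node_ldr_fxns and deleting the nldr_exit,/nldr_init, initializer lines must land in the same patch, or every later entry silently shifts to the wrong slot. A designated-initializer sketch (member names assumed to mirror the function names; the driver itself uses the positional form) avoids that hazard:

/* Hypothetical designated-initializer form: each pointer is bound to a
 * named member, so removing one entry cannot re-map the others. */
static struct node_ldr_fxns nldr_fxns = {
	.allocate     = nldr_allocate,
	.create       = nldr_create,
	.delete       = nldr_delete,
	.get_fxn_addr = nldr_get_fxn_addr,
	.load         = nldr_load,
	.unload       = nldr_unload,
};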
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index 242dd1399996..7e4f12f6be42 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -25,9 +25,6 @@
 /* ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
 
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
 /* ----------------------------------- OS Adaptation Layer */
 #include <dspbridge/ntfy.h>
 #include <dspbridge/sync.h>
@@ -101,8 +98,6 @@ struct proc_object {
 	struct list_head proc_list;
 };
 
-static u32 refs;
-
 DEFINE_MUTEX(proc_lock);	/* For critical sections */
 
 /* ----------------------------------- Function Prototypes */
@@ -281,9 +276,6 @@ proc_attach(u32 processor_id,
 	struct drv_data *drv_datap = dev_get_drvdata(bridge);
 	u8 dev_type;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(ph_processor != NULL);
-
 	if (pr_ctxt->processor) {
 		*ph_processor = pr_ctxt->processor;
 		return status;
@@ -382,10 +374,6 @@ proc_attach(u32 processor_id,
 		kfree(p_proc_object);
 	}
 func_end:
-	DBC_ENSURE((status == -EPERM && *ph_processor == NULL) ||
-		   (!status && p_proc_object) ||
-		   (status == 0 && p_proc_object));
-
 	return status;
 }
 
@@ -445,10 +433,6 @@ int proc_auto_start(struct cfg_devnode *dev_node_obj,
 	struct drv_data *drv_datap = dev_get_drvdata(bridge);
 	u8 dev_type;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(dev_node_obj != NULL);
-	DBC_REQUIRE(hdev_obj != NULL);
-
 	/* Create a Dummy PROC Object */
 	if (!drv_datap || !drv_datap->mgr_object) {
 		status = -ENODATA;
@@ -516,8 +500,6 @@ int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg)
 	struct proc_object *p_proc_object = hprocessor;
 	u32 timeout = 0;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (p_proc_object) {
 		/* intercept PWR deep sleep command */
 		if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
@@ -565,8 +547,6 @@ int proc_detach(struct process_context *pr_ctxt)
 	int status = 0;
 	struct proc_object *p_proc_object = NULL;
 
-	DBC_REQUIRE(refs > 0);
-
 	p_proc_object = (struct proc_object *)pr_ctxt->processor;
 
 	if (p_proc_object) {
@@ -607,11 +587,6 @@ int proc_enum_nodes(void *hprocessor, void **node_tab,
 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
 	struct node_mgr *hnode_mgr = NULL;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
-	DBC_REQUIRE(pu_num_nodes != NULL);
-	DBC_REQUIRE(pu_allocated != NULL);
-
 	if (p_proc_object) {
 		if (!(dev_get_node_manager(p_proc_object->dev_obj,
 					   &hnode_mgr))) {
@@ -768,8 +743,6 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
 	struct process_context *pr_ctxt = (struct process_context *) hprocessor;
 	struct dmm_map_object *map_obj;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (!pr_ctxt) {
 		status = -EFAULT;
 		goto err_out;
@@ -810,8 +783,6 @@ int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
 	struct process_context *pr_ctxt = (struct process_context *) hprocessor;
 	struct dmm_map_object *map_obj;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (!pr_ctxt) {
 		status = -EFAULT;
 		goto err_out;
@@ -884,10 +855,6 @@ int proc_get_resource_info(void *hprocessor, u32 resource_type,
 	struct rmm_target_obj *rmm = NULL;
 	struct io_mgr *hio_mgr = NULL;	/* IO manager handle */
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(resource_info != NULL);
-	DBC_REQUIRE(resource_info_size >= sizeof(struct dsp_resourceinfo));
-
 	if (!p_proc_object) {
 		status = -EFAULT;
 		goto func_end;
@@ -940,21 +907,6 @@ func_end:
 }
 
 /*
- * ======== proc_exit ========
- * Purpose:
- *      Decrement reference count, and free resources when reference count is
- *      0.
- */
-void proc_exit(void)
-{
-	DBC_REQUIRE(refs > 0);
-
-	refs--;
-
-	DBC_ENSURE(refs >= 0);
-}
-
-/*
  * ======== proc_get_dev_object ========
  * Purpose:
  *      Return the Dev Object handle for a given Processor.
@@ -966,9 +918,6 @@ int proc_get_dev_object(void *hprocessor,
 	int status = -EPERM;
 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(device_obj != NULL);
-
 	if (p_proc_object) {
 		*device_obj = p_proc_object->dev_obj;
 		status = 0;
@@ -977,9 +926,6 @@ int proc_get_dev_object(void *hprocessor,
 		status = -EFAULT;
 	}
 
-	DBC_ENSURE((!status && *device_obj != NULL) ||
-		   (status && *device_obj == NULL));
-
 	return status;
 }
 
@@ -996,10 +942,6 @@ int proc_get_state(void *hprocessor,
 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
 	int brd_status;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(proc_state_obj != NULL);
-	DBC_REQUIRE(state_info_size >= sizeof(struct dsp_processorstate));
-
 	if (p_proc_object) {
 		/* First, retrieve BRD state information */
 		status = (*p_proc_object->intf_fxns->brd_status)
@@ -1055,25 +997,6 @@ int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size)
 }
 
 /*
- * ======== proc_init ========
- * Purpose:
- *      Initialize PROC's private state, keeping a reference count on each call
- */
-bool proc_init(void)
-{
-	bool ret = true;
-
-	DBC_REQUIRE(refs >= 0);
-
-	if (ret)
-		refs++;
-
-	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
-	return ret;
-}
-
-/*
  * ======== proc_load ========
  * Purpose:
  *      Reset a processor and load a new base program image.
@@ -1111,10 +1034,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
 	    omap_dspbridge_dev->dev.platform_data;
 #endif
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(argc_index > 0);
-	DBC_REQUIRE(user_args != NULL);
-
 #ifdef OPT_LOAD_TIME_INSTRUMENTATION
 	do_gettimeofday(&tv1);
 #endif
@@ -1202,8 +1121,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
 			if (status) {
 				status = -EPERM;
 			} else {
-				DBC_ASSERT(p_proc_object->last_coff ==
-					   NULL);
 				/* Allocate memory for pszLastCoff */
 				p_proc_object->last_coff =
 				    kzalloc((strlen(user_args[0]) +
@@ -1226,7 +1143,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
 		if (!hmsg_mgr) {
 			status = msg_create(&hmsg_mgr, p_proc_object->dev_obj,
 					    (msg_onexit) node_on_exit);
-			DBC_ASSERT(!status);
 			dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr);
 		}
 	}
@@ -1322,7 +1238,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
 					strlen(pargv0) + 1);
 		else
 			status = -ENOMEM;
-		DBC_ASSERT(brd_state == BRD_LOADED);
 		}
 	}
 
@@ -1331,9 +1246,6 @@ func_end:
 		pr_err("%s: Processor failed to load\n", __func__);
 		proc_stop(p_proc_object);
 	}
-	DBC_ENSURE((!status
-		    && p_proc_object->proc_state == PROC_LOADED)
-		   || status);
 #ifdef OPT_LOAD_TIME_INSTRUMENTATION
 	do_gettimeofday(&tv2);
 	if (tv2.tv_usec < tv1.tv_usec) {
@@ -1443,9 +1355,6 @@ int proc_register_notify(void *hprocessor, u32 event_mask,
 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
 	struct deh_mgr *hdeh_mgr;
 
-	DBC_REQUIRE(hnotification != NULL);
-	DBC_REQUIRE(refs > 0);
-
 	/* Check processor handle */
 	if (!p_proc_object) {
 		status = -EFAULT;
@@ -1567,7 +1476,6 @@ int proc_start(void *hprocessor)
 	u32 dw_dsp_addr;	/* Loaded code's entry point. */
 	int brd_state;
 
-	DBC_REQUIRE(refs > 0);
 	if (!p_proc_object) {
 		status = -EFAULT;
 		goto func_end;
@@ -1616,7 +1524,6 @@ func_cont:
 		if (!((*p_proc_object->intf_fxns->brd_status)
 		      (p_proc_object->bridge_context, &brd_state))) {
 			pr_info("%s: dsp in running state\n", __func__);
-			DBC_ASSERT(brd_state != BRD_HIBERNATION);
 		}
 	} else {
 		pr_err("%s: Failed to start the dsp\n", __func__);
@@ -1624,8 +1531,6 @@ func_cont:
 	}
 
 func_end:
-	DBC_ENSURE((!status && p_proc_object->proc_state ==
-		    PROC_RUNNING) || status);
 	return status;
 }
 
@@ -1644,9 +1549,7 @@ int proc_stop(void *hprocessor)
 	u32 node_tab_size = 1;
 	u32 num_nodes = 0;
 	u32 nodes_allocated = 0;
-	int brd_state;
 
-	DBC_REQUIRE(refs > 0);
 	if (!p_proc_object) {
 		status = -EFAULT;
 		goto func_end;
@@ -1678,11 +1581,6 @@ int proc_stop(void *hprocessor)
 			msg_delete(hmsg_mgr);
 			dev_set_msg_mgr(p_proc_object->dev_obj, NULL);
 		}
-		if (!((*p_proc_object->
-		       intf_fxns->brd_status) (p_proc_object->
-					       bridge_context,
-					       &brd_state)))
-			DBC_ASSERT(brd_state == BRD_STOPPED);
 		}
 	} else {
 		pr_err("%s: Failed to stop the processor\n", __func__);
@@ -1820,10 +1718,6 @@ static int proc_monitor(struct proc_object *proc_obj)
 {
 	int status = -EPERM;
 	struct msg_mgr *hmsg_mgr;
-	int brd_state;
-
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(proc_obj);
 
 	/* This is needed only when Device is loaded when it is
 	 * already 'ACTIVE' */
@@ -1840,13 +1734,8 @@ static int proc_monitor(struct proc_object *proc_obj)
 	if (!((*proc_obj->intf_fxns->brd_monitor)
 	      (proc_obj->bridge_context))) {
 		status = 0;
-		if (!((*proc_obj->intf_fxns->brd_status)
-		      (proc_obj->bridge_context, &brd_state)))
-			DBC_ASSERT(brd_state == BRD_IDLE);
 	}
 
-	DBC_ENSURE((!status && brd_state == BRD_IDLE) ||
-		   status);
 	return status;
 }
 
@@ -1880,8 +1769,6 @@ static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
 {
 	char **pp_envp = new_envp;
 
-	DBC_REQUIRE(new_envp);
-
 	/* Prepend new environ var=value string */
 	*new_envp++ = sz_var;
 
@@ -1906,9 +1793,6 @@ int proc_notify_clients(void *proc, u32 events)
 	int status = 0;
 	struct proc_object *p_proc_object = (struct proc_object *)proc;
 
-	DBC_REQUIRE(p_proc_object);
-	DBC_REQUIRE(is_valid_proc_event(events));
-	DBC_REQUIRE(refs > 0);
 	if (!p_proc_object) {
 		status = -EFAULT;
 		goto func_end;
@@ -1930,9 +1814,6 @@ int proc_notify_all_clients(void *proc, u32 events)
 	int status = 0;
 	struct proc_object *p_proc_object = (struct proc_object *)proc;
 
-	DBC_REQUIRE(is_valid_proc_event(events));
-	DBC_REQUIRE(refs > 0);
-
 	if (!p_proc_object) {
 		status = -EFAULT;
 		goto func_end;
diff --git a/drivers/staging/tidspbridge/rmgr/rmm.c b/drivers/staging/tidspbridge/rmgr/rmm.c
index f3dc0ddbfacc..52187bd97729 100644
--- a/drivers/staging/tidspbridge/rmgr/rmm.c
+++ b/drivers/staging/tidspbridge/rmgr/rmm.c
@@ -46,9 +46,6 @@
 /* ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
 
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
 /* ----------------------------------- This */
 #include <dspbridge/rmm.h>
 
@@ -83,8 +80,6 @@ struct rmm_target_obj {
 	struct list_head ovly_list;	/* List of overlay memory in use */
 };
 
-static u32 refs;		/* module reference count */
-
 static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
 			u32 align, u32 *dsp_address);
 static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
@@ -101,12 +96,6 @@ int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
 	u32 addr;
 	int status = 0;
 
-	DBC_REQUIRE(target);
-	DBC_REQUIRE(dsp_address != NULL);
-	DBC_REQUIRE(size > 0);
-	DBC_REQUIRE(reserve || (target->num_segs > 0));
-	DBC_REQUIRE(refs > 0);
-
 	if (!reserve) {
 		if (!alloc_block(target, segid, size, align, dsp_address)) {
 			status = -ENOMEM;
@@ -170,9 +159,6 @@ int rmm_create(struct rmm_target_obj **target_obj,
 	s32 i;
 	int status = 0;
 
-	DBC_REQUIRE(target_obj != NULL);
-	DBC_REQUIRE(num_segs == 0 || seg_tab != NULL);
-
 	/* Allocate DBL target object */
 	target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
 
@@ -235,9 +221,6 @@ func_cont:
 
 	}
 
-	DBC_ENSURE((!status && *target_obj)
-		   || (status && *target_obj == NULL));
-
 	return status;
 }
 
@@ -251,8 +234,6 @@ void rmm_delete(struct rmm_target_obj *target)
 	struct rmm_header *next;
 	u32 i;
 
-	DBC_REQUIRE(target);
-
 	kfree(target->seg_tab);
 
 	list_for_each_entry_safe(sect, tmp, &target->ovly_list, list_elem) {
@@ -277,18 +258,6 @@ void rmm_delete(struct rmm_target_obj *target)
 }
 
 /*
- * ======== rmm_exit ========
- */
-void rmm_exit(void)
-{
-	DBC_REQUIRE(refs > 0);
-
-	refs--;
-
-	DBC_ENSURE(refs >= 0);
-}
-
-/*
  * ======== rmm_free ========
  */
 bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
@@ -297,15 +266,6 @@ bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
 	struct rmm_ovly_sect *sect, *tmp;
 	bool ret = false;
 
-	DBC_REQUIRE(target);
-
-	DBC_REQUIRE(reserved || segid < target->num_segs);
-	DBC_REQUIRE(reserved || (dsp_addr >= target->seg_tab[segid].base &&
-				 (dsp_addr + size) <= (target->seg_tab[segid].
-						       base +
-						       target->seg_tab[segid].
-						       length)));
-
 	/*
 	 * Free or unreserve memory.
 	 */
@@ -319,7 +279,6 @@ bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
 		list_for_each_entry_safe(sect, tmp, &target->ovly_list,
 					 list_elem) {
 			if (dsp_addr == sect->addr) {
-				DBC_ASSERT(size == sect->size);
 				/* Remove from list */
 				list_del(&sect->list_elem);
 				kfree(sect);
@@ -331,18 +290,6 @@ bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
 }
 
 /*
- * ======== rmm_init ========
- */
-bool rmm_init(void)
-{
-	DBC_REQUIRE(refs >= 0);
-
-	refs++;
-
-	return true;
-}
-
-/*
  * ======== rmm_stat ========
  */
 bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
@@ -354,9 +301,6 @@ bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
 	u32 total_free_size = 0;
 	u32 free_blocks = 0;
 
-	DBC_REQUIRE(mem_stat_buf != NULL);
-	DBC_ASSERT(target != NULL);
-
 	if ((u32) segid < target->num_segs) {
 		head = target->free_list[segid];
 
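The rmm.c hunks above remove one more copy of the refcounted init/exit boilerplate that every rmgr module carried. Condensed from the removed lines themselves (rmm_init/rmm_exit, together with their callers nldr_init/nldr_exit earlier in this diff), the pattern was:

/* Condensed from the removed code: refs was only ever read by the DBC
 * checks, so these functions had no observable effect in normal builds. */
static u32 refs;	/* module reference count */

bool rmm_init(void)	/* called from nldr_init() when its own refs was 0 */
{
	refs++;
	return true;	/* no failure path */
}

void rmm_exit(void)	/* called from nldr_exit() when its refs drops to 0 */
{
	refs--;
}

Since object lifetime is handled entirely by the rmm_create()/rmm_delete() pair, dropping the module-level counters loses nothing.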
diff --git a/drivers/staging/tidspbridge/rmgr/strm.c b/drivers/staging/tidspbridge/rmgr/strm.c
index 3fae0e9f511e..34cc934e0c3d 100644
--- a/drivers/staging/tidspbridge/rmgr/strm.c
+++ b/drivers/staging/tidspbridge/rmgr/strm.c
@@ -24,9 +24,6 @@
 /* ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
 
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
 /* ----------------------------------- OS Adaptation Layer */
 #include <dspbridge/sync.h>
 
@@ -84,9 +81,6 @@ struct strm_object {
 	struct cmm_xlatorobject *xlator;
 };
 
-/* ----------------------------------- Globals */
-static u32 refs;		/* module reference count */
-
 /* ----------------------------------- Function Prototypes */
 static int delete_strm(struct strm_object *stream_obj);
 
@@ -104,9 +98,6 @@ int strm_allocate_buffer(struct strm_res_object *strmres, u32 usize,
 	u32 i;
 	struct strm_object *stream_obj = strmres->stream;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(ap_buffer != NULL);
-
 	if (stream_obj) {
 		/*
 		 * Allocate from segment specified at time of stream open.
@@ -122,7 +113,6 @@ int strm_allocate_buffer(struct strm_res_object *strmres, u32 usize,
 		goto func_end;
 
 	for (i = 0; i < num_bufs; i++) {
-		DBC_ASSERT(stream_obj->xlator != NULL);
 		(void)cmm_xlator_alloc_buf(stream_obj->xlator, &ap_buffer[i],
 					   usize);
 		if (ap_buffer[i] == NULL) {
@@ -156,8 +146,6 @@ int strm_close(struct strm_res_object *strmres,
 	int status = 0;
 	struct strm_object *stream_obj = strmres->stream;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (!stream_obj) {
 		status = -EFAULT;
 	} else {
@@ -167,7 +155,6 @@ int strm_close(struct strm_res_object *strmres,
 		status =
 		    (*intf_fxns->chnl_get_info) (stream_obj->chnl_obj,
 						 &chnl_info_obj);
-		DBC_ASSERT(!status);
 
 		if (chnl_info_obj.cio_cs > 0 || chnl_info_obj.cio_reqs > 0)
 			status = -EPIPE;
@@ -180,9 +167,6 @@ int strm_close(struct strm_res_object *strmres,
 
 	idr_remove(pr_ctxt->stream_id, strmres->id);
 func_end:
-	DBC_ENSURE(status == 0 || status == -EFAULT ||
-		   status == -EPIPE || status == -EPERM);
-
 	dev_dbg(bridge, "%s: stream_obj: %p, status 0x%x\n", __func__,
 		stream_obj, status);
 	return status;
@@ -199,10 +183,6 @@ int strm_create(struct strm_mgr **strm_man,
 	struct strm_mgr *strm_mgr_obj;
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(strm_man != NULL);
-	DBC_REQUIRE(dev_obj != NULL);
-
 	*strm_man = NULL;
 	/* Allocate STRM manager object */
 	strm_mgr_obj = kzalloc(sizeof(struct strm_mgr), GFP_KERNEL);
@@ -217,7 +197,6 @@ int strm_create(struct strm_mgr **strm_man,
 		if (!status) {
 			(void)dev_get_intf_fxns(dev_obj,
 						&(strm_mgr_obj->intf_fxns));
-			DBC_ASSERT(strm_mgr_obj->intf_fxns != NULL);
 		}
 	}
 
@@ -226,8 +205,6 @@ int strm_create(struct strm_mgr **strm_man,
 	else
 		kfree(strm_mgr_obj);
 
-	DBC_ENSURE((!status && *strm_man) || (status && *strm_man == NULL));
-
 	return status;
 }
 
@@ -238,27 +215,10 @@ int strm_create(struct strm_mgr **strm_man,
  */
 void strm_delete(struct strm_mgr *strm_mgr_obj)
 {
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(strm_mgr_obj);
-
 	kfree(strm_mgr_obj);
 }
 
 /*
- * ======== strm_exit ========
- * Purpose:
- *      Discontinue usage of STRM module.
- */
-void strm_exit(void)
-{
-	DBC_REQUIRE(refs > 0);
-
-	refs--;
-
-	DBC_ENSURE(refs >= 0);
-}
-
-/*
  * ======== strm_free_buffer ========
 * Purpose:
 *      Frees the buffers allocated for a stream.
@@ -270,15 +230,11 @@ int strm_free_buffer(struct strm_res_object *strmres, u8 ** ap_buffer,
 	u32 i = 0;
 	struct strm_object *stream_obj = strmres->stream;
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(ap_buffer != NULL);
-
 	if (!stream_obj)
 		status = -EFAULT;
 
 	if (!status) {
 		for (i = 0; i < num_bufs; i++) {
-			DBC_ASSERT(stream_obj->xlator != NULL);
 			status =
 			    cmm_xlator_free_buf(stream_obj->xlator,
 						ap_buffer[i]);
@@ -306,10 +262,6 @@ int strm_get_info(struct strm_object *stream_obj,
 	int status = 0;
 	void *virt_base = NULL;	/* NULL if no SM used */
 
-	DBC_REQUIRE(refs > 0);
-	DBC_REQUIRE(stream_info != NULL);
-	DBC_REQUIRE(stream_info_size >= sizeof(struct stream_info));
-
 	if (!stream_obj) {
 		status = -EFAULT;
 	} else {
@@ -330,7 +282,6 @@ int strm_get_info(struct strm_object *stream_obj,
 
 	if (stream_obj->xlator) {
 		/* We have a translator */
-		DBC_ASSERT(stream_obj->segment_id > 0);
 		cmm_xlator_info(stream_obj->xlator, (u8 **) &virt_base, 0,
 				stream_obj->segment_id, false);
 	}
@@ -370,8 +321,6 @@ int strm_idle(struct strm_object *stream_obj, bool flush_data)
 	struct bridge_drv_interface *intf_fxns;
 	int status = 0;
 
-	DBC_REQUIRE(refs > 0);
-
 	if (!stream_obj) {
 		status = -EFAULT;
 	} else {
@@ -388,25 +337,6 @@ int strm_idle(struct strm_object *stream_obj, bool flush_data)
 }
 
 /*
- * ======== strm_init ========
- * Purpose:
- *      Initialize the STRM module.
- */
-bool strm_init(void)
-{
-	bool ret = true;
-
-	DBC_REQUIRE(refs >= 0);
-
-	if (ret)
-		refs++;
-
-	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
-	return ret;
-}
-
-/*
  * ======== strm_issue ========
  * Purpose:
412 * Issues a buffer on a stream 342 * Issues a buffer on a stream
@@ -418,9 +348,6 @@ int strm_issue(struct strm_object *stream_obj, u8 *pbuf, u32 ul_bytes,
418 int status = 0; 348 int status = 0;
419 void *tmp_buf = NULL; 349 void *tmp_buf = NULL;
420 350
421 DBC_REQUIRE(refs > 0);
422 DBC_REQUIRE(pbuf != NULL);
423
424 if (!stream_obj) { 351 if (!stream_obj) {
425 status = -EFAULT; 352 status = -EFAULT;
426 } else { 353 } else {
@@ -471,9 +398,6 @@ int strm_open(struct node_object *hnode, u32 dir, u32 index,
471 398
472 void *stream_res; 399 void *stream_res;
473 400
474 DBC_REQUIRE(refs > 0);
475 DBC_REQUIRE(strmres != NULL);
476 DBC_REQUIRE(pattr != NULL);
477 *strmres = NULL; 401 *strmres = NULL;
478 if (dir != DSP_TONODE && dir != DSP_FROMNODE) { 402 if (dir != DSP_TONODE && dir != DSP_FROMNODE) {
479 status = -EPERM; 403 status = -EPERM;
@@ -536,14 +460,12 @@ int strm_open(struct node_object *hnode, u32 dir, u32 index,
536 goto func_cont; 460 goto func_cont;
537 461
538 /* No System DMA */ 462 /* No System DMA */
539 DBC_ASSERT(strm_obj->strm_mode != STRMMODE_LDMA);
540 /* Get the shared mem mgr for this streams dev object */ 463 /* Get the shared mem mgr for this streams dev object */
541 status = dev_get_cmm_mgr(strm_mgr_obj->dev_obj, &hcmm_mgr); 464 status = dev_get_cmm_mgr(strm_mgr_obj->dev_obj, &hcmm_mgr);
542 if (!status) { 465 if (!status) {
543 /*Allocate a SM addr translator for this strm. */ 466 /*Allocate a SM addr translator for this strm. */
544 status = cmm_xlator_create(&strm_obj->xlator, hcmm_mgr, NULL); 467 status = cmm_xlator_create(&strm_obj->xlator, hcmm_mgr, NULL);
545 if (!status) { 468 if (!status) {
546 DBC_ASSERT(strm_obj->segment_id > 0);
547 /* Set translators Virt Addr attributes */ 469 /* Set translators Virt Addr attributes */
548 status = cmm_xlator_info(strm_obj->xlator, 470 status = cmm_xlator_info(strm_obj->xlator,
549 (u8 **) &pattr->virt_base, 471 (u8 **) &pattr->virt_base,
@@ -575,10 +497,6 @@ func_cont:
575 * strm_mgr_obj->chnl_mgr better be valid or we 497 * strm_mgr_obj->chnl_mgr better be valid or we
576 * assert here), and then return -EPERM. 498 * assert here), and then return -EPERM.
577 */ 499 */
578 DBC_ASSERT(status == -ENOSR ||
579 status == -ECHRNG ||
580 status == -EALREADY ||
581 status == -EIO);
582 status = -EPERM; 500 status = -EPERM;
583 } 501 }
584 } 502 }
@@ -594,12 +512,6 @@ func_cont:
594 (void)delete_strm(strm_obj); 512 (void)delete_strm(strm_obj);
595 } 513 }
596 514
597 /* ensure we return a documented error code */
598 DBC_ENSURE((!status && strm_obj) ||
599 (*strmres == NULL && (status == -EFAULT ||
600 status == -EPERM
601 || status == -EINVAL)));
602
603 dev_dbg(bridge, "%s: hnode: %p dir: 0x%x index: 0x%x pattr: %p " 515 dev_dbg(bridge, "%s: hnode: %p dir: 0x%x index: 0x%x pattr: %p "
604 "strmres: %p status: 0x%x\n", __func__, 516 "strmres: %p status: 0x%x\n", __func__,
605 hnode, dir, index, pattr, strmres, status); 517 hnode, dir, index, pattr, strmres, status);
@@ -619,11 +531,6 @@ int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr,
619 int status = 0; 531 int status = 0;
620 void *tmp_buf = NULL; 532 void *tmp_buf = NULL;
621 533
622 DBC_REQUIRE(refs > 0);
623 DBC_REQUIRE(buf_ptr != NULL);
624 DBC_REQUIRE(nbytes != NULL);
625 DBC_REQUIRE(pdw_arg != NULL);
626
627 if (!stream_obj) { 534 if (!stream_obj) {
628 status = -EFAULT; 535 status = -EFAULT;
629 goto func_end; 536 goto func_end;
@@ -679,11 +586,6 @@ int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr,
679 *buf_ptr = chnl_ioc_obj.buf; 586 *buf_ptr = chnl_ioc_obj.buf;
680 } 587 }
681func_end: 588func_end:
682 /* ensure we return a documented return code */
683 DBC_ENSURE(!status || status == -EFAULT ||
684 status == -ETIME || status == -ESRCH ||
685 status == -EPERM);
686
687 dev_dbg(bridge, "%s: stream_obj: %p buf_ptr: %p nbytes: %p " 589 dev_dbg(bridge, "%s: stream_obj: %p buf_ptr: %p nbytes: %p "
688 "pdw_arg: %p status 0x%x\n", __func__, stream_obj, 590 "pdw_arg: %p status 0x%x\n", __func__, stream_obj,
689 buf_ptr, nbytes, pdw_arg, status); 591 buf_ptr, nbytes, pdw_arg, status);
@@ -702,9 +604,6 @@ int strm_register_notify(struct strm_object *stream_obj, u32 event_mask,
702 struct bridge_drv_interface *intf_fxns; 604 struct bridge_drv_interface *intf_fxns;
703 int status = 0; 605 int status = 0;
704 606
705 DBC_REQUIRE(refs > 0);
706 DBC_REQUIRE(hnotification != NULL);
707
708 if (!stream_obj) { 607 if (!stream_obj) {
709 status = -EFAULT; 608 status = -EFAULT;
710 } else if ((event_mask & ~((DSP_STREAMIOCOMPLETION) | 609 } else if ((event_mask & ~((DSP_STREAMIOCOMPLETION) |
@@ -725,10 +624,7 @@ int strm_register_notify(struct strm_object *stream_obj, u32 event_mask,
725 notify_type, 624 notify_type,
726 hnotification); 625 hnotification);
727 } 626 }
728 /* ensure we return a documented return code */ 627
729 DBC_ENSURE(!status || status == -EFAULT ||
730 status == -ETIME || status == -ESRCH ||
731 status == -ENOSYS || status == -EPERM);
732 return status; 628 return status;
733} 629}
734 630
@@ -747,11 +643,6 @@ int strm_select(struct strm_object **strm_tab, u32 strms,
747 u32 i; 643 u32 i;
748 int status = 0; 644 int status = 0;
749 645
750 DBC_REQUIRE(refs > 0);
751 DBC_REQUIRE(strm_tab != NULL);
752 DBC_REQUIRE(pmask != NULL);
753 DBC_REQUIRE(strms > 0);
754
755 *pmask = 0; 646 *pmask = 0;
756 for (i = 0; i < strms; i++) { 647 for (i = 0; i < strms; i++) {
757 if (!strm_tab[i]) { 648 if (!strm_tab[i]) {
@@ -811,9 +702,6 @@ int strm_select(struct strm_object **strm_tab, u32 strms,
811func_end: 702func_end:
812 kfree(sync_events); 703 kfree(sync_events);
813 704
814 DBC_ENSURE((!status && (*pmask != 0 || utimeout == 0)) ||
815 (status && *pmask == 0));
816
817 return status; 705 return status;
818} 706}
819 707
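The deletions above strip tidspbridge's Design-by-Contract checks along with the strm_init()/strm_exit() pair, whose refs counter existed mainly to satisfy the DBC_REQUIRE(refs > 0) preconditions. For context, the DBC macros are printk-backed assertions that never alter control flow, so dropping them is behaviorally neutral; a minimal sketch, assuming the usual dbc.h definitions (not shown in this diff):

#ifdef CONFIG_TIDSPBRIDGE_DEBUG
#define DBC_ASSERT(exp) \
	do { \
		if (!(exp)) \
			pr_err("%s, line %d: Assertion (" #exp ") failed\n", \
			       __FILE__, __LINE__); \
	} while (0)
#define DBC_REQUIRE DBC_ASSERT	/* precondition on entry */
#define DBC_ENSURE DBC_ASSERT	/* postcondition on exit */
#else
#define DBC_ASSERT(exp)
#define DBC_REQUIRE(exp)
#define DBC_ENSURE(exp)
#endif

Since a failed check only logs and the function proceeds anyway, the error paths that the DBC_ENSURE lines were documenting remain exactly as before.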
diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h
index d4073684eacd..a73e437ec215 100644
--- a/drivers/staging/usbip/stub.h
+++ b/drivers/staging/usbip/stub.h
@@ -35,7 +35,6 @@
35struct stub_device { 35struct stub_device {
36 struct usb_interface *interface; 36 struct usb_interface *interface;
37 struct usb_device *udev; 37 struct usb_device *udev;
38 struct list_head list;
39 38
40 struct usbip_device ud; 39 struct usbip_device ud;
41 __u32 devid; 40 __u32 devid;
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 03420e25d9c6..fa870e3f7f6a 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -297,7 +297,6 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev,
297 sdev->devid = (busnum << 16) | devnum; 297 sdev->devid = (busnum << 16) | devnum;
298 sdev->ud.side = USBIP_STUB; 298 sdev->ud.side = USBIP_STUB;
299 sdev->ud.status = SDEV_ST_AVAILABLE; 299 sdev->ud.status = SDEV_ST_AVAILABLE;
300 /* sdev->ud.lock = SPIN_LOCK_UNLOCKED; */
301 spin_lock_init(&sdev->ud.lock); 300 spin_lock_init(&sdev->ud.lock);
302 sdev->ud.tcp_socket = NULL; 301 sdev->ud.tcp_socket = NULL;
303 302
@@ -306,7 +305,6 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev,
306 INIT_LIST_HEAD(&sdev->priv_free); 305 INIT_LIST_HEAD(&sdev->priv_free);
307 INIT_LIST_HEAD(&sdev->unlink_free); 306 INIT_LIST_HEAD(&sdev->unlink_free);
308 INIT_LIST_HEAD(&sdev->unlink_tx); 307 INIT_LIST_HEAD(&sdev->unlink_tx);
309 /* sdev->priv_lock = SPIN_LOCK_UNLOCKED; */
310 spin_lock_init(&sdev->priv_lock); 308 spin_lock_init(&sdev->priv_lock);
311 309
312 init_waitqueue_head(&sdev->tx_waitq); 310 init_waitqueue_head(&sdev->tx_waitq);
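The two comments deleted above are leftovers from the old static initializer: SPIN_LOCK_UNLOCKED was only ever correct for locks with static storage duration and has since been removed from the kernel. Locks embedded in dynamically allocated objects must use the runtime initializer, as stub_device_alloc() already does; a minimal sketch of the pattern:

	struct stub_device *sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);

	if (!sdev)
		return NULL;
	/* runtime init is mandatory for dynamic locks (it also gives
	 * lockdep a per-lock class to track) */
	spin_lock_init(&sdev->ud.lock);
	spin_lock_init(&sdev->priv_lock);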
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 27ac363d1cfa..1d5b3fc62160 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -367,15 +367,6 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
367 } 367 }
368 368
369 epd = &ep->desc; 369 epd = &ep->desc;
370#if 0
371 /* epnum 0 is always control */
372 if (epnum == 0) {
373 if (dir == USBIP_DIR_OUT)
374 return usb_sndctrlpipe(udev, 0);
375 else
376 return usb_rcvctrlpipe(udev, 0);
377 }
378#endif
379 if (usb_endpoint_xfer_control(epd)) { 370 if (usb_endpoint_xfer_control(epd)) {
380 if (dir == USBIP_DIR_OUT) 371 if (dir == USBIP_DIR_OUT)
381 return usb_sndctrlpipe(udev, epnum); 372 return usb_sndctrlpipe(udev, epnum);
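The #if 0 block removed above special-cased endpoint 0, but endpoint 0 carries a control descriptor by definition, so the generic descriptor test that survives already produces the right pipe for it. A condensed sketch of the surviving logic:

	/* epnum 0 always matches this test, covering the removed branch */
	if (usb_endpoint_xfer_control(&ep->desc))
		return (dir == USBIP_DIR_OUT) ?
			usb_sndctrlpipe(udev, epnum) :
			usb_rcvctrlpipe(udev, epnum);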
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index d93e7f1f7973..70f230269329 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -735,26 +735,25 @@ EXPORT_SYMBOL_GPL(usbip_recv_iso);
735 * buffer and iso packets need to be stored and be in proper endian in urb 735 * buffer and iso packets need to be stored and be in proper endian in urb
736 * before calling this function 736 * before calling this function
737 */ 737 */
738int usbip_pad_iso(struct usbip_device *ud, struct urb *urb) 738void usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
739{ 739{
740 int np = urb->number_of_packets; 740 int np = urb->number_of_packets;
741 int i; 741 int i;
742 int ret;
743 int actualoffset = urb->actual_length; 742 int actualoffset = urb->actual_length;
744 743
745 if (!usb_pipeisoc(urb->pipe)) 744 if (!usb_pipeisoc(urb->pipe))
746 return 0; 745 return;
747 746
748 /* if no packets or length of data is 0, then nothing to unpack */ 747 /* if no packets or length of data is 0, then nothing to unpack */
749 if (np == 0 || urb->actual_length == 0) 748 if (np == 0 || urb->actual_length == 0)
750 return 0; 749 return;
751 750
752 /* 751 /*
753 * if actual_length is transfer_buffer_length then no padding is 752 * if actual_length is transfer_buffer_length then no padding is
754 * present. 753 * present.
755 */ 754 */
756 if (urb->actual_length == urb->transfer_buffer_length) 755 if (urb->actual_length == urb->transfer_buffer_length)
757 return 0; 756 return;
758 757
759 /* 758 /*
760 * loop over all packets from last to first (to prevent overwriting 759 * loop over all packets from last to first (to prevent overwriting
@@ -766,8 +765,6 @@ int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
766 urb->transfer_buffer + actualoffset, 765 urb->transfer_buffer + actualoffset,
767 urb->iso_frame_desc[i].actual_length); 766 urb->iso_frame_desc[i].actual_length);
768 } 767 }
769
770 return ret;
771} 768}
772EXPORT_SYMBOL_GPL(usbip_pad_iso); 769EXPORT_SYMBOL_GPL(usbip_pad_iso);
773 770
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index b8f8c48b8a72..c7b888ca54f5 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -306,7 +306,7 @@ void usbip_header_correct_endian(struct usbip_header *pdu, int send);
306void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen); 306void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen);
307/* some members of urb must be substituted before. */ 307/* some members of urb must be substituted before. */
308int usbip_recv_iso(struct usbip_device *ud, struct urb *urb); 308int usbip_recv_iso(struct usbip_device *ud, struct urb *urb);
309int usbip_pad_iso(struct usbip_device *ud, struct urb *urb); 309void usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
310int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb); 310int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb);
311 311
312/* usbip_event.c */ 312/* usbip_event.c */
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 2ee97e2095b0..dca9bf11f0c2 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -386,29 +386,6 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
386 dum->port_status[rhport] |= 386 dum->port_status[rhport] |=
387 USB_PORT_STAT_ENABLE; 387 USB_PORT_STAT_ENABLE;
388 } 388 }
389#if 0
390 if (dum->driver) {
391 dum->port_status[rhport] |=
392 USB_PORT_STAT_ENABLE;
393 /* give it the best speed we agree on */
394 dum->gadget.speed = dum->driver->speed;
395 dum->gadget.ep0->maxpacket = 64;
396 switch (dum->gadget.speed) {
397 case USB_SPEED_HIGH:
398 dum->port_status[rhport] |=
399 USB_PORT_STAT_HIGH_SPEED;
400 break;
401 case USB_SPEED_LOW:
402 dum->gadget.ep0->maxpacket = 8;
403 dum->port_status[rhport] |=
404 USB_PORT_STAT_LOW_SPEED;
405 break;
406 default:
407 dum->gadget.speed = USB_SPEED_FULL;
408 break;
409 }
410 }
411#endif
412 } 389 }
413 ((u16 *) buf)[0] = cpu_to_le16(dum->port_status[rhport]); 390 ((u16 *) buf)[0] = cpu_to_le16(dum->port_status[rhport]);
414 ((u16 *) buf)[1] = cpu_to_le16(dum->port_status[rhport] >> 16); 391 ((u16 *) buf)[1] = cpu_to_le16(dum->port_status[rhport] >> 16);
@@ -425,15 +402,6 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
425 case USB_PORT_FEAT_SUSPEND: 402 case USB_PORT_FEAT_SUSPEND:
426 usbip_dbg_vhci_rh(" SetPortFeature: " 403 usbip_dbg_vhci_rh(" SetPortFeature: "
427 "USB_PORT_FEAT_SUSPEND\n"); 404 "USB_PORT_FEAT_SUSPEND\n");
428#if 0
429 dum->port_status[rhport] |=
430 (1 << USB_PORT_FEAT_SUSPEND);
431 if (dum->driver->suspend) {
432 spin_unlock(&dum->lock);
433 dum->driver->suspend(&dum->gadget);
434 spin_lock(&dum->lock);
435 }
436#endif
437 break; 405 break;
438 case USB_PORT_FEAT_RESET: 406 case USB_PORT_FEAT_RESET:
439 usbip_dbg_vhci_rh(" SetPortFeature: " 407 usbip_dbg_vhci_rh(" SetPortFeature: "
@@ -444,13 +412,6 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
444 ~(USB_PORT_STAT_ENABLE | 412 ~(USB_PORT_STAT_ENABLE |
445 USB_PORT_STAT_LOW_SPEED | 413 USB_PORT_STAT_LOW_SPEED |
446 USB_PORT_STAT_HIGH_SPEED); 414 USB_PORT_STAT_HIGH_SPEED);
447#if 0
448 if (dum->driver) {
449 dev_dbg(hardware, "disconnect\n");
450 stop_activity(dum, dum->driver);
451 }
452#endif
453
454 /* FIXME test that code path! */ 415 /* FIXME test that code path! */
455 } 416 }
456 /* 50msec reset signaling */ 417 /* 50msec reset signaling */
@@ -934,14 +895,12 @@ static void vhci_device_init(struct vhci_device *vdev)
934 895
935 vdev->ud.side = USBIP_VHCI; 896 vdev->ud.side = USBIP_VHCI;
936 vdev->ud.status = VDEV_ST_NULL; 897 vdev->ud.status = VDEV_ST_NULL;
937 /* vdev->ud.lock = SPIN_LOCK_UNLOCKED; */
938 spin_lock_init(&vdev->ud.lock); 898 spin_lock_init(&vdev->ud.lock);
939 899
940 INIT_LIST_HEAD(&vdev->priv_rx); 900 INIT_LIST_HEAD(&vdev->priv_rx);
941 INIT_LIST_HEAD(&vdev->priv_tx); 901 INIT_LIST_HEAD(&vdev->priv_tx);
942 INIT_LIST_HEAD(&vdev->unlink_tx); 902 INIT_LIST_HEAD(&vdev->unlink_tx);
943 INIT_LIST_HEAD(&vdev->unlink_rx); 903 INIT_LIST_HEAD(&vdev->unlink_rx);
944 /* vdev->priv_lock = SPIN_LOCK_UNLOCKED; */
945 spin_lock_init(&vdev->priv_lock); 904 spin_lock_init(&vdev->priv_lock);
946 905
947 init_waitqueue_head(&vdev->waitq_tx); 906 init_waitqueue_head(&vdev->waitq_tx);
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 3f511b47563d..f5fba7320c5a 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -94,8 +94,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
94 return; 94 return;
95 95
96 /* restore the padding in iso packets */ 96 /* restore the padding in iso packets */
97 if (usbip_pad_iso(ud, urb) < 0) 97 usbip_pad_iso(ud, urb);
98 return;
99 98
100 if (usbip_dbg_flag_vhci_rx) 99 if (usbip_dbg_flag_vhci_rx)
101 usbip_dump_urb(urb); 100 usbip_dump_urb(urb);
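The int-to-void conversion is consistent across the three files above: every early exit in usbip_pad_iso() was a bare return of zero, and the ret it returned on the success path appears never to have been assigned, so callers could not act on the value anyway. With the change, vhci_recv_ret_submit() simply calls it unconditionally. The post-change shape, condensed into a sketch (loop bounds assumed from the surrounding context, not shown verbatim in the hunks):

void usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
{
	int i;
	int actualoffset = urb->actual_length;

	if (!usb_pipeisoc(urb->pipe))
		return;
	/* nothing to unpack, or no padding was stripped */
	if (urb->number_of_packets == 0 || urb->actual_length == 0 ||
	    urb->actual_length == urb->transfer_buffer_length)
		return;

	/* walk packets last to first so data still waiting to be
	 * relocated is never overwritten */
	for (i = urb->number_of_packets - 1; i > 0; i--) {
		actualoffset -= urb->iso_frame_desc[i].actual_length;
		memmove(urb->transfer_buffer + urb->iso_frame_desc[i].offset,
			urb->transfer_buffer + actualoffset,
			urb->iso_frame_desc[i].actual_length);
	}
}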
diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h
index 3c5931364535..72d9ce0bcb45 100644
--- a/drivers/staging/vme/devices/vme_pio2.h
+++ b/drivers/staging/vme/devices/vme_pio2.h
@@ -243,7 +243,7 @@ struct pio2_card {
243int pio2_cntr_reset(struct pio2_card *); 243int pio2_cntr_reset(struct pio2_card *);
244 244
245int pio2_gpio_reset(struct pio2_card *); 245int pio2_gpio_reset(struct pio2_card *);
246int __init pio2_gpio_init(struct pio2_card *); 246int __devinit pio2_gpio_init(struct pio2_card *);
247void __exit pio2_gpio_exit(struct pio2_card *); 247void pio2_gpio_exit(struct pio2_card *);
248 248
249#endif /* _VME_PIO2_H_ */ 249#endif /* _VME_PIO2_H_ */
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
index dc837deb99dd..858484915f08 100644
--- a/drivers/staging/vme/devices/vme_pio2_gpio.c
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -187,7 +187,7 @@ int pio2_gpio_reset(struct pio2_card *card)
187 return 0; 187 return 0;
188} 188}
189 189
190int __init pio2_gpio_init(struct pio2_card *card) 190int __devinit pio2_gpio_init(struct pio2_card *card)
191{ 191{
192 int retval = 0; 192 int retval = 0;
193 char *label; 193 char *label;
@@ -220,7 +220,7 @@ int __init pio2_gpio_init(struct pio2_card *card)
220 return retval; 220 return retval;
221}; 221};
222 222
223void __exit pio2_gpio_exit(struct pio2_card *card) 223void pio2_gpio_exit(struct pio2_card *card)
224{ 224{
225 const char *label = card->gc.label; 225 const char *label = card->gc.label;
226 226
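The annotation change above matters for hotplug: pio2_gpio_init() is reached from the device probe path, which can run long after boot, so it cannot live in .init.text, the section __init discards once boot finishes. __devinit keeps the code discardable only when CONFIG_HOTPLUG is off, and the __exit tag is dropped from pio2_gpio_exit() for the analogous reason on the remove path. Schematically (hypothetical probe function, for illustration only):

static int __devinit pio2_probe(struct vme_dev *vdev)
{
	struct pio2_card *card;

	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	/* would fault here if pio2_gpio_init() were still __init and
	 * the init sections had already been freed */
	return pio2_gpio_init(card);
}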
diff --git a/drivers/staging/vme/vme.h b/drivers/staging/vme/vme.h
index 9d38ceed60e2..c9d65bf14cec 100644
--- a/drivers/staging/vme/vme.h
+++ b/drivers/staging/vme/vme.h
@@ -156,7 +156,7 @@ int vme_irq_request(struct vme_dev *, int, int,
156void vme_irq_free(struct vme_dev *, int, int); 156void vme_irq_free(struct vme_dev *, int, int);
157int vme_irq_generate(struct vme_dev *, int, int); 157int vme_irq_generate(struct vme_dev *, int, int);
158 158
159struct vme_resource * vme_lm_request(struct vme_dev *); 159struct vme_resource *vme_lm_request(struct vme_dev *);
160int vme_lm_count(struct vme_resource *); 160int vme_lm_count(struct vme_resource *);
161int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32); 161int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
162int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *); 162int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
index 577599ed70ad..1368e8cc9add 100644
--- a/drivers/staging/vt6655/bssdb.c
+++ b/drivers/staging/vt6655/bssdb.c
@@ -1327,13 +1327,13 @@ start:
1327 } 1327 }
1328 1328
1329 if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) { 1329 if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
1330 // if adhoc started which essid is NULL string, rescaning. 1330 // if adhoc started which essid is NULL string, rescanning.
1331 if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) { 1331 if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
1332 if (pDevice->uAutoReConnectTime < 10) { 1332 if (pDevice->uAutoReConnectTime < 10) {
1333 pDevice->uAutoReConnectTime++; 1333 pDevice->uAutoReConnectTime++;
1334 } 1334 }
1335 else { 1335 else {
1336 DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scaning ...\n"); 1336 DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scanning ...\n");
1337 pMgmt->eScanType = WMAC_SCAN_ACTIVE; 1337 pMgmt->eScanType = WMAC_SCAN_ACTIVE;
1338 bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL); 1338 bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
1339 bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL); 1339 bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index 7fd5cc5a55f6..ef197efab049 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -324,16 +324,16 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
324 pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID; 324 pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
325 memset(pList->sBSSIDList[ii].abySSID, 0, WLAN_SSID_MAXLEN + 1); 325 memset(pList->sBSSIDList[ii].abySSID, 0, WLAN_SSID_MAXLEN + 1);
326 memcpy(pList->sBSSIDList[ii].abySSID, pItemSSID->abySSID, pItemSSID->len); 326 memcpy(pList->sBSSIDList[ii].abySSID, pItemSSID->abySSID, pItemSSID->len);
327 if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo)) { 327 if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo))
328 pList->sBSSIDList[ii].byNetType = INFRA; 328 pList->sBSSIDList[ii].byNetType = INFRA;
329 } else { 329 else
330 pList->sBSSIDList[ii].byNetType = ADHOC; 330 pList->sBSSIDList[ii].byNetType = ADHOC;
331 } 331
332 if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo)) { 332 if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo))
333 pList->sBSSIDList[ii].bWEPOn = true; 333 pList->sBSSIDList[ii].bWEPOn = true;
334 } else { 334 else
335 pList->sBSSIDList[ii].bWEPOn = false; 335 pList->sBSSIDList[ii].bWEPOn = false;
336 } 336
337 ii++; 337 ii++;
338 if (ii >= pList->uItem) 338 if (ii >= pList->uItem)
339 break; 339 break;
@@ -367,9 +367,9 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
367 netif_stop_queue(pDevice->dev); 367 netif_stop_queue(pDevice->dev);
368 368
369 spin_lock_irq(&pDevice->lock); 369 spin_lock_irq(&pDevice->lock);
370 if (pDevice->bRadioOff == false) { 370 if (pDevice->bRadioOff == false)
371 CARDbRadioPowerOff(pDevice); 371 CARDbRadioPowerOff(pDevice);
372 } 372
373 pDevice->bLinkPass = false; 373 pDevice->bLinkPass = false;
374 memset(pMgmt->abyCurrBSSID, 0, 6); 374 memset(pMgmt->abyCurrBSSID, 0, 6);
375 pMgmt->eCurrState = WMAC_STATE_IDLE; 375 pMgmt->eCurrState = WMAC_STATE_IDLE;
@@ -489,13 +489,12 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
489 break; 489 break;
490 } 490 }
491 491
492 if (sStartAPCmd.wBBPType == PHY80211g) { 492 if (sStartAPCmd.wBBPType == PHY80211g)
493 pMgmt->byAPBBType = PHY_TYPE_11G; 493 pMgmt->byAPBBType = PHY_TYPE_11G;
494 } else if (sStartAPCmd.wBBPType == PHY80211a) { 494 else if (sStartAPCmd.wBBPType == PHY80211a)
495 pMgmt->byAPBBType = PHY_TYPE_11A; 495 pMgmt->byAPBBType = PHY_TYPE_11A;
496 } else { 496 else
497 pMgmt->byAPBBType = PHY_TYPE_11B; 497 pMgmt->byAPBBType = PHY_TYPE_11B;
498 }
499 498
500 pItemSSID = (PWLAN_IE_SSID)sStartAPCmd.ssid; 499 pItemSSID = (PWLAN_IE_SSID)sStartAPCmd.ssid;
501 if (pItemSSID->len > WLAN_SSID_MAXLEN + 1) 500 if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
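Beyond the brace cleanups above (kernel CodingStyle: single-statement branches take no braces), the two if/else pairs that merely pick a value could be collapsed further; a hypothetical follow-up, not part of this commit:

	pList->sBSSIDList[ii].byNetType =
		WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo) ? INFRA : ADHOC;
	pList->sBSSIDList[ii].bWEPOn =
		!!WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo);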
diff --git a/drivers/staging/vt6656/bssdb.c b/drivers/staging/vt6656/bssdb.c
index 32c67ed8435a..619c257e8773 100644
--- a/drivers/staging/vt6656/bssdb.c
+++ b/drivers/staging/vt6656/bssdb.c
@@ -1195,13 +1195,13 @@ else {
1195 } 1195 }
1196 1196
1197 if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) { 1197 if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
1198 // if adhoc started which essid is NULL string, rescaning. 1198 // if adhoc started which essid is NULL string, rescanning.
1199 if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) { 1199 if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
1200 if (pDevice->uAutoReConnectTime < 10) { 1200 if (pDevice->uAutoReConnectTime < 10) {
1201 pDevice->uAutoReConnectTime++; 1201 pDevice->uAutoReConnectTime++;
1202 } 1202 }
1203 else { 1203 else {
1204 DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scaning ...\n"); 1204 DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scanning ...\n");
1205 pMgmt->eScanType = WMAC_SCAN_ACTIVE; 1205 pMgmt->eScanType = WMAC_SCAN_ACTIVE;
1206 bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL); 1206 bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
1207 bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL); 1207 bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index ecfda5272fa1..b24e5314a6af 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -46,9 +46,6 @@
46 46
47#include <net/iw_handler.h> 47#include <net/iw_handler.h>
48 48
49
50/*--------------------- Static Definitions -------------------------*/
51
52#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT 49#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
53#define SUPPORTED_WIRELESS_EXT 18 50#define SUPPORTED_WIRELESS_EXT 18
54#else 51#else
@@ -63,19 +60,8 @@ static const long frequency_list[] = {
63 5700, 5745, 5765, 5785, 5805, 5825 60 5700, 5745, 5765, 5785, 5805, 5825
64 }; 61 };
65 62
66
67/*--------------------- Static Classes ----------------------------*/
68
69
70//static int msglevel =MSG_LEVEL_DEBUG;
71static int msglevel =MSG_LEVEL_INFO; 63static int msglevel =MSG_LEVEL_INFO;
72 64
73
74/*--------------------- Static Variables --------------------------*/
75/*--------------------- Static Functions --------------------------*/
76
77/*--------------------- Export Variables --------------------------*/
78
79struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev) 65struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
80{ 66{
81 PSDevice pDevice = netdev_priv(dev); 67 PSDevice pDevice = netdev_priv(dev);
@@ -87,7 +73,6 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
87 pDevice->wstats.qual.qual =(BYTE) pDevice->scStatistic.LinkQuality; 73 pDevice->wstats.qual.qual =(BYTE) pDevice->scStatistic.LinkQuality;
88 RFvRSSITodBm(pDevice, (BYTE)(pDevice->uCurrRSSI), &ldBm); 74 RFvRSSITodBm(pDevice, (BYTE)(pDevice->uCurrRSSI), &ldBm);
89 pDevice->wstats.qual.level = ldBm; 75 pDevice->wstats.qual.level = ldBm;
90 //pDevice->wstats.qual.level = 0x100 - pDevice->uCurrRSSI;
91 pDevice->wstats.qual.noise = 0; 76 pDevice->wstats.qual.noise = 0;
92 pDevice->wstats.qual.updated = 1; 77 pDevice->wstats.qual.updated = 1;
93 pDevice->wstats.discard.nwid = 0; 78 pDevice->wstats.discard.nwid = 0;
@@ -100,21 +85,6 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
100 return &pDevice->wstats; 85 return &pDevice->wstats;
101} 86}
102 87
103
104
105/*------------------------------------------------------------------*/
106
107
108static int iwctl_commit(struct net_device *dev,
109 struct iw_request_info *info,
110 void *wrq,
111 char *extra)
112{
113 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWCOMMIT\n");
114
115 return 0;
116}
117
118/* 88/*
119 * Wireless Handler : get protocol name 89 * Wireless Handler : get protocol name
120 */ 90 */
@@ -197,14 +167,12 @@ if(pDevice->byReAssocCount > 0) { //reject scan when re-associating!
197 } 167 }
198 168
199 pMgmt->eScanType = WMAC_SCAN_PASSIVE; 169 pMgmt->eScanType = WMAC_SCAN_PASSIVE;
200 //printk("SIOCSIWSCAN:WLAN_CMD_BSSID_SCAN\n");
201 bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL); 170 bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
202 spin_unlock_irq(&pDevice->lock); 171 spin_unlock_irq(&pDevice->lock);
203 172
204 return 0; 173 return 0;
205} 174}
206 175
207
208/* 176/*
209 * Wireless Handler : get scan results 177 * Wireless Handler : get scan results
210 */ 178 */
@@ -503,7 +471,7 @@ int iwctl_siwmode(struct net_device *dev,
503 * Wireless Handler : get operation mode 471 * Wireless Handler : get operation mode
504 */ 472 */
505 473
506int iwctl_giwmode(struct net_device *dev, 474void iwctl_giwmode(struct net_device *dev,
507 struct iw_request_info *info, 475 struct iw_request_info *info,
508 __u32 *wmode, 476 __u32 *wmode,
509 char *extra) 477 char *extra)
@@ -530,8 +498,6 @@ int iwctl_giwmode(struct net_device *dev,
530 default: 498 default:
531 *wmode = IW_MODE_ADHOC; 499 *wmode = IW_MODE_ADHOC;
532 } 500 }
533
534 return 0;
535} 501}
536 502
537 503
@@ -539,7 +505,7 @@ int iwctl_giwmode(struct net_device *dev,
539 * Wireless Handler : get capability range 505 * Wireless Handler : get capability range
540 */ 506 */
541 507
542int iwctl_giwrange(struct net_device *dev, 508void iwctl_giwrange(struct net_device *dev,
543 struct iw_request_info *info, 509 struct iw_request_info *info,
544 struct iw_point *wrq, 510 struct iw_point *wrq,
545 char *extra) 511 char *extra)
@@ -634,9 +600,6 @@ int iwctl_giwrange(struct net_device *dev,
634 range->avg_qual.level = 176; // -80 dBm 600 range->avg_qual.level = 176; // -80 dBm
635 range->avg_qual.noise = 0; 601 range->avg_qual.noise = 0;
636 } 602 }
637
638
639 return 0;
640} 603}
641 604
642 605
@@ -708,9 +671,7 @@ int iwctl_giwap(struct net_device *dev,
708 671
709 memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6); 672 memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6);
710 673
711//20080123-02,<Modify> by Einsn Liu
712 if ((pDevice->bLinkPass == FALSE) && (pMgmt->eCurrMode != WMAC_MODE_ESS_AP)) 674 if ((pDevice->bLinkPass == FALSE) && (pMgmt->eCurrMode != WMAC_MODE_ESS_AP))
713 // if ((pDevice->bLinkPass == FALSE) && (pMgmt->eCurrMode == WMAC_MODE_ESS_STA))
714 memset(wrq->sa_data, 0, 6); 675 memset(wrq->sa_data, 0, 6);
715 676
716 if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) { 677 if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
@@ -895,8 +856,7 @@ int iwctl_siwessid(struct net_device *dev,
895/* 856/*
896 * Wireless Handler : get essid 857 * Wireless Handler : get essid
897 */ 858 */
898 859void iwctl_giwessid(struct net_device *dev,
899int iwctl_giwessid(struct net_device *dev,
900 struct iw_request_info *info, 860 struct iw_request_info *info,
901 struct iw_point *wrq, 861 struct iw_point *wrq,
902 char *extra) 862 char *extra)
@@ -913,14 +873,11 @@ int iwctl_giwessid(struct net_device *dev,
913 873
914 // Get the current SSID 874 // Get the current SSID
915 pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID; 875 pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
916 //pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
917 memcpy(extra, pItemSSID->abySSID , pItemSSID->len); 876 memcpy(extra, pItemSSID->abySSID , pItemSSID->len);
918 extra[pItemSSID->len] = '\0'; 877 extra[pItemSSID->len] = '\0';
919 878
920 wrq->length = pItemSSID->len; 879 wrq->length = pItemSSID->len;
921 wrq->flags = 1; // active 880 wrq->flags = 1; // active
922
923 return 0;
924} 881}
925 882
926/* 883/*
@@ -1008,8 +965,7 @@ int iwctl_siwrate(struct net_device *dev,
1008/* 965/*
1009 * Wireless Handler : get data rate 966 * Wireless Handler : get data rate
1010 */ 967 */
1011 968void iwctl_giwrate(struct net_device *dev,
1012int iwctl_giwrate(struct net_device *dev,
1013 struct iw_request_info *info, 969 struct iw_request_info *info,
1014 struct iw_param *wrq, 970 struct iw_param *wrq,
1015 char *extra) 971 char *extra)
@@ -1047,9 +1003,6 @@ int iwctl_giwrate(struct net_device *dev,
1047 if (pDevice->bFixRate == TRUE) 1003 if (pDevice->bFixRate == TRUE)
1048 wrq->fixed = TRUE; 1004 wrq->fixed = TRUE;
1049 } 1005 }
1050
1051
1052 return 0;
1053} 1006}
1054 1007
1055 1008
@@ -1057,27 +1010,19 @@ int iwctl_giwrate(struct net_device *dev,
1057/* 1010/*
1058 * Wireless Handler : set rts threshold 1011 * Wireless Handler : set rts threshold
1059 */ 1012 */
1060
1061int iwctl_siwrts(struct net_device *dev, 1013int iwctl_siwrts(struct net_device *dev,
1062 struct iw_request_info *info, 1014 struct iw_param *wrq)
1063 struct iw_param *wrq,
1064 char *extra)
1065{ 1015{
1066 PSDevice pDevice = (PSDevice)netdev_priv(dev); 1016 PSDevice pDevice = (PSDevice)netdev_priv(dev);
1067 int rc = 0;
1068 1017
1069 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWRTS \n"); 1018 if ((wrq->value < 0 || wrq->value > 2312) && !wrq->disabled)
1019 return -EINVAL;
1070 1020
1071 { 1021 else if (wrq->disabled)
1072 int rthr = wrq->value; 1022 pDevice->wRTSThreshold = 2312;
1073 if(wrq->disabled) 1023
1074 rthr = 2312; 1024 else
1075 if((rthr < 0) || (rthr > 2312)) { 1025 pDevice->wRTSThreshold = wrq->value;
1076 rc = -EINVAL;
1077 }else {
1078 pDevice->wRTSThreshold = rthr;
1079 }
1080 }
1081 1026
1082 return 0; 1027 return 0;
1083} 1028}
@@ -1327,55 +1272,6 @@ int iwctl_siwencode(struct net_device *dev,
1327 return rc; 1272 return rc;
1328} 1273}
1329 1274
1330/*
1331 * Wireless Handler : get encode mode
1332 */
1333//2008-0409-06, <Mark> by Einsn Liu
1334 /*
1335int iwctl_giwencode(struct net_device *dev,
1336 struct iw_request_info *info,
1337 struct iw_point *wrq,
1338 char *extra)
1339{
1340 PSDevice pDevice = (PSDevice)netdev_priv(dev);
1341 PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
1342 int rc = 0;
1343 char abyKey[WLAN_WEP232_KEYLEN];
1344 unsigned int index = (unsigned int)(wrq->flags & IW_ENCODE_INDEX);
1345 PSKeyItem pKey = NULL;
1346
1347 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODE\n");
1348
1349
1350 memset(abyKey, 0, sizeof(abyKey));
1351 // Check encryption mode
1352 wrq->flags = IW_ENCODE_NOKEY;
1353 // Is WEP enabled ???
1354 if (pDevice->bEncryptionEnable)
1355 wrq->flags |= IW_ENCODE_ENABLED;
1356 else
1357 wrq->flags |= IW_ENCODE_DISABLED;
1358
1359 if (pMgmt->bShareKeyAlgorithm)
1360 wrq->flags |= IW_ENCODE_RESTRICTED;
1361 else
1362 wrq->flags |= IW_ENCODE_OPEN;
1363
1364 if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (BYTE)index , &pKey)){
1365 wrq->length = pKey->uKeyLength;
1366 memcpy(abyKey, pKey->abyKey, pKey->uKeyLength);
1367 }
1368 else {
1369 rc = -EINVAL;
1370 return rc;
1371 }
1372 wrq->flags |= index;
1373 // Copy the key to the user buffer
1374 memcpy(extra, abyKey, WLAN_WEP232_KEYLEN);
1375 return 0;
1376}
1377*/
1378
1379int iwctl_giwencode(struct net_device *dev, 1275int iwctl_giwencode(struct net_device *dev,
1380 struct iw_request_info *info, 1276 struct iw_request_info *info,
1381 struct iw_point *wrq, 1277 struct iw_point *wrq,
@@ -1562,7 +1458,6 @@ int iwctl_siwauth(struct net_device *dev,
1562 wpa_version = wrq->value; 1458 wpa_version = wrq->value;
1563 if(wrq->value == IW_AUTH_WPA_VERSION_DISABLED) { 1459 if(wrq->value == IW_AUTH_WPA_VERSION_DISABLED) {
1564 PRINT_K("iwctl_siwauth:set WPADEV to disable at 1??????\n"); 1460 PRINT_K("iwctl_siwauth:set WPADEV to disable at 1??????\n");
1565 //pDevice->bWPADEVUp = FALSE;
1566 } 1461 }
1567 else if(wrq->value == IW_AUTH_WPA_VERSION_WPA) { 1462 else if(wrq->value == IW_AUTH_WPA_VERSION_WPA) {
1568 PRINT_K("iwctl_siwauth:set WPADEV to WPA1******\n"); 1463 PRINT_K("iwctl_siwauth:set WPADEV to WPA1******\n");
@@ -1570,7 +1465,6 @@ int iwctl_siwauth(struct net_device *dev,
1570 else { 1465 else {
1571 PRINT_K("iwctl_siwauth:set WPADEV to WPA2******\n"); 1466 PRINT_K("iwctl_siwauth:set WPADEV to WPA2******\n");
1572 } 1467 }
1573 //pDevice->bWPASuppWextEnabled =TRUE;
1574 break; 1468 break;
1575 case IW_AUTH_CIPHER_PAIRWISE: 1469 case IW_AUTH_CIPHER_PAIRWISE:
1576 pairwise = wrq->value; 1470 pairwise = wrq->value;
@@ -1627,11 +1521,6 @@ int iwctl_siwauth(struct net_device *dev,
1627 } 1521 }
1628 break; 1522 break;
1629 case IW_AUTH_WPA_ENABLED: 1523 case IW_AUTH_WPA_ENABLED:
1630 //pDevice->bWPADEVUp = !! wrq->value;
1631 //if(pDevice->bWPADEVUp==TRUE)
1632 // printk("iwctl_siwauth:set WPADEV to enable successful*******\n");
1633 //else
1634 // printk("iwctl_siwauth:set WPADEV to enable fail?????\n");
1635 break; 1524 break;
1636 case IW_AUTH_RX_UNENCRYPTED_EAPOL: 1525 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
1637 break; 1526 break;
@@ -1646,7 +1535,6 @@ int iwctl_siwauth(struct net_device *dev,
1646 pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; 1535 pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
1647 pMgmt->bShareKeyAlgorithm = FALSE; 1536 pMgmt->bShareKeyAlgorithm = FALSE;
1648 pMgmt->eAuthenMode = WMAC_AUTH_OPEN; 1537 pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
1649 //pDevice->bWPADEVUp = FALSE;
1650 PRINT_K("iwctl_siwauth:set WPADEV to disaable at 2?????\n"); 1538 PRINT_K("iwctl_siwauth:set WPADEV to disaable at 2?????\n");
1651 } 1539 }
1652 1540
@@ -1655,15 +1543,6 @@ int iwctl_siwauth(struct net_device *dev,
1655 ret = -EOPNOTSUPP; 1543 ret = -EOPNOTSUPP;
1656 break; 1544 break;
1657 } 1545 }
1658/*
1659 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_version = %d\n",wpa_version);
1660 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise = %d\n",pairwise);
1661 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->eEncryptionStatus = %d\n",pDevice->eEncryptionStatus);
1662 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->eAuthenMode = %d\n",pMgmt->eAuthenMode);
1663 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->bShareKeyAlgorithm = %s\n",pMgmt->bShareKeyAlgorithm?"TRUE":"FALSE");
1664 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bEncryptionEnable = %s\n",pDevice->bEncryptionEnable?"TRUE":"FALSE");
1665 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bWPADEVUp = %s\n",pDevice->bWPADEVUp?"TRUE":"FALSE");
1666*/
1667 return ret; 1546 return ret;
1668} 1547}
1669 1548
@@ -1752,8 +1631,6 @@ int iwctl_siwencodeext(struct net_device *dev,
1752 u8 seq[IW_ENCODE_SEQ_MAX_SIZE]; 1631 u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
1753 u8 key[64]; 1632 u8 key[64];
1754 size_t seq_len=0,key_len=0; 1633 size_t seq_len=0,key_len=0;
1755//
1756 // int ii;
1757 u8 *buf; 1634 u8 *buf;
1758 size_t blen; 1635 size_t blen;
1759 u8 key_array[64]; 1636 u8 key_array[64];
@@ -1883,7 +1760,6 @@ int iwctl_siwmlme(struct net_device *dev,
1883 PSDevice pDevice = (PSDevice)netdev_priv(dev); 1760 PSDevice pDevice = (PSDevice)netdev_priv(dev);
1884 PSMgmtObject pMgmt = &(pDevice->sMgmtObj); 1761 PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
1885 struct iw_mlme *mlme = (struct iw_mlme *)extra; 1762 struct iw_mlme *mlme = (struct iw_mlme *)extra;
1886 //u16 reason = cpu_to_le16(mlme->reason_code);
1887 int ret = 0; 1763 int ret = 0;
1888 1764
1889 if(memcmp(pMgmt->abyCurrBSSID, mlme->addr.sa_data, ETH_ALEN)){ 1765 if(memcmp(pMgmt->abyCurrBSSID, mlme->addr.sa_data, ETH_ALEN)){
@@ -1892,12 +1768,6 @@ int iwctl_siwmlme(struct net_device *dev,
1892 } 1768 }
1893 switch(mlme->cmd){ 1769 switch(mlme->cmd){
1894 case IW_MLME_DEAUTH: 1770 case IW_MLME_DEAUTH:
1895 //this command seems to be not complete,please test it --einsnliu
1896 //printk("iwctl_siwmlme--->send DEAUTH\n");
1897 /* bScheduleCommand((void *) pDevice,
1898 WLAN_CMD_DEAUTH,
1899 (PBYTE)&reason); */
1900 //break;
1901 case IW_MLME_DISASSOC: 1771 case IW_MLME_DISASSOC:
1902 if(pDevice->bLinkPass == TRUE){ 1772 if(pDevice->bLinkPass == TRUE){
1903 PRINT_K("iwctl_siwmlme--->send DISASSOCIATE\n"); 1773 PRINT_K("iwctl_siwmlme--->send DISASSOCIATE\n");
@@ -1916,77 +1786,9 @@ int iwctl_siwmlme(struct net_device *dev,
1916 1786
1917#endif 1787#endif
1918 1788
1919/*------------------------------------------------------------------*/
1920/*
1921 * Structures to export the Wireless Handlers
1922 */
1923
1924
1925/*
1926static const iw_handler iwctl_handler[] =
1927{
1928 (iw_handler) iwctl_commit, // SIOCSIWCOMMIT
1929 (iw_handler) iwctl_giwname, // SIOCGIWNAME
1930 (iw_handler) NULL, // SIOCSIWNWID
1931 (iw_handler) iwctl_siwfreq, // SIOCSIWFREQ
1932 (iw_handler) iwctl_giwfreq, // SIOCGIWFREQ
1933 (iw_handler) iwctl_siwmode, // SIOCSIWMODE
1934 (iw_handler) iwctl_giwmode, // SIOCGIWMODE
1935 (iw_handler) NULL, // SIOCSIWSENS
1936 (iw_handler) iwctl_giwsens, // SIOCGIWSENS
1937 (iw_handler) NULL, // SIOCSIWRANGE
1938 (iw_handler) iwctl_giwrange, // SIOCGIWRANGE
1939 (iw_handler) NULL, // SIOCSIWPRIV
1940 (iw_handler) NULL, // SIOCGIWPRIV
1941 (iw_handler) NULL, // SIOCSIWSTATS
1942 (iw_handler) NULL, // SIOCGIWSTATS
1943 (iw_handler) NULL, // SIOCSIWSPY
1944 (iw_handler) NULL, // SIOCGIWSPY
1945 (iw_handler) NULL, // -- hole --
1946 (iw_handler) NULL, // -- hole --
1947 (iw_handler) iwctl_siwap, // SIOCSIWAP
1948 (iw_handler) iwctl_giwap, // SIOCGIWAP
1949 (iw_handler) NULL, // -- hole -- 0x16
1950 (iw_handler) iwctl_giwaplist, // SIOCGIWAPLIST
1951 (iw_handler) iwctl_siwscan, // SIOCSIWSCAN
1952 (iw_handler) iwctl_giwscan, // SIOCGIWSCAN
1953 (iw_handler) iwctl_siwessid, // SIOCSIWESSID
1954 (iw_handler) iwctl_giwessid, // SIOCGIWESSID
1955 (iw_handler) NULL, // SIOCSIWNICKN
1956 (iw_handler) NULL, // SIOCGIWNICKN
1957 (iw_handler) NULL, // -- hole --
1958 (iw_handler) NULL, // -- hole --
1959 (iw_handler) iwctl_siwrate, // SIOCSIWRATE 0x20
1960 (iw_handler) iwctl_giwrate, // SIOCGIWRATE
1961 (iw_handler) iwctl_siwrts, // SIOCSIWRTS
1962 (iw_handler) iwctl_giwrts, // SIOCGIWRTS
1963 (iw_handler) iwctl_siwfrag, // SIOCSIWFRAG
1964 (iw_handler) iwctl_giwfrag, // SIOCGIWFRAG
1965 (iw_handler) NULL, // SIOCSIWTXPOW
1966 (iw_handler) NULL, // SIOCGIWTXPOW
1967 (iw_handler) iwctl_siwretry, // SIOCSIWRETRY
1968 (iw_handler) iwctl_giwretry, // SIOCGIWRETRY
1969 (iw_handler) iwctl_siwencode, // SIOCSIWENCODE
1970 (iw_handler) iwctl_giwencode, // SIOCGIWENCODE
1971 (iw_handler) iwctl_siwpower, // SIOCSIWPOWER
1972 (iw_handler) iwctl_giwpower, // SIOCGIWPOWER
1973 (iw_handler) NULL, // -- hole --
1974 (iw_handler) NULL, // -- hole --
1975 (iw_handler) iwctl_siwgenie, // SIOCSIWGENIE
1976 (iw_handler) iwctl_giwgenie, // SIOCGIWGENIE
1977 (iw_handler) iwctl_siwauth, // SIOCSIWAUTH
1978 (iw_handler) iwctl_giwauth, // SIOCGIWAUTH
1979 (iw_handler) iwctl_siwencodeext, // SIOCSIWENCODEEXT
1980 (iw_handler) iwctl_giwencodeext, // SIOCGIWENCODEEXT
1981 (iw_handler) NULL, // SIOCSIWPMKSA
1982 (iw_handler) NULL, // -- hole --
1983
1984};
1985*/
1986
1987static const iw_handler iwctl_handler[] = 1789static const iw_handler iwctl_handler[] =
1988{ 1790{
1989 (iw_handler) iwctl_commit, // SIOCSIWCOMMIT 1791 (iw_handler) NULL, /* SIOCSIWCOMMIT */
1990 (iw_handler) NULL, // SIOCGIWNAME 1792 (iw_handler) NULL, // SIOCGIWNAME
1991 (iw_handler) NULL, // SIOCSIWNWID 1793 (iw_handler) NULL, // SIOCSIWNWID
1992 (iw_handler) NULL, // SIOCGIWNWID 1794 (iw_handler) NULL, // SIOCGIWNWID
@@ -2063,13 +1865,9 @@ const struct iw_handler_def iwctl_handler_def =
2063{ 1865{
2064 .get_wireless_stats = &iwctl_get_wireless_stats, 1866 .get_wireless_stats = &iwctl_get_wireless_stats,
2065 .num_standard = sizeof(iwctl_handler)/sizeof(iw_handler), 1867 .num_standard = sizeof(iwctl_handler)/sizeof(iw_handler),
2066// .num_private = sizeof(iwctl_private_handler)/sizeof(iw_handler),
2067// .num_private_args = sizeof(iwctl_private_args)/sizeof(struct iw_priv_args),
2068 .num_private = 0, 1868 .num_private = 0,
2069 .num_private_args = 0, 1869 .num_private_args = 0,
2070 .standard = (iw_handler *) iwctl_handler, 1870 .standard = (iw_handler *) iwctl_handler,
2071// .private = (iw_handler *) iwctl_private_handler,
2072// .private_args = (struct iw_priv_args *)iwctl_private_args,
2073 .private = NULL, 1871 .private = NULL,
2074 .private_args = NULL, 1872 .private_args = NULL,
2075}; 1873};
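The recurring pattern in this file: wireless-extensions get-handlers that only copy driver state outward (giwmode, giwrange, giwessid, giwrate) have no failure mode, so the int return and the trailing return 0 are dropped, while set-handlers such as siwrts keep int because they validate input. A condensed example of the converted shape (body simplified for illustration):

void iwctl_giwmode(struct net_device *dev, struct iw_request_info *info,
		   __u32 *wmode, char *extra)
{
	PSDevice pDevice = netdev_priv(dev);
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);

	/* pure state readout; nothing here can fail */
	*wmode = (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) ?
		 IW_MODE_ADHOC : IW_MODE_INFRA;
}

The prototype updates in iwctl.h and the call sites in main_usb.c below follow mechanically from this.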
diff --git a/drivers/staging/vt6656/iwctl.h b/drivers/staging/vt6656/iwctl.h
index 10a240e65012..0c6e0496779b 100644
--- a/drivers/staging/vt6656/iwctl.h
+++ b/drivers/staging/vt6656/iwctl.h
@@ -46,13 +46,13 @@ int iwctl_siwap(struct net_device *dev,
46 struct sockaddr *wrq, 46 struct sockaddr *wrq,
47 char *extra); 47 char *extra);
48 48
49int iwctl_giwrange(struct net_device *dev, 49void iwctl_giwrange(struct net_device *dev,
50 struct iw_request_info *info, 50 struct iw_request_info *info,
51 struct iw_point *wrq, 51 struct iw_point *wrq,
52 char *extra); 52 char *extra);
53 53
54 54
55int iwctl_giwmode(struct net_device *dev, 55void iwctl_giwmode(struct net_device *dev,
56 struct iw_request_info *info, 56 struct iw_request_info *info,
57 __u32 *wmode, 57 __u32 *wmode,
58 char *extra); 58 char *extra);
@@ -97,7 +97,7 @@ int iwctl_siwessid(struct net_device *dev,
97 struct iw_point *wrq, 97 struct iw_point *wrq,
98 char *extra); 98 char *extra);
99 99
100int iwctl_giwessid(struct net_device *dev, 100void iwctl_giwessid(struct net_device *dev,
101 struct iw_request_info *info, 101 struct iw_request_info *info,
102 struct iw_point *wrq, 102 struct iw_point *wrq,
103 char *extra); 103 char *extra);
@@ -107,16 +107,13 @@ int iwctl_siwrate(struct net_device *dev,
107 struct iw_param *wrq, 107 struct iw_param *wrq,
108 char *extra); 108 char *extra);
109 109
110int iwctl_giwrate(struct net_device *dev, 110void iwctl_giwrate(struct net_device *dev,
111 struct iw_request_info *info, 111 struct iw_request_info *info,
112 struct iw_param *wrq, 112 struct iw_param *wrq,
113 char *extra); 113 char *extra);
114 114
115int iwctl_siwrts(struct net_device *dev, 115int iwctl_siwrts(struct net_device *dev,
116 struct iw_request_info *info, 116 struct iw_param *wrq);
117 struct iw_param *wrq,
118 char *extra);
119
120 117
121int iwctl_giwrts(struct net_device *dev, 118int iwctl_giwrts(struct net_device *dev,
122 struct iw_request_info *info, 119 struct iw_request_info *info,
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 6a708f447651..763e028a5cc5 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1657,8 +1657,8 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
1657 { 1657 {
1658 char essid[IW_ESSID_MAX_SIZE+1]; 1658 char essid[IW_ESSID_MAX_SIZE+1];
1659 if (wrq->u.essid.pointer) { 1659 if (wrq->u.essid.pointer) {
1660 rc = iwctl_giwessid(dev, NULL, 1660 iwctl_giwessid(dev, NULL,
1661 &(wrq->u.essid), essid); 1661 &(wrq->u.essid), essid);
1662 if (copy_to_user(wrq->u.essid.pointer, 1662 if (copy_to_user(wrq->u.essid.pointer,
1663 essid, 1663 essid,
1664 wrq->u.essid.length) ) 1664 wrq->u.essid.length) )
@@ -1698,14 +1698,13 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
1698 1698
1699 // Get the current bit-rate 1699 // Get the current bit-rate
1700 case SIOCGIWRATE: 1700 case SIOCGIWRATE:
1701 1701 iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL);
1702 rc = iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL);
1703 break; 1702 break;
1704 1703
1705 // Set the desired RTS threshold 1704 // Set the desired RTS threshold
1706 case SIOCSIWRTS: 1705 case SIOCSIWRTS:
1707 1706
1708 rc = iwctl_siwrts(dev, NULL, &(wrq->u.rts), NULL); 1707 rc = iwctl_siwrts(dev, &(wrq->u.rts));
1709 break; 1708 break;
1710 1709
1711 // Get the current RTS threshold 1710 // Get the current RTS threshold
@@ -1733,7 +1732,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
1733 1732
1734 // Get mode of operation 1733 // Get mode of operation
1735 case SIOCGIWMODE: 1734 case SIOCGIWMODE:
1736 rc = iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL); 1735 iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL);
1737 break; 1736 break;
1738 1737
1739 // Set WEP keys and mode 1738 // Set WEP keys and mode
@@ -1811,7 +1810,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
1811 { 1810 {
1812 struct iw_range range; 1811 struct iw_range range;
1813 1812
1814 rc = iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *) &range); 1813 iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *) &range);
1815 if (copy_to_user(wrq->u.data.pointer, &range, sizeof(struct iw_range))) 1814 if (copy_to_user(wrq->u.data.pointer, &range, sizeof(struct iw_range)))
1816 rc = -EFAULT; 1815 rc = -EFAULT;
1817 } 1816 }
diff --git a/drivers/staging/vt6656/wpactl.c b/drivers/staging/vt6656/wpactl.c
index 2fa4f845a755..5435e8205b2c 100644
--- a/drivers/staging/vt6656/wpactl.c
+++ b/drivers/staging/vt6656/wpactl.c
@@ -46,23 +46,18 @@
46 46
47#define VIAWGET_WPA_MAX_BUF_SIZE 1024 47#define VIAWGET_WPA_MAX_BUF_SIZE 1024
48 48
49
50
51static const int frequency_list[] = { 49static const int frequency_list[] = {
52 2412, 2417, 2422, 2427, 2432, 2437, 2442, 50 2412, 2417, 2422, 2427, 2432, 2437, 2442,
53 2447, 2452, 2457, 2462, 2467, 2472, 2484 51 2447, 2452, 2457, 2462, 2467, 2472, 2484
54}; 52};
53
55/*--------------------- Static Classes ----------------------------*/ 54/*--------------------- Static Classes ----------------------------*/
56 55
57/*--------------------- Static Variables --------------------------*/ 56/*--------------------- Static Variables --------------------------*/
58//static int msglevel =MSG_LEVEL_DEBUG; 57static int msglevel = MSG_LEVEL_INFO;
59static int msglevel =MSG_LEVEL_INFO;
60 58
61/*--------------------- Static Functions --------------------------*/ 59/*--------------------- Static Functions --------------------------*/
62 60
63
64
65
66/*--------------------- Export Variables --------------------------*/ 61/*--------------------- Export Variables --------------------------*/
67static void wpadev_setup(struct net_device *dev) 62static void wpadev_setup(struct net_device *dev)
68{ 63{
@@ -72,9 +67,9 @@ static void wpadev_setup(struct net_device *dev)
72 dev->addr_len = ETH_ALEN; 67 dev->addr_len = ETH_ALEN;
73 dev->tx_queue_len = 1000; 68 dev->tx_queue_len = 1000;
74 69
75 memset(dev->broadcast,0xFF, ETH_ALEN); 70 memset(dev->broadcast, 0xFF, ETH_ALEN);
76 71
77 dev->flags = IFF_BROADCAST|IFF_MULTICAST; 72 dev->flags = IFF_BROADCAST | IFF_MULTICAST;
78} 73}
79 74
80/* 75/*
@@ -90,45 +85,43 @@ static void wpadev_setup(struct net_device *dev)
90 * Return Value: 85 * Return Value:
91 * 86 *
92 */ 87 */
93
94static int wpa_init_wpadev(PSDevice pDevice) 88static int wpa_init_wpadev(PSDevice pDevice)
95{ 89{
96 PSDevice wpadev_priv; 90 PSDevice wpadev_priv;
97 struct net_device *dev = pDevice->dev; 91 struct net_device *dev = pDevice->dev;
98 int ret=0; 92 int ret = 0;
99 93
100 pDevice->wpadev = alloc_netdev(sizeof(PSDevice), "vntwpa", wpadev_setup); 94 pDevice->wpadev = alloc_netdev(sizeof(PSDevice), "vntwpa", wpadev_setup);
101 if (pDevice->wpadev == NULL) 95 if (pDevice->wpadev == NULL)
102 return -ENOMEM; 96 return -ENOMEM;
103 97
104 wpadev_priv = netdev_priv(pDevice->wpadev); 98 wpadev_priv = netdev_priv(pDevice->wpadev);
105 *wpadev_priv = *pDevice; 99 *wpadev_priv = *pDevice;
106 memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN); 100 memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN);
107 pDevice->wpadev->base_addr = dev->base_addr; 101 pDevice->wpadev->base_addr = dev->base_addr;
108 pDevice->wpadev->irq = dev->irq; 102 pDevice->wpadev->irq = dev->irq;
109 pDevice->wpadev->mem_start = dev->mem_start; 103 pDevice->wpadev->mem_start = dev->mem_start;
110 pDevice->wpadev->mem_end = dev->mem_end; 104 pDevice->wpadev->mem_end = dev->mem_end;
111 ret = register_netdev(pDevice->wpadev); 105 ret = register_netdev(pDevice->wpadev);
112 if (ret) { 106 if (ret) {
113 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: register_netdev(WPA) failed!\n", 107 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: register_netdev(WPA) failed!\n",
114 dev->name); 108 dev->name);
115 free_netdev(pDevice->wpadev); 109 free_netdev(pDevice->wpadev);
116 return -1; 110 return -1;
117 } 111 }
118 112
119 if (pDevice->skb == NULL) { 113 if (pDevice->skb == NULL) {
120 pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz); 114 pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
121 if (pDevice->skb == NULL) 115 if (pDevice->skb == NULL)
122 return -ENOMEM; 116 return -ENOMEM;
123 } 117 }
124 118
125 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdev %s for WPA management\n", 119 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdev %s for WPA management\n",
126 dev->name, pDevice->wpadev->name); 120 dev->name, pDevice->wpadev->name);
127 121
128 return 0; 122 return 0;
129} 123}
130 124
131
132/* 125/*
133 * Description: 126 * Description:
134 * unregister net_device (wpadev) 127 * unregister net_device (wpadev)
@@ -141,29 +134,24 @@ static int wpa_init_wpadev(PSDevice pDevice)
141 * Return Value: 134 * Return Value:
142 * 135 *
143 */ 136 */
144
145static int wpa_release_wpadev(PSDevice pDevice) 137static int wpa_release_wpadev(PSDevice pDevice)
146{ 138{
147 if (pDevice->skb) { 139 if (pDevice->skb) {
148 dev_kfree_skb(pDevice->skb); 140 dev_kfree_skb(pDevice->skb);
149 pDevice->skb = NULL; 141 pDevice->skb = NULL;
150 } 142 }
151 143
152 if (pDevice->wpadev) { 144 if (pDevice->wpadev) {
153 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", 145 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
154 pDevice->dev->name, pDevice->wpadev->name); 146 pDevice->dev->name, pDevice->wpadev->name);
155 unregister_netdev(pDevice->wpadev); 147 unregister_netdev(pDevice->wpadev);
156 free_netdev(pDevice->wpadev); 148 free_netdev(pDevice->wpadev);
157 pDevice->wpadev = NULL; 149 pDevice->wpadev = NULL;
158 } 150 }
159 151
160 return 0; 152 return 0;
161} 153}
162 154
163
164
165
166
167/* 155/*
168 * Description: 156 * Description:
169 * Set enable/disable dev for wpa supplicant daemon 157 * Set enable/disable dev for wpa supplicant daemon
@@ -177,13 +165,11 @@ static int wpa_release_wpadev(PSDevice pDevice)
177 * Return Value: 165 * Return Value:
178 * 166 *
179 */ 167 */
180
181int wpa_set_wpadev(PSDevice pDevice, int val) 168int wpa_set_wpadev(PSDevice pDevice, int val)
182{ 169{
183 if (val) 170 if (val)
184 return wpa_init_wpadev(pDevice); 171 return wpa_init_wpadev(pDevice);
185 else 172 return wpa_release_wpadev(pDevice);
186 return wpa_release_wpadev(pDevice);
187} 173}
188 174
189/* 175/*
@@ -199,245 +185,217 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
199 * Return Value: 185 * Return Value:
200 * 186 *
201 */ 187 */
202
203 int wpa_set_keys(PSDevice pDevice, void *ctx, BOOL fcpfkernel) 188 int wpa_set_keys(PSDevice pDevice, void *ctx, BOOL fcpfkernel)
204{ 189{
205 struct viawget_wpa_param *param=ctx; 190 struct viawget_wpa_param *param = ctx;
206 PSMgmtObject pMgmt = &(pDevice->sMgmtObj); 191 PSMgmtObject pMgmt = &pDevice->sMgmtObj;
207 DWORD dwKeyIndex = 0; 192 DWORD dwKeyIndex = 0;
208 BYTE abyKey[MAX_KEY_LEN]; 193 BYTE abyKey[MAX_KEY_LEN];
209 BYTE abySeq[MAX_KEY_LEN]; 194 BYTE abySeq[MAX_KEY_LEN];
210 QWORD KeyRSC; 195 QWORD KeyRSC;
211// NDIS_802_11_KEY_RSC KeyRSC; 196 BYTE byKeyDecMode = KEY_CTL_WEP;
212 BYTE byKeyDecMode = KEY_CTL_WEP;
213 int ret = 0; 197 int ret = 0;
214 int uu, ii; 198 int uu;
215 199 int ii;
216 200
217 if (param->u.wpa_key.alg_name > WPA_ALG_CCMP) 201 if (param->u.wpa_key.alg_name > WPA_ALG_CCMP)
218 return -EINVAL; 202 return -EINVAL;
219 203
220 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n", param->u.wpa_key.alg_name); 204 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n",
205 param->u.wpa_key.alg_name);
221 if (param->u.wpa_key.alg_name == WPA_ALG_NONE) { 206 if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
222 pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; 207 pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
223 pDevice->bEncryptionEnable = FALSE; 208 pDevice->bEncryptionEnable = FALSE;
224 pDevice->byKeyIndex = 0; 209 pDevice->byKeyIndex = 0;
225 pDevice->bTransmitKey = FALSE; 210 pDevice->bTransmitKey = FALSE;
226 for (uu=0; uu<MAX_KEY_TABLE; uu++) { 211 for (uu=0; uu<MAX_KEY_TABLE; uu++) {
227 MACvDisableKeyEntry(pDevice, uu); 212 MACvDisableKeyEntry(pDevice, uu);
228 } 213 }
229 return ret; 214 return ret;
230 } 215 }
231 216
232 if (param->u.wpa_key.key && param->u.wpa_key.key_len > sizeof(abyKey)) 217 if (param->u.wpa_key.key && param->u.wpa_key.key_len > sizeof(abyKey))
233 return -EINVAL; 218 return -EINVAL;
234 219
235 spin_unlock_irq(&pDevice->lock); 220 spin_unlock_irq(&pDevice->lock);
236 if(param->u.wpa_key.key && fcpfkernel) { 221 if (param->u.wpa_key.key && fcpfkernel) {
237 memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len); 222 memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
238 } 223 } else {
239 else { 224 if (param->u.wpa_key.key &&
240 if (param->u.wpa_key.key && 225 copy_from_user(&abyKey[0], param->u.wpa_key.key,
241 copy_from_user(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len)) { 226 param->u.wpa_key.key_len)) {
242 spin_lock_irq(&pDevice->lock); 227 spin_lock_irq(&pDevice->lock);
243 return -EINVAL; 228 return -EINVAL;
229 }
244 } 230 }
245 } 231 spin_lock_irq(&pDevice->lock);
246 spin_lock_irq(&pDevice->lock);
247 232
248 dwKeyIndex = (DWORD)(param->u.wpa_key.key_index); 233 dwKeyIndex = (DWORD)(param->u.wpa_key.key_index);
249 234
250 if (param->u.wpa_key.alg_name == WPA_ALG_WEP) { 235 if (param->u.wpa_key.alg_name == WPA_ALG_WEP) {
251 if (dwKeyIndex > 3) { 236 if (dwKeyIndex > 3) {
252 return -EINVAL; 237 return -EINVAL;
253 } 238 } else {
254 else { 239 if (param->u.wpa_key.set_tx) {
255 if (param->u.wpa_key.set_tx) { 240 pDevice->byKeyIndex = (BYTE)dwKeyIndex;
256 pDevice->byKeyIndex = (BYTE)dwKeyIndex; 241 pDevice->bTransmitKey = TRUE;
257 pDevice->bTransmitKey = TRUE; 242 dwKeyIndex |= (1 << 31);
258 dwKeyIndex |= (1 << 31); 243 }
259 } 244 KeybSetDefaultKey( pDevice,
260 KeybSetDefaultKey( pDevice, 245 &(pDevice->sKey),
261 &(pDevice->sKey), 246 dwKeyIndex & ~(BIT30 | USE_KEYRSC),
262 dwKeyIndex & ~(BIT30 | USE_KEYRSC), 247 param->u.wpa_key.key_len,
263 param->u.wpa_key.key_len, 248 NULL,
264 NULL, 249 abyKey,
265 abyKey, 250 KEY_CTL_WEP
266 KEY_CTL_WEP 251 );
267 ); 252
268 253 }
269 } 254 pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
270 pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; 255 pDevice->bEncryptionEnable = TRUE;
271 pDevice->bEncryptionEnable = TRUE; 256 return ret;
272 return ret;
273 } 257 }
274 258
275 259
276 if (param->u.wpa_key.seq && param->u.wpa_key.seq_len > sizeof(abySeq)) 260 if (param->u.wpa_key.seq && param->u.wpa_key.seq_len > sizeof(abySeq))
277 return -EINVAL; 261 return -EINVAL;
278 262
279 spin_unlock_irq(&pDevice->lock); 263 spin_unlock_irq(&pDevice->lock);
280 if(param->u.wpa_key.seq && fcpfkernel) { 264 if (param->u.wpa_key.seq && fcpfkernel) {
281 memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len); 265 memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len);
282 } 266 } else {
283 else { 267 if (param->u.wpa_key.seq &&
284 if (param->u.wpa_key.seq && 268 copy_from_user(&abySeq[0], param->u.wpa_key.seq,
285 copy_from_user(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len)) { 269 param->u.wpa_key.seq_len)) {
286 spin_lock_irq(&pDevice->lock); 270 spin_lock_irq(&pDevice->lock);
287 return -EINVAL; 271 return -EINVAL;
288 } 272 }
289 } 273 }
290 spin_lock_irq(&pDevice->lock); 274 spin_lock_irq(&pDevice->lock);
291 275
292 if (param->u.wpa_key.seq_len > 0) { 276 if (param->u.wpa_key.seq_len > 0) {
293 for (ii = 0 ; ii < param->u.wpa_key.seq_len ; ii++) { 277 for (ii = 0 ; ii < param->u.wpa_key.seq_len ; ii++) {
294 if (ii < 4) 278 if (ii < 4)
295 LODWORD(KeyRSC) |= (abySeq[ii] << (ii * 8)); 279 LODWORD(KeyRSC) |= (abySeq[ii] << (ii * 8));
296 else 280 else
297 HIDWORD(KeyRSC) |= (abySeq[ii] << ((ii-4) * 8)); 281 HIDWORD(KeyRSC) |= (abySeq[ii] << ((ii-4) * 8));
298 //KeyRSC |= (abySeq[ii] << (ii * 8));
299 } 282 }
300 dwKeyIndex |= 1 << 29; 283 dwKeyIndex |= 1 << 29;
301 } 284 }
302 285
303 if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) { 286 if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) {
304 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return dwKeyIndex > 3\n"); 287 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return dwKeyIndex > 3\n");
305 return -EINVAL; 288 return -EINVAL;
306 } 289 }
307 290
308 if (param->u.wpa_key.alg_name == WPA_ALG_TKIP) { 291 if (param->u.wpa_key.alg_name == WPA_ALG_TKIP) {
309 pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; 292 pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
310 } 293 }
311 294
312 if (param->u.wpa_key.alg_name == WPA_ALG_CCMP) { 295 if (param->u.wpa_key.alg_name == WPA_ALG_CCMP) {
313 pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; 296 pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
314 } 297 }
315 298
316 if (param->u.wpa_key.set_tx) 299 if (param->u.wpa_key.set_tx)
317 dwKeyIndex |= (1 << 31); 300 dwKeyIndex |= (1 << 31);
318 301
319 302
320 if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) 303 if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
321 byKeyDecMode = KEY_CTL_CCMP; 304 byKeyDecMode = KEY_CTL_CCMP;
322 else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) 305 else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
323 byKeyDecMode = KEY_CTL_TKIP; 306 byKeyDecMode = KEY_CTL_TKIP;
324 else 307 else
325 byKeyDecMode = KEY_CTL_WEP; 308 byKeyDecMode = KEY_CTL_WEP;
326 309
327 // Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled 310 // Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled
328 if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) { 311 if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
329 if (param->u.wpa_key.key_len == MAX_KEY_LEN) 312 if (param->u.wpa_key.key_len == MAX_KEY_LEN)
330 byKeyDecMode = KEY_CTL_TKIP; 313 byKeyDecMode = KEY_CTL_TKIP;
331 else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN) 314 else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
332 byKeyDecMode = KEY_CTL_WEP; 315 byKeyDecMode = KEY_CTL_WEP;
333 else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN) 316 else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
334 byKeyDecMode = KEY_CTL_WEP; 317 byKeyDecMode = KEY_CTL_WEP;
335 } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) { 318 } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
336 if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN) 319 if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
337 byKeyDecMode = KEY_CTL_WEP; 320 byKeyDecMode = KEY_CTL_WEP;
338 else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN) 321 else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
339 byKeyDecMode = KEY_CTL_WEP; 322 byKeyDecMode = KEY_CTL_WEP;
340 } 323 }
341
342 // Check TKIP key length
343 if ((byKeyDecMode == KEY_CTL_TKIP) &&
344 (param->u.wpa_key.key_len != MAX_KEY_LEN)) {
345 // TKIP Key must be 256 bits
346 //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - TKIP Key must be 256 bits\n"));
347 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return- TKIP Key must be 256 bits!\n");
348 return -EINVAL;
349 }
350 // Check AES key length
351 if ((byKeyDecMode == KEY_CTL_CCMP) &&
352 (param->u.wpa_key.key_len != AES_KEY_LEN)) {
353 // AES Key must be 128 bits
354 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return - AES Key must be 128 bits\n");
355 return -EINVAL;
356 }
357 324
358 if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) { 325 // Check TKIP key length
359 /* if broadcast, set the key as every key entry's group key */ 326 if ((byKeyDecMode == KEY_CTL_TKIP) &&
360 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Groupe Key Assign.\n"); 327 (param->u.wpa_key.key_len != MAX_KEY_LEN)) {
361 328 // TKIP Key must be 256 bits
362 if ((KeybSetAllGroupKey(pDevice, 329 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return- TKIP Key must be 256 bits!\n");
363 &(pDevice->sKey), 330 return -EINVAL;
364 dwKeyIndex,
365 param->u.wpa_key.key_len,
366 (PQWORD) &(KeyRSC),
367 (PBYTE)abyKey,
368 byKeyDecMode
369 ) == TRUE) &&
370 (KeybSetDefaultKey(pDevice,
371 &(pDevice->sKey),
372 dwKeyIndex,
373 param->u.wpa_key.key_len,
374 (PQWORD) &(KeyRSC),
375 (PBYTE)abyKey,
376 byKeyDecMode
377 ) == TRUE) ) {
378 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
379
380 } else {
381 //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -KeybSetDefaultKey Fail.0\n"));
382 return -EINVAL;
383 }
384
385 } else {
386 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Assign.\n");
387 // BSSID not 0xffffffffffff
388 // Pairwise Key can't be WEP
389 if (byKeyDecMode == KEY_CTL_WEP) {
390 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key can't be WEP\n");
391 return -EINVAL;
392 }
393
394 dwKeyIndex |= (1 << 30); // set pairwise key
395 if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
396 //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - WMAC_CONFIG_IBSS_STA\n"));
397 return -EINVAL;
398 }
399 if (KeybSetKey(pDevice,
400 &(pDevice->sKey),
401 &param->addr[0],
402 dwKeyIndex,
403 param->u.wpa_key.key_len,
404 (PQWORD) &(KeyRSC),
405 (PBYTE)abyKey,
406 byKeyDecMode
407 ) == TRUE) {
408 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
409
410 } else {
411 // Key Table Full
412 if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
413 //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
414 return -EINVAL;
415
416 } else {
417 // Save Key and configure just before associate/reassociate to BSSID
418 // we do not implement now
419 return -EINVAL;
420 }
421 }
422 } // BSSID not 0xffffffffffff
423 if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
424 pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index;
425 pDevice->bTransmitKey = TRUE;
426 } 331 }
427 pDevice->bEncryptionEnable = TRUE; 332 // Check AES key length
333 if ((byKeyDecMode == KEY_CTL_CCMP) &&
334 (param->u.wpa_key.key_len != AES_KEY_LEN)) {
335 // AES Key must be 128 bits
336 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return - AES Key must be 128 bits\n");
337 return -EINVAL;
338 }
428 339
429/* 340 if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
430 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " key=%x-%x-%x-%x-%x-xxxxx \n", 341 /* if broadcast, set the key as every key entry's group key */
431 pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][0], 342 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Groupe Key Assign.\n");
432 pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][1], 343
433 pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][2], 344 if ((KeybSetAllGroupKey(pDevice, &(pDevice->sKey), dwKeyIndex,
434 pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][3], 345 param->u.wpa_key.key_len,
435 pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][4] 346 (PQWORD) &(KeyRSC),
436 ); 347 (PBYTE)abyKey,
437*/ 348 byKeyDecMode
349 ) == TRUE) &&
350 (KeybSetDefaultKey(pDevice,
351 &(pDevice->sKey),
352 dwKeyIndex,
353 param->u.wpa_key.key_len,
354 (PQWORD) &(KeyRSC),
355 (PBYTE)abyKey,
356 byKeyDecMode
357 ) == TRUE) ) {
358 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
359 } else {
360 return -EINVAL;
361 }
362 } else {
363 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Assign.\n");
364 // BSSID not 0xffffffffffff
365 // Pairwise Key can't be WEP
366 if (byKeyDecMode == KEY_CTL_WEP) {
367 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key can't be WEP\n");
368 return -EINVAL;
369 }
370 dwKeyIndex |= (1 << 30); // set pairwise key
371 if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
372 //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - WMAC_CONFIG_IBSS_STA\n"));
373 return -EINVAL;
374 }
375 if (KeybSetKey(pDevice, &(pDevice->sKey), &param->addr[0],
376 dwKeyIndex, param->u.wpa_key.key_len,
377 (PQWORD) &(KeyRSC), (PBYTE)abyKey, byKeyDecMode
378 ) == TRUE) {
379 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
380 } else {
381 // Key Table Full
382 if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
383 //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
384 return -EINVAL;
385 } else {
386 // Save Key and configure just before associate/reassociate to BSSID
387 // we do not implement now
388 return -EINVAL;
389 }
390 }
391 } // BSSID not 0xffffffffffff
392 if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
393 pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index;
394 pDevice->bTransmitKey = TRUE;
395 }
396 pDevice->bEncryptionEnable = TRUE;
438 397
439 return ret; 398 return ret;
440
441} 399}
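
One detail worth isolating from this hunk: wpa_set_keys() is entered with pDevice->lock held (the VIAWGET_SET_KEY case later in the file takes it), and copy_from_user() can fault and sleep, so the function drops the spinlock around the copy and reacquires it afterwards. A sketch of that pattern, assuming an illustrative device type (the driver itself returns -EINVAL on a failed copy; -EFAULT is the conventional code):

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_dev {			/* stand-in for PSDevice */
	spinlock_t lock;
};

static int copy_key_locked(struct demo_dev *dev, u8 *dst,
			   const void __user *src, size_t len)
{
	int ret = 0;

	spin_unlock_irq(&dev->lock);	/* the copy may sleep on a fault */
	if (copy_from_user(dst, src, len))
		ret = -EFAULT;
	spin_lock_irq(&dev->lock);	/* reacquire before touching state */

	return ret;
}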
442 400
443 401
@@ -454,23 +412,17 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
454 * Return Value: 412 * Return Value:
455 * 413 *
456 */ 414 */
457 415static int wpa_set_wpa(PSDevice pDevice, struct viawget_wpa_param *param)
458static int wpa_set_wpa(PSDevice pDevice,
459 struct viawget_wpa_param *param)
460{ 416{
461 417 PSMgmtObject pMgmt = &pDevice->sMgmtObj;
462 PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
463 int ret = 0; 418 int ret = 0;
464 419
465 pMgmt->eAuthenMode = WMAC_AUTH_OPEN; 420 pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
466 pMgmt->bShareKeyAlgorithm = FALSE; 421 pMgmt->bShareKeyAlgorithm = FALSE;
467 422
468 return ret; 423 return ret;
469} 424}
470 425
471
472
473
474 /* 426 /*
475 * Description: 427 * Description:
476 * set disassociate 428 * set disassociate
@@ -484,25 +436,21 @@ static int wpa_set_wpa(PSDevice pDevice,
484 * Return Value: 436 * Return Value:
485 * 437 *
486 */ 438 */
487 439static int wpa_set_disassociate(PSDevice pDevice, struct viawget_wpa_param *param)
488static int wpa_set_disassociate(PSDevice pDevice,
489 struct viawget_wpa_param *param)
490{ 440{
491 PSMgmtObject pMgmt = &(pDevice->sMgmtObj); 441 PSMgmtObject pMgmt = &pDevice->sMgmtObj;
492 int ret = 0; 442 int ret = 0;
493 443
494 spin_lock_irq(&pDevice->lock); 444 spin_lock_irq(&pDevice->lock);
495 if (pDevice->bLinkPass) { 445 if (pDevice->bLinkPass) {
496 if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6)) 446 if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
497 bScheduleCommand((void *) pDevice, WLAN_CMD_DISASSOCIATE, NULL); 447 bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
498 } 448 }
499 spin_unlock_irq(&pDevice->lock); 449 spin_unlock_irq(&pDevice->lock);
500 450
501 return ret; 451 return ret;
502} 452}
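
wpa_set_disassociate() above only queues WLAN_CMD_DISASSOCIATE when the supplicant's address actually matches the BSSID the device is linked to, all under pDevice->lock. Isolated for clarity (the wrapper function is illustrative; the types and calls are the driver's own, as shown in the hunk):

static void disassociate_if_current(PSDevice pDevice, PSMgmtObject pMgmt,
				    const unsigned char *addr)
{
	spin_lock_irq(&pDevice->lock);
	if (pDevice->bLinkPass &&
	    !memcmp(addr, pMgmt->abyCurrBSSID, 6))
		bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
	spin_unlock_irq(&pDevice->lock);
}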
503 453
504
505
506/* 454/*
507 * Description: 455 * Description:
508 * enable scan process 456 * enable scan process
@@ -516,36 +464,30 @@ static int wpa_set_disassociate(PSDevice pDevice,
516 * Return Value: 464 * Return Value:
517 * 465 *
518 */ 466 */
519 467static int wpa_set_scan(PSDevice pDevice, struct viawget_wpa_param *param)
520static int wpa_set_scan(PSDevice pDevice,
521 struct viawget_wpa_param *param)
522{ 468{
523 int ret = 0; 469 int ret = 0;
524 470
525/**set ap_scan=1&&scan_ssid=1 under hidden ssid mode**/ 471/**set ap_scan=1&&scan_ssid=1 under hidden ssid mode**/
526 PSMgmtObject pMgmt = &(pDevice->sMgmtObj); 472 PSMgmtObject pMgmt = &pDevice->sMgmtObj;
527 PWLAN_IE_SSID pItemSSID; 473 PWLAN_IE_SSID pItemSSID;
528printk("wpa_set_scan-->desired [ssid=%s,ssid_len=%d]\n", 474 printk("wpa_set_scan-->desired [ssid=%s,ssid_len=%d]\n",
529 param->u.scan_req.ssid,param->u.scan_req.ssid_len); 475 param->u.scan_req.ssid,param->u.scan_req.ssid_len);
530// Set the SSID 476// Set the SSID
531memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); 477 memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
532pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID; 478 pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
533pItemSSID->byElementID = WLAN_EID_SSID; 479 pItemSSID->byElementID = WLAN_EID_SSID;
534memcpy(pItemSSID->abySSID, param->u.scan_req.ssid, param->u.scan_req.ssid_len); 480 memcpy(pItemSSID->abySSID, param->u.scan_req.ssid, param->u.scan_req.ssid_len);
535pItemSSID->len = param->u.scan_req.ssid_len; 481 pItemSSID->len = param->u.scan_req.ssid_len;
536
537 spin_lock_irq(&pDevice->lock);
538 BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
539 /* bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL); */
540 bScheduleCommand((void *) pDevice,
541 WLAN_CMD_BSSID_SCAN,
542 pMgmt->abyDesireSSID);
543 spin_unlock_irq(&pDevice->lock);
544
545 return ret;
546}
547 482
483 spin_lock_irq(&pDevice->lock);
484 BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
485 bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN,
486 pMgmt->abyDesireSSID);
487 spin_unlock_irq(&pDevice->lock);
548 488
489 return ret;
490}
549 491
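
wpa_set_scan() above hand-builds an IEEE 802.11 SSID information element (element ID 0, a one-byte length, then up to 32 SSID octets) into pMgmt->abyDesireSSID before queueing the directed scan. A self-contained sketch of that layout, with a simplified struct standing in for PWLAN_IE_SSID:

#include <linux/string.h>
#include <linux/types.h>

#define DEMO_EID_SSID	0	/* 802.11 element ID for SSID */
#define DEMO_SSID_MAX	32

struct demo_ie_ssid {		/* simplified PWLAN_IE_SSID layout */
	u8 eid;
	u8 len;
	u8 ssid[DEMO_SSID_MAX];
};

static void build_ssid_ie(struct demo_ie_ssid *ie, const u8 *ssid, u8 len)
{
	memset(ie, 0, sizeof(*ie));
	ie->eid = DEMO_EID_SSID;
	ie->len = len;			/* caller guarantees len <= 32 */
	memcpy(ie->ssid, ssid, len);
}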
550/* 492/*
551 * Description: 493 * Description:
@@ -560,19 +502,15 @@ pItemSSID->len = param->u.scan_req.ssid_len;
560 * Return Value: 502 * Return Value:
561 * 503 *
562 */ 504 */
563 505static int wpa_get_bssid(PSDevice pDevice, struct viawget_wpa_param *param)
564static int wpa_get_bssid(PSDevice pDevice,
565 struct viawget_wpa_param *param)
566{ 506{
567 PSMgmtObject pMgmt = &(pDevice->sMgmtObj); 507 PSMgmtObject pMgmt = &pDevice->sMgmtObj;
568 int ret = 0; 508 int ret = 0;
569 memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID , 6); 509 memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID, 6);
570 510
571 return ret; 511 return ret;
572
573} 512}
574 513
575
576/* 514/*
577 * Description: 515 * Description:
578 * get bssid 516 * get bssid
@@ -586,24 +524,20 @@ static int wpa_get_bssid(PSDevice pDevice,
586 * Return Value: 524 * Return Value:
587 * 525 *
588 */ 526 */
589 527static int wpa_get_ssid(PSDevice pDevice, struct viawget_wpa_param *param)
590static int wpa_get_ssid(PSDevice pDevice,
591 struct viawget_wpa_param *param)
592{ 528{
593 PSMgmtObject pMgmt = &(pDevice->sMgmtObj); 529 PSMgmtObject pMgmt = &pDevice->sMgmtObj;
594 PWLAN_IE_SSID pItemSSID; 530 PWLAN_IE_SSID pItemSSID;
595 int ret = 0; 531 int ret = 0;
596 532
597 pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID; 533 pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
598 534
599 memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID , pItemSSID->len); 535 memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID, pItemSSID->len);
600 param->u.wpa_associate.ssid_len = pItemSSID->len; 536 param->u.wpa_associate.ssid_len = pItemSSID->len;
601 537
602 return ret; 538 return ret;
603} 539}
604 540
605
606
607/* 541/*
608 * Description: 542 * Description:
609 * get scan results 543 * get scan results
@@ -617,135 +551,114 @@ static int wpa_get_ssid(PSDevice pDevice,
617 * Return Value: 551 * Return Value:
618 * 552 *
619 */ 553 */
620 554static int wpa_get_scan(PSDevice pDevice, struct viawget_wpa_param *param)
621static int wpa_get_scan(PSDevice pDevice,
622 struct viawget_wpa_param *param)
623{ 555{
624 struct viawget_scan_result *scan_buf; 556 struct viawget_scan_result *scan_buf;
625 PSMgmtObject pMgmt = &(pDevice->sMgmtObj); 557 PSMgmtObject pMgmt = &pDevice->sMgmtObj;
626 PWLAN_IE_SSID pItemSSID; 558 PWLAN_IE_SSID pItemSSID;
627 PKnownBSS pBSS; 559 PKnownBSS pBSS;
628 PBYTE pBuf; 560 PBYTE pBuf;
629 int ret = 0; 561 int ret = 0;
630 u16 count = 0; 562 u16 count = 0;
631 u16 ii, jj; 563 u16 ii;
632 long ldBm;//James //add 564 u16 jj;
565 long ldBm; //James //add
633 566
634//******mike:bubble sort by stronger RSSI*****// 567//******mike:bubble sort by stronger RSSI*****//
568 PBYTE ptempBSS;
635 569
636 PBYTE ptempBSS; 570 ptempBSS = kmalloc(sizeof(KnownBSS), GFP_ATOMIC);
637
638 571
572 if (ptempBSS == NULL) {
573 printk("bubble sort kmalloc memory fail@@@\n");
574 ret = -ENOMEM;
575 return ret;
576 }
639 577
640 ptempBSS = kmalloc(sizeof(KnownBSS), (int)GFP_ATOMIC); 578 for (ii = 0; ii < MAX_BSS_NUM; ii++) {
641 579 for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
642 if (ptempBSS == NULL) { 580 if ((pMgmt->sBSSList[jj].bActive != TRUE)
643 581 || ((pMgmt->sBSSList[jj].uRSSI > pMgmt->sBSSList[jj + 1].uRSSI)
644 printk("bubble sort kmalloc memory fail@@@\n"); 582 && (pMgmt->sBSSList[jj + 1].bActive != FALSE))) {
645 583 memcpy(ptempBSS,&pMgmt->sBSSList[jj], sizeof(KnownBSS));
646 ret = -ENOMEM; 584 memcpy(&pMgmt->sBSSList[jj], &pMgmt->sBSSList[jj + 1],
647 585 sizeof(KnownBSS));
648 return ret; 586 memcpy(&pMgmt->sBSSList[jj + 1], ptempBSS, sizeof(KnownBSS));
649 587 }
650 } 588 }
651 589 }
652 for (ii = 0; ii < MAX_BSS_NUM; ii++) { 590 kfree(ptempBSS);
653
654 for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
655
656 if ((pMgmt->sBSSList[jj].bActive != TRUE) ||
657
658 ((pMgmt->sBSSList[jj].uRSSI>pMgmt->sBSSList[jj+1].uRSSI) &&(pMgmt->sBSSList[jj+1].bActive!=FALSE))) {
659
660 memcpy(ptempBSS,&pMgmt->sBSSList[jj],sizeof(KnownBSS));
661
662 memcpy(&pMgmt->sBSSList[jj],&pMgmt->sBSSList[jj+1],sizeof(KnownBSS));
663
664 memcpy(&pMgmt->sBSSList[jj+1],ptempBSS,sizeof(KnownBSS));
665
666 }
667
668 }
669
670 }
671
672 kfree(ptempBSS);
673
674 // printk("bubble sort result:\n");
675 591
676 count = 0; 592 count = 0;
677 pBSS = &(pMgmt->sBSSList[0]); 593 pBSS = &(pMgmt->sBSSList[0]);
678 for (ii = 0; ii < MAX_BSS_NUM; ii++) { 594 for (ii = 0; ii < MAX_BSS_NUM; ii++) {
679 pBSS = &(pMgmt->sBSSList[ii]); 595 pBSS = &(pMgmt->sBSSList[ii]);
680 if (!pBSS->bActive) 596 if (!pBSS->bActive)
681 continue; 597 continue;
682 count++; 598 count++;
683 } 599 }
684 600
685 pBuf = kcalloc(count, sizeof(struct viawget_scan_result), (int)GFP_ATOMIC); 601 pBuf = kcalloc(count, sizeof(struct viawget_scan_result), GFP_ATOMIC);
686 602
687 if (pBuf == NULL) { 603 if (pBuf == NULL) {
688 ret = -ENOMEM; 604 ret = -ENOMEM;
689 return ret; 605 return ret;
690 } 606 }
691 scan_buf = (struct viawget_scan_result *)pBuf; 607 scan_buf = (struct viawget_scan_result *)pBuf;
692 pBSS = &(pMgmt->sBSSList[0]); 608 pBSS = &(pMgmt->sBSSList[0]);
693 for (ii = 0, jj = 0; ii < MAX_BSS_NUM ; ii++) { 609 for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) {
694 pBSS = &(pMgmt->sBSSList[ii]); 610 pBSS = &(pMgmt->sBSSList[ii]);
695 if (pBSS->bActive) { 611 if (pBSS->bActive) {
696 if (jj >= count) 612 if (jj >= count)
697 break; 613 break;
698 memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN); 614 memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN);
699 pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID; 615 pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
700 memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len); 616 memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len);
701 scan_buf->ssid_len = pItemSSID->len; 617 scan_buf->ssid_len = pItemSSID->len;
702 scan_buf->freq = frequency_list[pBSS->uChannel-1]; 618 scan_buf->freq = frequency_list[pBSS->uChannel-1];
703 scan_buf->caps = pBSS->wCapInfo; //DavidWang for sharemode 619 scan_buf->caps = pBSS->wCapInfo; // DavidWang for sharemode
704 620
705 RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm); 621 RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
706 if(-ldBm<50){ 622 if (-ldBm < 50)
707 scan_buf->qual = 100; 623 scan_buf->qual = 100;
708 }else if(-ldBm > 90) { 624 else if (-ldBm > 90)
709 scan_buf->qual = 0; 625 scan_buf->qual = 0;
710 }else { 626 else
711 scan_buf->qual=(40-(-ldBm-50))*100/40; 627 scan_buf->qual=(40-(-ldBm-50))*100/40;
712 }
713 628
714 //James 629 //James
715 //scan_buf->caps = pBSS->wCapInfo; 630 //scan_buf->caps = pBSS->wCapInfo;
716 //scan_buf->qual = 631 //scan_buf->qual =
717 scan_buf->noise = 0; 632 scan_buf->noise = 0;
718 scan_buf->level = ldBm; 633 scan_buf->level = ldBm;
719 634
720 //scan_buf->maxrate = 635 //scan_buf->maxrate =
721 if (pBSS->wWPALen != 0) { 636 if (pBSS->wWPALen != 0) {
722 scan_buf->wpa_ie_len = pBSS->wWPALen; 637 scan_buf->wpa_ie_len = pBSS->wWPALen;
723 memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen); 638 memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen);
724 } 639 }
725 if (pBSS->wRSNLen != 0) { 640 if (pBSS->wRSNLen != 0) {
726 scan_buf->rsn_ie_len = pBSS->wRSNLen; 641 scan_buf->rsn_ie_len = pBSS->wRSNLen;
727 memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen); 642 memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
728 } 643 }
729 scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf + sizeof(struct viawget_scan_result)); 644 scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf + sizeof(struct viawget_scan_result));
730 jj ++; 645 jj ++;
731 } 646 }
732 } 647 }
733 648
734 if (jj < count) 649 if (jj < count)
735 count = jj; 650 count = jj;
736 651
737 if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count)) { 652 if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count))
738 ret = -EFAULT; 653 ret = -EFAULT;
739 } 654
740 param->u.scan_results.scan_count = count; 655 param->u.scan_results.scan_count = count;
741 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " param->u.scan_results.scan_count = %d\n", count) 656 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " param->u.scan_results.scan_count = %d\n", count);
742 657
743 kfree(pBuf); 658 kfree(pBuf);
744 return ret; 659 return ret;
745} 660}
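
The reporting half of wpa_get_scan() follows a common ioctl shape: count the active entries, kcalloc() a kernel staging buffer, fill it, push it to userspace with one copy_to_user(), and kfree() on every exit path. (The signal-quality math in the hunk maps ldBm in [-90, -50] linearly onto 0..100.) A reduced sketch, where struct demo_result stands in for struct viawget_scan_result and GFP_ATOMIC mirrors the hunk:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_result {		/* stand-in for viawget_scan_result */
	u8 bssid[6];
	u8 ssid_len;
	u8 ssid[32];
};

static int report_results(void __user *ubuf, const struct demo_result *src,
			  u16 count)
{
	struct demo_result *buf;
	int ret = 0;

	buf = kcalloc(count, sizeof(*buf), GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, src, count * sizeof(*buf));
	if (copy_to_user(ubuf, buf, count * sizeof(*buf)))
		ret = -EFAULT;

	kfree(buf);		/* freed on success and failure alike */
	return ret;
}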
746 661
747
748
749/* 662/*
750 * Description: 663 * Description:
751 * set associate with AP 664 * set associate with AP
@@ -759,25 +672,23 @@ static int wpa_get_scan(PSDevice pDevice,
759 * Return Value: 672 * Return Value:
760 * 673 *
761 */ 674 */
762 675static int wpa_set_associate(PSDevice pDevice, struct viawget_wpa_param *param)
763static int wpa_set_associate(PSDevice pDevice,
764 struct viawget_wpa_param *param)
765{ 676{
766 PSMgmtObject pMgmt = &(pDevice->sMgmtObj); 677 PSMgmtObject pMgmt = &pDevice->sMgmtObj;
767 PWLAN_IE_SSID pItemSSID; 678 PWLAN_IE_SSID pItemSSID;
768 BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 679 BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
769 BYTE abyWPAIE[64]; 680 BYTE abyWPAIE[64];
770 int ret = 0; 681 int ret = 0;
771 BOOL bwepEnabled=FALSE; 682 BOOL bwepEnabled=FALSE;
772 683
773 // set key type & algorithm 684 // set key type & algorithm
774 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise_suite = %d\n", param->u.wpa_associate.pairwise_suite); 685 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise_suite = %d\n", param->u.wpa_associate.pairwise_suite);
775 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "group_suite = %d\n", param->u.wpa_associate.group_suite); 686 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "group_suite = %d\n", param->u.wpa_associate.group_suite);
776 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key_mgmt_suite = %d\n", param->u.wpa_associate.key_mgmt_suite); 687 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key_mgmt_suite = %d\n", param->u.wpa_associate.key_mgmt_suite);
777 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "auth_alg = %d\n", param->u.wpa_associate.auth_alg); 688 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "auth_alg = %d\n", param->u.wpa_associate.auth_alg);
778 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "mode = %d\n", param->u.wpa_associate.mode); 689 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "mode = %d\n", param->u.wpa_associate.mode);
779 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len); 690 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
780 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming dBm = %d\n", param->u.wpa_associate.roam_dbm); //Davidwang 691 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming dBm = %d\n", param->u.wpa_associate.roam_dbm); // Davidwang
781 692
782 if (param->u.wpa_associate.wpa_ie) { 693 if (param->u.wpa_associate.wpa_ie) {
783 if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE)) 694 if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
@@ -789,25 +700,25 @@ static int wpa_set_associate(PSDevice pDevice,
789 } 700 }
790 701
791 if (param->u.wpa_associate.mode == 1) 702 if (param->u.wpa_associate.mode == 1)
792 pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA; 703 pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
793 else 704 else
794 pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA; 705 pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
795 706
796 // set bssid 707 // set bssid
797 if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0) 708 if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0)
798 memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6); 709 memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6);
799 // set ssid 710 // set ssid
800 memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); 711 memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
801 pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID; 712 pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
802 pItemSSID->byElementID = WLAN_EID_SSID; 713 pItemSSID->byElementID = WLAN_EID_SSID;
803 pItemSSID->len = param->u.wpa_associate.ssid_len; 714 pItemSSID->len = param->u.wpa_associate.ssid_len;
804 memcpy(pItemSSID->abySSID, param->u.wpa_associate.ssid, pItemSSID->len); 715 memcpy(pItemSSID->abySSID, param->u.wpa_associate.ssid, pItemSSID->len);
805 716
806 if (param->u.wpa_associate.wpa_ie_len == 0) { 717 if (param->u.wpa_associate.wpa_ie_len == 0) {
807 if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY) 718 if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY)
808 pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY; 719 pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY;
809 else 720 else
810 pMgmt->eAuthenMode = WMAC_AUTH_OPEN; 721 pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
811 } else if (abyWPAIE[0] == RSN_INFO_ELEM) { 722 } else if (abyWPAIE[0] == RSN_INFO_ELEM) {
812 if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK) 723 if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
813 pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK; 724 pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
@@ -817,9 +728,9 @@ static int wpa_set_associate(PSDevice pDevice,
817 if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_WPA_NONE) 728 if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_WPA_NONE)
818 pMgmt->eAuthenMode = WMAC_AUTH_WPANONE; 729 pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
819 else if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK) 730 else if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
820 pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK; 731 pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
821 else 732 else
822 pMgmt->eAuthenMode = WMAC_AUTH_WPA; 733 pMgmt->eAuthenMode = WMAC_AUTH_WPA;
823 } 734 }
824 735
825 switch (param->u.wpa_associate.pairwise_suite) { 736 switch (param->u.wpa_associate.pairwise_suite) {
@@ -833,7 +744,6 @@ static int wpa_set_associate(PSDevice pDevice,
833 case CIPHER_WEP104: 744 case CIPHER_WEP104:
834 pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; 745 pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
835 bwepEnabled = TRUE; 746 bwepEnabled = TRUE;
836 // printk("****************wpa_set_associate:set CIPHER_WEP40_104\n");
837 break; 747 break;
838 case CIPHER_NONE: 748 case CIPHER_NONE:
839 if (param->u.wpa_associate.group_suite == CIPHER_CCMP) 749 if (param->u.wpa_associate.group_suite == CIPHER_CCMP)
@@ -845,70 +755,64 @@ static int wpa_set_associate(PSDevice pDevice,
845 pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; 755 pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
846 } 756 }
847 757
848 pMgmt->Roam_dbm = param->u.wpa_associate.roam_dbm; 758 pMgmt->Roam_dbm = param->u.wpa_associate.roam_dbm;
849 // if ((pMgmt->Roam_dbm > 40)&&(pMgmt->Roam_dbm<80)) 759 if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) { // @wep-sharekey
850 // pDevice->bEnableRoaming = TRUE; 760 pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
851 761 pMgmt->bShareKeyAlgorithm = TRUE;
852 if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) { //@wep-sharekey 762 } else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) {
853 pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; 763 if(bwepEnabled==TRUE) { //@open-wep
854 pMgmt->bShareKeyAlgorithm = TRUE; 764 pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
855 } 765 } else {
856 else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) { 766 // @only open
857 if(bwepEnabled==TRUE) { //@open-wep 767 pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
858 pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
859 }
860 else { //@only open
861 pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
862 } 768 }
863 } 769 }
864//mike save old encryption status 770 // mike save old encryption status
865 pDevice->eOldEncryptionStatus = pDevice->eEncryptionStatus; 771 pDevice->eOldEncryptionStatus = pDevice->eEncryptionStatus;
866 772
867 if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled) 773 if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled)
868 pDevice->bEncryptionEnable = TRUE; 774 pDevice->bEncryptionEnable = TRUE;
869 else 775 else
870 pDevice->bEncryptionEnable = FALSE; 776 pDevice->bEncryptionEnable = FALSE;
871
872 if ((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
873 ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bwepEnabled==TRUE))) {
874 //mike re-comment:open-wep && sharekey-wep needn't do initial key!!
875
876 }
877 else
878 KeyvInitTable(pDevice,&pDevice->sKey);
879 777
880 spin_lock_irq(&pDevice->lock); 778 if ((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
881 pDevice->bLinkPass = FALSE; 779 ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bwepEnabled==TRUE))) {
882 ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW); 780 // mike re-comment:open-wep && sharekey-wep needn't do initial key!!
883 memset(pMgmt->abyCurrBSSID, 0, 6); 781 } else {
884 pMgmt->eCurrState = WMAC_STATE_IDLE; 782 KeyvInitTable(pDevice,&pDevice->sKey);
885 netif_stop_queue(pDevice->dev); 783 }
886 784
887/*******search if ap_scan=2 ,which is associating request in hidden ssid mode ****/ 785 spin_lock_irq(&pDevice->lock);
888{ 786 pDevice->bLinkPass = FALSE;
889 PKnownBSS pCurr = NULL; 787 ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
890 pCurr = BSSpSearchBSSList(pDevice, 788 memset(pMgmt->abyCurrBSSID, 0, 6);
891 pMgmt->abyDesireBSSID, 789 pMgmt->eCurrState = WMAC_STATE_IDLE;
892 pMgmt->abyDesireSSID, 790 netif_stop_queue(pDevice->dev);
893 pDevice->eConfigPHYMode 791
894 ); 792/******* search if ap_scan=2, which is associating request in hidden ssid mode ****/
895 793 {
896 if (pCurr == NULL){ 794 PKnownBSS pCurr = NULL;
897 printk("wpa_set_associate---->hidden mode site survey before associate.......\n"); 795 pCurr = BSSpSearchBSSList(pDevice,
898 bScheduleCommand((void *) pDevice, 796 pMgmt->abyDesireBSSID,
899 WLAN_CMD_BSSID_SCAN, 797 pMgmt->abyDesireSSID,
900 pMgmt->abyDesireSSID); 798 pDevice->eConfigPHYMode
901 } 799 );
902} 800
801 if (pCurr == NULL){
802 printk("wpa_set_associate---->hidden mode site survey before associate.......\n");
803 bScheduleCommand((void *)pDevice,
804 WLAN_CMD_BSSID_SCAN,
805 pMgmt->abyDesireSSID);
806 }
807 }
903/****************************************************************/ 808/****************************************************************/
904 809
905 bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL); 810 bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
906 spin_unlock_irq(&pDevice->lock); 811 spin_unlock_irq(&pDevice->lock);
907 812
908 return ret; 813 return ret;
909} 814}
910 815
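
The block kept at the end of wpa_set_associate() handles wpa_supplicant's ap_scan=2 / hidden-SSID case: if the desired BSS is not already in the cached list, a directed scan for the SSID is queued before the association command. Isolated below with the driver's own calls exactly as the hunk shows them; only the wrapper function is illustrative:

static void associate_prescan(PSDevice pDevice, PSMgmtObject pMgmt)
{
	PKnownBSS pCurr = BSSpSearchBSSList(pDevice,
					    pMgmt->abyDesireBSSID,
					    pMgmt->abyDesireSSID,
					    pDevice->eConfigPHYMode);

	if (pCurr == NULL)	/* hidden AP: scan for the SSID first */
		bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN,
				 pMgmt->abyDesireSSID);

	bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
}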
911
912/* 816/*
913 * Description: 817 * Description:
914 * wpa_ioctl main function supported for wpa supplicant 818 * wpa_ioctl main function supported for wpa supplicant
@@ -922,7 +826,6 @@ static int wpa_set_associate(PSDevice pDevice,
922 * Return Value: 826 * Return Value:
923 * 827 *
924 */ 828 */
925
926int wpa_ioctl(PSDevice pDevice, struct iw_point *p) 829int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
927{ 830{
928 struct viawget_wpa_param *param; 831 struct viawget_wpa_param *param;
@@ -930,10 +833,10 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
930 int wpa_ioctl = 0; 833 int wpa_ioctl = 0;
931 834
932 if (p->length < sizeof(struct viawget_wpa_param) || 835 if (p->length < sizeof(struct viawget_wpa_param) ||
933 p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer) 836 p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
934 return -EINVAL; 837 return -EINVAL;
935 838
936 param = kmalloc((int)p->length, (int)GFP_KERNEL); 839 param = kmalloc((int)p->length, GFP_KERNEL);
937 if (param == NULL) 840 if (param == NULL)
938 return -ENOMEM; 841 return -ENOMEM;
939 842
@@ -944,63 +847,63 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
944 847
945 switch (param->cmd) { 848 switch (param->cmd) {
946 case VIAWGET_SET_WPA: 849 case VIAWGET_SET_WPA:
947 ret = wpa_set_wpa(pDevice, param); 850 ret = wpa_set_wpa(pDevice, param);
948 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n"); 851 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n");
949 break; 852 break;
950 853
951 case VIAWGET_SET_KEY: 854 case VIAWGET_SET_KEY:
952 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n"); 855 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n");
953 spin_lock_irq(&pDevice->lock); 856 spin_lock_irq(&pDevice->lock);
954 ret = wpa_set_keys(pDevice, param, FALSE); 857 ret = wpa_set_keys(pDevice, param, FALSE);
955 spin_unlock_irq(&pDevice->lock); 858 spin_unlock_irq(&pDevice->lock);
956 break; 859 break;
957 860
958 case VIAWGET_SET_SCAN: 861 case VIAWGET_SET_SCAN:
959 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n"); 862 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n");
960 ret = wpa_set_scan(pDevice, param); 863 ret = wpa_set_scan(pDevice, param);
961 break; 864 break;
962 865
963 case VIAWGET_GET_SCAN: 866 case VIAWGET_GET_SCAN:
964 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SCAN\n"); 867 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SCAN\n");
965 ret = wpa_get_scan(pDevice, param); 868 ret = wpa_get_scan(pDevice, param);
966 wpa_ioctl = 1; 869 wpa_ioctl = 1;
967 break; 870 break;
968 871
969 case VIAWGET_GET_SSID: 872 case VIAWGET_GET_SSID:
970 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n"); 873 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n");
971 ret = wpa_get_ssid(pDevice, param); 874 ret = wpa_get_ssid(pDevice, param);
972 wpa_ioctl = 1; 875 wpa_ioctl = 1;
973 break; 876 break;
974 877
975 case VIAWGET_GET_BSSID: 878 case VIAWGET_GET_BSSID:
976 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n"); 879 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n");
977 ret = wpa_get_bssid(pDevice, param); 880 ret = wpa_get_bssid(pDevice, param);
978 wpa_ioctl = 1; 881 wpa_ioctl = 1;
979 break; 882 break;
980 883
981 case VIAWGET_SET_ASSOCIATE: 884 case VIAWGET_SET_ASSOCIATE:
982 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n"); 885 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n");
983 ret = wpa_set_associate(pDevice, param); 886 ret = wpa_set_associate(pDevice, param);
984 break; 887 break;
985 888
986 case VIAWGET_SET_DISASSOCIATE: 889 case VIAWGET_SET_DISASSOCIATE:
987 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n"); 890 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n");
988 ret = wpa_set_disassociate(pDevice, param); 891 ret = wpa_set_disassociate(pDevice, param);
989 break; 892 break;
990 893
991 case VIAWGET_SET_DROP_UNENCRYPT: 894 case VIAWGET_SET_DROP_UNENCRYPT:
992 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n"); 895 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n");
993 break; 896 break;
994 897
995 case VIAWGET_SET_DEAUTHENTICATE: 898 case VIAWGET_SET_DEAUTHENTICATE:
996 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n"); 899 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n");
997 break; 900 break;
998 901
999 default: 902 default:
1000 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n", 903 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n",
1001 param->cmd); 904 param->cmd);
905 kfree(param);
1002 return -EOPNOTSUPP; 906 return -EOPNOTSUPP;
1003 break;
1004 } 907 }
1005 908
1006 if ((ret == 0) && wpa_ioctl) { 909 if ((ret == 0) && wpa_ioctl) {
@@ -1012,7 +915,5 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
1012 915
1013out: 916out:
1014 kfree(param); 917 kfree(param);
1015
1016 return ret; 918 return ret;
1017} 919}
1018
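
The one behavioural change in this wpa_ioctl() hunk is the added kfree(param) before the -EOPNOTSUPP return: the default case previously leaked the kmalloc()ed parameter block on every unknown command. The shape of the fixed error path, reduced to essentials (names illustrative; the sketch uses the goto-out variant the rest of the function already relies on):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static int demo_ioctl(const void __user *uptr, size_t len)
{
	void *param;
	int ret;

	param = kmalloc(len, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	if (copy_from_user(param, uptr, len)) {
		ret = -EFAULT;
		goto out;
	}

	ret = -EOPNOTSUPP;	/* unknown command: must still free param */
out:
	kfree(param);
	return ret;
}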
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index fb466f4c92e0..4cd3ba5d5646 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -356,7 +356,7 @@ int prism2_scan(struct wiphy *wiphy, struct net_device *dev,
356 msg1.msgcode = DIDmsg_dot11req_scan; 356 msg1.msgcode = DIDmsg_dot11req_scan;
357 msg1.bsstype.data = P80211ENUM_bsstype_any; 357 msg1.bsstype.data = P80211ENUM_bsstype_any;
358 358
359 memset(&(msg1.bssid.data), 0xFF, sizeof(p80211item_pstr6_t)); 359 memset(&msg1.bssid.data.data, 0xFF, sizeof(msg1.bssid.data.data));
360 msg1.bssid.data.len = 6; 360 msg1.bssid.data.len = 6;
361 361
362 if (request->n_ssids > 0) { 362 if (request->n_ssids > 0) {
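
The cfg80211.c change is an overflow fix: the old memset sized the fill by sizeof(p80211item_pstr6_t), the whole item including its header, while writing at the offset of the inner byte array, so the 0xFF fill spilled past the field. Sizing the memset by the destination expression itself removes the mismatch. A standalone illustration with simplified stand-in structs (the real p80211item_pstr6_t layout differs in detail):

#include <linux/string.h>
#include <linux/types.h>

struct demo_pstr6 {		/* length-prefixed 6-byte string */
	u8 len;
	u8 data[6];
};

struct demo_item {		/* header plus payload, like the p80211 item */
	u32 did;
	u16 status;
	struct demo_pstr6 data;
};

static void fill_bssid(struct demo_item *msg)
{
	/* sizeof(msg->data.data) == 6: only the payload bytes are written */
	memset(&msg->data.data, 0xFF, sizeof(msg->data.data));
	msg->data.len = 6;
}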
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 14bfeb2e704c..0f51b4ab3631 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -150,7 +150,7 @@ static int p80211knetdev_init(netdevice_t *netdev)
150* Returns: 150* Returns:
151* the address of the statistics structure 151* the address of the statistics structure
152----------------------------------------------------------------*/ 152----------------------------------------------------------------*/
153static struct net_device_stats *p80211knetdev_get_stats(netdevice_t * netdev) 153static struct net_device_stats *p80211knetdev_get_stats(netdevice_t *netdev)
154{ 154{
155 wlandevice_t *wlandev = netdev->ml_priv; 155 wlandevice_t *wlandev = netdev->ml_priv;
156 156
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 6675c8226cef..c3bb05dd744f 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -406,6 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
406 /* SSID */ 406 /* SSID */
407 req->ssid.status = P80211ENUM_msgitem_status_data_ok; 407 req->ssid.status = P80211ENUM_msgitem_status_data_ok;
408 req->ssid.data.len = le16_to_cpu(item->ssid.len); 408 req->ssid.data.len = le16_to_cpu(item->ssid.len);
409 req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN);
409 memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len); 410 memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len);
410 411
411 /* supported rates */ 412 /* supported rates */
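
The prism2mgmt.c hunk is a bounds fix: ssid.len arrives off the air via le16_to_cpu() and is untrusted, so it is clamped with min_t() before being used as a memcpy length (the hunk uses WLAN_BSSID_LEN as the bound, as shown). The clamp in isolation, with DEMO_MAX standing in for the destination capacity:

#include <linux/kernel.h>	/* min_t() */
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_MAX 32		/* capacity of the destination buffer */

static void copy_wire_ssid(u8 *dst, const u8 *src, u16 wire_len)
{
	u16 len = min_t(u16, wire_len, DEMO_MAX);	/* clamp untrusted length */

	memcpy(dst, src, len);
}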
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index 35f7b2a485e1..e828fd403c35 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -7,47 +7,32 @@
7 7
8#include "XGIfb.h" 8#include "XGIfb.h"
9#include "vb_struct.h" 9#include "vb_struct.h"
10#include "../../video/sis/sis.h"
10#include "vb_def.h" 11#include "vb_def.h"
11 12
12#define XGIFAIL(x) do { printk(x "\n"); return -EINVAL; } while (0) 13#define XGIFAIL(x) do { printk(x "\n"); return -EINVAL; } while (0)
13 14
14#ifndef PCI_VENDOR_ID_XG 15#ifndef PCI_DEVICE_ID_XGI_41
15#define PCI_VENDOR_ID_XG 0x18CA 16#define PCI_DEVICE_ID_XGI_41 0x041
16#endif 17#endif
17 18#ifndef PCI_DEVICE_ID_XGI_42
18#ifndef PCI_DEVICE_ID_XG_40 19#define PCI_DEVICE_ID_XGI_42 0x042
19#define PCI_DEVICE_ID_XG_40 0x040
20#endif
21#ifndef PCI_DEVICE_ID_XG_41
22#define PCI_DEVICE_ID_XG_41 0x041
23#endif
24#ifndef PCI_DEVICE_ID_XG_42
25#define PCI_DEVICE_ID_XG_42 0x042
26#endif 20#endif
27#ifndef PCI_DEVICE_ID_XG_20 21#ifndef PCI_DEVICE_ID_XGI_27
28#define PCI_DEVICE_ID_XG_20 0x020 22#define PCI_DEVICE_ID_XGI_27 0x027
29#endif
30#ifndef PCI_DEVICE_ID_XG_27
31#define PCI_DEVICE_ID_XG_27 0x027
32#endif 23#endif
33 24
34static DEFINE_PCI_DEVICE_TABLE(xgifb_pci_table) = { 25static DEFINE_PCI_DEVICE_TABLE(xgifb_pci_table) = {
35 {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_20)}, 26 {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_20)},
36 {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_27)}, 27 {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_27)},
37 {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_40)}, 28 {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_40)},
38 {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_42)}, 29 {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_42)},
39 {0} 30 {0}
40}; 31};
41 32
42MODULE_DEVICE_TABLE(pci, xgifb_pci_table); 33MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
43 34
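
This XGI_main.h hunk drops the driver's private vendor/device fallback defines in favour of the shared PCI_VENDOR_ID_XGI and PCI_DEVICE_ID_XGI_* constants now pulled in through the sis.h include; the table shape itself is unchanged. For reference, a minimal probe table of this era looks like the following (the raw vendor ID 0x18CA comes from the removed define above; DEFINE_PCI_DEVICE_TABLE was the idiom at the time and has since given way to a plain static const struct pci_device_id array):

#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(demo_pci_table) = {
	{PCI_DEVICE(0x18CA, 0x020)},	/* XGI vendor, device 0x020 as in the table above */
	{0}				/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, demo_pci_table);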
44/* To be included in fb.h */ 35/* To be included in fb.h */
45#ifndef FB_ACCEL_XGI_XABRE
46#define FB_ACCEL_XGI_XABRE 41 /* XGI 330 ("Xabre") */
47#endif
48
49#define SEQ_DATA 0x15
50
51#define XGISR (xgifb_info->dev_info.P3c4) 36#define XGISR (xgifb_info->dev_info.P3c4)
52#define XGICR (xgifb_info->dev_info.P3d4) 37#define XGICR (xgifb_info->dev_info.P3d4)
53#define XGIDACA (xgifb_info->dev_info.P3c8) 38#define XGIDACA (xgifb_info->dev_info.P3c8)
@@ -60,12 +45,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
60#define XGIDAC2A XGIPART5 45#define XGIDAC2A XGIPART5
61#define XGIDAC2D (XGIPART5 + 1) 46#define XGIDAC2D (XGIPART5 + 1)
62 47
63#define IND_XGI_PASSWORD 0x05 /* SRs */
64#define IND_XGI_RAMDAC_CONTROL 0x07
65#define IND_XGI_DRAM_SIZE 0x14
66#define IND_XGI_MODULE_ENABLE 0x1E
67#define IND_XGI_PCI_ADDRESS_SET 0x20
68
69#define IND_XGI_SCRATCH_REG_CR30 0x30 /* CRs */ 48#define IND_XGI_SCRATCH_REG_CR30 0x30 /* CRs */
70#define IND_XGI_SCRATCH_REG_CR31 0x31 49#define IND_XGI_SCRATCH_REG_CR31 0x31
71#define IND_XGI_SCRATCH_REG_CR32 0x32 50#define IND_XGI_SCRATCH_REG_CR32 0x32
@@ -73,10 +52,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
73#define IND_XGI_LCD_PANEL 0x36 52#define IND_XGI_LCD_PANEL 0x36
74#define IND_XGI_SCRATCH_REG_CR37 0x37 53#define IND_XGI_SCRATCH_REG_CR37 0x37
75 54
76#define IND_XGI_CRT2_WRITE_ENABLE_315 0x2F
77
78#define XGI_PASSWORD 0x86 /* SR05 */
79
80#define XGI_DRAM_SIZE_MASK 0xF0 /*SR14 */ 55#define XGI_DRAM_SIZE_MASK 0xF0 /*SR14 */
81#define XGI_DRAM_SIZE_1MB 0x00 56#define XGI_DRAM_SIZE_1MB 0x00
82#define XGI_DRAM_SIZE_2MB 0x01 57#define XGI_DRAM_SIZE_2MB 0x01
@@ -88,37 +63,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
88#define XGI_DRAM_SIZE_128MB 0x07 63#define XGI_DRAM_SIZE_128MB 0x07
89#define XGI_DRAM_SIZE_256MB 0x08 64#define XGI_DRAM_SIZE_256MB 0x08
90 65
91#define XGI_ENABLE_2D 0x40 /* SR1E */
92
93#define XGI_MEM_MAP_IO_ENABLE 0x01 /* SR20 */
94#define XGI_PCI_ADDR_ENABLE 0x80
95
96#define XGI_SIMULTANEOUS_VIEW_ENABLE 0x01 /* CR30 */
97#define XGI_VB_OUTPUT_COMPOSITE 0x04
98#define XGI_VB_OUTPUT_SVIDEO 0x08
99#define XGI_VB_OUTPUT_SCART 0x10
100#define XGI_VB_OUTPUT_LCD 0x20
101#define XGI_VB_OUTPUT_CRT2 0x40
102#define XGI_VB_OUTPUT_HIVISION 0x80
103
104#define XGI_VB_OUTPUT_DISABLE 0x20 /* CR31 */
105#define XGI_DRIVER_MODE 0x40
106
107#define XGI_VB_COMPOSITE 0x01 /* CR32 */
108#define XGI_VB_SVIDEO 0x02
109#define XGI_VB_SCART 0x04
110#define XGI_VB_LCD 0x08
111#define XGI_VB_CRT2 0x10
112#define XGI_CRT1 0x20
113#define XGI_VB_HIVISION 0x40
114#define XGI_VB_YPBPR 0x80
115#define XGI_VB_TV (XGI_VB_COMPOSITE | XGI_VB_SVIDEO | \
116 XGI_VB_SCART | XGI_VB_HIVISION|XGI_VB_YPBPR)
117
118#define XGI_EXTERNAL_CHIP_MASK 0x0E /* CR37 */
119#define XGI310_EXTERNAL_CHIP_LVDS 0x02 /* in CR37 << 1 ! */
120#define XGI310_EXTERNAL_CHIP_LVDS_CHRONTEL 0x03 /* in CR37 << 1 ! */
121
122/* ------------------- Global Variables ----------------------------- */ 66/* ------------------- Global Variables ----------------------------- */
123 67
124/* display status */ 68/* display status */
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 2502c49c9c5b..21c037827de4 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -4,6 +4,8 @@
4 * Base on TW's sis fbdev code. 4 * Base on TW's sis fbdev code.
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
7/* #include <linux/config.h> */ 9/* #include <linux/config.h> */
8#include <linux/module.h> 10#include <linux/module.h>
9#include <linux/moduleparam.h> 11#include <linux/moduleparam.h>
@@ -55,7 +57,7 @@ static unsigned int refresh_rate;
55#undef XGIFBDEBUG 57#undef XGIFBDEBUG
56 58
57#ifdef XGIFBDEBUG 59#ifdef XGIFBDEBUG
58#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args) 60#define DPRINTK(fmt, args...) pr_debug("%s: " fmt, __func__ , ## args)
59#else 61#else
60#define DPRINTK(fmt, args...) 62#define DPRINTK(fmt, args...)
61#endif 63#endif
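
The XGI_main_26.c changes are a printk modernisation: defining pr_fmt before the first include makes every pr_err()/pr_info()/pr_debug() in the file prefix its output with the module name, so the hand-written "XGIfb: " prefixes can be dropped from each call site, and __FUNCTION__ gives way to the standard __func__. The mechanism in miniature (KBUILD_MODNAME is supplied by the kernel build system):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */

#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
	pr_info("loaded\n");	/* emits "demo: loaded" for a module named demo */
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");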
@@ -142,7 +144,7 @@ static inline void dumpVGAReg(void)
142#if 1 144#if 1
143#define DEBUGPRN(x) 145#define DEBUGPRN(x)
144#else 146#else
145#define DEBUGPRN(x) printk(KERN_INFO x "\n"); 147#define DEBUGPRN(x) pr_info(x "\n");
146#endif 148#endif
147 149
148/* --------------- Hardware Access Routines -------------------------- */ 150/* --------------- Hardware Access Routines -------------------------- */
@@ -369,15 +371,15 @@ static void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
369 XGI_Pr->P3c9 = BaseAddr + 0x19; 371 XGI_Pr->P3c9 = BaseAddr + 0x19;
370 XGI_Pr->P3da = BaseAddr + 0x2A; 372 XGI_Pr->P3da = BaseAddr + 0x2A;
371 /* Digital video interface registers (LCD) */ 373 /* Digital video interface registers (LCD) */
372 XGI_Pr->Part1Port = BaseAddr + XGI_CRT2_PORT_04; 374 XGI_Pr->Part1Port = BaseAddr + SIS_CRT2_PORT_04;
373 /* 301 TV Encoder registers */ 375 /* 301 TV Encoder registers */
374 XGI_Pr->Part2Port = BaseAddr + XGI_CRT2_PORT_10; 376 XGI_Pr->Part2Port = BaseAddr + SIS_CRT2_PORT_10;
375 /* 301 Macrovision registers */ 377 /* 301 Macrovision registers */
376 XGI_Pr->Part3Port = BaseAddr + XGI_CRT2_PORT_12; 378 XGI_Pr->Part3Port = BaseAddr + SIS_CRT2_PORT_12;
377 /* 301 VGA2 (and LCD) registers */ 379 /* 301 VGA2 (and LCD) registers */
378 XGI_Pr->Part4Port = BaseAddr + XGI_CRT2_PORT_14; 380 XGI_Pr->Part4Port = BaseAddr + SIS_CRT2_PORT_14;
379 /* 301 palette address port registers */ 381 /* 301 palette address port registers */
380 XGI_Pr->Part5Port = BaseAddr + XGI_CRT2_PORT_14 + 2; 382 XGI_Pr->Part5Port = BaseAddr + SIS_CRT2_PORT_14 + 2;
381 383
382} 384}
383 385
@@ -424,7 +426,7 @@ static void XGIfb_search_mode(struct xgifb_video_info *xgifb_info,
424 i++; 426 i++;
425 } 427 }
426 if (!j) 428 if (!j)
427 printk(KERN_INFO "XGIfb: Invalid mode '%s'\n", name); 429 pr_info("Invalid mode '%s'\n", name);
428} 430}
429 431
430static void XGIfb_search_vesamode(struct xgifb_video_info *xgifb_info, 432static void XGIfb_search_vesamode(struct xgifb_video_info *xgifb_info,
@@ -449,7 +451,7 @@ static void XGIfb_search_vesamode(struct xgifb_video_info *xgifb_info,
449 451
450invalid: 452invalid:
451 if (!j) 453 if (!j)
452 printk(KERN_INFO "XGIfb: Invalid VESA mode 0x%x'\n", vesamode); 454 pr_info("Invalid VESA mode 0x%x'\n", vesamode);
453} 455}
454 456
455static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex) 457static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
@@ -526,12 +528,6 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
526 xres = 1600; 528 xres = 1600;
527 yres = 1200; 529 yres = 1200;
528 break; 530 break;
529 /* case LCD_320x480: */ /* TW: FSTN */
530 /*
531 xres = 320;
532 yres = 480;
533 break;
534 */
535 default: 531 default:
536 xres = 0; 532 xres = 0;
537 yres = 0; 533 yres = 0;
@@ -692,7 +688,7 @@ static void XGIfb_search_crt2type(const char *name)
692 i++; 688 i++;
693 } 689 }
694 if (XGIfb_crt2type < 0) 690 if (XGIfb_crt2type < 0)
695 printk(KERN_INFO "XGIfb: Invalid CRT2 type: %s\n", name); 691 pr_info("Invalid CRT2 type: %s\n", name);
696} 692}
697 693
698static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info, 694static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
@@ -742,7 +738,7 @@ static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
742 if (xgifb_info->rate_idx > 0) { 738 if (xgifb_info->rate_idx > 0) {
743 return xgifb_info->rate_idx; 739 return xgifb_info->rate_idx;
744 } else { 740 } else {
745 printk(KERN_INFO "XGIfb: Unsupported rate %d for %dx%d\n", 741 pr_info("Unsupported rate %d for %dx%d\n",
746 rate, xres, yres); 742 rate, xres, yres);
747 return 0; 743 return 0;
748 } 744 }
@@ -811,27 +807,27 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
811 807
812 switch (xgifb_info->display2) { 808 switch (xgifb_info->display2) {
813 case XGIFB_DISP_CRT: 809 case XGIFB_DISP_CRT:
814 cr30 = (XGI_VB_OUTPUT_CRT2 | XGI_SIMULTANEOUS_VIEW_ENABLE); 810 cr30 = (SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE);
815 cr31 |= XGI_DRIVER_MODE; 811 cr31 |= SIS_DRIVER_MODE;
816 break; 812 break;
817 case XGIFB_DISP_LCD: 813 case XGIFB_DISP_LCD:
818 cr30 = (XGI_VB_OUTPUT_LCD | XGI_SIMULTANEOUS_VIEW_ENABLE); 814 cr30 = (SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE);
819 cr31 |= XGI_DRIVER_MODE; 815 cr31 |= SIS_DRIVER_MODE;
820 break; 816 break;
821 case XGIFB_DISP_TV: 817 case XGIFB_DISP_TV:
822 if (xgifb_info->TV_type == TVMODE_HIVISION) 818 if (xgifb_info->TV_type == TVMODE_HIVISION)
823 cr30 = (XGI_VB_OUTPUT_HIVISION 819 cr30 = (SIS_VB_OUTPUT_HIVISION
824 | XGI_SIMULTANEOUS_VIEW_ENABLE); 820 | SIS_SIMULTANEOUS_VIEW_ENABLE);
825 else if (xgifb_info->TV_plug == TVPLUG_SVIDEO) 821 else if (xgifb_info->TV_plug == TVPLUG_SVIDEO)
826 cr30 = (XGI_VB_OUTPUT_SVIDEO 822 cr30 = (SIS_VB_OUTPUT_SVIDEO
827 | XGI_SIMULTANEOUS_VIEW_ENABLE); 823 | SIS_SIMULTANEOUS_VIEW_ENABLE);
828 else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE) 824 else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE)
829 cr30 = (XGI_VB_OUTPUT_COMPOSITE 825 cr30 = (SIS_VB_OUTPUT_COMPOSITE
830 | XGI_SIMULTANEOUS_VIEW_ENABLE); 826 | SIS_SIMULTANEOUS_VIEW_ENABLE);
831 else if (xgifb_info->TV_plug == TVPLUG_SCART) 827 else if (xgifb_info->TV_plug == TVPLUG_SCART)
832 cr30 = (XGI_VB_OUTPUT_SCART 828 cr30 = (SIS_VB_OUTPUT_SCART
833 | XGI_SIMULTANEOUS_VIEW_ENABLE); 829 | SIS_SIMULTANEOUS_VIEW_ENABLE);
834 cr31 |= XGI_DRIVER_MODE; 830 cr31 |= SIS_DRIVER_MODE;
835 831
836 if (XGIfb_tvmode == 1 || xgifb_info->TV_type == TVMODE_PAL) 832 if (XGIfb_tvmode == 1 || xgifb_info->TV_type == TVMODE_PAL)
837 cr31 |= 0x01; 833 cr31 |= 0x01;
@@ -840,7 +836,7 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
840 break; 836 break;
841 default: /* disable CRT2 */ 837 default: /* disable CRT2 */
842 cr30 = 0x00; 838 cr30 = 0x00;
843 cr31 |= (XGI_DRIVER_MODE | XGI_VB_OUTPUT_DISABLE); 839 cr31 |= (SIS_DRIVER_MODE | SIS_VB_OUTPUT_DISABLE);
844 } 840 }
845 841
846 xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30); 842 xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30);
@@ -854,7 +850,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
854 u8 reg; 850 u8 reg;
855 unsigned char doit = 1; 851 unsigned char doit = 1;
856 /* 852 /*
857 xgifb_reg_set(XGISR,IND_XGI_PASSWORD,XGI_PASSWORD); 853 xgifb_reg_set(XGISR,IND_SIS_PASSWORD,SIS_PASSWORD);
858 xgifb_reg_set(XGICR, 0x13, 0x00); 854 xgifb_reg_set(XGICR, 0x13, 0x00);
859 xgifb_reg_and_or(XGISR,0x0E, 0xF0, 0x01); 855 xgifb_reg_and_or(XGISR,0x0E, 0xF0, 0x01);
860 *test* 856 *test*
@@ -890,7 +886,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
890 reg |= 0x80; 886 reg |= 0x80;
891 xgifb_reg_set(XGICR, 0x17, reg); 887 xgifb_reg_set(XGICR, 0x17, reg);
892 888
893 xgifb_reg_and(XGISR, IND_XGI_RAMDAC_CONTROL, ~0x04); 889 xgifb_reg_and(XGISR, IND_SIS_RAMDAC_CONTROL, ~0x04);
894 890
895 if (xgifb_info->display2 == XGIFB_DISP_TV && 891 if (xgifb_info->display2 == XGIFB_DISP_TV &&
896 xgifb_info->hasVB == HASVB_301) { 892 xgifb_info->hasVB == HASVB_301) {
@@ -923,7 +919,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
923 break; 919 break;
924 } 920 }
925 xgifb_reg_or(XGIPART1, 921 xgifb_reg_or(XGIPART1,
926 IND_XGI_CRT2_WRITE_ENABLE_315, 922 SIS_CRT2_WENABLE_315,
927 0x01); 923 0x01);
928 924
929 if (xgifb_info->TV_type == TVMODE_NTSC) { 925 if (xgifb_info->TV_type == TVMODE_NTSC) {
@@ -1118,7 +1114,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
1118 if (!htotal || !vtotal) { 1114 if (!htotal || !vtotal) {
1119 DPRINTK("XGIfb: Invalid 'var' information\n"); 1115 DPRINTK("XGIfb: Invalid 'var' information\n");
1120 return -EINVAL; 1116 return -EINVAL;
1121 } printk(KERN_DEBUG "XGIfb: var->pixclock=%d, htotal=%d, vtotal=%d\n", 1117 } pr_debug("var->pixclock=%d, htotal=%d, vtotal=%d\n",
1122 var->pixclock, htotal, vtotal); 1118 var->pixclock, htotal, vtotal);
1123 1119
1124 if (var->pixclock && htotal && vtotal) { 1120 if (var->pixclock && htotal && vtotal) {
@@ -1130,7 +1126,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
1130 xgifb_info->refresh_rate = 60; 1126 xgifb_info->refresh_rate = 60;
1131 } 1127 }
1132 1128
1133 printk(KERN_DEBUG "XGIfb: Change mode to %dx%dx%d-%dHz\n", 1129 pr_debug("Change mode to %dx%dx%d-%dHz\n",
1134 var->xres, 1130 var->xres,
1135 var->yres, 1131 var->yres,
1136 var->bits_per_pixel, 1132 var->bits_per_pixel,
@@ -1158,7 +1154,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
1158 xgifb_info->mode_idx = -1; 1154 xgifb_info->mode_idx = -1;
1159 1155
1160 if (xgifb_info->mode_idx < 0) { 1156 if (xgifb_info->mode_idx < 0) {
1161 printk(KERN_ERR "XGIfb: Mode %dx%dx%d not supported\n", 1157 pr_err("Mode %dx%dx%d not supported\n",
1162 var->xres, var->yres, var->bits_per_pixel); 1158 var->xres, var->yres, var->bits_per_pixel);
1163 xgifb_info->mode_idx = old_mode; 1159 xgifb_info->mode_idx = old_mode;
1164 return -EINVAL; 1160 return -EINVAL;
@@ -1177,14 +1173,14 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
1177 if (XGISetModeNew(xgifb_info, hw_info, 1173 if (XGISetModeNew(xgifb_info, hw_info,
1178 XGIbios_mode[xgifb_info->mode_idx].mode_no) 1174 XGIbios_mode[xgifb_info->mode_idx].mode_no)
1179 == 0) { 1175 == 0) {
1180 printk(KERN_ERR "XGIfb: Setting mode[0x%x] failed\n", 1176 pr_err("Setting mode[0x%x] failed\n",
1181 XGIbios_mode[xgifb_info->mode_idx].mode_no); 1177 XGIbios_mode[xgifb_info->mode_idx].mode_no);
1182 return -EINVAL; 1178 return -EINVAL;
1183 } 1179 }
1184 info->fix.line_length = ((info->var.xres_virtual 1180 info->fix.line_length = ((info->var.xres_virtual
1185 * info->var.bits_per_pixel) >> 6); 1181 * info->var.bits_per_pixel) >> 6);
1186 1182
1187 xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD); 1183 xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
1188 1184
1189 xgifb_reg_set(XGICR, 0x13, (info->fix.line_length & 0x00ff)); 1185 xgifb_reg_set(XGICR, 0x13, (info->fix.line_length & 0x00ff));
1190 xgifb_reg_set(XGISR, 1186 xgifb_reg_set(XGISR,
@@ -1239,7 +1235,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
1239 break; 1235 break;
1240 default: 1236 default:
1241 xgifb_info->video_cmap_len = 16; 1237 xgifb_info->video_cmap_len = 16;
1242 printk(KERN_ERR "XGIfb: Unsupported depth %d", 1238 pr_err("Unsupported depth %d",
1243 xgifb_info->video_bpp); 1239 xgifb_info->video_bpp);
1244 break; 1240 break;
1245 } 1241 }
@@ -1273,7 +1269,7 @@ static int XGIfb_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
1273 break; 1269 break;
1274 } 1270 }
1275 1271
1276 xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD); 1272 xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
1277 1273
1278 xgifb_reg_set(XGICR, 0x0D, base & 0xFF); 1274 xgifb_reg_set(XGICR, 0x0D, base & 0xFF);
1279 xgifb_reg_set(XGICR, 0x0C, (base >> 8) & 0xFF); 1275 xgifb_reg_set(XGICR, 0x0C, (base >> 8) & 0xFF);
@@ -1282,7 +1278,7 @@ static int XGIfb_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
1282 xgifb_reg_and_or(XGISR, 0x37, 0xDF, (base >> 21) & 0x04); 1278 xgifb_reg_and_or(XGISR, 0x37, 0xDF, (base >> 21) & 0x04);
1283 1279
1284 if (xgifb_info->display2 != XGIFB_DISP_NONE) { 1280 if (xgifb_info->display2 != XGIFB_DISP_NONE) {
1285 xgifb_reg_or(XGIPART1, IND_XGI_CRT2_WRITE_ENABLE_315, 0x01); 1281 xgifb_reg_or(XGIPART1, SIS_CRT2_WENABLE_315, 0x01);
1286 xgifb_reg_set(XGIPART1, 0x06, (base & 0xFF)); 1282 xgifb_reg_set(XGIPART1, 0x06, (base & 0xFF));
1287 xgifb_reg_set(XGIPART1, 0x05, ((base >> 8) & 0xFF)); 1283 xgifb_reg_set(XGIPART1, 0x05, ((base >> 8) & 0xFF));
1288 xgifb_reg_set(XGIPART1, 0x04, ((base >> 16) & 0xFF)); 1284 xgifb_reg_set(XGIPART1, 0x04, ((base >> 16) & 0xFF));
@@ -1387,7 +1383,7 @@ static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con,
1387 fix->line_length = xgifb_info->video_linelength; 1383 fix->line_length = xgifb_info->video_linelength;
1388 fix->mmio_start = xgifb_info->mmio_base; 1384 fix->mmio_start = xgifb_info->mmio_base;
1389 fix->mmio_len = xgifb_info->mmio_size; 1385 fix->mmio_len = xgifb_info->mmio_size;
1390 fix->accel = FB_ACCEL_XGI_XABRE; 1386 fix->accel = FB_ACCEL_SIS_XABRE;
1391 1387
1392 DEBUGPRN("end of get_fix"); 1388 DEBUGPRN("end of get_fix");
1393 return 0; 1389 return 0;
@@ -1441,7 +1437,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
1441 hrate = (drate * 1000) / htotal; 1437 hrate = (drate * 1000) / htotal;
1442 xgifb_info->refresh_rate = 1438 xgifb_info->refresh_rate =
1443 (unsigned int) (hrate * 2 / vtotal); 1439 (unsigned int) (hrate * 2 / vtotal);
1444 printk(KERN_DEBUG 1440 pr_debug(
1445 "%s: pixclock = %d ,htotal=%d, vtotal=%d\n" 1441 "%s: pixclock = %d ,htotal=%d, vtotal=%d\n"
1446 "%s: drate=%d, hrate=%d, refresh_rate=%d\n", 1442 "%s: drate=%d, hrate=%d, refresh_rate=%d\n",
1447 __func__, var->pixclock, htotal, vtotal, 1443 __func__, var->pixclock, htotal, vtotal,
@@ -1479,7 +1475,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
1479 1475
1480 if (!found_mode) { 1476 if (!found_mode) {
1481 1477
1482 printk(KERN_ERR "XGIfb: %dx%dx%d is no valid mode\n", 1478 pr_err("%dx%dx%d is no valid mode\n",
1483 var->xres, var->yres, var->bits_per_pixel); 1479 var->xres, var->yres, var->bits_per_pixel);
1484 search_idx = 0; 1480 search_idx = 0;
1485 while (XGIbios_mode[search_idx].mode_no != 0) { 1481 while (XGIbios_mode[search_idx].mode_no != 0) {
@@ -1498,11 +1494,11 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
1498 if (found_mode) { 1494 if (found_mode) {
1499 var->xres = XGIbios_mode[search_idx].xres; 1495 var->xres = XGIbios_mode[search_idx].xres;
1500 var->yres = XGIbios_mode[search_idx].yres; 1496 var->yres = XGIbios_mode[search_idx].yres;
1501 printk(KERN_DEBUG "XGIfb: Adapted to mode %dx%dx%d\n", 1497 pr_debug("Adapted to mode %dx%dx%d\n",
1502 var->xres, var->yres, var->bits_per_pixel); 1498 var->xres, var->yres, var->bits_per_pixel);
1503 1499
1504 } else { 1500 } else {
1505 printk(KERN_ERR "XGIfb: Failed to find similar mode to %dx%dx%d\n", 1501 pr_err("Failed to find similar mode to %dx%dx%d\n",
1506 var->xres, var->yres, var->bits_per_pixel); 1502 var->xres, var->yres, var->bits_per_pixel);
1507 return -EINVAL; 1503 return -EINVAL;
1508 } 1504 }
@@ -1634,9 +1630,9 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
1634 1630
1635 /* xorg driver sets 32MB * 1 channel */ 1631 /* xorg driver sets 32MB * 1 channel */
1636 if (xgifb_info->chip == XG27) 1632 if (xgifb_info->chip == XG27)
1637 xgifb_reg_set(XGISR, IND_XGI_DRAM_SIZE, 0x51); 1633 xgifb_reg_set(XGISR, IND_SIS_DRAM_SIZE, 0x51);
1638 1634
1639 reg = xgifb_reg_get(XGISR, IND_XGI_DRAM_SIZE); 1635 reg = xgifb_reg_get(XGISR, IND_SIS_DRAM_SIZE);
1640 switch ((reg & XGI_DRAM_SIZE_MASK) >> 4) { 1636 switch ((reg & XGI_DRAM_SIZE_MASK) >> 4) {
1641 case XGI_DRAM_SIZE_1MB: 1637 case XGI_DRAM_SIZE_1MB:
1642 xgifb_info->video_size = 0x100000; 1638 xgifb_info->video_size = 0x100000;
@@ -1711,7 +1707,7 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
1711 /* xgifb_info->video_size = 0x200000; */ /* 1024x768x16 */ 1707 /* xgifb_info->video_size = 0x200000; */ /* 1024x768x16 */
1712 /* xgifb_info->video_size = 0x1000000; */ /* benchmark */ 1708 /* xgifb_info->video_size = 0x1000000; */ /* benchmark */
1713 1709
1714 printk("XGIfb: SR14=%x DramSzie %x ChannelNum %x\n", 1710 pr_info("SR14=%x DramSzie %x ChannelNum %x\n",
1715 reg, 1711 reg,
1716 xgifb_info->video_size, ChannelNum); 1712 xgifb_info->video_size, ChannelNum);
1717 return 0; 1713 return 0;
@@ -1736,7 +1732,7 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
1736 1732
1737 cr32 = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR32); 1733 cr32 = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR32);
1738 1734
1739 if ((cr32 & XGI_CRT1) && !XGIfb_crt1off) 1735 if ((cr32 & SIS_CRT1) && !XGIfb_crt1off)
1740 XGIfb_crt1off = 0; 1736 XGIfb_crt1off = 0;
1741 else { 1737 else {
1742 if (cr32 & 0x5F) 1738 if (cr32 & 0x5F)
@@ -1746,11 +1742,11 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
1746 } 1742 }
1747 1743
1748 if (!xgifb_info->display2_force) { 1744 if (!xgifb_info->display2_force) {
1749 if (cr32 & XGI_VB_TV) 1745 if (cr32 & SIS_VB_TV)
1750 xgifb_info->display2 = XGIFB_DISP_TV; 1746 xgifb_info->display2 = XGIFB_DISP_TV;
1751 else if (cr32 & XGI_VB_LCD) 1747 else if (cr32 & SIS_VB_LCD)
1752 xgifb_info->display2 = XGIFB_DISP_LCD; 1748 xgifb_info->display2 = XGIFB_DISP_LCD;
1753 else if (cr32 & XGI_VB_CRT2) 1749 else if (cr32 & SIS_VB_CRT2)
1754 xgifb_info->display2 = XGIFB_DISP_CRT; 1750 xgifb_info->display2 = XGIFB_DISP_CRT;
1755 else 1751 else
1756 xgifb_info->display2 = XGIFB_DISP_NONE; 1752 xgifb_info->display2 = XGIFB_DISP_NONE;
@@ -1759,14 +1755,14 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
1759 if (XGIfb_tvplug != -1) 1755 if (XGIfb_tvplug != -1)
1760 /* PR/TW: Override with option */ 1756 /* PR/TW: Override with option */
1761 xgifb_info->TV_plug = XGIfb_tvplug; 1757 xgifb_info->TV_plug = XGIfb_tvplug;
1762 else if (cr32 & XGI_VB_HIVISION) { 1758 else if (cr32 & SIS_VB_HIVISION) {
1763 xgifb_info->TV_type = TVMODE_HIVISION; 1759 xgifb_info->TV_type = TVMODE_HIVISION;
1764 xgifb_info->TV_plug = TVPLUG_SVIDEO; 1760 xgifb_info->TV_plug = TVPLUG_SVIDEO;
1765 } else if (cr32 & XGI_VB_SVIDEO) 1761 } else if (cr32 & SIS_VB_SVIDEO)
1766 xgifb_info->TV_plug = TVPLUG_SVIDEO; 1762 xgifb_info->TV_plug = TVPLUG_SVIDEO;
1767 else if (cr32 & XGI_VB_COMPOSITE) 1763 else if (cr32 & SIS_VB_COMPOSITE)
1768 xgifb_info->TV_plug = TVPLUG_COMPOSITE; 1764 xgifb_info->TV_plug = TVPLUG_COMPOSITE;
1769 else if (cr32 & XGI_VB_SCART) 1765 else if (cr32 & SIS_VB_SCART)
1770 xgifb_info->TV_plug = TVPLUG_SCART; 1766 xgifb_info->TV_plug = TVPLUG_SCART;
1771 1767
1772 if (xgifb_info->TV_type == 0) { 1768 if (xgifb_info->TV_type == 0) {
@@ -1811,11 +1807,11 @@ static void XGIfb_get_VB_type(struct xgifb_video_info *xgifb_info)
1811 1807
1812 if (!XGIfb_has_VB(xgifb_info)) { 1808 if (!XGIfb_has_VB(xgifb_info)) {
1813 reg = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR37); 1809 reg = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR37);
1814 switch ((reg & XGI_EXTERNAL_CHIP_MASK) >> 1) { 1810 switch ((reg & SIS_EXTERNAL_CHIP_MASK) >> 1) {
1815 case XGI310_EXTERNAL_CHIP_LVDS: 1811 case SIS_EXTERNAL_CHIP_LVDS:
1816 xgifb_info->hasVB = HASVB_LVDS; 1812 xgifb_info->hasVB = HASVB_LVDS;
1817 break; 1813 break;
1818 case XGI310_EXTERNAL_CHIP_LVDS_CHRONTEL: 1814 case SIS_EXTERNAL_CHIP_LVDS_CHRONTEL:
1819 xgifb_info->hasVB = HASVB_LVDS_CHRONTEL; 1815 xgifb_info->hasVB = HASVB_LVDS_CHRONTEL;
1820 break; 1816 break;
1821 default: 1817 default:
@@ -1917,7 +1913,7 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
1917 xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30; 1913 xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30;
1918 hw_info->pjIOAddress = (unsigned char *)xgifb_info->vga_base; 1914 hw_info->pjIOAddress = (unsigned char *)xgifb_info->vga_base;
1919 /* XGI_Pr.RelIO = ioremap(pci_resource_start(pdev, 2), 128) + 0x30; */ 1915 /* XGI_Pr.RelIO = ioremap(pci_resource_start(pdev, 2), 128) + 0x30; */
1920 printk("XGIfb: Relocate IO address: %lx [%08lx]\n", 1916 pr_info("Relocate IO address: %lx [%08lx]\n",
1921 (unsigned long)pci_resource_start(pdev, 2), 1917 (unsigned long)pci_resource_start(pdev, 2),
1922 xgifb_info->dev_info.RelIO); 1918 xgifb_info->dev_info.RelIO);
1923 1919
@@ -1933,17 +1929,17 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
1933 1929
1934 XGIRegInit(&xgifb_info->dev_info, (unsigned long)hw_info->pjIOAddress); 1930 XGIRegInit(&xgifb_info->dev_info, (unsigned long)hw_info->pjIOAddress);
1935 1931
1936 xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD); 1932 xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
1937 reg1 = xgifb_reg_get(XGISR, IND_XGI_PASSWORD); 1933 reg1 = xgifb_reg_get(XGISR, IND_SIS_PASSWORD);
1938 1934
1939 if (reg1 != 0xa1) { /*I/O error */ 1935 if (reg1 != 0xa1) { /*I/O error */
1940 printk("\nXGIfb: I/O error!!!"); 1936 pr_err("I/O error!!!");
1941 ret = -EIO; 1937 ret = -EIO;
1942 goto error; 1938 goto error;
1943 } 1939 }
1944 1940
1945 switch (xgifb_info->chip_id) { 1941 switch (xgifb_info->chip_id) {
1946 case PCI_DEVICE_ID_XG_20: 1942 case PCI_DEVICE_ID_XGI_20:
1947 xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN); 1943 xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN);
1948 CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1); 1944 CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1);
1949 if (CR48&GPIOG_READ) 1945 if (CR48&GPIOG_READ)
@@ -1951,16 +1947,16 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
1951 else 1947 else
1952 xgifb_info->chip = XG20; 1948 xgifb_info->chip = XG20;
1953 break; 1949 break;
1954 case PCI_DEVICE_ID_XG_40: 1950 case PCI_DEVICE_ID_XGI_40:
1955 xgifb_info->chip = XG40; 1951 xgifb_info->chip = XG40;
1956 break; 1952 break;
1957 case PCI_DEVICE_ID_XG_41: 1953 case PCI_DEVICE_ID_XGI_41:
1958 xgifb_info->chip = XG41; 1954 xgifb_info->chip = XG41;
1959 break; 1955 break;
1960 case PCI_DEVICE_ID_XG_42: 1956 case PCI_DEVICE_ID_XGI_42:
1961 xgifb_info->chip = XG42; 1957 xgifb_info->chip = XG42;
1962 break; 1958 break;
1963 case PCI_DEVICE_ID_XG_27: 1959 case PCI_DEVICE_ID_XGI_27:
1964 xgifb_info->chip = XG27; 1960 xgifb_info->chip = XG27;
1965 break; 1961 break;
1966 default: 1962 default:
@@ -1968,31 +1964,31 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
1968 goto error; 1964 goto error;
1969 } 1965 }
1970 1966
1971 printk("XGIfb:chipid = %x\n", xgifb_info->chip); 1967 pr_info("chipid = %x\n", xgifb_info->chip);
1972 hw_info->jChipType = xgifb_info->chip; 1968 hw_info->jChipType = xgifb_info->chip;
1973 1969
1974 if (XGIfb_get_dram_size(xgifb_info)) { 1970 if (XGIfb_get_dram_size(xgifb_info)) {
1975 printk(KERN_INFO "XGIfb: Fatal error: Unable to determine RAM size.\n"); 1971 pr_err("Fatal error: Unable to determine RAM size.\n");
1976 ret = -ENODEV; 1972 ret = -ENODEV;
1977 goto error; 1973 goto error;
1978 } 1974 }
1979 1975
1980 /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */ 1976 /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
1981 xgifb_reg_or(XGISR, 1977 xgifb_reg_or(XGISR,
1982 IND_XGI_PCI_ADDRESS_SET, 1978 IND_SIS_PCI_ADDRESS_SET,
1983 (XGI_PCI_ADDR_ENABLE | XGI_MEM_MAP_IO_ENABLE)); 1979 (SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE));
1984 /* Enable 2D accelerator engine */ 1980 /* Enable 2D accelerator engine */
1985 xgifb_reg_or(XGISR, IND_XGI_MODULE_ENABLE, XGI_ENABLE_2D); 1981 xgifb_reg_or(XGISR, IND_SIS_MODULE_ENABLE, SIS_ENABLE_2D);
1986 1982
1987 hw_info->ulVideoMemorySize = xgifb_info->video_size; 1983 hw_info->ulVideoMemorySize = xgifb_info->video_size;
1988 1984
1989 if (!request_mem_region(xgifb_info->video_base, 1985 if (!request_mem_region(xgifb_info->video_base,
1990 xgifb_info->video_size, 1986 xgifb_info->video_size,
1991 "XGIfb FB")) { 1987 "XGIfb FB")) {
1992 printk("unable request memory size %x", 1988 pr_err("unable request memory size %x\n",
1993 xgifb_info->video_size); 1989 xgifb_info->video_size);
1994 printk(KERN_ERR "XGIfb: Fatal error: Unable to reserve frame buffer memory\n"); 1990 pr_err("Fatal error: Unable to reserve frame buffer memory\n");
1995 printk(KERN_ERR "XGIfb: Is there another framebuffer driver active?\n"); 1991 pr_err("Is there another framebuffer driver active?\n");
1996 ret = -ENODEV; 1992 ret = -ENODEV;
1997 goto error; 1993 goto error;
1998 } 1994 }
@@ -2000,7 +1996,7 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
2000 if (!request_mem_region(xgifb_info->mmio_base, 1996 if (!request_mem_region(xgifb_info->mmio_base,
2001 xgifb_info->mmio_size, 1997 xgifb_info->mmio_size,
2002 "XGIfb MMIO")) { 1998 "XGIfb MMIO")) {
2003 printk(KERN_ERR "XGIfb: Fatal error: Unable to reserve MMIO region\n"); 1999 pr_err("Fatal error: Unable to reserve MMIO region\n");
2004 ret = -ENODEV; 2000 ret = -ENODEV;
2005 goto error_0; 2001 goto error_0;
2006 } 2002 }
@@ -2010,20 +2006,18 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
2010 xgifb_info->mmio_vbase = ioremap(xgifb_info->mmio_base, 2006 xgifb_info->mmio_vbase = ioremap(xgifb_info->mmio_base,
2011 xgifb_info->mmio_size); 2007 xgifb_info->mmio_size);
2012 2008
2013 printk(KERN_INFO "XGIfb: Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n", 2009 pr_info("Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
2014 xgifb_info->video_base, 2010 xgifb_info->video_base,
2015 xgifb_info->video_vbase, 2011 xgifb_info->video_vbase,
2016 xgifb_info->video_size / 1024); 2012 xgifb_info->video_size / 1024);
2017 2013
2018 printk(KERN_INFO "XGIfb: MMIO at 0x%lx, mapped to 0x%p, size %ldk\n", 2014 pr_info("MMIO at 0x%lx, mapped to 0x%p, size %ldk\n",
2019 xgifb_info->mmio_base, xgifb_info->mmio_vbase, 2015 xgifb_info->mmio_base, xgifb_info->mmio_vbase,
2020 xgifb_info->mmio_size / 1024); 2016 xgifb_info->mmio_size / 1024);
2021 printk("XGIfb: XGIInitNew() ..."); 2017
2022 pci_set_drvdata(pdev, xgifb_info); 2018 pci_set_drvdata(pdev, xgifb_info);
2023 if (XGIInitNew(pdev)) 2019 if (!XGIInitNew(pdev))
2024 printk("OK\n"); 2020 pr_err("XGIInitNew() failed!\n");
2025 else
2026 printk("Fail\n");
2027 2021
2028 xgifb_info->mtrr = (unsigned int) 0; 2022 xgifb_info->mtrr = (unsigned int) 0;
2029 2023
@@ -2033,13 +2027,12 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
2033 xgifb_info->hasVB = HASVB_NONE; 2027 xgifb_info->hasVB = HASVB_NONE;
2034 } else if (xgifb_info->chip == XG21) { 2028 } else if (xgifb_info->chip == XG21) {
2035 CR38 = xgifb_reg_get(XGICR, 0x38); 2029 CR38 = xgifb_reg_get(XGICR, 0x38);
2036 if ((CR38&0xE0) == 0xC0) { 2030 if ((CR38&0xE0) == 0xC0)
2037 xgifb_info->display2 = XGIFB_DISP_LCD; 2031 xgifb_info->display2 = XGIFB_DISP_LCD;
2038 } else if ((CR38&0xE0) == 0x60) { 2032 else if ((CR38&0xE0) == 0x60)
2039 xgifb_info->hasVB = HASVB_CHRONTEL; 2033 xgifb_info->hasVB = HASVB_CHRONTEL;
2040 } else { 2034 else
2041 xgifb_info->hasVB = HASVB_NONE; 2035 xgifb_info->hasVB = HASVB_NONE;
2042 }
2043 } else { 2036 } else {
2044 XGIfb_get_VB_type(xgifb_info); 2037 XGIfb_get_VB_type(xgifb_info);
2045 } 2038 }
@@ -2053,10 +2046,10 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
2053 reg = xgifb_reg_get(XGIPART4, 0x01); 2046 reg = xgifb_reg_get(XGIPART4, 0x01);
2054 if (reg >= 0xE0) { 2047 if (reg >= 0xE0) {
2055 hw_info->ujVBChipID = VB_CHIP_302LV; 2048 hw_info->ujVBChipID = VB_CHIP_302LV;
2056 printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg); 2049 pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg);
2057 } else if (reg >= 0xD0) { 2050 } else if (reg >= 0xD0) {
2058 hw_info->ujVBChipID = VB_CHIP_301LV; 2051 hw_info->ujVBChipID = VB_CHIP_301LV;
2059 printk(KERN_INFO "XGIfb: XGI301LV bridge detected (revision 0x%02x)\n", reg); 2052 pr_info("XGI301LV bridge detected (revision 0x%02x)\n", reg);
2060 } 2053 }
2061 /* else if (reg >= 0xB0) { 2054 /* else if (reg >= 0xB0) {
2062 hw_info->ujVBChipID = VB_CHIP_301B; 2055 hw_info->ujVBChipID = VB_CHIP_301B;
@@ -2065,17 +2058,17 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
2065 } */ 2058 } */
2066 else { 2059 else {
2067 hw_info->ujVBChipID = VB_CHIP_301; 2060 hw_info->ujVBChipID = VB_CHIP_301;
2068 printk("XGIfb: XGI301 bridge detected\n"); 2061 pr_info("XGI301 bridge detected\n");
2069 } 2062 }
2070 break; 2063 break;
2071 case HASVB_302: 2064 case HASVB_302:
2072 reg = xgifb_reg_get(XGIPART4, 0x01); 2065 reg = xgifb_reg_get(XGIPART4, 0x01);
2073 if (reg >= 0xE0) { 2066 if (reg >= 0xE0) {
2074 hw_info->ujVBChipID = VB_CHIP_302LV; 2067 hw_info->ujVBChipID = VB_CHIP_302LV;
2075 printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg); 2068 pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg);
2076 } else if (reg >= 0xD0) { 2069 } else if (reg >= 0xD0) {
2077 hw_info->ujVBChipID = VB_CHIP_301LV; 2070 hw_info->ujVBChipID = VB_CHIP_301LV;
2078 printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg); 2071 pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg);
2079 } else if (reg >= 0xB0) { 2072 } else if (reg >= 0xB0) {
2080 reg1 = xgifb_reg_get(XGIPART4, 0x23); 2073 reg1 = xgifb_reg_get(XGIPART4, 0x23);
2081 2074
@@ -2083,27 +2076,27 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
2083 2076
2084 } else { 2077 } else {
2085 hw_info->ujVBChipID = VB_CHIP_302; 2078 hw_info->ujVBChipID = VB_CHIP_302;
2086 printk(KERN_INFO "XGIfb: XGI302 bridge detected\n"); 2079 pr_info("XGI302 bridge detected\n");
2087 } 2080 }
2088 break; 2081 break;
2089 case HASVB_LVDS: 2082 case HASVB_LVDS:
2090 hw_info->ulExternalChip = 0x1; 2083 hw_info->ulExternalChip = 0x1;
2091 printk(KERN_INFO "XGIfb: LVDS transmitter detected\n"); 2084 pr_info("LVDS transmitter detected\n");
2092 break; 2085 break;
2093 case HASVB_TRUMPION: 2086 case HASVB_TRUMPION:
2094 hw_info->ulExternalChip = 0x2; 2087 hw_info->ulExternalChip = 0x2;
2095 printk(KERN_INFO "XGIfb: Trumpion Zurac LVDS scaler detected\n"); 2088 pr_info("Trumpion Zurac LVDS scaler detected\n");
2096 break; 2089 break;
2097 case HASVB_CHRONTEL: 2090 case HASVB_CHRONTEL:
2098 hw_info->ulExternalChip = 0x4; 2091 hw_info->ulExternalChip = 0x4;
2099 printk(KERN_INFO "XGIfb: Chrontel TV encoder detected\n"); 2092 pr_info("Chrontel TV encoder detected\n");
2100 break; 2093 break;
2101 case HASVB_LVDS_CHRONTEL: 2094 case HASVB_LVDS_CHRONTEL:
2102 hw_info->ulExternalChip = 0x5; 2095 hw_info->ulExternalChip = 0x5;
2103 printk(KERN_INFO "XGIfb: LVDS transmitter and Chrontel TV encoder detected\n"); 2096 pr_info("LVDS transmitter and Chrontel TV encoder detected\n");
2104 break; 2097 break;
2105 default: 2098 default:
2106 printk(KERN_INFO "XGIfb: No or unknown bridge type detected\n"); 2099 pr_info("No or unknown bridge type detected\n");
2107 break; 2100 break;
2108 } 2101 }
2109 2102
@@ -2117,10 +2110,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
2117 reg = xgifb_reg_get(XGICR, IND_XGI_LCD_PANEL); 2110 reg = xgifb_reg_get(XGICR, IND_XGI_LCD_PANEL);
2118 reg &= 0x0f; 2111 reg &= 0x0f;
2119 hw_info->ulCRT2LCDType = XGI310paneltype[reg]; 2112 hw_info->ulCRT2LCDType = XGI310paneltype[reg];
2120
2121 } else {
2122 /* TW: FSTN/DSTN */
2123 hw_info->ulCRT2LCDType = LCD_320x480;
2124 } 2113 }
2125 } 2114 }
2126 2115
@@ -2147,9 +2136,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
2147 if (tmp & 0x20) { 2136 if (tmp & 0x20) {
2148 tmp = xgifb_reg_get( 2137 tmp = xgifb_reg_get(
2149 XGIPART1, 0x13); 2138 XGIPART1, 0x13);
2150 if (tmp & 0x04) {
2151 /* XGI_Pr.XGI_UseLCDA = 1; */
2152 }
2153 } 2139 }
2154 } 2140 }
2155 } 2141 }
@@ -2222,12 +2208,12 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
2222 break; 2208 break;
2223 default: 2209 default:
2224 xgifb_info->video_cmap_len = 16; 2210 xgifb_info->video_cmap_len = 16;
2225 printk(KERN_INFO "XGIfb: Unsupported depth %d", 2211 pr_info("Unsupported depth %d\n",
2226 xgifb_info->video_bpp); 2212 xgifb_info->video_bpp);
2227 break; 2213 break;
2228 } 2214 }
2229 2215
2230 printk(KERN_INFO "XGIfb: Default mode is %dx%dx%d (%dHz)\n", 2216 pr_info("Default mode is %dx%dx%d (%dHz)\n",
2231 xgifb_info->video_width, 2217 xgifb_info->video_width,
2232 xgifb_info->video_height, 2218 xgifb_info->video_height,
2233 xgifb_info->video_bpp, 2219 xgifb_info->video_bpp,
@@ -2404,7 +2390,7 @@ MODULE_PARM_DESC(filter,
2404static void __exit xgifb_remove_module(void) 2390static void __exit xgifb_remove_module(void)
2405{ 2391{
2406 pci_unregister_driver(&xgifb_driver); 2392 pci_unregister_driver(&xgifb_driver);
2407 printk(KERN_DEBUG "xgifb: Module unloaded\n"); 2393 pr_debug("Module unloaded\n");
2408} 2394}
2409 2395
2410module_exit(xgifb_remove_module); 2396module_exit(xgifb_remove_module);
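The churn in this file is dominated by the printk(KERN_*) to pr_*() conversion. The pr_* helpers prepend a per-file pr_fmt() string, which is how the dropped "XGIfb: " literals survive in the log output. A minimal sketch of the mechanism, assuming the patch defines pr_fmt near the top of the file (the definition itself falls outside the hunks shown here):

    /* Sketch, not the patch itself: pr_fmt must be defined before the
     * printk headers are pulled in, or the default empty pr_fmt wins. */
    #define pr_fmt(fmt) "XGIfb: " fmt
    #include <linux/kernel.h>

    /* pr_err("Mode not supported\n") expands to
     * printk(KERN_ERR pr_fmt("Mode not supported\n")), i.e.
     * printk(KERN_ERR "XGIfb: Mode not supported\n"),
     * so dmesg output keeps its driver prefix. */

Note that the conversion keeps the existing message text verbatim, typos included ("DramSzie", "unable request memory size").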
diff --git a/drivers/staging/xgifb/XGIfb.h b/drivers/staging/xgifb/XGIfb.h
index 2c866bb65a00..37bb730de047 100644
--- a/drivers/staging/xgifb/XGIfb.h
+++ b/drivers/staging/xgifb/XGIfb.h
@@ -3,8 +3,8 @@
3#include <linux/ioctl.h> 3#include <linux/ioctl.h>
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6#include "vb_struct.h"
7#include "vgatypes.h" 6#include "vgatypes.h"
7#include "vb_struct.h"
8 8
9enum xgifb_display_type { 9enum xgifb_display_type {
10 XGIFB_DISP_NONE = 0, 10 XGIFB_DISP_NONE = 0,
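XGIfb.h itself only swaps the include order so vgatypes.h precedes vb_struct.h. The likely reason (an inference, not stated in the patch) is that vb_struct.h now leans on types reachable through vgatypes.h and the shared sis headers:

    /* Illustrative only; the concrete dependency is not shown in this hunk. */
    #include "vgatypes.h"   /* first: supplies the shared (SiS-derived) types */
    #include "vb_struct.h"  /* its struct members rely on those definitions */

The same reorder is applied in vb_init.c below.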
diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h
index 5beeef99bb14..c7317931f671 100644
--- a/drivers/staging/xgifb/vb_def.h
+++ b/drivers/staging/xgifb/vb_def.h
@@ -1,153 +1,48 @@
1/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/xgi/initdef.h 1/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/xgi/initdef.h
2 * ,v 1.4 2000/12/02 01:16:17 dawes Exp $*/ 2 * ,v 1.4 2000/12/02 01:16:17 dawes Exp $*/
3#ifndef _INITDEF_ 3#ifndef _VB_DEF_
4#define _INITDEF_ 4#define _VB_DEF_
5#include "../../video/sis/initdef.h"
5 6
6#define VB_XGI301C 0x0020 /* for 301C */ 7#define VB_XGI301C 0x0020 /* for 301C */
7/*end 301b*/
8
9#define VB_YPbPr525p 0x01
10#define VB_YPbPr750p 0x02
11#define VB_YPbPr1080i 0x03 8#define VB_YPbPr1080i 0x03
12 9
13#define LVDSCRT1Len 15 10#define LVDSCRT1Len 15
14
15#define SupportCHTV 0x0800
16#define SupportCRT2in301C 0x0100 /* for 301C */ 11#define SupportCRT2in301C 0x0100 /* for 301C */
17#define SetCHTVOverScan 0x8000 12#define SetCHTVOverScan 0x8000
18#define PanelRGB18Bit 0x0100
19#define PanelRGB24Bit 0x0000
20 13
21#define Panel320x480 0x07 /*fstn*/ 14#define Panel_320x480 0x07 /*fstn*/
22/* [ycchen] 02/12/03 Modify for Multi-Sync. LCD Support */ 15/* [ycchen] 02/12/03 Modify for Multi-Sync. LCD Support */
23#define PanelResInfo 0x1F /* CR36 Panel Type/LCDResInfo */ 16#define PanelResInfo 0x1F /* CR36 Panel Type/LCDResInfo */
24#define Panel800x600 0x01 17#define Panel_1024x768x75 0x22
25#define Panel1024x768 0x02 18#define Panel_1280x1024x75 0x23
26#define Panel1024x768x75 0x22
27#define Panel1280x1024 0x03
28#define Panel1280x1024x75 0x23
29#define Panel640x480 0x04
30#define Panel1280x960 0x07
31#define Panel1400x1050 0x09
32#define Panel1600x1200 0x0B
33 19
34#define PanelRef60Hz 0x00 20#define PanelRef60Hz 0x00
35#define PanelRef75Hz 0x20 21#define PanelRef75Hz 0x20
36 22
37#define CRT2DisplayFlag 0x2000
38
39#define YPbPr525iVCLK 0x03B 23#define YPbPr525iVCLK 0x03B
40#define YPbPr525iVCLK_2 0x03A 24#define YPbPr525iVCLK_2 0x03A
41 25
42#define XGI_CRT2_PORT_00 (0x00 - 0x030) 26#define XGI_CRT2_PORT_00 (0x00 - 0x030)
43#define XGI_CRT2_PORT_04 (0x04 - 0x030)
44#define XGI_CRT2_PORT_10 (0x10 - 0x30)
45#define XGI_CRT2_PORT_12 (0x12 - 0x30)
46#define XGI_CRT2_PORT_14 (0x14 - 0x30)
47
48#define _PanelType00 0x00
49#define _PanelType01 0x08
50#define _PanelType02 0x10
51#define _PanelType03 0x18
52#define _PanelType04 0x20
53#define _PanelType05 0x28
54#define _PanelType06 0x30
55#define _PanelType07 0x38
56#define _PanelType08 0x40
57#define _PanelType09 0x48
58#define _PanelType0A 0x50
59#define _PanelType0B 0x58
60#define _PanelType0C 0x60
61#define _PanelType0D 0x68
62#define _PanelType0E 0x70
63#define _PanelType0F 0x78
64 27
65/* ============================================================= 28/* =============================================================
66 for 310 29 for 310
67============================================================== */ 30============================================================== */
68/* add LCDDataList for GetLCDPtr */
69#define LCDDataList (VBIOSTablePointerStart+0x22)
70/* */
71/* Modify from 310.inc */
72/* */
73/* */
74
75#define ModeSoftSetting 0x04 31#define ModeSoftSetting 0x04
76 32
77#define BoardTVType 0x02
78
79#define SoftDRAMType 0x80 /* DRAMSetting */
80
81/* ---------------- SetMode Stack */ 33/* ---------------- SetMode Stack */
82#define CRT1Len 15 34#define CRT1Len 15
83#define VCLKLen 4 35#define VCLKLen 4
84#define VGA_XGI340 0x0001 /* 340 series */
85
86#define VB_XGI301 0x0001 /* VB Type Info */
87#define VB_XGI301B 0x0002 /* 301 series */
88#define VB_XGI302B 0x0004
89#define VB_NoLCD 0x8000
90#define VB_XGI301LV 0x0008
91#define VB_XGI302LV 0x0010
92#define VB_LVDS_NS 0x0001 /* 3rd party chip */
93
94#define ModeInfoFlag 0x0007
95#define ModeText 0x0000
96#define ModeEGA 0x0002 /* 16 colors mode */
97#define ModeVGA 0x0003 /* 256 colors mode */
98
99#define DACInfoFlag 0x0018
100
101#define MemoryInfoFlag 0x01e0
102#define MemorySizeShift 5
103
104#define Charx8Dot 0x0200
105#define LineCompareOff 0x0400
106#define CRT2Mode 0x0800
107#define HalfDCLK 0x1000
108#define NoSupportSimuTV 0x2000
109#define DoubleScanMode 0x8000
110
111/* -------------- Ext_InfoFlag */
112#define Support16Bpp 0x0005
113#define Support32Bpp 0x0007
114 36
115#define SupportAllCRT2 0x0078 37#define SupportAllCRT2 0x0078
116#define SupportTV 0x0008
117#define SupportHiVisionTV 0x0010
118#define SupportLCD 0x0020
119#define SupportRAMDAC2 0x0040
120#define NoSupportTV 0x0070 38#define NoSupportTV 0x0070
121#define NoSupportHiVisionTV 0x0060 39#define NoSupportHiVisionTV 0x0060
122#define NoSupportLCD 0x0058 40#define NoSupportLCD 0x0058
123#define SupportTV1024 0x0800 /* 301btest */
124#define SupportYPbPr 0x1000 /* 301lv */
125#define InterlaceMode 0x0080
126#define SyncPP 0x0000
127#define SyncPN 0x4000
128#define SyncNP 0x8000
129#define SyncNN 0xC000
130 41
131/* -------------- SetMode Stack/Scratch */ 42/* -------------- SetMode Stack/Scratch */
132#define SetSimuScanMode 0x0001 /* VBInfo/CR30 & CR31 */ 43#define XGI_SetCRT2ToLCDA 0x0100
133#define SwitchToCRT2 0x0002
134#define SetCRT2ToTV 0x089C
135#define SetCRT2ToAVIDEO 0x0004
136#define SetCRT2ToSVIDEO 0x0008
137#define SetCRT2ToSCART 0x0010
138#define SetCRT2ToLCD 0x0020
139#define SetCRT2ToRAMDAC 0x0040
140#define SetCRT2ToHiVisionTV 0x0080
141#define SetCRT2ToLCDA 0x0100
142#define SetInSlaveMode 0x0200
143#define SetNotSimuMode 0x0400
144#define SetCRT2ToYPbPr 0x0800
145#define LoadDACFlag 0x1000
146#define DisableCRT2Display 0x2000
147#define DriverMode 0x4000
148#define SetCRT2ToDualEdge 0x8000 44#define SetCRT2ToDualEdge 0x8000
149 45
150#define ProgrammingCRT2 0x0001 /* Set Flag */
151#define ReserveTVOption 0x0008 46#define ReserveTVOption 0x0008
152#define GatingCRT 0x0800 47#define GatingCRT 0x0800
153#define DisableChB 0x1000 48#define DisableChB 0x1000
@@ -155,23 +50,14 @@
155#define DisableChA 0x4000 50#define DisableChA 0x4000
156#define EnableChA 0x8000 51#define EnableChA 0x8000
157 52
158#define SetNTSCTV 0x0000 /* TV Info */
159#define SetPALTV 0x0001
160#define SetNTSCJ 0x0002
161#define SetPALMTV 0x0004
162#define SetPALNTV 0x0008
163#define SetYPbPrMode525i 0x0020
164#define SetYPbPrMode525p 0x0040
165#define SetYPbPrMode750p 0x0080
166#define SetYPbPrMode1080i 0x0100
167#define SetTVLowResolution 0x0400 53#define SetTVLowResolution 0x0400
168#define TVSimuMode 0x0800 54#define TVSimuMode 0x0800
169#define RPLLDIV2XO 0x1000 55#define RPLLDIV2XO 0x1000
170#define NTSC1024x768 0x2000 56#define NTSC1024x768 0x2000
171#define SetTVLockMode 0x4000 57#define SetTVLockMode 0x4000
172 58
173#define LCDVESATiming 0x0001 /* LCD Info/CR37 */ 59#define XGI_LCDVESATiming 0x0001 /* LCD Info/CR37 */
174#define EnableLVDSDDA 0x0002 60#define XGI_EnableLVDSDDA 0x0002
175#define EnableScalingLCD 0x0008 61#define EnableScalingLCD 0x0008
176#define SetPWDEnable 0x0004 62#define SetPWDEnable 0x0004
177#define SetLCDtoNonExpanding 0x0010 63#define SetLCDtoNonExpanding 0x0010
@@ -184,7 +70,7 @@
184#define EnableLCD24bpp 0x0004 /* default */ 70#define EnableLCD24bpp 0x0004 /* default */
185#define DisableLCD24bpp 0x0000 71#define DisableLCD24bpp 0x0000
186#define LCDPolarity 0x00c0 /* default: SyncNN */ 72#define LCDPolarity 0x00c0 /* default: SyncNN */
187#define LCDDualLink 0x0100 73#define XGI_LCDDualLink 0x0100
188#define EnableSpectrum 0x0200 74#define EnableSpectrum 0x0200
189#define PWDEnable 0x0400 75#define PWDEnable 0x0400
190#define EnableVBCLKDRVLOW 0x4000 76#define EnableVBCLKDRVLOW 0x4000
@@ -206,31 +92,21 @@
206 92
207#define TVSense 0xc7 93#define TVSense 0xc7
208 94
209#define TVOverScan 0x10 /* CR35 */
210
211#define YPbPrMode 0xe0 95#define YPbPrMode 0xe0
212#define YPbPrMode525i 0x00 96#define YPbPrMode525i 0x00
213#define YPbPrMode525p 0x20 97#define YPbPrMode525p 0x20
214#define YPbPrMode750p 0x40 98#define YPbPrMode750p 0x40
215#define YPbPrMode1080i 0x60 99#define YPbPrMode1080i 0x60
216 100
217
218#define LCDRGB18Bit 0x01 /* CR37 */
219#define LCDNonExpanding 0x10
220#define LCDSync 0x20
221#define LCDSyncBit 0xe0 /* H/V polarity & sync ID */
222
223#define ScalingLCD 0x08 101#define ScalingLCD 0x08
224 102
225#define EnableDualEdge 0x01 /* CR38 */
226#define SetToLCDA 0x02
227#define SetYPbPr 0x04 103#define SetYPbPr 0x04
228 104
229/* ---------------------- VUMA Information */ 105/* ---------------------- VUMA Information */
230#define DisplayDeviceFromCMOS 0x10 106#define DisplayDeviceFromCMOS 0x10
231 107
232/* ---------------------- HK Evnet Definition */ 108/* ---------------------- HK Evnet Definition */
233#define ModeSwitchStatus 0xf0 109#define XGI_ModeSwitchStatus 0xf0
234#define ActiveCRT1 0x10 110#define ActiveCRT1 0x10
235#define ActiveLCD 0x0020 111#define ActiveLCD 0x0020
236#define ActiveTV 0x40 112#define ActiveTV 0x40
@@ -246,28 +122,13 @@
246/* translated from asm code 301def.h */ 122/* translated from asm code 301def.h */
247/* */ 123/* */
248/* --------------------------------------------------------- */ 124/* --------------------------------------------------------- */
249#define LCDDataLen 8
250#define TVDataLen 12
251#define LVDSCRT1Len_H 8 125#define LVDSCRT1Len_H 8
252#define LVDSCRT1Len_V 7 126#define LVDSCRT1Len_V 7
253#define LVDSDataLen 6
254#define LVDSDesDataLen 6
255#define LCDDesDataLen 6 127#define LCDDesDataLen 6
256#define LVDSDesDataLen2 8 128#define LVDSDesDataLen2 8
257#define LCDDesDataLen2 8 129#define LCDDesDataLen2 8
258#define CHTVRegLen 16
259 130
260#define StHiTVHT 892
261#define StHiTVVT 1126
262#define StHiTextTVHT 1000
263#define StHiTextTVVT 1126
264#define ExtHiTVHT 2100
265#define ExtHiTVVT 1125
266#define NTSCHT 1716
267#define NTSCVT 525
268#define NTSC1024x768HT 1908 131#define NTSC1024x768HT 1908
269#define PALHT 1728
270#define PALVT 625
271 132
272#define YPbPrTV525iHT 1716 /* YPbPr */ 133#define YPbPrTV525iHT 1716 /* YPbPr */
273#define YPbPrTV525iVT 525 134#define YPbPrTV525iVT 525
@@ -276,22 +137,16 @@
276#define YPbPrTV750pHT 1650 137#define YPbPrTV750pHT 1650
277#define YPbPrTV750pVT 750 138#define YPbPrTV750pVT 750
278 139
279#define CRT2Delay1 0x04 /* XGI301 */
280#define CRT2Delay2 0x0A /* 301B,302 */
281
282
283#define VCLK25_175 0x00 140#define VCLK25_175 0x00
284#define VCLK28_322 0x01 141#define VCLK28_322 0x01
285#define VCLK31_5 0x02 142#define VCLK31_5 0x02
286#define VCLK36 0x03 143#define VCLK36 0x03
287#define VCLK40 0x04
288#define VCLK43_163 0x05 144#define VCLK43_163 0x05
289#define VCLK44_9 0x06 145#define VCLK44_9 0x06
290#define VCLK49_5 0x07 146#define VCLK49_5 0x07
291#define VCLK50 0x08 147#define VCLK50 0x08
292#define VCLK52_406 0x09 148#define VCLK52_406 0x09
293#define VCLK56_25 0x0A 149#define VCLK56_25 0x0A
294#define VCLK65 0x0B
295#define VCLK68_179 0x0D 150#define VCLK68_179 0x0D
296#define VCLK72_852 0x0E 151#define VCLK72_852 0x0E
297#define VCLK75 0x0F 152#define VCLK75 0x0F
@@ -300,7 +155,6 @@
300#define VCLK83_95 0x13 155#define VCLK83_95 0x13
301#define VCLK86_6 0x15 156#define VCLK86_6 0x15
302#define VCLK94_5 0x16 157#define VCLK94_5 0x16
303#define VCLK108_2 0x19
304#define VCLK113_309 0x1B 158#define VCLK113_309 0x1B
305#define VCLK116_406 0x1C 159#define VCLK116_406 0x1C
306#define VCLK135_5 0x1E 160#define VCLK135_5 0x1E
@@ -327,16 +181,10 @@
327#define VCLK125_999 0x51 181#define VCLK125_999 0x51
328#define VCLK148_5 0x52 182#define VCLK148_5 0x52
329#define VCLK217_325 0x55 183#define VCLK217_325 0x55
330#define YPbPr750pVCLK 0x57 184#define XGI_YPbPr750pVCLK 0x57
331 185
332#define TVVCLKDIV2 0x3A
333#define TVVCLK 0x3B
334#define HiTVVCLKDIV2 0x3C
335#define HiTVVCLK 0x3D
336#define HiTVSimuVCLK 0x3E
337#define HiTVTextVCLK 0x3F
338#define VCLK39_77 0x40 186#define VCLK39_77 0x40
339#define YPbPr525pVCLK 0x3A 187#define YPbPr525pVCLK 0x3A
340#define NTSC1024VCLK 0x41 188#define NTSC1024VCLK 0x41
341#define VCLK35_2 0x49 /* ; 800x480 */ 189#define VCLK35_2 0x49 /* ; 800x480 */
342#define VCLK122_61 0x4A 190#define VCLK122_61 0x4A
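The vb_def.h rewrite is the heart of this series: the header drops its private copies of the SiS constants and pulls in the sis driver's initdef.h instead, keeping only XGI-specific values plus a few XGI_-prefixed variants (XGI_SetCRT2ToLCDA, XGI_LCDVESATiming, XGI_YPbPr750pVCLK, ...) that must not collide with the sis names. The resulting shape of the header, sketched from the hunks above:

    #ifndef _VB_DEF_
    #define _VB_DEF_
    #include "../../video/sis/initdef.h"  /* shared SIS_*, Panel_*, VB_SIS* names */

    /* Only values absent from (or colliding with) the sis header remain. */
    #define VB_XGI301C          0x0020
    #define XGI_SetCRT2ToLCDA   0x0100
    #define XGI_YPbPr750pVCLK   0x57
    #endif /* _VB_DEF_ */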
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 4ccd988ffd7c..94d5c35e22fb 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -3,8 +3,8 @@
3#include <linux/pci.h> 3#include <linux/pci.h>
4#include <linux/vmalloc.h> 4#include <linux/vmalloc.h>
5 5
6#include "vgatypes.h"
7#include "XGIfb.h" 6#include "XGIfb.h"
7#include "vgatypes.h"
8 8
9#include "vb_def.h" 9#include "vb_def.h"
10#include "vb_struct.h" 10#include "vb_struct.h"
@@ -1268,7 +1268,7 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
1268 1268
1269 if (pVBInfo->IF_DEF_HiVision == 1) { 1269 if (pVBInfo->IF_DEF_HiVision == 1) {
1270 if ((temp >> 8) & ActiveHiTV) 1270 if ((temp >> 8) & ActiveHiTV)
1271 tempcl |= SetCRT2ToHiVisionTV; 1271 tempcl |= SetCRT2ToHiVision;
1272 } 1272 }
1273 1273
1274 if (pVBInfo->IF_DEF_YPbPr == 1) { 1274 if (pVBInfo->IF_DEF_YPbPr == 1) {
@@ -1287,7 +1287,7 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
1287 1287
1288 if (pVBInfo->IF_DEF_HiVision == 1) { 1288 if (pVBInfo->IF_DEF_HiVision == 1) {
1289 if ((temp >> 8) & ActiveHiTV) 1289 if ((temp >> 8) & ActiveHiTV)
1290 tempcl |= SetCRT2ToHiVisionTV; 1290 tempcl |= SetCRT2ToHiVision;
1291 } 1291 }
1292 1292
1293 if (pVBInfo->IF_DEF_YPbPr == 1) { 1293 if (pVBInfo->IF_DEF_YPbPr == 1) {
@@ -1299,9 +1299,9 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
1299 tempcl |= SetSimuScanMode; 1299 tempcl |= SetSimuScanMode;
1300 if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) || (temp & ActiveTV) 1300 if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) || (temp & ActiveTV)
1301 || (temp & ActiveCRT2))) 1301 || (temp & ActiveCRT2)))
1302 tempcl ^= (SetSimuScanMode | SwitchToCRT2); 1302 tempcl ^= (SetSimuScanMode | SwitchCRT2);
1303 if ((temp & ActiveLCD) && (temp & ActiveTV)) 1303 if ((temp & ActiveLCD) && (temp & ActiveTV))
1304 tempcl ^= (SetSimuScanMode | SwitchToCRT2); 1304 tempcl ^= (SetSimuScanMode | SwitchCRT2);
1305 xgifb_reg_set(pVBInfo->P3d4, 0x30, tempcl); 1305 xgifb_reg_set(pVBInfo->P3d4, 0x30, tempcl);
1306 1306
1307 CR31Data = xgifb_reg_get(pVBInfo->P3d4, 0x31); 1307 CR31Data = xgifb_reg_get(pVBInfo->P3d4, 0x31);
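The tempcl ^= (SetSimuScanMode | SwitchCRT2) lines above are bit flips, not assignments: XOR-ing with both masks turns a simultaneous-scan request into a plain CRT2 switch in one step. With the values deleted from vb_def.h (SetSimuScanMode 0x0001, SwitchToCRT2 0x0002), and assuming the renamed SwitchCRT2 keeps the same value in the sis header:

    unsigned short tempcl = SetSimuScanMode;      /* 0x0001: simu bit set */
    tempcl ^= (SetSimuScanMode | SwitchCRT2);     /* 0x0001 ^ 0x0003 = 0x0002 */
    /* simu bit cleared, CRT2-switch bit set; XOR only flips these two
     * bits, so anything else already in tempcl passes through untouched. */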
@@ -1516,11 +1516,11 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
1516 pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19; 1516 pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19;
1517 pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A; 1517 pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A;
1518 pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00; 1518 pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00;
1519 pVBInfo->Part1Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_04; 1519 pVBInfo->Part1Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_04;
1520 pVBInfo->Part2Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_10; 1520 pVBInfo->Part2Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_10;
1521 pVBInfo->Part3Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_12; 1521 pVBInfo->Part3Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_12;
1522 pVBInfo->Part4Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14; 1522 pVBInfo->Part4Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14;
1523 pVBInfo->Part5Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14 + 2; 1523 pVBInfo->Part5Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14 + 2;
1524 printk("5"); 1524 printk("5");
1525 1525
1526 if (HwDeviceExtension->jChipType < XG20) /* kuku 2004/06/25 */ 1526 if (HwDeviceExtension->jChipType < XG20) /* kuku 2004/06/25 */
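The Part0..Part5 port setup keeps XGI_CRT2_PORT_00 as a local define but takes the remaining offsets from the sis headers. The deleted XGI defines were relative to the relocated I/O base, e.g. XGI_CRT2_PORT_04 was (0x04 - 0x030); assuming the SIS_CRT2_PORT_* values match (which the mechanical rename suggests but this patch does not show), the computed port addresses are unchanged:

    /* Assumption: SIS_CRT2_PORT_04 == (0x04 - 0x30), the deleted XGI value,
     * so Part1Port still resolves to BaseAddr - 0x2C. */
    pVBInfo->Part1Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_04;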
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 67a316c3c108..2919924213c4 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -61,20 +61,20 @@ static const unsigned short XGINew_VGA_DAC[] = {
61void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo) 61void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
62{ 62{
63 pVBInfo->SModeIDTable = (struct XGI_StStruct *) XGI330_SModeIDTable; 63 pVBInfo->SModeIDTable = (struct XGI_StStruct *) XGI330_SModeIDTable;
64 pVBInfo->StandTable = (struct XGI_StandTableStruct *) XGI330_StandTable; 64 pVBInfo->StandTable = (struct SiS_StandTable_S *) XGI330_StandTable;
65 pVBInfo->EModeIDTable = (struct XGI_ExtStruct *) XGI330_EModeIDTable; 65 pVBInfo->EModeIDTable = (struct XGI_ExtStruct *) XGI330_EModeIDTable;
66 pVBInfo->RefIndex = (struct XGI_Ext2Struct *) XGI330_RefIndex; 66 pVBInfo->RefIndex = (struct XGI_Ext2Struct *) XGI330_RefIndex;
67 pVBInfo->XGINEWUB_CRT1Table 67 pVBInfo->XGINEWUB_CRT1Table
68 = (struct XGI_CRT1TableStruct *) XGI_CRT1Table; 68 = (struct XGI_CRT1TableStruct *) XGI_CRT1Table;
69 69
70 pVBInfo->MCLKData = (struct XGI_MCLKDataStruct *) XGI340New_MCLKData; 70 pVBInfo->MCLKData = (struct SiS_MCLKData *) XGI340New_MCLKData;
71 pVBInfo->ECLKData = (struct XGI_ECLKDataStruct *) XGI340_ECLKData; 71 pVBInfo->ECLKData = (struct XGI_ECLKDataStruct *) XGI340_ECLKData;
72 pVBInfo->VCLKData = (struct XGI_VCLKDataStruct *) XGI_VCLKData; 72 pVBInfo->VCLKData = (struct SiS_VCLKData *) XGI_VCLKData;
73 pVBInfo->VBVCLKData = (struct XGI_VBVCLKDataStruct *) XGI_VBVCLKData; 73 pVBInfo->VBVCLKData = (struct SiS_VBVCLKData *) XGI_VBVCLKData;
74 pVBInfo->ScreenOffset = XGI330_ScreenOffset; 74 pVBInfo->ScreenOffset = XGI330_ScreenOffset;
75 pVBInfo->StResInfo = (struct XGI_StResInfoStruct *) XGI330_StResInfo; 75 pVBInfo->StResInfo = (struct SiS_StResInfo_S *) XGI330_StResInfo;
76 pVBInfo->ModeResInfo 76 pVBInfo->ModeResInfo
77 = (struct XGI_ModeResInfoStruct *) XGI330_ModeResInfo; 77 = (struct SiS_ModeResInfo_S *) XGI330_ModeResInfo;
78 78
79 pVBInfo->pOutputSelect = &XGI330_OutputSelect; 79 pVBInfo->pOutputSelect = &XGI330_OutputSelect;
80 pVBInfo->pSoftSetting = &XGI330_SoftSetting; 80 pVBInfo->pSoftSetting = &XGI330_SoftSetting;
@@ -138,7 +138,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
138 pVBInfo->UpdateCRT1 = (struct XGI_XG21CRT1Struct *) XGI_UpdateCRT1Table; 138 pVBInfo->UpdateCRT1 = (struct XGI_XG21CRT1Struct *) XGI_UpdateCRT1Table;
139 139
140 /* 310 customization related */ 140 /* 310 customization related */
141 if ((pVBInfo->VBType & VB_XGI301LV) || (pVBInfo->VBType & VB_XGI302LV)) 141 if ((pVBInfo->VBType & VB_SIS301LV) || (pVBInfo->VBType & VB_SIS302LV))
142 pVBInfo->LCDCapList = XGI_LCDDLCapList; 142 pVBInfo->LCDCapList = XGI_LCDDLCapList;
143 else 143 else
144 pVBInfo->LCDCapList = XGI_LCDCapList; 144 pVBInfo->LCDCapList = XGI_LCDCapList;
@@ -153,7 +153,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
153 153
154 if (ChipType == XG27) { 154 if (ChipType == XG27) {
155 pVBInfo->MCLKData 155 pVBInfo->MCLKData
156 = (struct XGI_MCLKDataStruct *) XGI27New_MCLKData; 156 = (struct SiS_MCLKData *) XGI27New_MCLKData;
157 pVBInfo->CR40 = XGI27_cr41; 157 pVBInfo->CR40 = XGI27_cr41;
158 pVBInfo->pXGINew_CR97 = &XG27_CR97; 158 pVBInfo->pXGINew_CR97 = &XG27_CR97;
159 pVBInfo->pSR36 = &XG27_SR36; 159 pVBInfo->pSR36 = &XG27_SR36;
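InitTo330Pointer now casts the XGI-side tables to the shared SiS structure types (struct SiS_MCLKData, SiS_VCLKData, SiS_StandTable_S, SiS_ModeResInfo_S, ...). Casting a table across struct types like this is only safe because the layouts are expected to match member for member; a hedged illustration (the field names are a guess at the sis definition, not quoted from it):

    struct SiS_MCLKData {
        unsigned char  SR28, SR29, SR2A;  /* PLL programming registers, presumably */
        unsigned short CLOCK;             /* resulting memory clock */
    };

    /* The XGI table must share this exact layout for the cast to be valid. */
    pVBInfo->MCLKData = (struct SiS_MCLKData *) XGI340New_MCLKData;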
@@ -208,8 +208,8 @@ static void XGI_SetSeqRegs(unsigned short ModeNo,
208 xgifb_reg_set(pVBInfo->P3c4, 0x00, 0x03); /* Set SR0 */ 208 xgifb_reg_set(pVBInfo->P3c4, 0x00, 0x03); /* Set SR0 */
209 tempah = pVBInfo->StandTable[StandTableIndex].SR[0]; 209 tempah = pVBInfo->StandTable[StandTableIndex].SR[0];
210 210
211 i = SetCRT2ToLCDA; 211 i = XGI_SetCRT2ToLCDA;
212 if (pVBInfo->VBInfo & SetCRT2ToLCDA) { 212 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
213 tempah |= 0x01; 213 tempah |= 0x01;
214 } else { 214 } else {
215 if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD)) { 215 if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD)) {
@@ -263,7 +263,7 @@ static void XGI_SetATTRegs(unsigned short ModeNo,
263 ARdata = pVBInfo->StandTable[StandTableIndex].ATTR[i]; 263 ARdata = pVBInfo->StandTable[StandTableIndex].ATTR[i];
264 if (modeflag & Charx8Dot) { /* ifndef Dot9 */ 264 if (modeflag & Charx8Dot) { /* ifndef Dot9 */
265 if (i == 0x13) { 265 if (i == 0x13) {
266 if (pVBInfo->VBInfo & SetCRT2ToLCDA) { 266 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
267 ARdata = 0; 267 ARdata = 0;
268 } else { 268 } else {
269 if (pVBInfo->VBInfo & (SetCRT2ToTV 269 if (pVBInfo->VBInfo & (SetCRT2ToTV
@@ -356,11 +356,11 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
356 } 356 }
357 357
358 /* 301b */ 358 /* 301b */
359 if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { 359 if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
360 tempax |= SupportLCD; 360 tempax |= SupportLCD;
361 361
362 if (pVBInfo->LCDResInfo != Panel1280x1024) { 362 if (pVBInfo->LCDResInfo != Panel_1280x1024) {
363 if (pVBInfo->LCDResInfo != Panel1280x960) { 363 if (pVBInfo->LCDResInfo != Panel_1280x960) {
364 if (pVBInfo->LCDInfo & 364 if (pVBInfo->LCDInfo &
365 LCDNonExpanding) { 365 LCDNonExpanding) {
366 if (resinfo >= 9) { 366 if (resinfo >= 9) {
@@ -372,10 +372,10 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
372 } 372 }
373 } 373 }
374 374
375 if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) { /* for HiTV */ 375 if (pVBInfo->VBInfo & SetCRT2ToHiVision) { /* for HiTV */
376 if ((pVBInfo->VBType & VB_XGI301LV) && 376 if ((pVBInfo->VBType & VB_SIS301LV) &&
377 (pVBInfo->VBExtInfo == VB_YPbPr1080i)) { 377 (pVBInfo->VBExtInfo == VB_YPbPr1080i)) {
378 tempax |= SupportYPbPr; 378 tempax |= SupportYPbPr750p;
379 if (pVBInfo->VBInfo & SetInSlaveMode) { 379 if (pVBInfo->VBInfo & SetInSlaveMode) {
380 if (resinfo == 4) 380 if (resinfo == 4)
381 return 0; 381 return 0;
@@ -387,7 +387,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
387 return 0; 387 return 0;
388 } 388 }
389 } else { 389 } else {
390 tempax |= SupportHiVisionTV; 390 tempax |= SupportHiVision;
391 if (pVBInfo->VBInfo & SetInSlaveMode) { 391 if (pVBInfo->VBInfo & SetInSlaveMode) {
392 if (resinfo == 4) 392 if (resinfo == 4)
393 return 0; 393 return 0;
@@ -406,17 +406,17 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
406 if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO | 406 if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO |
407 SetCRT2ToSVIDEO | 407 SetCRT2ToSVIDEO |
408 SetCRT2ToSCART | 408 SetCRT2ToSCART |
409 SetCRT2ToYPbPr | 409 SetCRT2ToYPbPr525750 |
410 SetCRT2ToHiVisionTV)) { 410 SetCRT2ToHiVision)) {
411 tempax |= SupportTV; 411 tempax |= SupportTV;
412 412
413 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B 413 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
414 | VB_XGI301LV | VB_XGI302LV 414 | VB_SIS301LV | VB_SIS302LV
415 | VB_XGI301C)) { 415 | VB_XGI301C)) {
416 tempax |= SupportTV1024; 416 tempax |= SupportTV1024;
417 } 417 }
418 418
419 if (!(pVBInfo->VBInfo & SetPALTV)) { 419 if (!(pVBInfo->VBInfo & TVSetPAL)) {
420 if (modeflag & NoSupportSimuTV) { 420 if (modeflag & NoSupportSimuTV) {
421 if (pVBInfo->VBInfo & 421 if (pVBInfo->VBInfo &
422 SetInSlaveMode) { 422 SetInSlaveMode) {
@@ -436,7 +436,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
436 if (resinfo > 0x08) 436 if (resinfo > 0x08)
437 return 0; /* 1024x768 */ 437 return 0; /* 1024x768 */
438 438
439 if (pVBInfo->LCDResInfo < Panel1024x768) { 439 if (pVBInfo->LCDResInfo < Panel_1024x768) {
440 if (resinfo > 0x07) 440 if (resinfo > 0x07)
441 return 0; /* 800x600 */ 441 return 0; /* 800x600 */
442 442
@@ -1230,23 +1230,23 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
1230 struct xgi_hw_device_info *HwDeviceExtension, 1230 struct xgi_hw_device_info *HwDeviceExtension,
1231 struct vb_device_info *pVBInfo) 1231 struct vb_device_info *pVBInfo)
1232{ 1232{
1233 unsigned short LCDXlat1VCLK[4] = { VCLK65 + 2, 1233 unsigned short LCDXlat1VCLK[4] = { VCLK65_315 + 2,
1234 VCLK65 + 2, 1234 VCLK65_315 + 2,
1235 VCLK65 + 2, 1235 VCLK65_315 + 2,
1236 VCLK65 + 2 }; 1236 VCLK65_315 + 2 };
1237 unsigned short LCDXlat2VCLK[4] = { VCLK108_2 + 5, 1237 unsigned short LCDXlat2VCLK[4] = { VCLK108_2_315 + 5,
1238 VCLK108_2 + 5, 1238 VCLK108_2_315 + 5,
1239 VCLK108_2 + 5, 1239 VCLK108_2_315 + 5,
1240 VCLK108_2 + 5 }; 1240 VCLK108_2_315 + 5 };
1241 unsigned short LVDSXlat1VCLK[4] = { VCLK40, VCLK40, VCLK40, VCLK40 }; 1241 unsigned short LVDSXlat1VCLK[4] = { VCLK40, VCLK40, VCLK40, VCLK40 };
1242 unsigned short LVDSXlat2VCLK[4] = { VCLK65 + 2, 1242 unsigned short LVDSXlat2VCLK[4] = { VCLK65_315 + 2,
1243 VCLK65 + 2, 1243 VCLK65_315 + 2,
1244 VCLK65 + 2, 1244 VCLK65_315 + 2,
1245 VCLK65 + 2 }; 1245 VCLK65_315 + 2 };
1246 unsigned short LVDSXlat3VCLK[4] = { VCLK65 + 2, 1246 unsigned short LVDSXlat3VCLK[4] = { VCLK65_315 + 2,
1247 VCLK65 + 2, 1247 VCLK65_315 + 2,
1248 VCLK65 + 2, 1248 VCLK65_315 + 2,
1249 VCLK65 + 2 }; 1249 VCLK65_315 + 2 };
1250 1250
1251 unsigned short CRT2Index, VCLKIndex; 1251 unsigned short CRT2Index, VCLKIndex;
1252 unsigned short modeflag, resinfo; 1252 unsigned short modeflag, resinfo;
@@ -1266,36 +1266,36 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
1266 1266
1267 if (pVBInfo->IF_DEF_LVDS == 0) { 1267 if (pVBInfo->IF_DEF_LVDS == 0) {
1268 CRT2Index = CRT2Index >> 6; /* for LCD */ 1268 CRT2Index = CRT2Index >> 6; /* for LCD */
1269 if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { /*301b*/ 1269 if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /*301b*/
1270 if (pVBInfo->LCDResInfo != Panel1024x768) 1270 if (pVBInfo->LCDResInfo != Panel_1024x768)
1271 VCLKIndex = LCDXlat2VCLK[CRT2Index]; 1271 VCLKIndex = LCDXlat2VCLK[CRT2Index];
1272 else 1272 else
1273 VCLKIndex = LCDXlat1VCLK[CRT2Index]; 1273 VCLKIndex = LCDXlat1VCLK[CRT2Index];
1274 } else if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) { 1274 } else if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
1275 if (pVBInfo->SetFlag & RPLLDIV2XO) { 1275 if (pVBInfo->SetFlag & RPLLDIV2XO) {
1276 VCLKIndex = HiTVVCLKDIV2; 1276 VCLKIndex = TVCLKBASE_315 + HiTVVCLKDIV2;
1277 VCLKIndex += 25; 1277 VCLKIndex += 25;
1278 } else { 1278 } else {
1279 VCLKIndex = HiTVVCLK; 1279 VCLKIndex = TVCLKBASE_315 + HiTVVCLK;
1280 VCLKIndex += 25; 1280 VCLKIndex += 25;
1281 } 1281 }
1282 1282
1283 if (pVBInfo->SetFlag & TVSimuMode) { 1283 if (pVBInfo->SetFlag & TVSimuMode) {
1284 if (modeflag & Charx8Dot) { 1284 if (modeflag & Charx8Dot) {
1285 VCLKIndex = HiTVSimuVCLK; 1285 VCLKIndex = TVCLKBASE_315 + HiTVSimuVCLK;
1286 VCLKIndex += 25; 1286 VCLKIndex += 25;
1287 } else { 1287 } else {
1288 VCLKIndex = HiTVTextVCLK; 1288 VCLKIndex = TVCLKBASE_315 + HiTVTextVCLK;
1289 VCLKIndex += 25; 1289 VCLKIndex += 25;
1290 } 1290 }
1291 } 1291 }
1292 1292
1293 /* 301lv */ 1293 /* 301lv */
1294 if ((pVBInfo->VBType & VB_XGI301LV) && 1294 if ((pVBInfo->VBType & VB_SIS301LV) &&
1295 !(pVBInfo->VBExtInfo == VB_YPbPr1080i)) { 1295 !(pVBInfo->VBExtInfo == VB_YPbPr1080i)) {
1296 if (pVBInfo->VBExtInfo == VB_YPbPr750p) 1296 if (pVBInfo->VBExtInfo == YPbPr750p)
1297 VCLKIndex = YPbPr750pVCLK; 1297 VCLKIndex = XGI_YPbPr750pVCLK;
1298 else if (pVBInfo->VBExtInfo == VB_YPbPr525p) 1298 else if (pVBInfo->VBExtInfo == YPbPr525p)
1299 VCLKIndex = YPbPr525pVCLK; 1299 VCLKIndex = YPbPr525pVCLK;
1300 else if (pVBInfo->SetFlag & RPLLDIV2XO) 1300 else if (pVBInfo->SetFlag & RPLLDIV2XO)
1301 VCLKIndex = YPbPr525iVCLK_2; 1301 VCLKIndex = YPbPr525iVCLK_2;
@@ -1304,10 +1304,10 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
1304 } 1304 }
1305 } else if (pVBInfo->VBInfo & SetCRT2ToTV) { 1305 } else if (pVBInfo->VBInfo & SetCRT2ToTV) {
1306 if (pVBInfo->SetFlag & RPLLDIV2XO) { 1306 if (pVBInfo->SetFlag & RPLLDIV2XO) {
1307 VCLKIndex = TVVCLKDIV2; 1307 VCLKIndex = TVCLKBASE_315 + TVVCLKDIV2;
1308 VCLKIndex += 25; 1308 VCLKIndex += 25;
1309 } else { 1309 } else {
1310 VCLKIndex = TVVCLK; 1310 VCLKIndex = TVCLKBASE_315 + TVVCLK;
1311 VCLKIndex += 25; 1311 VCLKIndex += 25;
1312 } 1312 }
1313 } else { /* for CRT2 */ 1313 } else { /* for CRT2 */
@@ -1329,11 +1329,11 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
1329 VCLKIndex = CRT2Index; 1329 VCLKIndex = CRT2Index;
1330 1330
1331 VCLKIndex = VCLKIndex >> 6; 1331 VCLKIndex = VCLKIndex >> 6;
1332 if ((pVBInfo->LCDResInfo == Panel800x600) || 1332 if ((pVBInfo->LCDResInfo == Panel_800x600) ||
1333 (pVBInfo->LCDResInfo == Panel320x480)) 1333 (pVBInfo->LCDResInfo == Panel_320x480))
1334 VCLKIndex = LVDSXlat1VCLK[VCLKIndex]; 1334 VCLKIndex = LVDSXlat1VCLK[VCLKIndex];
1335 else if ((pVBInfo->LCDResInfo == Panel1024x768) || 1335 else if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
1336 (pVBInfo->LCDResInfo == Panel1024x768x75)) 1336 (pVBInfo->LCDResInfo == Panel_1024x768x75))
1337 VCLKIndex = LVDSXlat2VCLK[VCLKIndex]; 1337 VCLKIndex = LVDSXlat2VCLK[VCLKIndex];
1338 else 1338 else
1339 VCLKIndex = LVDSXlat3VCLK[VCLKIndex]; 1339 VCLKIndex = LVDSXlat3VCLK[VCLKIndex];
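In the TV branches above, the bare VCLK indices become TVCLKBASE_315-relative, with the pre-existing "+= 25" adjustment kept on top. The deleted absolute defines ran from TVVCLKDIV2 0x3A through HiTVTextVCLK 0x3F; the rename only preserves behaviour if the sis base-plus-offset sums land on those same values, which the mechanical nature of the change suggests but this patch does not show:

    /* Assumption: TVCLKBASE_315 + TVVCLK == 0x3B, the old XGI TVVCLK,
     * so the final table index (after += 25) is unchanged. */
    VCLKIndex = TVCLKBASE_315 + TVVCLK;
    VCLKIndex += 25;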
@@ -1360,9 +1360,9 @@ static void XGI_SetCRT1VCLK(unsigned short ModeNo,
1360 xgifb_reg_set(pVBInfo->P3c4, 0x2C, 1360 xgifb_reg_set(pVBInfo->P3c4, 0x2C,
1361 pVBInfo->VCLKData[index].SR2C); 1361 pVBInfo->VCLKData[index].SR2C);
1362 xgifb_reg_set(pVBInfo->P3c4, 0x2D, 0x01); 1362 xgifb_reg_set(pVBInfo->P3c4, 0x2D, 0x01);
1363 } else if ((pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV 1363 } else if ((pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
1364 | VB_XGI302LV | VB_XGI301C)) && (pVBInfo->VBInfo 1364 | VB_SIS302LV | VB_XGI301C)) && (pVBInfo->VBInfo
1365 & SetCRT2ToLCDA)) { 1365 & XGI_SetCRT2ToLCDA)) {
1366 vclkindex = XGI_GetVCLK2Ptr(ModeNo, ModeIdIndex, 1366 vclkindex = XGI_GetVCLK2Ptr(ModeNo, ModeIdIndex,
1367 RefreshRateTableIndex, HwDeviceExtension, 1367 RefreshRateTableIndex, HwDeviceExtension,
1368 pVBInfo); 1368 pVBInfo);
@@ -1801,7 +1801,7 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
1801 Ext_CRT2CRTC; 1801 Ext_CRT2CRTC;
1802 } 1802 }
1803 1803
1804 if (pVBInfo->VBInfo & SetCRT2ToLCDA) { 1804 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
1805 if (ModeNo <= 0x13) 1805 if (ModeNo <= 0x13)
1806 tempal = pVBInfo->SModeIDTable[ModeIdIndex]. 1806 tempal = pVBInfo->SModeIDTable[ModeIdIndex].
1807 St_CRT2CRTC2; 1807 St_CRT2CRTC2;
@@ -2128,30 +2128,30 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
2128 return &XGI_CetLCDDes1024x768Data[tempal]; 2128 return &XGI_CetLCDDes1024x768Data[tempal];
2129 break; 2129 break;
2130 case 3: 2130 case 3:
2131 if ((pVBInfo->VBType & VB_XGI301LV) || 2131 if ((pVBInfo->VBType & VB_SIS301LV) ||
2132 (pVBInfo->VBType & VB_XGI302LV)) 2132 (pVBInfo->VBType & VB_SIS302LV))
2133 return &XGI_ExtLCDDLDes1280x1024Data[tempal]; 2133 return &XGI_ExtLCDDLDes1280x1024Data[tempal];
2134 else 2134 else
2135 return &XGI_ExtLCDDes1280x1024Data[tempal]; 2135 return &XGI_ExtLCDDes1280x1024Data[tempal];
2136 break; 2136 break;
2137 case 4: 2137 case 4:
2138 if ((pVBInfo->VBType & VB_XGI301LV) || 2138 if ((pVBInfo->VBType & VB_SIS301LV) ||
2139 (pVBInfo->VBType & VB_XGI302LV)) 2139 (pVBInfo->VBType & VB_SIS302LV))
2140 return &XGI_StLCDDLDes1280x1024Data[tempal]; 2140 return &XGI_StLCDDLDes1280x1024Data[tempal];
2141 else 2141 else
2142 return &XGI_StLCDDes1280x1024Data[tempal]; 2142 return &XGI_StLCDDes1280x1024Data[tempal];
2143 break; 2143 break;
2144 case 5: 2144 case 5:
2145 if ((pVBInfo->VBType & VB_XGI301LV) || 2145 if ((pVBInfo->VBType & VB_SIS301LV) ||
2146 (pVBInfo->VBType & VB_XGI302LV)) 2146 (pVBInfo->VBType & VB_SIS302LV))
2147 return &XGI_CetLCDDLDes1280x1024Data[tempal]; 2147 return &XGI_CetLCDDLDes1280x1024Data[tempal];
2148 else 2148 else
2149 return &XGI_CetLCDDes1280x1024Data[tempal]; 2149 return &XGI_CetLCDDes1280x1024Data[tempal];
2150 break; 2150 break;
2151 case 6: 2151 case 6:
2152 case 7: 2152 case 7:
2153 if ((pVBInfo->VBType & VB_XGI301LV) || 2153 if ((pVBInfo->VBType & VB_SIS301LV) ||
2154 (pVBInfo->VBType & VB_XGI302LV)) 2154 (pVBInfo->VBType & VB_SIS302LV))
2155 return &xgifb_lcddldes_1400x1050[tempal]; 2155 return &xgifb_lcddldes_1400x1050[tempal];
2156 else 2156 else
2157 return &xgifb_lcddes_1400x1050[tempal]; 2157 return &xgifb_lcddes_1400x1050[tempal];
@@ -2163,15 +2163,15 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
2163 return &XGI_CetLCDDes1400x1050Data2[tempal]; 2163 return &XGI_CetLCDDes1400x1050Data2[tempal];
2164 break; 2164 break;
2165 case 10: 2165 case 10:
2166 if ((pVBInfo->VBType & VB_XGI301LV) || 2166 if ((pVBInfo->VBType & VB_SIS301LV) ||
2167 (pVBInfo->VBType & VB_XGI302LV)) 2167 (pVBInfo->VBType & VB_SIS302LV))
2168 return &XGI_ExtLCDDLDes1600x1200Data[tempal]; 2168 return &XGI_ExtLCDDLDes1600x1200Data[tempal];
2169 else 2169 else
2170 return &XGI_ExtLCDDes1600x1200Data[tempal]; 2170 return &XGI_ExtLCDDes1600x1200Data[tempal];
2171 break; 2171 break;
2172 case 11: 2172 case 11:
2173 if ((pVBInfo->VBType & VB_XGI301LV) || 2173 if ((pVBInfo->VBType & VB_SIS301LV) ||
2174 (pVBInfo->VBType & VB_XGI302LV)) 2174 (pVBInfo->VBType & VB_SIS302LV))
2175 return &XGI_StLCDDLDes1600x1200Data[tempal]; 2175 return &XGI_StLCDDLDes1600x1200Data[tempal];
2176 else 2176 else
2177 return &XGI_StLCDDes1600x1200Data[tempal]; 2177 return &XGI_StLCDDes1600x1200Data[tempal];
@@ -2188,15 +2188,15 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
 			break;
 		case 16:
 		case 17:
-			if ((pVBInfo->VBType & VB_XGI301LV) ||
-					(pVBInfo->VBType & VB_XGI302LV))
+			if ((pVBInfo->VBType & VB_SIS301LV) ||
+					(pVBInfo->VBType & VB_SIS302LV))
 				return &xgifb_lcddldes_1280x1024x75[tempal];
 			else
 				return &xgifb_lcddes_1280x1024x75[tempal];
 			break;
 		case 18:
-			if ((pVBInfo->VBType & VB_XGI301LV) ||
-					(pVBInfo->VBType & VB_XGI302LV))
+			if ((pVBInfo->VBType & VB_SIS301LV) ||
+					(pVBInfo->VBType & VB_SIS302LV))
 				return &XGI_CetLCDDLDes1280x1024x75Data[tempal];
 			else
 				return &XGI_CetLCDDes1280x1024x75Data[tempal];
@@ -2364,7 +2364,7 @@ static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 	tempbx = 2;
 
-	if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+	if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
 		LCDPtr = (struct XGI330_LVDSDataStruct *) XGI_GetLcdPtr(tempbx,
 				ModeNo, ModeIdIndex, RefreshRateTableIndex,
 				pVBInfo);
@@ -2374,18 +2374,18 @@ static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
 		pVBInfo->VT = LCDPtr->LCDVT;
 	}
 
-	if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+	if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
 		if (!(pVBInfo->LCDInfo & (SetLCDtoNonExpanding
 				| EnableScalingLCD))) {
-			if ((pVBInfo->LCDResInfo == Panel1024x768) ||
-					(pVBInfo->LCDResInfo == Panel1024x768x75)) {
+			if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
+					(pVBInfo->LCDResInfo == Panel_1024x768x75)) {
 				pVBInfo->HDE = 1024;
 				pVBInfo->VDE = 768;
-			} else if ((pVBInfo->LCDResInfo == Panel1280x1024) ||
-					(pVBInfo->LCDResInfo == Panel1280x1024x75)) {
+			} else if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
+					(pVBInfo->LCDResInfo == Panel_1280x1024x75)) {
 				pVBInfo->HDE = 1280;
 				pVBInfo->VDE = 1024;
-			} else if (pVBInfo->LCDResInfo == Panel1400x1050) {
+			} else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
 				pVBInfo->HDE = 1400;
 				pVBInfo->VDE = 1050;
 			} else {
@@ -2415,7 +2415,7 @@ static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 	tempbx = 0;
 
-	if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+	if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
 		LCDPtr = (struct XGI_LVDSCRT1HDataStruct *)
 				XGI_GetLcdPtr(tempbx, ModeNo,
 					      ModeIdIndex,
@@ -2430,7 +2430,7 @@ static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 	tempbx = 1;
 
-	if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+	if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
 		LCDPtr1 = (struct XGI_LVDSCRT1VDataStruct *)
 				XGI_GetLcdPtr(
 					tempbx,
@@ -2496,7 +2496,7 @@ static unsigned short XGI_GetLCDCapPtr1(struct vb_device_info *pVBInfo)
 	}
 
 	if (tempbl == 0xFF) {
-		pVBInfo->LCDResInfo = Panel1024x768;
+		pVBInfo->LCDResInfo = Panel_1024x768;
 		pVBInfo->LCDTypeInfo = 0;
 		i = 0;
 	}
@@ -2556,15 +2556,15 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 	push2 = tempax;
 
 	/* GetLCDResInfo */
-	if ((pVBInfo->LCDResInfo == Panel1024x768) ||
-			(pVBInfo->LCDResInfo == Panel1024x768x75)) {
+	if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
+			(pVBInfo->LCDResInfo == Panel_1024x768x75)) {
 		tempax = 1024;
 		tempbx = 768;
-	} else if ((pVBInfo->LCDResInfo == Panel1280x1024) ||
-			(pVBInfo->LCDResInfo == Panel1280x1024x75)) {
+	} else if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
+			(pVBInfo->LCDResInfo == Panel_1280x1024x75)) {
 		tempax = 1280;
 		tempbx = 1024;
-	} else if (pVBInfo->LCDResInfo == Panel1400x1050) {
+	} else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
 		tempax = 1400;
 		tempbx = 1050;
 	} else {
@@ -2682,7 +2682,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 	if (tempbx != pVBInfo->VDE)
 		tempax |= 0x40;
 
-	if (pVBInfo->LCDInfo & EnableLVDSDDA)
+	if (pVBInfo->LCDInfo & XGI_EnableLVDSDDA)
 		tempax |= 0x40;
 
 	xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07,
@@ -2768,7 +2768,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 		temp1 = temp1 / push3;
 		tempbx = (unsigned short) (temp1 & 0xffff);
 
-		if (pVBInfo->LCDResInfo == Panel1024x768)
+		if (pVBInfo->LCDResInfo == Panel_1024x768)
 			tempbx -= 1;
 
 		tempax = ((tempbx >> 8) & 0xff) << 3;
@@ -2800,7 +2800,7 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
 {
 	unsigned short index;
 
-	if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+	if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
 		index = XGI_GetLCDCapPtr1(pVBInfo);
 
 		if (pVBInfo->VBInfo & SetCRT2ToLCD) { /* LCDB */
@@ -2834,35 +2834,35 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
 		index = XGI_GetLCDCapPtr(pVBInfo);
 		tempal = pVBInfo->LCDCapList[index].LCD_VCLK;
 
-		if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA))
+		if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
 			return tempal;
 
 		/* {TV} */
 		if (pVBInfo->VBType &
-				(VB_XGI301B |
-				 VB_XGI302B |
-				 VB_XGI301LV |
-				 VB_XGI302LV |
+				(VB_SIS301B |
+				 VB_SIS302B |
+				 VB_SIS301LV |
+				 VB_SIS302LV |
 				 VB_XGI301C)) {
-			if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
-				tempal = HiTVVCLKDIV2;
+			if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+				tempal = TVCLKBASE_315 + HiTVVCLKDIV2;
 				if (!(pVBInfo->TVInfo & RPLLDIV2XO))
-					tempal = HiTVVCLK;
+					tempal = TVCLKBASE_315 + HiTVVCLK;
 				if (pVBInfo->TVInfo & TVSimuMode) {
-					tempal = HiTVSimuVCLK;
+					tempal = TVCLKBASE_315 + HiTVSimuVCLK;
 					if (!(modeflag & Charx8Dot))
-						tempal = HiTVTextVCLK;
+						tempal = TVCLKBASE_315 + HiTVTextVCLK;
 
 				}
 				return tempal;
 			}
 
-			if (pVBInfo->TVInfo & SetYPbPrMode750p) {
-				tempal = YPbPr750pVCLK;
+			if (pVBInfo->TVInfo & TVSetYPbPr750p) {
+				tempal = XGI_YPbPr750pVCLK;
 				return tempal;
 			}
 
-			if (pVBInfo->TVInfo & SetYPbPrMode525p) {
+			if (pVBInfo->TVInfo & TVSetYPbPr525p) {
 				tempal = YPbPr525pVCLK;
 				return tempal;
 			}
@@ -2870,9 +2870,9 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
 			tempal = NTSC1024VCLK;
 
 		if (!(pVBInfo->TVInfo & NTSC1024x768)) {
-			tempal = TVVCLKDIV2;
+			tempal = TVCLKBASE_315 + TVVCLKDIV2;
 			if (!(pVBInfo->TVInfo & RPLLDIV2XO))
-				tempal = TVVCLK;
+				tempal = TVCLKBASE_315 + TVVCLK;
 		}
 
 		if (pVBInfo->VBInfo & SetCRT2ToTV)
@@ -2898,9 +2898,9 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
 static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
 		unsigned char *di_1, struct vb_device_info *pVBInfo)
 {
-	if (pVBInfo->VBType & (VB_XGI301 | VB_XGI301B | VB_XGI302B
-			| VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
-		if ((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) && (pVBInfo->SetFlag
+	if (pVBInfo->VBType & (VB_SIS301 | VB_SIS301B | VB_SIS302B
+			| VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
+		if ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) && (pVBInfo->SetFlag
 			& ProgrammingCRT2)) {
 			*di_0 = (unsigned char) XGI_VBVCLKData[tempal].SR2B;
 			*di_1 = XGI_VBVCLKData[tempal].SR2C;
@@ -2926,7 +2926,7 @@ static void XGI_SetCRT2ECLK(unsigned short ModeNo, unsigned short ModeIdIndex,
 	for (i = 0; i < 4; i++) {
 		xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30,
 				(unsigned short) (0x10 * i));
-		if ((!(pVBInfo->VBInfo & SetCRT2ToLCDA))
+		if ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
 				&& (!(pVBInfo->VBInfo & SetInSlaveMode))) {
 			xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0);
 			xgifb_reg_set(pVBInfo->P3c4, 0x2f, di_1);
@@ -2942,8 +2942,8 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
 {
 	unsigned short tempcl, tempch, temp, tempbl, tempax;
 
-	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
-			| VB_XGI302LV | VB_XGI301C)) {
+	if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+			| VB_SIS302LV | VB_XGI301C)) {
 		tempcl = 0;
 		tempch = 0;
 		temp = xgifb_reg_get(pVBInfo->P3c4, 0x01);
@@ -2987,12 +2987,12 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
 				if (temp & 0x02)
 					tempch |= ActiveSCART;
 
-				if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+				if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
 					if (temp & 0x01)
 						tempch |= ActiveHiTV;
 				}
 
-				if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
+				if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
 					temp = xgifb_reg_get(
 							pVBInfo->Part2Port,
 							0x4d);
@@ -3014,7 +3014,7 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
 		}
 	}
 	temp = tempcl;
-	tempbl = ~ModeSwitchStatus;
+	tempbl = ~XGI_ModeSwitchStatus;
 	xgifb_reg_and_or(pVBInfo->P3d4, 0x3d, tempbl, temp);
 
 	if (!(pVBInfo->SetFlag & ReserveTVOption))
@@ -3029,19 +3029,19 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
 	unsigned short flag, tempbx, tempah;
 
 	if (pVBInfo->IF_DEF_LVDS == 0) {
-		tempbx = VB_XGI302B;
+		tempbx = VB_SIS302B;
 		flag = xgifb_reg_get(pVBInfo->Part4Port, 0x00);
 		if (flag != 0x02) {
-			tempbx = VB_XGI301;
+			tempbx = VB_SIS301;
 			flag = xgifb_reg_get(pVBInfo->Part4Port, 0x01);
 			if (flag >= 0xB0) {
-				tempbx = VB_XGI301B;
+				tempbx = VB_SIS301B;
 				if (flag >= 0xC0) {
 					tempbx = VB_XGI301C;
 					if (flag >= 0xD0) {
-						tempbx = VB_XGI301LV;
+						tempbx = VB_SIS301LV;
 						if (flag >= 0xE0) {
-							tempbx = VB_XGI302LV;
+							tempbx = VB_SIS302LV;
 							tempah = xgifb_reg_get(
 								pVBInfo->Part4Port,
 								0x39);
@@ -3052,7 +3052,7 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
 			}
 		}
 
-		if (tempbx & (VB_XGI301B | VB_XGI302B)) {
+		if (tempbx & (VB_SIS301B | VB_SIS302B)) {
 			flag = xgifb_reg_get(
 					pVBInfo->Part4Port,
 					0x23);
@@ -3078,7 +3078,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
 		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
 
 	pVBInfo->SetFlag = 0;
-	pVBInfo->ModeType = modeflag & ModeInfoFlag;
+	pVBInfo->ModeType = modeflag & ModeTypeMask;
 	tempbx = 0;
 
 	if (pVBInfo->VBType & 0xFFFF) {
@@ -3090,7 +3090,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
 		push = push << 8;
 		tempax = temp << 8;
 		tempbx = tempbx | tempax;
-		temp = (SetCRT2ToDualEdge | SetCRT2ToYPbPr | SetCRT2ToLCDA
+		temp = (SetCRT2ToDualEdge | SetCRT2ToYPbPr525750 | XGI_SetCRT2ToLCDA
 				| SetInSlaveMode | DisableCRT2Display);
 		temp = 0xFFFF ^ temp;
 		tempbx &= temp;
@@ -3103,9 +3103,9 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
 			(HwDeviceExtension->jChipType >= XG40)) {
 		if (pVBInfo->IF_DEF_LVDS == 0) {
 			if (pVBInfo->VBType &
-					(VB_XGI302B |
-					 VB_XGI301LV |
-					 VB_XGI302LV |
+					(VB_SIS302B |
+					 VB_SIS301LV |
+					 VB_SIS302LV |
 					 VB_XGI301C)) {
 				if (temp & EnableDualEdge) {
 					tempbx |=
@@ -3113,7 +3113,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 					if (temp & SetToLCDA)
 						tempbx |=
-							SetCRT2ToLCDA;
+							XGI_SetCRT2ToLCDA;
 				}
 			}
 		}
@@ -3123,8 +3123,8 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
 	if (pVBInfo->IF_DEF_YPbPr == 1) {
 		/* [Billy] 07/05/04 */
 		if (((pVBInfo->IF_DEF_LVDS == 0) &&
-			((pVBInfo->VBType & VB_XGI301LV) ||
-			(pVBInfo->VBType & VB_XGI302LV) ||
+			((pVBInfo->VBType & VB_SIS301LV) ||
+			(pVBInfo->VBType & VB_SIS302LV) ||
 			(pVBInfo->VBType & VB_XGI301C)))) {
 			if (temp & SetYPbPr) {
 				if (pVBInfo->IF_DEF_HiVision == 1) {
@@ -3134,13 +3134,13 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
 							pVBInfo->P3d4,
 							0x35);
 					temp &= YPbPrMode;
-					tempbx |= SetCRT2ToHiVisionTV;
+					tempbx |= SetCRT2ToHiVision;
 
 					if (temp != YPbPrMode1080i) {
 						tempbx &=
-							(~SetCRT2ToHiVisionTV);
+							(~SetCRT2ToHiVision);
 						tempbx |=
-							SetCRT2ToYPbPr;
+							SetCRT2ToYPbPr525750;
 					}
 				}
 			}
@@ -3172,30 +3172,30 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 	if (pVBInfo->IF_DEF_LCDA == 1) { /* Select Display Device */
 		if (!(pVBInfo->VBType & VB_NoLCD)) {
-			if (tempbx & SetCRT2ToLCDA) {
+			if (tempbx & XGI_SetCRT2ToLCDA) {
 				if (tempbx & SetSimuScanMode)
 					tempbx &= (~(SetCRT2ToLCD |
 						   SetCRT2ToRAMDAC |
-						   SwitchToCRT2));
+						   SwitchCRT2));
 				else
 					tempbx &= (~(SetCRT2ToLCD |
 						   SetCRT2ToRAMDAC |
 						   SetCRT2ToTV |
-						   SwitchToCRT2));
+						   SwitchCRT2));
 			}
 		}
 	}
 
 	/* shampoo add */
 	/* for driver abnormal */
-	if (!(tempbx & (SwitchToCRT2 | SetSimuScanMode))) {
+	if (!(tempbx & (SwitchCRT2 | SetSimuScanMode))) {
 		if (pVBInfo->IF_DEF_CRT2Monitor == 1) {
 			if (tempbx & SetCRT2ToRAMDAC) {
 				tempbx &= (0xFF00 |
 					   SetCRT2ToRAMDAC |
-					   SwitchToCRT2 |
+					   SwitchCRT2 |
 					   SetSimuScanMode);
-				tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
+				tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
 			}
 		} else {
 			tempbx &= (~(SetCRT2ToRAMDAC |
@@ -3208,37 +3208,37 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
 		if (tempbx & SetCRT2ToLCD) {
 			tempbx &= (0xFF00 |
 				   SetCRT2ToLCD |
-				   SwitchToCRT2 |
+				   SwitchCRT2 |
 				   SetSimuScanMode);
-			tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
+			tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
 		}
 	}
 
 	if (tempbx & SetCRT2ToSCART) {
 		tempbx &= (0xFF00 |
 			   SetCRT2ToSCART |
-			   SwitchToCRT2 |
+			   SwitchCRT2 |
 			   SetSimuScanMode);
-		tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
+		tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
 	}
 
 	if (pVBInfo->IF_DEF_YPbPr == 1) {
-		if (tempbx & SetCRT2ToYPbPr)
+		if (tempbx & SetCRT2ToYPbPr525750)
 			tempbx &= (0xFF00 |
-				   SwitchToCRT2 |
+				   SwitchCRT2 |
 				   SetSimuScanMode);
 	}
 
 	if (pVBInfo->IF_DEF_HiVision == 1) {
-		if (tempbx & SetCRT2ToHiVisionTV)
+		if (tempbx & SetCRT2ToHiVision)
 			tempbx &= (0xFF00 |
-				   SetCRT2ToHiVisionTV |
-				   SwitchToCRT2 |
+				   SetCRT2ToHiVision |
+				   SwitchCRT2 |
 				   SetSimuScanMode);
 	}
 
 	if (tempax & DisableCRT2Display) { /* Set Display Device Info */
-		if (!(tempbx & (SwitchToCRT2 | SetSimuScanMode)))
+		if (!(tempbx & (SwitchCRT2 | SetSimuScanMode)))
 			tempbx = DisableCRT2Display;
 	}
 
@@ -3246,7 +3246,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
 		if ((!(tempbx & DriverMode)) ||
 				(!(modeflag & CRT2Mode))) {
 			if (pVBInfo->IF_DEF_LCDA == 1) {
-				if (!(tempbx & SetCRT2ToLCDA))
+				if (!(tempbx & XGI_SetCRT2ToLCDA))
 					tempbx |= (SetInSlaveMode |
 						   SetSimuScanMode);
 			}
@@ -3255,9 +3255,9 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
 			/* LCD+TV can't support in slave mode
 			 * (Force LCDA+TV->LCDB) */
 			if ((tempbx & SetInSlaveMode) &&
-					(tempbx & SetCRT2ToLCDA)) {
+					(tempbx & XGI_SetCRT2ToLCDA)) {
 				tempbx ^= (SetCRT2ToLCD |
-					  SetCRT2ToLCDA |
+					  XGI_SetCRT2ToLCDA |
 					  SetCRT2ToDualEdge);
 				pVBInfo->SetFlag |= ReserveTVOption;
 			}
@@ -3291,43 +3291,43 @@ static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
 	if (pVBInfo->VBInfo & SetCRT2ToTV) {
 		temp = xgifb_reg_get(pVBInfo->P3d4, 0x35);
 		tempbx = temp;
-		if (tempbx & SetPALTV) {
+		if (tempbx & TVSetPAL) {
 			tempbx &= (SetCHTVOverScan |
-				   SetPALMTV |
-				   SetPALNTV |
-				   SetPALTV);
-			if (tempbx & SetPALMTV)
+				   TVSetPALM |
+				   TVSetPALN |
+				   TVSetPAL);
+			if (tempbx & TVSetPALM)
 				/* set to NTSC if PAL-M */
-				tempbx &= ~SetPALTV;
+				tempbx &= ~TVSetPAL;
 		} else
 			tempbx &= (SetCHTVOverScan |
-				   SetNTSCJ |
-				   SetPALTV);
+				   TVSetNTSCJ |
+				   TVSetPAL);
 	}
 
 	if (pVBInfo->IF_DEF_LVDS == 0) {
 		if (pVBInfo->VBInfo & SetCRT2ToSCART)
-			tempbx |= SetPALTV;
+			tempbx |= TVSetPAL;
 	}
 
 	if (pVBInfo->IF_DEF_YPbPr == 1) {
-		if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
+		if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
 			index1 = xgifb_reg_get(pVBInfo->P3d4, 0x35);
 			index1 &= YPbPrMode;
 
 			if (index1 == YPbPrMode525i)
-				tempbx |= SetYPbPrMode525i;
+				tempbx |= TVSetYPbPr525i;
 
 			if (index1 == YPbPrMode525p)
-				tempbx = tempbx | SetYPbPrMode525p;
+				tempbx = tempbx | TVSetYPbPr525p;
 			if (index1 == YPbPrMode750p)
-				tempbx = tempbx | SetYPbPrMode750p;
+				tempbx = tempbx | TVSetYPbPr750p;
 		}
 	}
 
 	if (pVBInfo->IF_DEF_HiVision == 1) {
-		if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
-			tempbx = tempbx | SetYPbPrMode1080i | SetPALTV;
+		if (pVBInfo->VBInfo & SetCRT2ToHiVision)
+			tempbx = tempbx | TVSetHiVision | TVSetPAL;
 	}
 
 	if (pVBInfo->IF_DEF_LVDS == 0) { /* shampoo */
@@ -3335,25 +3335,25 @@ static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
 				(!(pVBInfo->VBInfo & SetNotSimuMode)))
 			tempbx |= TVSimuMode;
 
-		if (!(tempbx & SetPALTV) &&
+		if (!(tempbx & TVSetPAL) &&
 				(modeflag > 13) &&
 				(resinfo == 8)) /* NTSC 1024x768, */
 			tempbx |= NTSC1024x768;
 
 		tempbx |= RPLLDIV2XO;
 
-		if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+		if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
 			if (pVBInfo->VBInfo & SetInSlaveMode)
 				tempbx &= (~RPLLDIV2XO);
 		} else {
 			if (tempbx &
-					(SetYPbPrMode525p | SetYPbPrMode750p))
+					(TVSetYPbPr525p | TVSetYPbPr750p))
 				tempbx &= (~RPLLDIV2XO);
 			else if (!(pVBInfo->VBType &
-					(VB_XGI301B |
-					 VB_XGI302B |
-					 VB_XGI301LV |
-					 VB_XGI302LV |
+					(VB_SIS301B |
+					 VB_SIS302B |
+					 VB_SIS301LV |
+					 VB_SIS302LV |
 					 VB_XGI301C))) {
 				if (tempbx & TVSimuMode)
 					tempbx &= (~RPLLDIV2XO);
@@ -3386,13 +3386,13 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
 	tempbx = temp & 0x0F;
 
 	if (tempbx == 0)
-		tempbx = Panel1024x768; /* default */
+		tempbx = Panel_1024x768; /* default */
 
 	/* LCD75 [2003/8/22] Vicent */
-	if ((tempbx == Panel1024x768) || (tempbx == Panel1280x1024)) {
+	if ((tempbx == Panel_1024x768) || (tempbx == Panel_1280x1024)) {
 		if (pVBInfo->VBInfo & DriverMode) {
 			tempax = xgifb_reg_get(pVBInfo->P3d4, 0x33);
-			if (pVBInfo->VBInfo & SetCRT2ToLCDA)
+			if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
 				tempax &= 0x0F;
 			else
 				tempax = tempax >> 4;
@@ -3411,7 +3411,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
 
 	/* End of LCD75 */
 
-	if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)))
+	if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
 		return 0;
 
 	tempbx = 0;
@@ -3427,30 +3427,30 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
 	tempax = pVBInfo->LCDCapList[LCDIdIndex].LCD_Capability;
 
 	if (pVBInfo->IF_DEF_LVDS == 0) { /* shampoo */
-		if (((pVBInfo->VBType & VB_XGI302LV) || (pVBInfo->VBType
-			& VB_XGI301C)) && (tempax & LCDDualLink)) {
+		if (((pVBInfo->VBType & VB_SIS302LV) || (pVBInfo->VBType
+			& VB_XGI301C)) && (tempax & XGI_LCDDualLink)) {
 			tempbx |= SetLCDDualLink;
 		}
 	}
 
 	if (pVBInfo->IF_DEF_LVDS == 0) {
-		if ((pVBInfo->LCDResInfo == Panel1400x1050) && (pVBInfo->VBInfo
+		if ((pVBInfo->LCDResInfo == Panel_1400x1050) && (pVBInfo->VBInfo
 			& SetCRT2ToLCD) && (ModeNo > 0x13) && (resinfo
 			== 9) && (!(tempbx & EnableScalingLCD)))
-			/* set to center in 1280x1024 LCDB for Panel1400x1050 */
+			/* set to center in 1280x1024 LCDB for Panel_1400x1050 */
 			tempbx |= SetLCDtoNonExpanding;
 	}
 
 	if (pVBInfo->IF_DEF_ExpLink == 1) {
 		if (modeflag & HalfDCLK) {
 			if (!(tempbx & SetLCDtoNonExpanding)) {
-				tempbx |= EnableLVDSDDA;
+				tempbx |= XGI_EnableLVDSDDA;
 			} else {
 				if (ModeNo > 0x13) {
 					if (pVBInfo->LCDResInfo
-							== Panel1024x768) {
+							== Panel_1024x768) {
 						if (resinfo == 4) {/* 512x384 */
-							tempbx |= EnableLVDSDDA;
+							tempbx |= XGI_EnableLVDSDDA;
 						}
 					}
 				}
@@ -3460,9 +3460,9 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
 
 	if (pVBInfo->VBInfo & SetInSlaveMode) {
 		if (pVBInfo->VBInfo & SetNotSimuMode)
-			tempbx |= LCDVESATiming;
+			tempbx |= XGI_LCDVESATiming;
 	} else {
-		tempbx |= LCDVESATiming;
+		tempbx |= XGI_LCDVESATiming;
 	}
 
 	pVBInfo->LCDInfo = tempbx;
@@ -3477,7 +3477,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
 					SetInSlaveMode |
 					SetCRT2ToLCD);
 				pVBInfo->VBInfo |=
-					SetCRT2ToLCDA |
+					XGI_SetCRT2ToLCDA |
 					SetCRT2ToDualEdge;
 			}
 		}
@@ -3801,27 +3801,27 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
 
 	if (pVBInfo->VBInfo & SetCRT2ToLCD) {
 		if (pVBInfo->IF_DEF_LVDS == 0) {
-			if (pVBInfo->LCDResInfo == Panel1600x1200) {
-				if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+			if (pVBInfo->LCDResInfo == Panel_1600x1200) {
+				if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
 					if (yres == 1024)
 						yres = 1056;
 				}
 			}
 
-			if (pVBInfo->LCDResInfo == Panel1280x1024) {
+			if (pVBInfo->LCDResInfo == Panel_1280x1024) {
 				if (yres == 400)
 					yres = 405;
 				else if (yres == 350)
 					yres = 360;
 
-				if (pVBInfo->LCDInfo & LCDVESATiming) {
+				if (pVBInfo->LCDInfo & XGI_LCDVESATiming) {
 					if (yres == 360)
 						yres = 375;
 				}
 			}
 
-			if (pVBInfo->LCDResInfo == Panel1024x768) {
-				if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+			if (pVBInfo->LCDResInfo == Panel_1024x768) {
+				if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
 					if (!(pVBInfo->LCDInfo
 						& LCDNonExpanding)) {
 						if (yres == 350)
@@ -3848,7 +3848,7 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
 static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
 {
 
-	if ((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) &&
+	if ((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) &&
 		(pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
 		return 1;
 
@@ -3918,8 +3918,8 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
 {
 	unsigned short tempax = 0, tempbx, modeflag, resinfo;
 
-	struct XGI_LCDDataStruct *LCDPtr = NULL;
-	struct XGI_TVDataStruct *TVPtr = NULL;
+	struct SiS_LCDData *LCDPtr = NULL;
+	struct SiS_TVData *TVPtr = NULL;
 
 	if (ModeNo <= 0x13) {
 		/* si+St_ResInfo */
@@ -3942,8 +3942,8 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 	tempbx = 4;
 
-	if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
-		LCDPtr = (struct XGI_LCDDataStruct *) XGI_GetLcdPtr(tempbx,
+	if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
+		LCDPtr = (struct SiS_LCDData *) XGI_GetLcdPtr(tempbx,
 				ModeNo, ModeIdIndex, RefreshRateTableIndex,
 				pVBInfo);
 
@@ -3954,11 +3954,11 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
 		pVBInfo->HT = LCDPtr->LCDHT;
 		pVBInfo->VT = LCDPtr->LCDVT;
 
-		if (pVBInfo->LCDResInfo == Panel1024x768) {
+		if (pVBInfo->LCDResInfo == Panel_1024x768) {
 			tempax = 1024;
 			tempbx = 768;
 
-			if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+			if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
 				if (pVBInfo->VGAVDE == 357)
 					tempbx = 527;
 				else if (pVBInfo->VGAVDE == 420)
@@ -3971,10 +3971,10 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
 					tempbx = 768;
 			} else
 				tempbx = 768;
-		} else if (pVBInfo->LCDResInfo == Panel1024x768x75) {
+		} else if (pVBInfo->LCDResInfo == Panel_1024x768x75) {
 			tempax = 1024;
 			tempbx = 768;
-		} else if (pVBInfo->LCDResInfo == Panel1280x1024) {
+		} else if (pVBInfo->LCDResInfo == Panel_1280x1024) {
 			tempax = 1280;
 			if (pVBInfo->VGAVDE == 360)
 				tempbx = 768;
@@ -3984,10 +3984,10 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
 				tempbx = 864;
 			else
 				tempbx = 1024;
-		} else if (pVBInfo->LCDResInfo == Panel1280x1024x75) {
+		} else if (pVBInfo->LCDResInfo == Panel_1280x1024x75) {
 			tempax = 1280;
 			tempbx = 1024;
-		} else if (pVBInfo->LCDResInfo == Panel1280x960) {
+		} else if (pVBInfo->LCDResInfo == Panel_1280x960) {
 			tempax = 1280;
 			if (pVBInfo->VGAVDE == 350)
 				tempbx = 700;
@@ -3997,7 +3997,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
 				tempbx = 960;
 			else
 				tempbx = 960;
-		} else if (pVBInfo->LCDResInfo == Panel1400x1050) {
+		} else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
 			tempax = 1400;
 			tempbx = 1050;
 
@@ -4005,10 +4005,10 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
 				tempax = 1280;
 				tempbx = 1024;
 			}
-		} else if (pVBInfo->LCDResInfo == Panel1600x1200) {
+		} else if (pVBInfo->LCDResInfo == Panel_1600x1200) {
 			tempax = 1600;
 			tempbx = 1200; /* alan 10/14/2003 */
-			if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+			if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
 				if (pVBInfo->VGAVDE == 350)
 					tempbx = 875;
 				else if (pVBInfo->VGAVDE == 400)
@@ -4028,7 +4028,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 	if (pVBInfo->VBInfo & (SetCRT2ToTV)) {
 		tempbx = 4;
-		TVPtr = (struct XGI_TVDataStruct *) XGI_GetTVPtr(tempbx,
+		TVPtr = (struct SiS_TVData *) XGI_GetTVPtr(tempbx,
 				ModeNo, ModeIdIndex, RefreshRateTableIndex,
 				pVBInfo);
 
@@ -4041,7 +4041,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
 		pVBInfo->RVBHRS = TVPtr->RVBHRS;
 		pVBInfo->NewFlickerMode = TVPtr->FlickerMode;
 
-		if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+		if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
 			if (resinfo == 0x08)
 				pVBInfo->NewFlickerMode = 0x40;
 			else if (resinfo == 0x09)
@@ -4066,16 +4066,16 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
 				}
 			}
 		}
-		} else if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
-			if (pVBInfo->TVInfo & SetYPbPrMode750p) {
+		} else if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
+			if (pVBInfo->TVInfo & TVSetYPbPr750p) {
 				tempax = YPbPrTV750pHT; /* Ext750pTVHT */
 				tempbx = YPbPrTV750pVT; /* Ext750pTVVT */
 			}
 
-			if (pVBInfo->TVInfo & SetYPbPrMode525p) {
+			if (pVBInfo->TVInfo & TVSetYPbPr525p) {
 				tempax = YPbPrTV525pHT; /* Ext525pTVHT */
 				tempbx = YPbPrTV525pVT; /* Ext525pTVVT */
-			} else if (pVBInfo->TVInfo & SetYPbPrMode525i) {
+			} else if (pVBInfo->TVInfo & TVSetYPbPr525i) {
 				tempax = YPbPrTV525iHT; /* Ext525iTVHT */
 				tempbx = YPbPrTV525iVT; /* Ext525iTVVT */
 				if (pVBInfo->TVInfo & NTSC1024x768)
@@ -4084,7 +4084,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
 		} else {
 			tempax = PALHT;
 			tempbx = PALVT;
-			if (!(pVBInfo->TVInfo & SetPALTV)) {
+			if (!(pVBInfo->TVInfo & TVSetPAL)) {
 				tempax = NTSCHT;
 				tempbx = NTSCVT;
 				if (pVBInfo->TVInfo & NTSC1024x768)
@@ -4109,7 +4109,7 @@ static void XGI_SetCRT2VCLK(unsigned short ModeNo, unsigned short ModeIdIndex,
 	XGI_GetVCLKLen(tempal, &di_0, &di_1, pVBInfo);
 	XGI_GetLCDVCLKPtr(&di_0, &di_1, pVBInfo);
 
-	if (pVBInfo->VBType & VB_XGI301) { /* shampoo 0129 */
+	if (pVBInfo->VBType & VB_SIS301) { /* shampoo 0129 */
 		/* 301 */
 		xgifb_reg_set(pVBInfo->Part4Port, 0x0A, 0x10);
 		xgifb_reg_set(pVBInfo->Part4Port, 0x0B, di_1);
@@ -4139,7 +4139,7 @@ static unsigned short XGI_GetColorDepth(unsigned short ModeNo,
 	else
 		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
 
-	index = (modeflag & ModeInfoFlag) - ModeEGA;
+	index = (modeflag & ModeTypeMask) - ModeEGA;
 
 	if (index < 0)
 		index = 0;
@@ -4435,7 +4435,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 	xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
 	tempcx = 0x08;
 
-	if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV | VB_XGI301C))
+	if (pVBInfo->VBType & (VB_SIS301LV | VB_SIS302LV | VB_XGI301C))
 		modeflag |= Charx8Dot;
 
 	tempax = pVBInfo->VGAHDE; /* 0x04 Horizontal Display End */
@@ -4451,12 +4451,12 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 	temp = (tempbx & 0xFF00) >> 8;
 
 	if (pVBInfo->VBInfo & SetCRT2ToTV) {
-		if (!(pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
-				| VB_XGI302LV | VB_XGI301C)))
+		if (!(pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+				| VB_SIS302LV | VB_XGI301C)))
 			temp += 2;
 
-		if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
-			if (pVBInfo->VBType & VB_XGI301LV) {
+		if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+			if (pVBInfo->VBType & VB_SIS301LV) {
 				if (pVBInfo->VBExtInfo == VB_YPbPr1080i) {
 					if (resinfo == 7)
 						temp -= 2;
@@ -4487,7 +4487,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 	tempax = (tempax / tempcx) - 5;
 	tempcx = tempax; /* 20030401 0x07 horizontal Retrace Start */
-	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+	if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
 		temp = (tempbx & 0x00FF) - 1;
 		if (!(modeflag & HalfDCLK)) {
 			temp -= 6;
@@ -4513,19 +4513,19 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 			}
 		} else if (!(modeflag & HalfDCLK)) {
 			temp -= 4;
-			if (pVBInfo->LCDResInfo != Panel1280x960 &&
+			if (pVBInfo->LCDResInfo != Panel_1280x960 &&
 					pVBInfo->VGAHDE >= 800) {
 				temp -= 7;
 				if (pVBInfo->ModeType == ModeEGA &&
 						pVBInfo->VGAVDE == 1024) {
 					temp += 15;
 					if (pVBInfo->LCDResInfo !=
-						Panel1280x1024)
+						Panel_1280x1024)
 						temp += 7;
 				}
 
 				if (pVBInfo->VGAHDE >= 1280 &&
-						pVBInfo->LCDResInfo != Panel1280x960 &&
+						pVBInfo->LCDResInfo != Panel_1280x960 &&
 						(pVBInfo->LCDInfo & LCDNonExpanding))
 					temp += 28;
 			}
@@ -4619,8 +4619,8 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 	push2 = tempbx;
 
 	if (pVBInfo->VBInfo & SetCRT2ToLCD) {
-		if (pVBInfo->LCDResInfo == Panel1024x768) {
-			if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+		if (pVBInfo->LCDResInfo == Panel_1024x768) {
+			if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
 				if (tempbx == 350)
 					tempbx += 5;
 				if (tempbx == 480)
@@ -4669,19 +4669,19 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 		tempbx += tempax;
 	}
 
-	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
-		if (pVBInfo->VBType & VB_XGI301LV) {
-			if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
+	if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+		if (pVBInfo->VBType & VB_SIS301LV) {
+			if (pVBInfo->TVInfo & TVSetHiVision) {
 				tempbx -= 10;
 			} else {
 				if (pVBInfo->TVInfo & TVSimuMode) {
-					if (pVBInfo->TVInfo & SetPALTV) {
+					if (pVBInfo->TVInfo & TVSetPAL) {
 						if (pVBInfo->VBType &
-						    VB_XGI301LV) {
+						    VB_SIS301LV) {
 							if (!(pVBInfo->TVInfo &
-							    (SetYPbPrMode525p |
-							    SetYPbPrMode750p |
-							    SetYPbPrMode1080i)))
+							    (TVSetYPbPr525p |
+							    TVSetYPbPr750p |
+							    TVSetHiVision)))
 								tempbx += 40;
 						} else {
 							tempbx += 40;
@@ -4694,12 +4694,12 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 				}
 			} else {
 				if (pVBInfo->TVInfo & TVSimuMode) {
-					if (pVBInfo->TVInfo & SetPALTV) {
-						if (pVBInfo->VBType & VB_XGI301LV) {
+					if (pVBInfo->TVInfo & TVSetPAL) {
+						if (pVBInfo->VBType & VB_SIS301LV) {
 							if (!(pVBInfo->TVInfo &
-							    (SetYPbPrMode525p |
-							    SetYPbPrMode750p |
-							    SetYPbPrMode1080i)))
+							    (TVSetYPbPr525p |
+							    TVSetYPbPr750p |
+							    TVSetHiVision)))
 								tempbx += 40;
 						} else {
 							tempbx += 40;
@@ -4713,7 +4713,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 	tempax += tempbx;
 	push1 = tempax; /* push ax */
 
-	if ((pVBInfo->TVInfo & SetPALTV)) {
+	if ((pVBInfo->TVInfo & TVSetPAL)) {
 		if (tempbx <= 513) {
 			if (tempax >= 513)
 				tempbx = 513;
@@ -4761,7 +4761,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 	temp = (temp >> 1) & 0x09;
 
-	if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV | VB_XGI301C))
+	if (pVBInfo->VBType & (VB_SIS301LV | VB_SIS302LV | VB_XGI301C))
 		temp |= 0x01;
 
 	xgifb_reg_set(pVBInfo->Part1Port, 0x16, temp); /* 0x16 SR01 */
@@ -4813,13 +4813,13 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 	if (pVBInfo->VBInfo & SetCRT2ToSCART)
 		tempax |= 0x0200;
 
-	if (!(pVBInfo->TVInfo & SetPALTV))
+	if (!(pVBInfo->TVInfo & TVSetPAL))
 		tempax |= 0x1000;
 
-	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+	if (pVBInfo->VBInfo & SetCRT2ToHiVision)
 		tempax |= 0x0100;
 
-	if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))
+	if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))
 		tempax &= 0xfe00;
 
 	tempax = (tempax & 0xff00) >> 8;
@@ -4827,10 +4827,10 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 	xgifb_reg_set(pVBInfo->Part2Port, 0x0, tempax);
 	TimingPoint = pVBInfo->NTSCTiming;
 
-	if (pVBInfo->TVInfo & SetPALTV)
+	if (pVBInfo->TVInfo & TVSetPAL)
 		TimingPoint = pVBInfo->PALTiming;
 
-	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+	if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
 		TimingPoint = pVBInfo->HiTVExtTiming;
 
 		if (pVBInfo->VBInfo & SetInSlaveMode)
@@ -4843,14 +4843,14 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 			TimingPoint = pVBInfo->HiTVTextTiming;
 	}
 
-	if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
-		if (pVBInfo->TVInfo & SetYPbPrMode525i)
+	if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
+		if (pVBInfo->TVInfo & TVSetYPbPr525i)
 			TimingPoint = pVBInfo->YPbPr525iTiming;
 
-		if (pVBInfo->TVInfo & SetYPbPrMode525p)
+		if (pVBInfo->TVInfo & TVSetYPbPr525p)
 			TimingPoint = pVBInfo->YPbPr525pTiming;
 
-		if (pVBInfo->TVInfo & SetYPbPrMode750p)
+		if (pVBInfo->TVInfo & TVSetYPbPr750p)
 			TimingPoint = pVBInfo->YPbPr750pTiming;
 	}
 
@@ -4868,10 +4868,10 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 	temp &= 0x80;
 	xgifb_reg_and_or(pVBInfo->Part2Port, 0x0A, 0xFF, temp);
 
-	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+	if (pVBInfo->VBInfo & SetCRT2ToHiVision)
 		tempax = 950;
 
-	if (pVBInfo->TVInfo & SetPALTV)
+	if (pVBInfo->TVInfo & TVSetPAL)
 		tempax = 520;
 	else
 		tempax = 440;
@@ -4884,15 +4884,15 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 	temp = (tempax & 0xFF00) >> 8;
 	temp += (unsigned short) TimingPoint[0];
 
-	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
-			| VB_XGI302LV | VB_XGI301C)) {
+	if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+			| VB_SIS302LV | VB_XGI301C)) {
 		if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO
 				| SetCRT2ToSVIDEO | SetCRT2ToSCART
-				| SetCRT2ToYPbPr)) {
+				| SetCRT2ToYPbPr525750)) {
 			tempcx = pVBInfo->VGAHDE;
 			if (tempcx >= 1024) {
 				temp = 0x17; /* NTSC */
-				if (pVBInfo->TVInfo & SetPALTV)
+				if (pVBInfo->TVInfo & TVSetPAL)
 					temp = 0x19; /* PAL */
 			}
 		}
@@ -4903,15 +4903,15 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 	temp = (tempax & 0xFF00) >> 8;
 	temp += TimingPoint[1];
 
-	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
-			| VB_XGI302LV | VB_XGI301C)) {
+	if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+			| VB_SIS302LV | VB_XGI301C)) {
 		if ((pVBInfo->VBInfo & (SetCRT2ToAVIDEO
 				| SetCRT2ToSVIDEO | SetCRT2ToSCART
-				| SetCRT2ToYPbPr))) {
+				| SetCRT2ToYPbPr525750))) {
 			tempcx = pVBInfo->VGAHDE;
 			if (tempcx >= 1024) {
 				temp = 0x1D; /* NTSC */
-				if (pVBInfo->TVInfo & SetPALTV)
+				if (pVBInfo->TVInfo & TVSetPAL)
 					temp = 0x52; /* PAL */
 			}
 		}
@@ -4936,7 +4936,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 	push1 = tempcx; /* push cx */
 	tempcx += 7;
 
-	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+	if (pVBInfo->VBInfo & SetCRT2ToHiVision)
 		tempcx -= 4;
 
 	temp = tempcx & 0x00FF;
@@ -4954,7 +4954,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 	tempbx = push2;
 	tempbx = tempbx + 8;
-	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+	if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
 		tempbx = tempbx - 4;
 		tempcx = tempbx;
 	}
@@ -4970,7 +4970,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 	xgifb_reg_and_or(pVBInfo->Part2Port, 0x28, 0x0F, temp);
 
 	tempcx += 8;
-	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+	if (pVBInfo->VBInfo & SetCRT2ToHiVision)
 		tempcx -= 4;
 
 	temp = tempcx & 0xFF;
@@ -5005,9 +5005,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 	if (pVBInfo->VBInfo & SetCRT2ToTV) {
 		if (pVBInfo->VBType &
-				(VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
+				(VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
 			if (!(pVBInfo->TVInfo &
-					(SetYPbPrMode525p | SetYPbPrMode750p)))
+					(TVSetYPbPr525p | TVSetYPbPr750p)))
 				tempbx = tempbx >> 1;
 		} else
 			tempbx = tempbx >> 1;
@@ -5016,9 +5016,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 		tempbx -= 2;
 	temp = tempbx & 0x00FF;
 
-	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
-		if (pVBInfo->VBType & VB_XGI301LV) {
-			if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
+	if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+		if (pVBInfo->VBType & VB_SIS301LV) {
+			if (pVBInfo->TVInfo & TVSetHiVision) {
 				if (pVBInfo->VBInfo & SetInSlaveMode) {
 					if (ModeNo == 0x2f)
 						temp += 1;
@@ -5037,9 +5037,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 	temp = (tempcx & 0xFF00) >> 8;
 	temp |= ((tempbx & 0xFF00) >> 8) << 6;
 
-	if (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV)) {
-		if (pVBInfo->VBType & VB_XGI301LV) {
-			if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
+	if (!(pVBInfo->VBInfo & SetCRT2ToHiVision)) {
+		if (pVBInfo->VBType & VB_SIS301LV) {
+			if (pVBInfo->TVInfo & TVSetHiVision) {
 				temp |= 0x10;
 
 				if (!(pVBInfo->VBInfo & SetCRT2ToSVIDEO))
@@ -5054,18 +5054,18 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 	xgifb_reg_set(pVBInfo->Part2Port, 0x30, temp);
 
-	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
-			| VB_XGI302LV | VB_XGI301C)) { /* TV gatingno */
+	if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+			| VB_SIS302LV | VB_XGI301C)) { /* TV gatingno */
 		tempbx = pVBInfo->VDE;
 		tempcx = tempbx - 2;
 
 		if (pVBInfo->VBInfo & SetCRT2ToTV) {
-			if (!(pVBInfo->TVInfo & (SetYPbPrMode525p
-					| SetYPbPrMode750p)))
+			if (!(pVBInfo->TVInfo & (TVSetYPbPr525p
+					| TVSetYPbPr750p)))
 				tempbx = tempbx >> 1;
 		}
 
-		if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
+		if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
 			temp = 0;
 			if (tempcx & 0x0400)
 				temp |= 0x20;
@@ -5118,8 +5118,8 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 	/* 301b */
 	tempecx = 8 * 1024;
 
-	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
-			| VB_XGI302LV | VB_XGI301C)) {
+	if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+			| VB_SIS302LV | VB_XGI301C)) {
 		tempecx = tempecx * 8;
 	}
 
@@ -5133,8 +5133,8 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 	tempax = (unsigned short) tempeax;
 
 	/* 301b */
-	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
-			| VB_XGI302LV | VB_XGI301C)) {
+	if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+			| VB_SIS302LV | VB_XGI301C)) {
 		tempcx = ((tempax & 0xFF00) >> 5) >> 8;
 	}
 	/* end 301b */
@@ -5161,7 +5161,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 		temp |= 0x18;
 
 	xgifb_reg_and_or(pVBInfo->Part2Port, 0x46, ~0x1F, temp);
-	if (pVBInfo->TVInfo & SetPALTV) {
+	if (pVBInfo->TVInfo & TVSetPAL) {
 		tempbx = 0x0382;
 		tempcx = 0x007e;
 	} else {
@@ -5178,13 +5178,13 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
5178 temp = temp << 2; 5178 temp = temp << 2;
5179 temp |= ((tempbx & 0xFF00) >> 8) & 0x03; 5179 temp |= ((tempbx & 0xFF00) >> 8) & 0x03;
5180 5180
5181 if (pVBInfo->VBInfo & SetCRT2ToYPbPr) { 5181 if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
5182 temp |= 0x10; 5182 temp |= 0x10;
5183 5183
5184 if (pVBInfo->TVInfo & SetYPbPrMode525p) 5184 if (pVBInfo->TVInfo & TVSetYPbPr525p)
5185 temp |= 0x20; 5185 temp |= 0x20;
5186 5186
5187 if (pVBInfo->TVInfo & SetYPbPrMode750p) 5187 if (pVBInfo->TVInfo & TVSetYPbPr750p)
5188 temp |= 0x60; 5188 temp |= 0x60;
5189 } 5189 }
5190 5190
@@ -5192,7 +5192,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
5192 temp = xgifb_reg_get(pVBInfo->Part2Port, 0x43); /* 301b change */ 5192 temp = xgifb_reg_get(pVBInfo->Part2Port, 0x43); /* 301b change */
5193 xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short) (temp - 3)); 5193 xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short) (temp - 3));
5194 5194
5195 if (!(pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))) { 5195 if (!(pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))) {
5196 if (pVBInfo->TVInfo & NTSC1024x768) { 5196 if (pVBInfo->TVInfo & NTSC1024x768) {
5197 TimingPoint = XGI_NTSC1024AdjTime; 5197 TimingPoint = XGI_NTSC1024AdjTime;
5198 for (i = 0x1c, j = 0; i <= 0x30; i++, j++) { 5198 for (i = 0x1c, j = 0; i <= 0x30; i++, j++) {
@@ -5205,12 +5205,12 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
5205 5205
5206 /* [ycchen] 01/14/03 Modify for 301C PALM Support */ 5206 /* [ycchen] 01/14/03 Modify for 301C PALM Support */
5207 if (pVBInfo->VBType & VB_XGI301C) { 5207 if (pVBInfo->VBType & VB_XGI301C) {
5208 if (pVBInfo->TVInfo & SetPALMTV) 5208 if (pVBInfo->TVInfo & TVSetPALM)
5209 xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x08, 5209 xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x08,
5210 0x08); /* PALM Mode */ 5210 0x08); /* PALM Mode */
5211 } 5211 }
5212 5212
5213 if (pVBInfo->TVInfo & SetPALMTV) { 5213 if (pVBInfo->TVInfo & TVSetPALM) {
5214 tempax = (unsigned char) xgifb_reg_get(pVBInfo->Part2Port, 5214 tempax = (unsigned char) xgifb_reg_get(pVBInfo->Part2Port,
5215 0x01); 5215 0x01);
5216 tempax--; 5216 tempax--;
@@ -5219,7 +5219,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
5219 xgifb_reg_and(pVBInfo->Part2Port, 0x00, 0xEF); 5219 xgifb_reg_and(pVBInfo->Part2Port, 0x00, 0xEF);
5220 } 5220 }
5221 5221
5222 if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) { 5222 if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
5223 if (!(pVBInfo->VBInfo & SetInSlaveMode)) 5223 if (!(pVBInfo->VBInfo & SetInSlaveMode))
5224 xgifb_reg_set(pVBInfo->Part2Port, 0x0B, 0x00); 5224 xgifb_reg_set(pVBInfo->Part2Port, 0x0B, 0x00);
5225 } 5225 }
@@ -5267,11 +5267,11 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
5267 xgifb_reg_and_or(pVBInfo->Part2Port, 0x2B, 0x0F, temp); 5267 xgifb_reg_and_or(pVBInfo->Part2Port, 0x2B, 0x0F, temp);
5268 temp = 0x01; 5268 temp = 0x01;
5269 5269
5270 if (pVBInfo->LCDResInfo == Panel1280x1024) { 5270 if (pVBInfo->LCDResInfo == Panel_1280x1024) {
5271 if (pVBInfo->ModeType == ModeEGA) { 5271 if (pVBInfo->ModeType == ModeEGA) {
5272 if (pVBInfo->VGAHDE >= 1024) { 5272 if (pVBInfo->VGAHDE >= 1024) {
5273 temp = 0x02; 5273 temp = 0x02;
5274 if (pVBInfo->LCDInfo & LCDVESATiming) 5274 if (pVBInfo->LCDInfo & XGI_LCDVESATiming)
5275 temp = 0x01; 5275 temp = 0x01;
5276 } 5276 }
5277 } 5277 }
@@ -5305,14 +5305,14 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
5305 tempah = pVBInfo->LCDResInfo; 5305 tempah = pVBInfo->LCDResInfo;
5306 tempah &= PanelResInfo; 5306 tempah &= PanelResInfo;
5307 5307
5308 if ((tempah == Panel1024x768) || (tempah == Panel1024x768x75)) { 5308 if ((tempah == Panel_1024x768) || (tempah == Panel_1024x768x75)) {
5309 tempbx = 1024; 5309 tempbx = 1024;
5310 tempcx = 768; 5310 tempcx = 768;
5311 } else if ((tempah == Panel1280x1024) || 5311 } else if ((tempah == Panel_1280x1024) ||
5312 (tempah == Panel1280x1024x75)) { 5312 (tempah == Panel_1280x1024x75)) {
5313 tempbx = 1280; 5313 tempbx = 1280;
5314 tempcx = 1024; 5314 tempcx = 1024;
5315 } else if (tempah == Panel1400x1050) { 5315 } else if (tempah == Panel_1400x1050) {
5316 tempbx = 1400; 5316 tempbx = 1400;
5317 tempcx = 1050; 5317 tempcx = 1050;
5318 } else { 5318 } else {
@@ -5375,7 +5375,7 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
5375 tempcx = tempcx >> 1; 5375 tempcx = tempcx >> 1;
5376 } 5376 }
5377 5377
5378 if (pVBInfo->VBType & VB_XGI302LV) 5378 if (pVBInfo->VBType & VB_SIS302LV)
5379 tempbx += 1; 5379 tempbx += 1;
5380 5380
5381 if (pVBInfo->VBType & VB_XGI301C) /* tap4 */ 5381 if (pVBInfo->VBType & VB_XGI301C) /* tap4 */
@@ -5405,7 +5405,7 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
5405 tempcx = tempcx >> 1; 5405 tempcx = tempcx >> 1;
5406 } 5406 }
5407 5407
5408 if (pVBInfo->VBType & VB_XGI302LV) 5408 if (pVBInfo->VBType & VB_SIS302LV)
5409 tempbx += 1; 5409 tempbx += 1;
5410 5410
5411 tempcx += tempbx; 5411 tempcx += tempbx;
@@ -5422,10 +5422,10 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
5422 temp = tempcx & 0x00FF; /* RHSYEXP2S=lcdhre */ 5422 temp = tempcx & 0x00FF; /* RHSYEXP2S=lcdhre */
5423 xgifb_reg_set(pVBInfo->Part2Port, 0x21, temp); 5423 xgifb_reg_set(pVBInfo->Part2Port, 0x21, temp);
5424 5424
5425 if (!(pVBInfo->LCDInfo & LCDVESATiming)) { 5425 if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
5426 if (pVBInfo->VGAVDE == 525) { 5426 if (pVBInfo->VGAVDE == 525) {
5427 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B 5427 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
5428 | VB_XGI301LV | VB_XGI302LV 5428 | VB_SIS301LV | VB_SIS302LV
5429 | VB_XGI301C)) { 5429 | VB_XGI301C)) {
5430 temp = 0xC6; 5430 temp = 0xC6;
5431 } else 5431 } else
@@ -5436,8 +5436,8 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
5436 } 5436 }
5437 5437
5438 if (pVBInfo->VGAVDE == 420) { 5438 if (pVBInfo->VGAVDE == 420) {
5439 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B 5439 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
5440 | VB_XGI301LV | VB_XGI302LV 5440 | VB_SIS301LV | VB_SIS302LV
5441 | VB_XGI301C)) { 5441 | VB_XGI301C)) {
5442 temp = 0x4F; 5442 temp = 0x4F;
5443 } else 5443 } else
@@ -5473,18 +5473,18 @@ static struct XGI301C_Tap4TimingStruct *XGI_GetTap4Ptr(unsigned short tempcx,
5473 else 5473 else
5474 Tap4TimingPtr = xgifb_ntsc_525_tap4_timing; /* NTSC */ 5474 Tap4TimingPtr = xgifb_ntsc_525_tap4_timing; /* NTSC */
5475 5475
5476 if (pVBInfo->TVInfo & SetPALTV) 5476 if (pVBInfo->TVInfo & TVSetPAL)
5477 Tap4TimingPtr = PALTap4Timing; 5477 Tap4TimingPtr = PALTap4Timing;
5478 5478
5479 if (pVBInfo->VBInfo & SetCRT2ToYPbPr) { 5479 if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
5480 if ((pVBInfo->TVInfo & SetYPbPrMode525i) || 5480 if ((pVBInfo->TVInfo & TVSetYPbPr525i) ||
5481 (pVBInfo->TVInfo & SetYPbPrMode525p)) 5481 (pVBInfo->TVInfo & TVSetYPbPr525p))
5482 Tap4TimingPtr = xgifb_ntsc_525_tap4_timing; 5482 Tap4TimingPtr = xgifb_ntsc_525_tap4_timing;
5483 if (pVBInfo->TVInfo & SetYPbPrMode750p) 5483 if (pVBInfo->TVInfo & TVSetYPbPr750p)
5484 Tap4TimingPtr = YPbPr750pTap4Timing; 5484 Tap4TimingPtr = YPbPr750pTap4Timing;
5485 } 5485 }
5486 5486
5487 if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) 5487 if (pVBInfo->VBInfo & SetCRT2ToHiVision)
5488 Tap4TimingPtr = xgifb_tap4_timing; 5488 Tap4TimingPtr = xgifb_tap4_timing;
5489 5489
5490 i = 0; 5490 i = 0;
@@ -5510,7 +5510,7 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
5510 xgifb_reg_set(pVBInfo->Part2Port, i, Tap4TimingPtr->Reg[j]); 5510 xgifb_reg_set(pVBInfo->Part2Port, i, Tap4TimingPtr->Reg[j]);
5511 5511
5512 if ((pVBInfo->VBInfo & SetCRT2ToTV) && 5512 if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
5513 (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV))) { 5513 (!(pVBInfo->VBInfo & SetCRT2ToHiVision))) {
5514 /* Set Vertical Scaling */ 5514 /* Set Vertical Scaling */
5515 Tap4TimingPtr = XGI_GetTap4Ptr(1, pVBInfo); 5515 Tap4TimingPtr = XGI_GetTap4Ptr(1, pVBInfo);
5516 for (i = 0xC0, j = 0; i < 0xFF; i++, j++) 5516 for (i = 0xC0, j = 0; i < 0xFF; i++, j++)
@@ -5520,7 +5520,7 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
5520 } 5520 }
5521 5521
5522 if ((pVBInfo->VBInfo & SetCRT2ToTV) && 5522 if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
5523 (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV))) 5523 (!(pVBInfo->VBInfo & SetCRT2ToHiVision)))
5524 /* Enable V.Scaling */ 5524 /* Enable V.Scaling */
5525 xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x14, 0x04); 5525 xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x14, 0x04);
5526 else 5526 else
@@ -5543,7 +5543,7 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
5543 modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag; 5543 modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
5544 5544
5545 xgifb_reg_set(pVBInfo->Part3Port, 0x00, 0x00); 5545 xgifb_reg_set(pVBInfo->Part3Port, 0x00, 0x00);
5546 if (pVBInfo->TVInfo & SetPALTV) { 5546 if (pVBInfo->TVInfo & TVSetPAL) {
5547 xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA); 5547 xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA);
5548 xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8); 5548 xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8);
5549 } else { 5549 } else {
@@ -5554,15 +5554,15 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
5554 if (!(pVBInfo->VBInfo & SetCRT2ToTV)) 5554 if (!(pVBInfo->VBInfo & SetCRT2ToTV))
5555 return; 5555 return;
5556 5556
5557 if (pVBInfo->TVInfo & SetPALMTV) { 5557 if (pVBInfo->TVInfo & TVSetPALM) {
5558 xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA); 5558 xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA);
5559 xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8); 5559 xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8);
5560 xgifb_reg_set(pVBInfo->Part3Port, 0x3D, 0xA8); 5560 xgifb_reg_set(pVBInfo->Part3Port, 0x3D, 0xA8);
5561 } 5561 }
5562 5562
5563 if ((pVBInfo->VBInfo & SetCRT2ToHiVisionTV) || (pVBInfo->VBInfo 5563 if ((pVBInfo->VBInfo & SetCRT2ToHiVision) || (pVBInfo->VBInfo
5564 & SetCRT2ToYPbPr)) { 5564 & SetCRT2ToYPbPr525750)) {
5565 if (pVBInfo->TVInfo & SetYPbPrMode525i) 5565 if (pVBInfo->TVInfo & TVSetYPbPr525i)
5566 return; 5566 return;
5567 5567
5568 tempdi = pVBInfo->HiTVGroup3Data; 5568 tempdi = pVBInfo->HiTVGroup3Data;
@@ -5572,17 +5572,17 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
5572 tempdi = pVBInfo->HiTVGroup3Text; 5572 tempdi = pVBInfo->HiTVGroup3Text;
5573 } 5573 }
5574 5574
5575 if (pVBInfo->TVInfo & SetYPbPrMode525p) 5575 if (pVBInfo->TVInfo & TVSetYPbPr525p)
5576 tempdi = pVBInfo->Ren525pGroup3; 5576 tempdi = pVBInfo->Ren525pGroup3;
5577 5577
5578 if (pVBInfo->TVInfo & SetYPbPrMode750p) 5578 if (pVBInfo->TVInfo & TVSetYPbPr750p)
5579 tempdi = pVBInfo->Ren750pGroup3; 5579 tempdi = pVBInfo->Ren750pGroup3;
5580 5580
5581 for (i = 0; i <= 0x3E; i++) 5581 for (i = 0; i <= 0x3E; i++)
5582 xgifb_reg_set(pVBInfo->Part3Port, i, tempdi[i]); 5582 xgifb_reg_set(pVBInfo->Part3Port, i, tempdi[i]);
5583 5583
5584 if (pVBInfo->VBType & VB_XGI301C) { /* Macrovision */ 5584 if (pVBInfo->VBType & VB_XGI301C) { /* Macrovision */
5585 if (pVBInfo->TVInfo & SetYPbPrMode525p) 5585 if (pVBInfo->TVInfo & TVSetYPbPr525p)
5586 xgifb_reg_set(pVBInfo->Part3Port, 0x28, 0x3f); 5586 xgifb_reg_set(pVBInfo->Part3Port, 0x28, 0x3f);
5587 } 5587 }
5588 } 5588 }
@@ -5637,7 +5637,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
5637 if (XGI_IsLCDDualLink(pVBInfo)) 5637 if (XGI_IsLCDDualLink(pVBInfo))
5638 tempbx = tempbx >> 1; 5638 tempbx = tempbx >> 1;
5639 5639
5640 if (tempcx & SetCRT2ToHiVisionTV) { 5640 if (tempcx & SetCRT2ToHiVision) {
5641 temp = 0; 5641 temp = 0;
5642 if (tempbx <= 1024) 5642 if (tempbx <= 1024)
5643 temp = 0xA0; 5643 temp = 0xA0;
@@ -5656,7 +5656,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
5656 } 5656 }
5657 } 5657 }
5658 5658
5659 if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p)) { 5659 if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p)) {
5660 temp = 0x00; 5660 temp = 0x00;
5661 if (pVBInfo->VGAHDE == 1280) 5661 if (pVBInfo->VGAHDE == 1280)
5662 temp = 0x40; 5662 temp = 0x40;
@@ -5667,7 +5667,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
5667 5667
5668 tempebx = pVBInfo->VDE; 5668 tempebx = pVBInfo->VDE;
5669 5669
5670 if (tempcx & SetCRT2ToHiVisionTV) { 5670 if (tempcx & SetCRT2ToHiVision) {
5671 if (!(temp & 0xE000)) 5671 if (!(temp & 0xE000))
5672 tempbx = tempbx >> 1; 5672 tempbx = tempbx >> 1;
5673 } 5673 }
@@ -5705,8 +5705,8 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
5705 xgifb_reg_set(pVBInfo->Part4Port, 0x19, temp); 5705 xgifb_reg_set(pVBInfo->Part4Port, 0x19, temp);
5706 5706
5707 /* 301b */ 5707 /* 301b */
5708 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV 5708 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
5709 | VB_XGI302LV | VB_XGI301C)) { 5709 | VB_SIS302LV | VB_XGI301C)) {
5710 temp = 0x0028; 5710 temp = 0x0028;
5711 xgifb_reg_set(pVBInfo->Part4Port, 0x1C, temp); 5711 xgifb_reg_set(pVBInfo->Part4Port, 0x1C, temp);
5712 tempax = pVBInfo->VGAHDE; 5712 tempax = pVBInfo->VGAHDE;
@@ -5735,7 +5735,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
5735 temp = (tempax & 0x00FF); 5735 temp = (tempax & 0x00FF);
5736 xgifb_reg_set(pVBInfo->Part4Port, 0x1D, temp); 5736 xgifb_reg_set(pVBInfo->Part4Port, 0x1D, temp);
5737 5737
5738 if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVisionTV)) { 5738 if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVision)) {
5739 if (pVBInfo->VGAHDE > 800) 5739 if (pVBInfo->VGAHDE > 800)
5740 xgifb_reg_or(pVBInfo->Part4Port, 0x1E, 0x08); 5740 xgifb_reg_or(pVBInfo->Part4Port, 0x1E, 0x08);
5741 5741
@@ -5744,8 +5744,8 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
5744 5744
5745 if (pVBInfo->VBInfo & SetCRT2ToTV) { 5745 if (pVBInfo->VBInfo & SetCRT2ToTV) {
5746 if (!(pVBInfo->TVInfo & (NTSC1024x768 5746 if (!(pVBInfo->TVInfo & (NTSC1024x768
5747 | SetYPbPrMode525p | SetYPbPrMode750p 5747 | TVSetYPbPr525p | TVSetYPbPr750p
5748 | SetYPbPrMode1080i))) { 5748 | TVSetHiVision))) {
5749 temp |= 0x0001; 5749 temp |= 0x0001;
5750 if ((pVBInfo->VBInfo & SetInSlaveMode) 5750 if ((pVBInfo->VBInfo & SetInSlaveMode)
5751 && (!(pVBInfo->TVInfo 5751 && (!(pVBInfo->TVInfo
@@ -5785,7 +5785,7 @@ static void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex,
5785 Pdata = pVBInfo->Part5Port + 1; 5785 Pdata = pVBInfo->Part5Port + 1;
5786 if (pVBInfo->ModeType == ModeVGA) { 5786 if (pVBInfo->ModeType == ModeVGA) {
5787 if (!(pVBInfo->VBInfo & (SetInSlaveMode | LoadDACFlag 5787 if (!(pVBInfo->VBInfo & (SetInSlaveMode | LoadDACFlag
5788 | CRT2DisplayFlag))) { 5788 | DisableCRT2Display))) {
5789 XGINew_EnableCRT2(pVBInfo); 5789 XGINew_EnableCRT2(pVBInfo);
5790 } 5790 }
5791 } 5791 }
@@ -6074,7 +6074,7 @@ static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
6074 tempax = pVBInfo->VBInfo; 6074 tempax = pVBInfo->VBInfo;
6075 if (tempax & SetCRT2ToDualEdge) 6075 if (tempax & SetCRT2ToDualEdge)
6076 return 0; 6076 return 0;
6077 else if (tempax & (DisableCRT2Display | SwitchToCRT2 | SetSimuScanMode)) 6077 else if (tempax & (DisableCRT2Display | SwitchCRT2 | SetSimuScanMode))
6078 return 1; 6078 return 1;
6079 6079
6080 return 0; 6080 return 0;
@@ -6140,15 +6140,15 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
6140{ 6140{
6141 unsigned short tempah = 0; 6141 unsigned short tempah = 0;
6142 6142
6143 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV 6143 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
6144 | VB_XGI302LV | VB_XGI301C)) { 6144 | VB_SIS302LV | VB_XGI301C)) {
6145 tempah = 0x3F; 6145 tempah = 0x3F;
6146 if (!(pVBInfo->VBInfo & 6146 if (!(pVBInfo->VBInfo &
6147 (DisableCRT2Display | SetSimuScanMode))) { 6147 (DisableCRT2Display | SetSimuScanMode))) {
6148 if (pVBInfo->VBInfo & SetCRT2ToLCDA) { 6148 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
6149 if (pVBInfo->VBInfo & SetCRT2ToDualEdge) { 6149 if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
6150 tempah = 0x7F; /* Disable Channel A */ 6150 tempah = 0x7F; /* Disable Channel A */
6151 if (!(pVBInfo->VBInfo & SetCRT2ToLCDA)) 6151 if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
6152 /* Disable Channel B */ 6152 /* Disable Channel B */
6153 tempah = 0xBF; 6153 tempah = 0xBF;
6154 6154
@@ -6166,8 +6166,8 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
6166 /* disable part4_1f */ 6166 /* disable part4_1f */
6167 xgifb_reg_and(pVBInfo->Part4Port, 0x1F, tempah); 6167 xgifb_reg_and(pVBInfo->Part4Port, 0x1F, tempah);
6168 6168
6169 if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) { 6169 if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
6170 if (((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA))) 6170 if (((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
6171 || (XGI_DisableChISLCD(pVBInfo)) 6171 || (XGI_DisableChISLCD(pVBInfo))
6172 || (XGI_IsLCDON(pVBInfo))) 6172 || (XGI_IsLCDON(pVBInfo)))
6173 /* LVDS Driver power down */ 6173 /* LVDS Driver power down */
@@ -6175,16 +6175,16 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
6175 } 6175 }
6176 6176
6177 if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo 6177 if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo
6178 & (DisableCRT2Display | SetCRT2ToLCDA 6178 & (DisableCRT2Display | XGI_SetCRT2ToLCDA
6179 | SetSimuScanMode))) { 6179 | SetSimuScanMode))) {
6180 if (pVBInfo->SetFlag & GatingCRT) 6180 if (pVBInfo->SetFlag & GatingCRT)
6181 XGI_EnableGatingCRT(HwDeviceExtension, pVBInfo); 6181 XGI_EnableGatingCRT(HwDeviceExtension, pVBInfo);
6182 XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo); 6182 XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
6183 } 6183 }
6184 6184
6185 if (pVBInfo->VBInfo & SetCRT2ToLCDA) { 6185 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
6186 if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo 6186 if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo
6187 & SetCRT2ToLCDA)) 6187 & XGI_SetCRT2ToLCDA))
6188 /* Power down */ 6188 /* Power down */
6189 xgifb_reg_and(pVBInfo->Part1Port, 0x1e, 0xdf); 6189 xgifb_reg_and(pVBInfo->Part1Port, 0x1e, 0xdf);
6190 } 6190 }
@@ -6198,7 +6198,7 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
6198 if ((pVBInfo->SetFlag & DisableChB) || 6198 if ((pVBInfo->SetFlag & DisableChB) ||
6199 (pVBInfo->VBInfo & 6199 (pVBInfo->VBInfo &
6200 (DisableCRT2Display | SetSimuScanMode)) || 6200 (DisableCRT2Display | SetSimuScanMode)) ||
6201 ((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) && 6201 ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) &&
6202 (pVBInfo->VBInfo & 6202 (pVBInfo->VBInfo &
6203 (SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV)))) 6203 (SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))))
6204 xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80); 6204 xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
@@ -6206,7 +6206,7 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
6206 if ((pVBInfo->SetFlag & DisableChB) || 6206 if ((pVBInfo->SetFlag & DisableChB) ||
6207 (pVBInfo->VBInfo & 6207 (pVBInfo->VBInfo &
6208 (DisableCRT2Display | SetSimuScanMode)) || 6208 (DisableCRT2Display | SetSimuScanMode)) ||
6209 (!(pVBInfo->VBInfo & SetCRT2ToLCDA)) || 6209 (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) ||
6210 (pVBInfo->VBInfo & 6210 (pVBInfo->VBInfo &
6211 (SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))) { 6211 (SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))) {
6212 /* save Part1 index 0 */ 6212 /* save Part1 index 0 */
@@ -6227,7 +6227,7 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
6227 xgifb_reg_and(pVBInfo->P3c4, 0x32, 0xDF); 6227 xgifb_reg_and(pVBInfo->P3c4, 0x32, 0xDF);
6228 } 6228 }
6229 6229
6230 if (pVBInfo->VBInfo & (DisableCRT2Display | SetCRT2ToLCDA 6230 if (pVBInfo->VBInfo & (DisableCRT2Display | XGI_SetCRT2ToLCDA
6231 | SetSimuScanMode)) 6231 | SetSimuScanMode))
6232 XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo); 6232 XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
6233 } 6233 }
@@ -6254,15 +6254,15 @@ static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
6254{ 6254{
6255 unsigned short tempbx = 0; 6255 unsigned short tempbx = 0;
6256 6256
6257 if (pVBInfo->TVInfo & SetPALTV) 6257 if (pVBInfo->TVInfo & TVSetPAL)
6258 tempbx = 2; 6258 tempbx = 2;
6259 if (pVBInfo->TVInfo & SetYPbPrMode1080i) 6259 if (pVBInfo->TVInfo & TVSetHiVision)
6260 tempbx = 4; 6260 tempbx = 4;
6261 if (pVBInfo->TVInfo & SetYPbPrMode525i) 6261 if (pVBInfo->TVInfo & TVSetYPbPr525i)
6262 tempbx = 6; 6262 tempbx = 6;
6263 if (pVBInfo->TVInfo & SetYPbPrMode525p) 6263 if (pVBInfo->TVInfo & TVSetYPbPr525p)
6264 tempbx = 8; 6264 tempbx = 8;
6265 if (pVBInfo->TVInfo & SetYPbPrMode750p) 6265 if (pVBInfo->TVInfo & TVSetYPbPr750p)
6266 tempbx = 10; 6266 tempbx = 10;
6267 if (pVBInfo->TVInfo & TVSimuMode) 6267 if (pVBInfo->TVInfo & TVSimuMode)
6268 tempbx++; 6268 tempbx++;
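With the renamed flags, the index encoding of XGI_GetTVPtrIndex reads cleanly: two table slots per TV standard, with the odd slot taken when the bridge simulates TV timings. Restated compactly (a sketch of the logic above, not the driver's exact function):

        static unsigned short tv_ptr_index(unsigned short tvinfo)
        {
                unsigned short idx = 0;         /* NTSC default */

                if (tvinfo & TVSetPAL)
                        idx = 2;
                if (tvinfo & TVSetHiVision)
                        idx = 4;
                if (tvinfo & TVSetYPbPr525i)
                        idx = 6;
                if (tvinfo & TVSetYPbPr525p)
                        idx = 8;
                if (tvinfo & TVSetYPbPr750p)
                        idx = 10;
                if (tvinfo & TVSimuMode)
                        idx++;                  /* simulated-timing variant */
                return idx;
        }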
@@ -6293,23 +6293,23 @@ static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
6293 *tempcl = 0; 6293 *tempcl = 0;
6294 *tempch = 0; 6294 *tempch = 0;
6295 6295
6296 if (pVBInfo->TVInfo & SetPALTV) 6296 if (pVBInfo->TVInfo & TVSetPAL)
6297 *tempbx = 1; 6297 *tempbx = 1;
6298 6298
6299 if (pVBInfo->TVInfo & SetPALMTV) 6299 if (pVBInfo->TVInfo & TVSetPALM)
6300 *tempbx = 2; 6300 *tempbx = 2;
6301 6301
6302 if (pVBInfo->TVInfo & SetPALNTV) 6302 if (pVBInfo->TVInfo & TVSetPALN)
6303 *tempbx = 3; 6303 *tempbx = 3;
6304 6304
6305 if (pVBInfo->TVInfo & NTSC1024x768) { 6305 if (pVBInfo->TVInfo & NTSC1024x768) {
6306 *tempbx = 4; 6306 *tempbx = 4;
6307 if (pVBInfo->TVInfo & SetPALMTV) 6307 if (pVBInfo->TVInfo & TVSetPALM)
6308 *tempbx = 5; 6308 *tempbx = 5;
6309 } 6309 }
6310 6310
6311 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV 6311 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
6312 | VB_XGI302LV | VB_XGI301C)) { 6312 | VB_SIS302LV | VB_XGI301C)) {
6313 if ((!(pVBInfo->VBInfo & SetInSlaveMode)) || (pVBInfo->TVInfo 6313 if ((!(pVBInfo->VBInfo & SetInSlaveMode)) || (pVBInfo->TVInfo
6314 & TVSimuMode)) { 6314 & TVSimuMode)) {
6315 *tempbx += 8; 6315 *tempbx += 8;
@@ -6317,8 +6317,8 @@ static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
6317 } 6317 }
6318 } 6318 }
6319 6319
6320 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV 6320 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
6321 | VB_XGI302LV | VB_XGI301C)) 6321 | VB_SIS302LV | VB_XGI301C))
6322 (*tempch)++; 6322 (*tempch)++;
6323} 6323}
6324 6324
@@ -6328,9 +6328,9 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
6328 6328
6329 unsigned char tempah, tempbl, tempbh; 6329 unsigned char tempah, tempbl, tempbh;
6330 6330
6331 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV 6331 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
6332 | VB_XGI302LV | VB_XGI301C)) { 6332 | VB_SIS302LV | VB_XGI301C)) {
6333 if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA 6333 if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA
6334 | SetCRT2ToTV | SetCRT2ToRAMDAC)) { 6334 | SetCRT2ToTV | SetCRT2ToRAMDAC)) {
6335 tempbl = 0; 6335 tempbl = 0;
6336 tempbh = 0; 6336 tempbh = 0;
@@ -6338,20 +6338,20 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
6338 index = XGI_GetTVPtrIndex(pVBInfo); /* Get TV Delay */ 6338 index = XGI_GetTVPtrIndex(pVBInfo); /* Get TV Delay */
6339 tempbl = pVBInfo->XGI_TVDelayList[index]; 6339 tempbl = pVBInfo->XGI_TVDelayList[index];
6340 6340
6341 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B 6341 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
6342 | VB_XGI301LV | VB_XGI302LV 6342 | VB_SIS301LV | VB_SIS302LV
6343 | VB_XGI301C)) 6343 | VB_XGI301C))
6344 tempbl = pVBInfo->XGI_TVDelayList2[index]; 6344 tempbl = pVBInfo->XGI_TVDelayList2[index];
6345 6345
6346 if (pVBInfo->VBInfo & SetCRT2ToDualEdge) 6346 if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
6347 tempbl = tempbl >> 4; 6347 tempbl = tempbl >> 4;
6348 if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { 6348 if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
6349 /* Get LCD Delay */ 6349 /* Get LCD Delay */
6350 index = XGI_GetLCDCapPtr(pVBInfo); 6350 index = XGI_GetLCDCapPtr(pVBInfo);
6351 tempbh = pVBInfo->LCDCapList[index]. 6351 tempbh = pVBInfo->LCDCapList[index].
6352 LCD_DelayCompensation; 6352 LCD_DelayCompensation;
6353 6353
6354 if (!(pVBInfo->VBInfo & SetCRT2ToLCDA)) 6354 if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
6355 tempbl = tempbh; 6355 tempbl = tempbh;
6356 } 6356 }
6357 6357
@@ -6365,7 +6365,7 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
6365 tempah |= tempbl; 6365 tempah |= tempbl;
6366 } 6366 }
6367 6367
6368 if (pVBInfo->VBInfo & SetCRT2ToLCDA) { /* Channel A */ 6368 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) { /* Channel A */
6369 tempah &= 0x0F; 6369 tempah &= 0x0F;
6370 tempah |= tempbh; 6370 tempah |= tempbh;
6371 } 6371 }
@@ -6475,13 +6475,13 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
6475 tempcx = pVBInfo->LCDCapList[XGI_GetLCDCapPtr(pVBInfo)].LCD_Capability; 6475 tempcx = pVBInfo->LCDCapList[XGI_GetLCDCapPtr(pVBInfo)].LCD_Capability;
6476 6476
6477 if (pVBInfo->VBType & 6477 if (pVBInfo->VBType &
6478 (VB_XGI301B | 6478 (VB_SIS301B |
6479 VB_XGI302B | 6479 VB_SIS302B |
6480 VB_XGI301LV | 6480 VB_SIS301LV |
6481 VB_XGI302LV | 6481 VB_SIS302LV |
6482 VB_XGI301C)) { /* 301LV/302LV only */ 6482 VB_XGI301C)) { /* 301LV/302LV only */
6483 if (pVBInfo->VBType & 6483 if (pVBInfo->VBType &
6484 (VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) { 6484 (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
6485 /* Set 301LV Capability */ 6485 /* Set 301LV Capability */
6486 xgifb_reg_set(pVBInfo->Part4Port, 0x24, 6486 xgifb_reg_set(pVBInfo->Part4Port, 0x24,
6487 (unsigned char) (tempcx & 0x1F)); 6487 (unsigned char) (tempcx & 0x1F));
@@ -6493,14 +6493,14 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
6493 | EnablePLLSPLOW)) >> 8)); 6493 | EnablePLLSPLOW)) >> 8));
6494 } 6494 }
6495 6495
6496 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV 6496 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
6497 | VB_XGI302LV | VB_XGI301C)) { 6497 | VB_SIS302LV | VB_XGI301C)) {
6498 if (pVBInfo->VBInfo & SetCRT2ToLCD) 6498 if (pVBInfo->VBInfo & SetCRT2ToLCD)
6499 XGI_SetLCDCap_B(tempcx, pVBInfo); 6499 XGI_SetLCDCap_B(tempcx, pVBInfo);
6500 else if (pVBInfo->VBInfo & SetCRT2ToLCDA) 6500 else if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
6501 XGI_SetLCDCap_A(tempcx, pVBInfo); 6501 XGI_SetLCDCap_A(tempcx, pVBInfo);
6502 6502
6503 if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) { 6503 if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
6504 if (tempcx & EnableSpectrum) 6504 if (tempcx & EnableSpectrum)
6505 SetSpectrum(pVBInfo); 6505 SetSpectrum(pVBInfo);
6506 } 6506 }
@@ -6524,7 +6524,7 @@ static void XGI_SetAntiFlicker(unsigned short ModeNo,
6524 6524
6525 unsigned char tempah; 6525 unsigned char tempah;
6526 6526
6527 if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p)) 6527 if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))
6528 return; 6528 return;
6529 6529
6530 tempbx = XGI_GetTVPtrIndex(pVBInfo); 6530 tempbx = XGI_GetTVPtrIndex(pVBInfo);
@@ -6648,8 +6648,8 @@ static void XGI_SetYFilter(unsigned short ModeNo, unsigned short ModeIdIndex,
6648 xgifb_reg_set(pVBInfo->Part2Port, 0x38, filterPtr[index++]); 6648 xgifb_reg_set(pVBInfo->Part2Port, 0x38, filterPtr[index++]);
6649 } 6649 }
6650 6650
6651 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV 6651 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
6652 | VB_XGI302LV | VB_XGI301C)) { 6652 | VB_SIS302LV | VB_XGI301C)) {
6653 xgifb_reg_set(pVBInfo->Part2Port, 0x48, filterPtr[index++]); 6653 xgifb_reg_set(pVBInfo->Part2Port, 0x48, filterPtr[index++]);
6654 xgifb_reg_set(pVBInfo->Part2Port, 0x49, filterPtr[index++]); 6654 xgifb_reg_set(pVBInfo->Part2Port, 0x49, filterPtr[index++]);
6655 xgifb_reg_set(pVBInfo->Part2Port, 0x4A, filterPtr[index++]); 6655 xgifb_reg_set(pVBInfo->Part2Port, 0x4A, filterPtr[index++]);
@@ -6668,7 +6668,7 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
6668{ 6668{
6669 XGI_SetDelayComp(pVBInfo); 6669 XGI_SetDelayComp(pVBInfo);
6670 6670
6671 if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) 6671 if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
6672 XGI_SetLCDCap(pVBInfo); 6672 XGI_SetLCDCap(pVBInfo);
6673 6673
6674 if (pVBInfo->VBInfo & SetCRT2ToTV) { 6674 if (pVBInfo->VBInfo & SetCRT2ToTV) {
@@ -6676,7 +6676,7 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
6676 XGI_SetYFilter(ModeNo, ModeIdIndex, pVBInfo); 6676 XGI_SetYFilter(ModeNo, ModeIdIndex, pVBInfo);
6677 XGI_SetAntiFlicker(ModeNo, ModeIdIndex, pVBInfo); 6677 XGI_SetAntiFlicker(ModeNo, ModeIdIndex, pVBInfo);
6678 6678
6679 if (pVBInfo->VBType & VB_XGI301) 6679 if (pVBInfo->VBType & VB_SIS301)
6680 XGI_SetEdgeEnhance(ModeNo, ModeIdIndex, pVBInfo); 6680 XGI_SetEdgeEnhance(ModeNo, ModeIdIndex, pVBInfo);
6681 } 6681 }
6682} 6682}
@@ -6732,15 +6732,15 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
6732 tempbl = 0xff; 6732 tempbl = 0xff;
6733 6733
6734 if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV 6734 if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV
6735 | SetCRT2ToLCD | SetCRT2ToLCDA)) { 6735 | SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
6736 if ((pVBInfo->VBInfo & SetCRT2ToLCDA) && 6736 if ((pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
6737 (!(pVBInfo->VBInfo & SetSimuScanMode))) { 6737 (!(pVBInfo->VBInfo & SetSimuScanMode))) {
6738 tempbl &= 0xf7; 6738 tempbl &= 0xf7;
6739 tempah |= 0x01; 6739 tempah |= 0x01;
6740 xgifb_reg_and_or(pVBInfo->Part1Port, 0x2e, 6740 xgifb_reg_and_or(pVBInfo->Part1Port, 0x2e,
6741 tempbl, tempah); 6741 tempbl, tempah);
6742 } else { 6742 } else {
6743 if (pVBInfo->VBInfo & SetCRT2ToLCDA) { 6743 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
6744 tempbl &= 0xf7; 6744 tempbl &= 0xf7;
6745 tempah |= 0x01; 6745 tempah |= 0x01;
6746 } 6746 }
@@ -6780,7 +6780,7 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
6780 } 6780 }
6781 6781
6782 if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV | SetCRT2ToLCD 6782 if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV | SetCRT2ToLCD
6783 | SetCRT2ToLCDA)) { 6783 | XGI_SetCRT2ToLCDA)) {
6784 tempah &= (~0x08); 6784 tempah &= (~0x08);
6785 if ((pVBInfo->ModeType == ModeVGA) && (!(pVBInfo->VBInfo 6785 if ((pVBInfo->ModeType == ModeVGA) && (!(pVBInfo->VBInfo
6786 & SetInSlaveMode))) { 6786 & SetInSlaveMode))) {
@@ -6807,24 +6807,24 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
6807 tempah |= 0x40; 6807 tempah |= 0x40;
6808 } 6808 }
6809 6809
6810 if ((pVBInfo->LCDResInfo == Panel1280x1024) 6810 if ((pVBInfo->LCDResInfo == Panel_1280x1024)
6811 || (pVBInfo->LCDResInfo == Panel1280x1024x75)) 6811 || (pVBInfo->LCDResInfo == Panel_1280x1024x75))
6812 tempah |= 0x80; 6812 tempah |= 0x80;
6813 6813
6814 if (pVBInfo->LCDResInfo == Panel1280x960) 6814 if (pVBInfo->LCDResInfo == Panel_1280x960)
6815 tempah |= 0x80; 6815 tempah |= 0x80;
6816 6816
6817 xgifb_reg_set(pVBInfo->Part4Port, 0x0C, tempah); 6817 xgifb_reg_set(pVBInfo->Part4Port, 0x0C, tempah);
6818 } 6818 }
6819 6819
6820 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV 6820 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
6821 | VB_XGI302LV | VB_XGI301C)) { 6821 | VB_SIS302LV | VB_XGI301C)) {
6822 tempah = 0; 6822 tempah = 0;
6823 tempbl = 0xfb; 6823 tempbl = 0xfb;
6824 6824
6825 if (pVBInfo->VBInfo & SetCRT2ToDualEdge) { 6825 if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
6826 tempbl = 0xff; 6826 tempbl = 0xff;
6827 if (pVBInfo->VBInfo & SetCRT2ToLCDA) 6827 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
6828 tempah |= 0x04; /* shampoo 0129 */ 6828 tempah |= 0x04; /* shampoo 0129 */
6829 } 6829 }
6830 6830
@@ -6849,7 +6849,7 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
6849 6849
6850 tempah = 0; 6850 tempah = 0;
6851 tempbl = 0x7f; 6851 tempbl = 0x7f;
6852 if (!(pVBInfo->VBInfo & SetCRT2ToLCDA)) { 6852 if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) {
6853 tempbl = 0xff; 6853 tempbl = 0xff;
6854 if (!(pVBInfo->VBInfo & SetCRT2ToDualEdge)) 6854 if (!(pVBInfo->VBInfo & SetCRT2ToDualEdge))
6855 tempah |= 0x80; 6855 tempah |= 0x80;
@@ -6857,7 +6857,7 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
6857 6857
6858 xgifb_reg_and_or(pVBInfo->Part4Port, 0x23, tempbl, tempah); 6858 xgifb_reg_and_or(pVBInfo->Part4Port, 0x23, tempbl, tempah);
6859 6859
6860 if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) { 6860 if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
6861 if (pVBInfo->LCDInfo & SetLCDDualLink) { 6861 if (pVBInfo->LCDInfo & SetLCDDualLink) {
6862 xgifb_reg_or(pVBInfo->Part4Port, 0x27, 0x20); 6862 xgifb_reg_or(pVBInfo->Part4Port, 0x27, 0x20);
6863 xgifb_reg_or(pVBInfo->Part4Port, 0x34, 0x10); 6863 xgifb_reg_or(pVBInfo->Part4Port, 0x34, 0x10);
@@ -6872,7 +6872,7 @@ static void XGI_CloseCRTC(struct xgi_hw_device_info *HwDeviceExtension,
6872 6872
6873 tempbx = 0; 6873 tempbx = 0;
6874 6874
6875 if (pVBInfo->VBInfo & SetCRT2ToLCDA) 6875 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
6876 tempbx = 0x08A0; 6876 tempbx = 0x08A0;
6877 6877
6878} 6878}
@@ -6937,10 +6937,10 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
6937 index--; 6937 index--;
6938 6938
6939 if (pVBInfo->SetFlag & ProgrammingCRT2) { 6939 if (pVBInfo->SetFlag & ProgrammingCRT2) {
6940 if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { 6940 if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
6941 if (pVBInfo->IF_DEF_LVDS == 0) { 6941 if (pVBInfo->IF_DEF_LVDS == 0) {
6942 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B 6942 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
6943 | VB_XGI301LV | VB_XGI302LV 6943 | VB_SIS301LV | VB_SIS302LV
6944 | VB_XGI301C)) 6944 | VB_XGI301C))
6945 /* 301b */ 6945 /* 301b */
6946 temp = LCDARefreshIndex[ 6946 temp = LCDARefreshIndex[
@@ -6983,7 +6983,7 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
6983 break; 6983 break;
6984 temp = pVBInfo->RefIndex[RefreshRateTableIndex + i]. 6984 temp = pVBInfo->RefIndex[RefreshRateTableIndex + i].
6985 Ext_InfoFlag; 6985 Ext_InfoFlag;
6986 temp &= ModeInfoFlag; 6986 temp &= ModeTypeMask;
6987 if (temp < pVBInfo->ModeType) 6987 if (temp < pVBInfo->ModeType)
6988 break; 6988 break;
6989 i++; 6989 i++;
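ModeTypeMask here extracts the colour-depth class from Ext_InfoFlag so a refresh-rate entry can be rejected when it is shallower than the requested mode. A minimal sketch of that test (the helper name is hypothetical; the fields are the ones used in the hunk above):

        /* Sketch: accept a RefIndex entry only if its type class
         * meets the requested ModeType, mirroring the masked
         * comparison in XGI_GetRatePtrCRT2. */
        static int rate_meets_mode_type(struct vb_device_info *pVBInfo,
                                        unsigned short idx)
        {
                unsigned short t = pVBInfo->RefIndex[idx].Ext_InfoFlag;

                return (t & ModeTypeMask) >= pVBInfo->ModeType;
        }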
@@ -7163,8 +7163,8 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
7163{ 7163{
7164 unsigned short tempah; 7164 unsigned short tempah;
7165 7165
7166 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV 7166 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
7167 | VB_XGI302LV | VB_XGI301C)) { 7167 | VB_SIS302LV | VB_XGI301C)) {
7168 if (!(pVBInfo->SetFlag & DisableChA)) { 7168 if (!(pVBInfo->SetFlag & DisableChA)) {
7169 if (pVBInfo->SetFlag & EnableChA) { 7169 if (pVBInfo->SetFlag & EnableChA) {
7170 /* Power on */ 7170 /* Power on */
@@ -7207,11 +7207,11 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
7207 || (!(pVBInfo->VBInfo & DisableCRT2Display))) { 7207 || (!(pVBInfo->VBInfo & DisableCRT2Display))) {
7208 xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0, 7208 xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0,
7209 0x20); /* shampoo 0129 */ 7209 0x20); /* shampoo 0129 */
7210 if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) { 7210 if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
7211 if (!XGI_DisableChISLCD(pVBInfo)) { 7211 if (!XGI_DisableChISLCD(pVBInfo)) {
7212 if (XGI_EnableChISLCD(pVBInfo) || 7212 if (XGI_EnableChISLCD(pVBInfo) ||
7213 (pVBInfo->VBInfo & 7213 (pVBInfo->VBInfo &
7214 (SetCRT2ToLCD | SetCRT2ToLCDA))) 7214 (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
7215 /* LVDS PLL power on */ 7215 /* LVDS PLL power on */
7216 xgifb_reg_and( 7216 xgifb_reg_and(
7217 pVBInfo->Part4Port, 7217 pVBInfo->Part4Port,
@@ -7229,12 +7229,12 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
7229 tempah = 0xc0; 7229 tempah = 0xc0;
7230 7230
7231 if (!(pVBInfo->VBInfo & SetSimuScanMode)) { 7231 if (!(pVBInfo->VBInfo & SetSimuScanMode)) {
7232 if (pVBInfo->VBInfo & SetCRT2ToLCDA) { 7232 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
7233 if (pVBInfo->VBInfo & 7233 if (pVBInfo->VBInfo &
7234 SetCRT2ToDualEdge) { 7234 SetCRT2ToDualEdge) {
7235 tempah = tempah & 0x40; 7235 tempah = tempah & 0x40;
7236 if (pVBInfo->VBInfo & 7236 if (pVBInfo->VBInfo &
7237 SetCRT2ToLCDA) 7237 XGI_SetCRT2ToLCDA)
7238 tempah = tempah ^ 0xC0; 7238 tempah = tempah ^ 0xC0;
7239 7239
7240 if (pVBInfo->SetFlag & 7240 if (pVBInfo->SetFlag &
@@ -7271,7 +7271,7 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
7271 } /* 301 */ 7271 } /* 301 */
7272 else { /* LVDS */ 7272 else { /* LVDS */
7273 if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD 7273 if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD
7274 | SetCRT2ToLCDA)) 7274 | XGI_SetCRT2ToLCDA))
7275 /* enable CRT2 */ 7275 /* enable CRT2 */
7276 xgifb_reg_or(pVBInfo->Part1Port, 0x1E, 0x20); 7276 xgifb_reg_or(pVBInfo->Part1Port, 0x1E, 0x20);
7277 7277
@@ -7311,9 +7311,9 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
7311 pVBInfo->SetFlag &= temp; 7311 pVBInfo->SetFlag &= temp;
7312 pVBInfo->SelectCRT2Rate = 0; 7312 pVBInfo->SelectCRT2Rate = 0;
7313 7313
7314 if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV 7314 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
7315 | VB_XGI302LV | VB_XGI301C)) { 7315 | VB_SIS302LV | VB_XGI301C)) {
7316 if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA 7316 if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA
7317 | SetInSlaveMode)) { 7317 | SetInSlaveMode)) {
7318 pVBInfo->SetFlag |= ProgrammingCRT2; 7318 pVBInfo->SetFlag |= ProgrammingCRT2;
7319 } 7319 }
@@ -7415,11 +7415,11 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
7415 pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19; 7415 pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19;
7416 pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A; 7416 pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A;
7417 pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00; 7417 pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00;
7418 pVBInfo->Part1Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_04; 7418 pVBInfo->Part1Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_04;
7419 pVBInfo->Part2Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_10; 7419 pVBInfo->Part2Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_10;
7420 pVBInfo->Part3Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_12; 7420 pVBInfo->Part3Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_12;
7421 pVBInfo->Part4Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14; 7421 pVBInfo->Part4Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14;
7422 pVBInfo->Part5Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14 + 2; 7422 pVBInfo->Part5Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14 + 2;
7423 7423
7424 /* for x86 Linux, XG21 LVDS */ 7424 /* for x86 Linux, XG21 LVDS */
7425 if (HwDeviceExtension->jChipType == XG21) { 7425 if (HwDeviceExtension->jChipType == XG21) {
@@ -7452,20 +7452,20 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
7452 XGI_GetLCDInfo(ModeNo, ModeIdIndex, pVBInfo); 7452 XGI_GetLCDInfo(ModeNo, ModeIdIndex, pVBInfo);
7453 XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo); 7453 XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
7454 7454
7455 if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA)) { 7455 if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA)) {
7456 XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo, 7456 XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
7457 ModeIdIndex, pVBInfo); 7457 ModeIdIndex, pVBInfo);
7458 7458
7459 if (pVBInfo->VBInfo & SetCRT2ToLCDA) { 7459 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
7460 XGI_SetLCDAGroup(ModeNo, ModeIdIndex, 7460 XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
7461 HwDeviceExtension, pVBInfo); 7461 HwDeviceExtension, pVBInfo);
7462 } 7462 }
7463 } else { 7463 } else {
7464 if (!(pVBInfo->VBInfo & SwitchToCRT2)) { 7464 if (!(pVBInfo->VBInfo & SwitchCRT2)) {
7465 XGI_SetCRT1Group(xgifb_info, 7465 XGI_SetCRT1Group(xgifb_info,
7466 HwDeviceExtension, ModeNo, 7466 HwDeviceExtension, ModeNo,
7467 ModeIdIndex, pVBInfo); 7467 ModeIdIndex, pVBInfo);
7468 if (pVBInfo->VBInfo & SetCRT2ToLCDA) { 7468 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
7469 XGI_SetLCDAGroup(ModeNo, ModeIdIndex, 7469 XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
7470 HwDeviceExtension, 7470 HwDeviceExtension,
7471 pVBInfo); 7471 pVBInfo);
@@ -7473,7 +7473,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
7473 } 7473 }
7474 } 7474 }
7475 7475
7476 if (pVBInfo->VBInfo & (SetSimuScanMode | SwitchToCRT2)) { 7476 if (pVBInfo->VBInfo & (SetSimuScanMode | SwitchCRT2)) {
7477 switch (HwDeviceExtension->ujVBChipID) { 7477 switch (HwDeviceExtension->ujVBChipID) {
7478 case VB_CHIP_301: 7478 case VB_CHIP_301:
7479 XGI_SetCRT2Group301(ModeNo, HwDeviceExtension, 7479 XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
@@ -7504,10 +7504,10 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
7504 7504
7505 if (ModeNo <= 0x13) { 7505 if (ModeNo <= 0x13) {
7506 pVBInfo->ModeType = pVBInfo->SModeIDTable[ModeIdIndex]. 7506 pVBInfo->ModeType = pVBInfo->SModeIDTable[ModeIdIndex].
7507 St_ModeFlag & ModeInfoFlag; 7507 St_ModeFlag & ModeTypeMask;
7508 } else { 7508 } else {
7509 pVBInfo->ModeType = pVBInfo->EModeIDTable[ModeIdIndex]. 7509 pVBInfo->ModeType = pVBInfo->EModeIDTable[ModeIdIndex].
7510 Ext_ModeFlag & ModeInfoFlag; 7510 Ext_ModeFlag & ModeTypeMask;
7511 } 7511 }
7512 7512
7513 pVBInfo->SetFlag = 0; 7513 pVBInfo->SetFlag = 0;
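The vb_setmode.c hunks above repeat one five-way bridge-type test dozens of times; the series only renames the XGI-private constants to the names shared with drivers/video/sis. A hypothetical helper capturing the recurring idiom (the driver open-codes this mask at every call site, so the helper and macro names are the only assumptions here):

        #define VB_30xBLV_MASK  (VB_SIS301B | VB_SIS302B | VB_SIS301LV | \
                                 VB_SIS302LV | VB_XGI301C)

        static inline int is_30xblv_bridge(const struct vb_device_info *pVBInfo)
        {
                return !!(pVBInfo->VBType & VB_30xBLV_MASK);
        }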
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
index 6556a0d6ff82..a5bd56af92b1 100644
--- a/drivers/staging/xgifb/vb_struct.h
+++ b/drivers/staging/xgifb/vb_struct.h
@@ -1,15 +1,6 @@
1#ifndef _VB_STRUCT_ 1#ifndef _VB_STRUCT_
2#define _VB_STRUCT_ 2#define _VB_STRUCT_
3 3#include "../../video/sis/vstruct.h"
4struct XGI_LCDDataStruct {
5 unsigned short RVBHCMAX;
6 unsigned short RVBHCFACT;
7 unsigned short VGAHT;
8 unsigned short VGAVT;
9 unsigned short LCDHT;
10 unsigned short LCDVT;
11};
12
13 4
14struct XGI_LVDSCRT1HDataStruct { 5struct XGI_LVDSCRT1HDataStruct {
15 unsigned char Reg[8]; 6 unsigned char Reg[8];
@@ -19,22 +10,6 @@ struct XGI_LVDSCRT1VDataStruct {
19 unsigned char Reg[7]; 10 unsigned char Reg[7];
20}; 11};
21 12
22struct XGI_TVDataStruct {
23 unsigned short RVBHCMAX;
24 unsigned short RVBHCFACT;
25 unsigned short VGAHT;
26 unsigned short VGAVT;
27 unsigned short TVHDE;
28 unsigned short TVVDE;
29 unsigned short RVBHRS;
30 unsigned char FlickerMode;
31 unsigned short HALFRVBHRS;
32 unsigned char RY1COE;
33 unsigned char RY2COE;
34 unsigned char RY3COE;
35 unsigned char RY4COE;
36};
37
38struct XGI_StStruct { 13struct XGI_StStruct {
39 unsigned char St_ModeID; 14 unsigned char St_ModeID;
40 unsigned short St_ModeFlag; 15 unsigned short St_ModeFlag;
@@ -47,18 +22,6 @@ struct XGI_StStruct {
47 unsigned char VB_StTVYFilterIndex; 22 unsigned char VB_StTVYFilterIndex;
48}; 23};
49 24
50struct XGI_StandTableStruct {
51 unsigned char CRT_COLS;
52 unsigned char ROWS;
53 unsigned char CHAR_HEIGHT;
54 unsigned short CRT_LEN;
55 unsigned char SR[4];
56 unsigned char MISC;
57 unsigned char CRTC[0x19];
58 unsigned char ATTR[0x14];
59 unsigned char GRC[9];
60};
61
62struct XGI_ExtStruct { 25struct XGI_ExtStruct {
63 unsigned char Ext_ModeID; 26 unsigned char Ext_ModeID;
64 unsigned short Ext_ModeFlag; 27 unsigned short Ext_ModeFlag;
@@ -85,39 +48,11 @@ struct XGI_Ext2Struct {
85 /* unsigned short ROM_OFFSET; */ 48 /* unsigned short ROM_OFFSET; */
86}; 49};
87 50
88
89struct XGI_MCLKDataStruct {
90 unsigned char SR28, SR29, SR2A;
91 unsigned short CLOCK;
92};
93
94struct XGI_ECLKDataStruct { 51struct XGI_ECLKDataStruct {
95 unsigned char SR2E, SR2F, SR30; 52 unsigned char SR2E, SR2F, SR30;
96 unsigned short CLOCK; 53 unsigned short CLOCK;
97}; 54};
98 55
99struct XGI_VCLKDataStruct {
100 unsigned char SR2B, SR2C;
101 unsigned short CLOCK;
102};
103
104struct XGI_VBVCLKDataStruct {
105 unsigned char Part4_A, Part4_B;
106 unsigned short CLOCK;
107};
108
109struct XGI_StResInfoStruct {
110 unsigned short HTotal;
111 unsigned short VTotal;
112};
113
114struct XGI_ModeResInfoStruct {
115 unsigned short HTotal;
116 unsigned short VTotal;
117 unsigned char XChar;
118 unsigned char YChar;
119};
120
121/*add for new UNIVGABIOS*/ 56/*add for new UNIVGABIOS*/
122struct XGI_LCDDesStruct { 57struct XGI_LCDDesStruct {
123 unsigned short LCDHDES; 58 unsigned short LCDHDES;
@@ -350,7 +285,7 @@ struct vb_device_info {
350 unsigned char *pCRT2Data_4_D; 285 unsigned char *pCRT2Data_4_D;
351 unsigned char *pCRT2Data_4_E; 286 unsigned char *pCRT2Data_4_E;
352 unsigned char *pCRT2Data_4_10; 287 unsigned char *pCRT2Data_4_10;
353 struct XGI_MCLKDataStruct *MCLKData; 288 struct SiS_MCLKData *MCLKData;
354 struct XGI_ECLKDataStruct *ECLKData; 289 struct XGI_ECLKDataStruct *ECLKData;
355 290
356 unsigned char *XGI_TVDelayList; 291 unsigned char *XGI_TVDelayList;
@@ -380,15 +315,15 @@ struct vb_device_info {
380 struct XGI_TimingVStruct *TimingV; 315 struct XGI_TimingVStruct *TimingV;
381 316
382 struct XGI_StStruct *SModeIDTable; 317 struct XGI_StStruct *SModeIDTable;
383 struct XGI_StandTableStruct *StandTable; 318 struct SiS_StandTable_S *StandTable;
384 struct XGI_ExtStruct *EModeIDTable; 319 struct XGI_ExtStruct *EModeIDTable;
385 struct XGI_Ext2Struct *RefIndex; 320 struct XGI_Ext2Struct *RefIndex;
386 /* XGINew_CRT1TableStruct *CRT1Table; */ 321 /* XGINew_CRT1TableStruct *CRT1Table; */
387 struct XGI_CRT1TableStruct *XGINEWUB_CRT1Table; 322 struct XGI_CRT1TableStruct *XGINEWUB_CRT1Table;
388 struct XGI_VCLKDataStruct *VCLKData; 323 struct SiS_VCLKData *VCLKData;
389 struct XGI_VBVCLKDataStruct *VBVCLKData; 324 struct SiS_VBVCLKData *VBVCLKData;
390 struct XGI_StResInfoStruct *StResInfo; 325 struct SiS_StResInfo_S *StResInfo;
391 struct XGI_ModeResInfoStruct *ModeResInfo; 326 struct SiS_ModeResInfo_S *ModeResInfo;
392 struct XGI_XG21CRT1Struct *UpdateCRT1; 327 struct XGI_XG21CRT1Struct *UpdateCRT1;
393 328
394 int ram_type; 329 int ram_type;
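The structures deleted from vb_struct.h above are replaced by SiS equivalents pulled in through the new #include of ../../video/sis/vstruct.h. The swap is only sound if the layouts match field for field; for reference, the removed XGI layouts, with the SiS names assumed to mirror them exactly:

        struct SiS_MCLKData {           /* was struct XGI_MCLKDataStruct */
                unsigned char SR28, SR29, SR2A;
                unsigned short CLOCK;
        };

        struct SiS_VCLKData {           /* was struct XGI_VCLKDataStruct */
                unsigned char SR2B, SR2C;
                unsigned short CLOCK;
        };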
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index e7946f1c1143..dddf261ed53d 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -1,5 +1,5 @@
1/* yilin modify for xgi20 */ 1/* yilin modify for xgi20 */
2static struct XGI_MCLKDataStruct XGI340New_MCLKData[] = { 2static struct SiS_MCLKData XGI340New_MCLKData[] = {
3 {0x16, 0x01, 0x01, 166}, 3 {0x16, 0x01, 0x01, 166},
4 {0x19, 0x02, 0x01, 124}, 4 {0x19, 0x02, 0x01, 124},
5 {0x7C, 0x08, 0x01, 200}, 5 {0x7C, 0x08, 0x01, 200},
@@ -10,7 +10,7 @@ static struct XGI_MCLKDataStruct XGI340New_MCLKData[] = {
10 {0x5c, 0x23, 0x01, 166} 10 {0x5c, 0x23, 0x01, 166}
11}; 11};
12 12
13static struct XGI_MCLKDataStruct XGI27New_MCLKData[] = { 13static struct SiS_MCLKData XGI27New_MCLKData[] = {
14 {0x5c, 0x23, 0x01, 166}, 14 {0x5c, 0x23, 0x01, 166},
15 {0x19, 0x02, 0x01, 124}, 15 {0x19, 0x02, 0x01, 124},
16 {0x7C, 0x08, 0x80, 200}, 16 {0x7C, 0x08, 0x80, 200},
@@ -296,7 +296,7 @@ static struct XGI_ExtStruct XGI330_EModeIDTable[] = {
296 0x00, 0x00, 0x00, 0x00, 0x00} 296 0x00, 0x00, 0x00, 0x00, 0x00}
297}; 297};
298 298
299static struct XGI_StandTableStruct XGI330_StandTable[] = { 299static struct SiS_StandTable_S XGI330_StandTable[] = {
300/* MD_0_200 */ 300/* MD_0_200 */
301 { 301 {
302 0x28, 0x18, 0x08, 0x0800, 302 0x28, 0x18, 0x08, 0x0800,
@@ -2353,109 +2353,109 @@ static struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_2_Vx75[] = {
2353 2353
2354/*add for new UNIVGABIOS*/ 2354/*add for new UNIVGABIOS*/
2355static struct XGI330_LCDDataTablStruct XGI_LCDDataTable[] = { 2355static struct XGI330_LCDDataTablStruct XGI_LCDDataTable[] = {
2356 {Panel1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCD1024x768Data */ 2356 {Panel_1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCD1024x768Data */
2357 {Panel1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCD1024x768Data */ 2357 {Panel_1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCD1024x768Data */
2358 {Panel1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCD1024x768Data */ 2358 {Panel_1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCD1024x768Data */
2359 {Panel1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCD1280x1024Data */ 2359 {Panel_1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCD1280x1024Data */
2360 {Panel1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCD1280x1024Data */ 2360 {Panel_1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCD1280x1024Data */
2361 {Panel1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCD1280x1024Data */ 2361 {Panel_1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCD1280x1024Data */
2362 {Panel1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCD1400x1050Data */ 2362 {Panel_1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCD1400x1050Data */
2363 {Panel1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCD1400x1050Data */ 2363 {Panel_1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCD1400x1050Data */
2364 {Panel1400x1050, 0x0018, 0x0010, 8}, /* XGI_CetLCD1400x1050Data */ 2364 {Panel_1400x1050, 0x0018, 0x0010, 8}, /* XGI_CetLCD1400x1050Data */
2365 {Panel1600x1200, 0x0019, 0x0001, 9}, /* XGI_ExtLCD1600x1200Data */ 2365 {Panel_1600x1200, 0x0019, 0x0001, 9}, /* XGI_ExtLCD1600x1200Data */
2366 {Panel1600x1200, 0x0019, 0x0000, 10}, /* XGI_StLCD1600x1200Data */ 2366 {Panel_1600x1200, 0x0019, 0x0000, 10}, /* XGI_StLCD1600x1200Data */
2367 {PanelRef60Hz, 0x0008, 0x0008, 11}, /* XGI_NoScalingData */ 2367 {PanelRef60Hz, 0x0008, 0x0008, 11}, /* XGI_NoScalingData */
2368 {Panel1024x768x75, 0x0019, 0x0001, 12}, /* XGI_ExtLCD1024x768x75Data */ 2368 {Panel_1024x768x75, 0x0019, 0x0001, 12}, /* XGI_ExtLCD1024x768x75Data */
2369 {Panel1024x768x75, 0x0019, 0x0000, 13}, /* XGI_StLCD1024x768x75Data */ 2369 {Panel_1024x768x75, 0x0019, 0x0000, 13}, /* XGI_StLCD1024x768x75Data */
2370 {Panel1024x768x75, 0x0018, 0x0010, 14}, /* XGI_CetLCD1024x768x75Data */ 2370 {Panel_1024x768x75, 0x0018, 0x0010, 14}, /* XGI_CetLCD1024x768x75Data */
2371 {Panel1280x1024x75, 0x0019, 0x0001, 15}, /* XGI_ExtLCD1280x1024x75Data*/ 2371 {Panel_1280x1024x75, 0x0019, 0x0001, 15}, /* XGI_ExtLCD1280x1024x75Data*/
2372 {Panel1280x1024x75, 0x0019, 0x0000, 16}, /* XGI_StLCD1280x1024x75Data */ 2372 {Panel_1280x1024x75, 0x0019, 0x0000, 16}, /* XGI_StLCD1280x1024x75Data */
2373 {Panel1280x1024x75, 0x0018, 0x0010, 17}, /* XGI_CetLCD1280x1024x75Data*/ 2373 {Panel_1280x1024x75, 0x0018, 0x0010, 17}, /* XGI_CetLCD1280x1024x75Data*/
2374 {PanelRef75Hz, 0x0008, 0x0008, 18}, /* XGI_NoScalingDatax75 */ 2374 {PanelRef75Hz, 0x0008, 0x0008, 18}, /* XGI_NoScalingDatax75 */
2375 {0xFF, 0x0000, 0x0000, 0} /* End of table */ 2375 {0xFF, 0x0000, 0x0000, 0} /* End of table */
2376}; 2376};
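Each row of these tables is {panel ID, LCDInfo mask, required bits, data-table index}, terminated by a 0xFF sentinel. A hedged sketch of the walk the driver performs over them (field names are assumed from the rows above; the helper itself is hypothetical, the real lookup lives in the XGI_Get*Ptr routines):

        static int lcd_table_lookup(const struct XGI330_LCDDataTablStruct *tbl,
                                    unsigned short panel, unsigned short lcdinfo)
        {
                int i;

                for (i = 0; tbl[i].PANELID != 0xFF; i++) {
                        if (tbl[i].PANELID == panel &&
                            (lcdinfo & tbl[i].MASK) == tbl[i].CAP)
                                return tbl[i].DATAPTR;  /* index into data arrays */
                }
                return -1;      /* no row matched; caller falls back */
        }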
2377 2377
2378static struct XGI330_LCDDataTablStruct XGI_LCDDesDataTable[] = { 2378static struct XGI330_LCDDataTablStruct XGI_LCDDesDataTable[] = {
2379 {Panel1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCDDes1024x768Data */ 2379 {Panel_1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCDDes1024x768Data */
2380 {Panel1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCDDes1024x768Data */ 2380 {Panel_1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCDDes1024x768Data */
2381 {Panel1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCDDes1024x768Data */ 2381 {Panel_1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCDDes1024x768Data */
2382 {Panel1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCDDes1280x1024Data */ 2382 {Panel_1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCDDes1280x1024Data */
2383 {Panel1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCDDes1280x1024Data */ 2383 {Panel_1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCDDes1280x1024Data */
2384 {Panel1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCDDes1280x1024Data */ 2384 {Panel_1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCDDes1280x1024Data */
2385 {Panel1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCDDes1400x1050Data */ 2385 {Panel_1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCDDes1400x1050Data */
2386 {Panel1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCDDes1400x1050Data */ 2386 {Panel_1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCDDes1400x1050Data */
2387 {Panel1400x1050, 0x0418, 0x0010, 8}, /* XGI_CetLCDDes1400x1050Data */ 2387 {Panel_1400x1050, 0x0418, 0x0010, 8}, /* XGI_CetLCDDes1400x1050Data */
2388 {Panel1400x1050, 0x0418, 0x0410, 9}, /* XGI_CetLCDDes1400x1050Data2 */ 2388 {Panel_1400x1050, 0x0418, 0x0410, 9}, /* XGI_CetLCDDes1400x1050Data2 */
2389 {Panel1600x1200, 0x0019, 0x0001, 10}, /* XGI_ExtLCDDes1600x1200Data */ 2389 {Panel_1600x1200, 0x0019, 0x0001, 10}, /* XGI_ExtLCDDes1600x1200Data */
2390 {Panel1600x1200, 0x0019, 0x0000, 11}, /* XGI_StLCDDes1600x1200Data */ 2390 {Panel_1600x1200, 0x0019, 0x0000, 11}, /* XGI_StLCDDes1600x1200Data */
2391 {PanelRef60Hz, 0x0008, 0x0008, 12}, /* XGI_NoScalingDesData */ 2391 {PanelRef60Hz, 0x0008, 0x0008, 12}, /* XGI_NoScalingDesData */
2392 {Panel1024x768x75, 0x0019, 0x0001, 13}, /*XGI_ExtLCDDes1024x768x75Data*/ 2392 {Panel_1024x768x75, 0x0019, 0x0001, 13}, /*XGI_ExtLCDDes1024x768x75Data*/
2393 {Panel1024x768x75, 0x0019, 0x0000, 14}, /* XGI_StLCDDes1024x768x75Data*/ 2393 {Panel_1024x768x75, 0x0019, 0x0000, 14}, /* XGI_StLCDDes1024x768x75Data*/
2394 {Panel1024x768x75, 0x0018, 0x0010, 15}, /*XGI_CetLCDDes1024x768x75Data*/ 2394 {Panel_1024x768x75, 0x0018, 0x0010, 15}, /*XGI_CetLCDDes1024x768x75Data*/
2395 /* XGI_ExtLCDDes1280x1024x75Data */ 2395 /* XGI_ExtLCDDes1280x1024x75Data */
2396 {Panel1280x1024x75, 0x0019, 0x0001, 16}, 2396 {Panel_1280x1024x75, 0x0019, 0x0001, 16},
2397 /* XGI_StLCDDes1280x1024x75Data */ 2397 /* XGI_StLCDDes1280x1024x75Data */
2398 {Panel1280x1024x75, 0x0019, 0x0000, 17}, 2398 {Panel_1280x1024x75, 0x0019, 0x0000, 17},
2399 /* XGI_CetLCDDes1280x1024x75Data */ 2399 /* XGI_CetLCDDes1280x1024x75Data */
2400 {Panel1280x1024x75, 0x0018, 0x0010, 18}, 2400 {Panel_1280x1024x75, 0x0018, 0x0010, 18},
2401 {PanelRef75Hz, 0x0008, 0x0008, 19}, /* XGI_NoScalingDesDatax75 */ 2401 {PanelRef75Hz, 0x0008, 0x0008, 19}, /* XGI_NoScalingDesDatax75 */
2402 {0xFF, 0x0000, 0x0000, 0} 2402 {0xFF, 0x0000, 0x0000, 0}
2403}; 2403};
2404 2404
2405static struct XGI330_LCDDataTablStruct xgifb_epllcd_crt1[] = { 2405static struct XGI330_LCDDataTablStruct xgifb_epllcd_crt1[] = {
2406 {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDSCRT11024x768_1 */ 2406 {Panel_1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDSCRT11024x768_1 */
2407 {Panel1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDSCRT11024x768_2 */ 2407 {Panel_1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDSCRT11024x768_2 */
2408 {Panel1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDSCRT11280x1024_1 */ 2408 {Panel_1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDSCRT11280x1024_1 */
2409 {Panel1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDSCRT11280x1024_2 */ 2409 {Panel_1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDSCRT11280x1024_2 */
2410 {Panel1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDSCRT11400x1050_1 */ 2410 {Panel_1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDSCRT11400x1050_1 */
2411 {Panel1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDSCRT11400x1050_2 */ 2411 {Panel_1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDSCRT11400x1050_2 */
2412 {Panel1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDSCRT11600x1200_1 */ 2412 {Panel_1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDSCRT11600x1200_1 */
2413 {Panel1024x768x75, 0x0018, 0x0000, 7}, /* XGI_LVDSCRT11024x768_1x75 */ 2413 {Panel_1024x768x75, 0x0018, 0x0000, 7}, /* XGI_LVDSCRT11024x768_1x75 */
2414 {Panel1024x768x75, 0x0018, 0x0010, 8}, /* XGI_LVDSCRT11024x768_2x75 */ 2414 {Panel_1024x768x75, 0x0018, 0x0010, 8}, /* XGI_LVDSCRT11024x768_2x75 */
2415 {Panel1280x1024x75, 0x0018, 0x0000, 9}, /*XGI_LVDSCRT11280x1024_1x75*/ 2415 {Panel_1280x1024x75, 0x0018, 0x0000, 9}, /*XGI_LVDSCRT11280x1024_1x75*/
2416 {Panel1280x1024x75, 0x0018, 0x0010, 10},/*XGI_LVDSCRT11280x1024_2x75*/ 2416 {Panel_1280x1024x75, 0x0018, 0x0010, 10},/*XGI_LVDSCRT11280x1024_2x75*/
2417 {0xFF, 0x0000, 0x0000, 0} 2417 {0xFF, 0x0000, 0x0000, 0}
2418}; 2418};
2419 2419
2420static struct XGI330_LCDDataTablStruct XGI_EPLLCDDataPtr[] = { 2420static struct XGI330_LCDDataTablStruct XGI_EPLLCDDataPtr[] = {
2421 {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Data_1 */ 2421 {Panel_1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Data_1 */
2422 {Panel1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDS1024x768Data_2 */ 2422 {Panel_1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDS1024x768Data_2 */
2423 {Panel1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDS1280x1024Data_1 */ 2423 {Panel_1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDS1280x1024Data_1 */
2424 {Panel1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDS1280x1024Data_2 */ 2424 {Panel_1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDS1280x1024Data_2 */
2425 {Panel1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDS1400x1050Data_1 */ 2425 {Panel_1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDS1400x1050Data_1 */
2426 {Panel1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDS1400x1050Data_2 */ 2426 {Panel_1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDS1400x1050Data_2 */
2427 {Panel1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDS1600x1200Data_1 */ 2427 {Panel_1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDS1600x1200Data_1 */
2428 {PanelRef60Hz, 0x0008, 0x0008, 7}, /* XGI_LVDSNoScalingData */ 2428 {PanelRef60Hz, 0x0008, 0x0008, 7}, /* XGI_LVDSNoScalingData */
2429 {Panel1024x768x75, 0x0018, 0x0000, 8}, /* XGI_LVDS1024x768Data_1x75 */ 2429 {Panel_1024x768x75, 0x0018, 0x0000, 8}, /* XGI_LVDS1024x768Data_1x75 */
2430 {Panel1024x768x75, 0x0018, 0x0010, 9}, /* XGI_LVDS1024x768Data_2x75 */ 2430 {Panel_1024x768x75, 0x0018, 0x0010, 9}, /* XGI_LVDS1024x768Data_2x75 */
2431 {Panel1280x1024x75, 0x0018, 0x0000, 10}, /* XGI_LVDS1280x1024Data_1x75*/ 2431 {Panel_1280x1024x75, 0x0018, 0x0000, 10}, /* XGI_LVDS1280x1024Data_1x75*/
2432 {Panel1280x1024x75, 0x0018, 0x0010, 11}, /*XGI_LVDS1280x1024Data_2x75*/ 2432 {Panel_1280x1024x75, 0x0018, 0x0010, 11}, /*XGI_LVDS1280x1024Data_2x75*/
2433 {PanelRef75Hz, 0x0008, 0x0008, 12}, /* XGI_LVDSNoScalingDatax75 */ 2433 {PanelRef75Hz, 0x0008, 0x0008, 12}, /* XGI_LVDSNoScalingDatax75 */
2434 {0xFF, 0x0000, 0x0000, 0} 2434 {0xFF, 0x0000, 0x0000, 0}
2435}; 2435};
2436 2436
2437static struct XGI330_LCDDataTablStruct XGI_EPLLCDDesDataPtr[] = { 2437static struct XGI330_LCDDataTablStruct XGI_EPLLCDDesDataPtr[] = {
2438 {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Des_1 */ 2438 {Panel_1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Des_1 */
2439 {Panel1024x768, 0x0618, 0x0410, 1}, /* XGI_LVDS1024x768Des_3 */ 2439 {Panel_1024x768, 0x0618, 0x0410, 1}, /* XGI_LVDS1024x768Des_3 */
2440 {Panel1024x768, 0x0018, 0x0010, 2}, /* XGI_LVDS1024x768Des_2 */ 2440 {Panel_1024x768, 0x0018, 0x0010, 2}, /* XGI_LVDS1024x768Des_2 */
2441 {Panel1280x1024, 0x0018, 0x0000, 3}, /* XGI_LVDS1280x1024Des_1 */ 2441 {Panel_1280x1024, 0x0018, 0x0000, 3}, /* XGI_LVDS1280x1024Des_1 */
2442 {Panel1280x1024, 0x0018, 0x0010, 4}, /* XGI_LVDS1280x1024Des_2 */ 2442 {Panel_1280x1024, 0x0018, 0x0010, 4}, /* XGI_LVDS1280x1024Des_2 */
2443 {Panel1400x1050, 0x0018, 0x0000, 5}, /* XGI_LVDS1400x1050Des_1 */ 2443 {Panel_1400x1050, 0x0018, 0x0000, 5}, /* XGI_LVDS1400x1050Des_1 */
2444 {Panel1400x1050, 0x0018, 0x0010, 6}, /* XGI_LVDS1400x1050Des_2 */ 2444 {Panel_1400x1050, 0x0018, 0x0010, 6}, /* XGI_LVDS1400x1050Des_2 */
2445 {Panel1600x1200, 0x0018, 0x0000, 7}, /* XGI_LVDS1600x1200Des_1 */ 2445 {Panel_1600x1200, 0x0018, 0x0000, 7}, /* XGI_LVDS1600x1200Des_1 */
2446 {PanelRef60Hz, 0x0008, 0x0008, 8}, /* XGI_LVDSNoScalingDesData */ 2446 {PanelRef60Hz, 0x0008, 0x0008, 8}, /* XGI_LVDSNoScalingDesData */
2447 {Panel1024x768x75, 0x0018, 0x0000, 9}, /* XGI_LVDS1024x768Des_1x75 */ 2447 {Panel_1024x768x75, 0x0018, 0x0000, 9}, /* XGI_LVDS1024x768Des_1x75 */
2448 {Panel1024x768x75, 0x0618, 0x0410, 10}, /* XGI_LVDS1024x768Des_3x75 */ 2448 {Panel_1024x768x75, 0x0618, 0x0410, 10}, /* XGI_LVDS1024x768Des_3x75 */
2449 {Panel1024x768x75, 0x0018, 0x0010, 11}, /* XGI_LVDS1024x768Des_2x75 */ 2449 {Panel_1024x768x75, 0x0018, 0x0010, 11}, /* XGI_LVDS1024x768Des_2x75 */
2450 {Panel1280x1024x75, 0x0018, 0x0000, 12}, /* XGI_LVDS1280x1024Des_1x75 */ 2450 {Panel_1280x1024x75, 0x0018, 0x0000, 12}, /* XGI_LVDS1280x1024Des_1x75 */
2451 {Panel1280x1024x75, 0x0018, 0x0010, 13}, /* XGI_LVDS1280x1024Des_2x75 */ 2451 {Panel_1280x1024x75, 0x0018, 0x0010, 13}, /* XGI_LVDS1280x1024Des_2x75 */
2452 {PanelRef75Hz, 0x0008, 0x0008, 14}, /* XGI_LVDSNoScalingDesDatax75 */ 2452 {PanelRef75Hz, 0x0008, 0x0008, 14}, /* XGI_LVDSNoScalingDesDatax75 */
2453 {0xFF, 0x0000, 0x0000, 0} 2453 {0xFF, 0x0000, 0x0000, 0}
2454}; 2454};
2455 2455
2456static struct XGI330_LCDDataTablStruct XGI_EPLCHLCDRegPtr[] = { 2456static struct XGI330_LCDDataTablStruct XGI_EPLCHLCDRegPtr[] = {
2457 {Panel1024x768, 0x0000, 0x0000, 0}, /* XGI_CH7017LV1024x768 */ 2457 {Panel_1024x768, 0x0000, 0x0000, 0}, /* XGI_CH7017LV1024x768 */
2458 {Panel1400x1050, 0x0000, 0x0000, 1}, /* XGI_CH7017LV1400x1050 */ 2458 {Panel_1400x1050, 0x0000, 0x0000, 1}, /* XGI_CH7017LV1400x1050 */
2459 {0xFF, 0x0000, 0x0000, 0} 2459 {0xFF, 0x0000, 0x0000, 0}
2460}; 2460};
2461 2461
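
These panel tables are all terminated by a 0xFF row, which doubles as the default entry when no specific panel matches. A minimal sketch of the lookup they feed, assuming field names (PANELID, MASK, CAP, DATAPTR) that mirror the {panel, mask, cap, index} row layout; the real helper in the xgifb mode-setting code may differ:

	static unsigned short lcd_table_lookup(const struct XGI330_LCDDataTablStruct *t,
					       unsigned char panel, unsigned short status)
	{
		/* Walk rows until the 0xFF sentinel; a row matches when the
		 * panel id is equal and the masked status bits equal CAP. */
		while (t->PANELID != 0xFF) {
			if (t->PANELID == panel && (status & t->MASK) == t->CAP)
				return t->DATAPTR;
			t++;
		}
		return t->DATAPTR;	/* sentinel row: default data index */
	}
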
@@ -2501,225 +2501,225 @@ static unsigned short LCDLenList[] = {
2501/* Dual link only */ 2501/* Dual link only */
2502static struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = { 2502static struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
2503/* LCDCap1024x768 */ 2503/* LCDCap1024x768 */
2504 {Panel1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65, 2504 {Panel_1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
2505 0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00, 2505 0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
2506 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}, 2506 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
2507/* LCDCap1280x1024 */ 2507/* LCDCap1280x1024 */
2508 {Panel1280x1024, LCDDualLink+DefaultLCDCap, StLCDBToA, 2508 {Panel_1280x1024, XGI_LCDDualLink+DefaultLCDCap, StLCDBToA,
2509 0x012, 0x70, 0x03, VCLK108_2, 2509 0x012, 0x70, 0x03, VCLK108_2_315,
2510 0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00, 2510 0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
2511 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10}, 2511 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
2512/* LCDCap1400x1050 */ 2512/* LCDCap1400x1050 */
2513 {Panel1400x1050, LCDDualLink+DefaultLCDCap, StLCDBToA, 2513 {Panel_1400x1050, XGI_LCDDualLink+DefaultLCDCap, StLCDBToA,
2514 0x012, 0x70, 0x03, VCLK108_2, 2514 0x012, 0x70, 0x03, VCLK108_2_315,
2515 0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00, 2515 0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
2516 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10}, 2516 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
2517/* LCDCap1600x1200 */ 2517/* LCDCap1600x1200 */
2518 {Panel1600x1200, LCDDualLink+DefaultLCDCap, LCDToFull, 2518 {Panel_1600x1200, XGI_LCDDualLink+DefaultLCDCap, LCDToFull,
2519 0x012, 0xC0, 0x03, VCLK162, 2519 0x012, 0xC0, 0x03, VCLK162,
2520 0x43, 0x22, 0x70, 0x24, 0x02, 0x14, 0x0A, 0x02, 0x00, 2520 0x43, 0x22, 0x70, 0x24, 0x02, 0x14, 0x0A, 0x02, 0x00,
2521 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10}, 2521 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
2522/* LCDCap1024x768x75 */ 2522/* LCDCap1024x768x75 */
2523 {Panel1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75, 2523 {Panel_1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
2524 0x2B, 0x61, 0x2B, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00, 2524 0x2B, 0x61, 0x2B, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
2525 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}, 2525 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
2526/* LCDCap1280x1024x75 */ 2526/* LCDCap1280x1024x75 */
2527 {Panel1280x1024x75, LCDDualLink+DefaultLCDCap, StLCDBToA, 2527 {Panel_1280x1024x75, XGI_LCDDualLink+DefaultLCDCap, StLCDBToA,
2528 0x012, 0x90, 0x03, VCLK135_5, 2528 0x012, 0x90, 0x03, VCLK135_5,
2529 0x54, 0x42, 0x4A, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00, 2529 0x54, 0x42, 0x4A, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
2530 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10}, 2530 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
2531/* LCDCapDefault */ 2531/* LCDCapDefault */
2532 {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65, 2532 {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
2533 0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00, 2533 0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
2534 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10} 2534 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
2535}; 2535};
2536 2536
2537static struct XGI330_LCDCapStruct XGI_LCDCapList[] = { 2537static struct XGI330_LCDCapStruct XGI_LCDCapList[] = {
2538/* LCDCap1024x768 */ 2538/* LCDCap1024x768 */
2539 {Panel1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65, 2539 {Panel_1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
2540 0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00, 2540 0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
2541 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}, 2541 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
2542/* LCDCap1280x1024 */ 2542/* LCDCap1280x1024 */
2543 {Panel1280x1024, DefaultLCDCap, StLCDBToA, 2543 {Panel_1280x1024, DefaultLCDCap, StLCDBToA,
2544 0x012, 0x70, 0x03, VCLK108_2, 2544 0x012, 0x70, 0x03, VCLK108_2_315,
2545 0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00, 2545 0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
2546 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10}, 2546 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
2547/* LCDCap1400x1050 */ 2547/* LCDCap1400x1050 */
2548 {Panel1400x1050, DefaultLCDCap, StLCDBToA, 2548 {Panel_1400x1050, DefaultLCDCap, StLCDBToA,
2549 0x012, 0x70, 0x03, VCLK108_2, 2549 0x012, 0x70, 0x03, VCLK108_2_315,
2550 0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00, 2550 0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
2551 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10}, 2551 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
2552/* LCDCap1600x1200 */ 2552/* LCDCap1600x1200 */
2553 {Panel1600x1200, DefaultLCDCap, LCDToFull, 2553 {Panel_1600x1200, DefaultLCDCap, LCDToFull,
2554 0x012, 0xC0, 0x03, VCLK162, 2554 0x012, 0xC0, 0x03, VCLK162,
2555 0x5A, 0x23, 0x5A, 0x23, 0x02, 0x14, 0x0A, 0x02, 0x00, 2555 0x5A, 0x23, 0x5A, 0x23, 0x02, 0x14, 0x0A, 0x02, 0x00,
2556 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10}, 2556 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
2557/* LCDCap1024x768x75 */ 2557/* LCDCap1024x768x75 */
2558 {Panel1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75, 2558 {Panel_1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
2559 0x2B, 0x61, 0x2B, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00, 2559 0x2B, 0x61, 0x2B, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
2560 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}, 2560 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
2561/* LCDCap1280x1024x75 */ 2561/* LCDCap1280x1024x75 */
2562 {Panel1280x1024x75, DefaultLCDCap, StLCDBToA, 2562 {Panel_1280x1024x75, DefaultLCDCap, StLCDBToA,
2563 0x012, 0x90, 0x03, VCLK135_5, 2563 0x012, 0x90, 0x03, VCLK135_5,
2564 0x54, 0x42, 0x4A, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00, 2564 0x54, 0x42, 0x4A, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
2565 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10}, 2565 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
2566/* LCDCapDefault */ 2566/* LCDCapDefault */
2567 {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65, 2567 {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
2568 0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00, 2568 0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
2569 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10} 2569 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
2570}; 2570};
2571 2571
2572static struct XGI_Ext2Struct XGI330_RefIndex[] = { 2572static struct XGI_Ext2Struct XGI330_RefIndex[] = {
2573 {Support32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175, 2573 {Mode32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
2574 0x00, 0x10, 0x59, 320, 200},/* 00 */ 2574 0x00, 0x10, 0x59, 320, 200},/* 00 */
2575 {Support32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175, 2575 {Mode32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
2576 0x00, 0x10, 0x00, 320, 400},/* 01 */ 2576 0x00, 0x10, 0x00, 320, 400},/* 01 */
2577 {Support32Bpp + SupportAllCRT2 + SyncNN, RES320x240, VCLK25_175, 2577 {Mode32Bpp + SupportAllCRT2 + SyncNN, RES320x240, VCLK25_175,
2578 0x04, 0x20, 0x50, 320, 240},/* 02 */ 2578 0x04, 0x20, 0x50, 320, 240},/* 02 */
2579 {Support32Bpp + SupportAllCRT2 + SyncPP, RES400x300, VCLK40, 2579 {Mode32Bpp + SupportAllCRT2 + SyncPP, RES400x300, VCLK40,
2580 0x05, 0x32, 0x51, 400, 300},/* 03 */ 2580 0x05, 0x32, 0x51, 400, 300},/* 03 */
2581 {Support32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES512x384, 2581 {Mode32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES512x384,
2582 VCLK65, 0x06, 0x43, 0x52, 512, 384},/* 04 */ 2582 VCLK65_315, 0x06, 0x43, 0x52, 512, 384},/* 04 */
2583 {Support32Bpp + SupportAllCRT2 + SyncPN, RES640x400, VCLK25_175, 2583 {Mode32Bpp + SupportAllCRT2 + SyncPN, RES640x400, VCLK25_175,
2584 0x00, 0x14, 0x2f, 640, 400},/* 05 */ 2584 0x00, 0x14, 0x2f, 640, 400},/* 05 */
2585 {Support32Bpp + SupportAllCRT2 + SyncNN, RES640x480x60, VCLK25_175, 2585 {Mode32Bpp + SupportAllCRT2 + SyncNN, RES640x480x60, VCLK25_175,
 2586 0x04, 0x24, 0x2e, 640, 480},/* 06 640x480x60Hz (LCD 640x480x60Hz) */ 2586 0x04, 0x24, 0x2e, 640, 480},/* 06 640x480x60Hz (LCD 640x480x60Hz) */
2587 {Support32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x72, VCLK31_5, 2587 {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x72, VCLK31_5,
2588 0x04, 0x24, 0x2e, 640, 480},/* 07 640x480x72Hz (LCD 640x480x70Hz) */ 2588 0x04, 0x24, 0x2e, 640, 480},/* 07 640x480x72Hz (LCD 640x480x70Hz) */
2589 {Support32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x75, VCLK31_5, 2589 {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x75, VCLK31_5,
2590 0x47, 0x24, 0x2e, 640, 480},/* 08 640x480x75Hz (LCD 640x480x75Hz) */ 2590 0x47, 0x24, 0x2e, 640, 480},/* 08 640x480x75Hz (LCD 640x480x75Hz) */
2591 {Support32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x85, VCLK36, 2591 {Mode32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x85, VCLK36,
2592 0x8A, 0x24, 0x2e, 640, 480},/* 09 640x480x85Hz */ 2592 0x8A, 0x24, 0x2e, 640, 480},/* 09 640x480x85Hz */
2593 {Support32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x100, VCLK43_163, 2593 {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x100, VCLK43_163,
2594 0x00, 0x24, 0x2e, 640, 480},/* 0a 640x480x100Hz */ 2594 0x00, 0x24, 0x2e, 640, 480},/* 0a 640x480x100Hz */
2595 {Support32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x120, VCLK52_406, 2595 {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x120, VCLK52_406,
2596 0x00, 0x24, 0x2e, 640, 480},/* 0b 640x480x120Hz */ 2596 0x00, 0x24, 0x2e, 640, 480},/* 0b 640x480x120Hz */
2597 {Support32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x160, VCLK72_852, 2597 {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x160, VCLK72_852,
2598 0x00, 0x24, 0x2e, 640, 480},/* 0c 640x480x160Hz */ 2598 0x00, 0x24, 0x2e, 640, 480},/* 0c 640x480x160Hz */
2599 {Support32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x200, VCLK86_6, 2599 {Mode32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x200, VCLK86_6,
2600 0x00, 0x24, 0x2e, 640, 480},/* 0d 640x480x200Hz */ 2600 0x00, 0x24, 0x2e, 640, 480},/* 0d 640x480x200Hz */
2601 {Support32Bpp + NoSupportLCD + SyncPP, RES800x600x56, VCLK36, 2601 {Mode32Bpp + NoSupportLCD + SyncPP, RES800x600x56, VCLK36,
2602 0x05, 0x36, 0x6a, 800, 600},/* 0e 800x600x56Hz */ 2602 0x05, 0x36, 0x6a, 800, 600},/* 0e 800x600x56Hz */
2603 {Support32Bpp + NoSupportTV + SyncPP, RES800x600x60, VCLK40, 2603 {Mode32Bpp + NoSupportTV + SyncPP, RES800x600x60, VCLK40,
2604 0x05, 0x36, 0x6a, 800, 600},/* 0f 800x600x60Hz (LCD 800x600x60Hz) */ 2604 0x05, 0x36, 0x6a, 800, 600},/* 0f 800x600x60Hz (LCD 800x600x60Hz) */
2605 {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x72, VCLK50, 2605 {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x72, VCLK50,
2606 0x48, 0x36, 0x6a, 800, 600},/* 10 800x600x72Hz (LCD 800x600x70Hz) */ 2606 0x48, 0x36, 0x6a, 800, 600},/* 10 800x600x72Hz (LCD 800x600x70Hz) */
2607 {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x75, VCLK49_5, 2607 {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x75, VCLK49_5,
2608 0x8B, 0x36, 0x6a, 800, 600},/* 11 800x600x75Hz (LCD 800x600x75Hz) */ 2608 0x8B, 0x36, 0x6a, 800, 600},/* 11 800x600x75Hz (LCD 800x600x75Hz) */
2609 {Support32Bpp + SupportRAMDAC2 + SyncPP, RES800x600x85, VCLK56_25, 2609 {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x600x85, VCLK56_25,
2610 0x00, 0x36, 0x6a, 800, 600},/* 12 800x600x85Hz */ 2610 0x00, 0x36, 0x6a, 800, 600},/* 12 800x600x85Hz */
2611 {Support32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x100, VCLK68_179, 2611 {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x100, VCLK68_179,
2612 0x00, 0x36, 0x6a, 800, 600},/* 13 800x600x100Hz */ 2612 0x00, 0x36, 0x6a, 800, 600},/* 13 800x600x100Hz */
2613 {Support32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x120, VCLK83_95, 2613 {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x120, VCLK83_95,
2614 0x00, 0x36, 0x6a, 800, 600},/* 14 800x600x120Hz */ 2614 0x00, 0x36, 0x6a, 800, 600},/* 14 800x600x120Hz */
2615 {Support32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x160, VCLK116_406, 2615 {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x160, VCLK116_406,
2616 0x00, 0x36, 0x6a, 800, 600},/* 15 800x600x160Hz */ 2616 0x00, 0x36, 0x6a, 800, 600},/* 15 800x600x160Hz */
2617 {Support32Bpp + InterlaceMode + SyncPP, RES1024x768x43, VCLK44_9, 2617 {Mode32Bpp + InterlaceMode + SyncPP, RES1024x768x43, VCLK44_9,
2618 0x00, 0x47, 0x37, 1024, 768},/* 16 1024x768x43Hz */ 2618 0x00, 0x47, 0x37, 1024, 768},/* 16 1024x768x43Hz */
2619 /* 17 1024x768x60Hz (LCD 1024x768x60Hz) */ 2619 /* 17 1024x768x60Hz (LCD 1024x768x60Hz) */
2620 {Support32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES1024x768x60, 2620 {Mode32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES1024x768x60,
2621 VCLK65, 0x06, 0x47, 0x37, 1024, 768}, 2621 VCLK65_315, 0x06, 0x47, 0x37, 1024, 768},
2622 {Support32Bpp + NoSupportHiVisionTV + SyncNN, RES1024x768x70, VCLK75, 2622 {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES1024x768x70, VCLK75,
2623 0x49, 0x47, 0x37, 1024, 768},/* 18 1024x768x70Hz (LCD 1024x768x70Hz) */ 2623 0x49, 0x47, 0x37, 1024, 768},/* 18 1024x768x70Hz (LCD 1024x768x70Hz) */
2624 {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES1024x768x75, VCLK78_75, 2624 {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1024x768x75, VCLK78_75,
2625 0x00, 0x47, 0x37, 1024, 768},/* 19 1024x768x75Hz (LCD 1024x768x75Hz) */ 2625 0x00, 0x47, 0x37, 1024, 768},/* 19 1024x768x75Hz (LCD 1024x768x75Hz) */
2626 {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1024x768x85, VCLK94_5, 2626 {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x768x85, VCLK94_5,
2627 0x8C, 0x47, 0x37, 1024, 768},/* 1a 1024x768x85Hz */ 2627 0x8C, 0x47, 0x37, 1024, 768},/* 1a 1024x768x85Hz */
2628 {Support32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x100, VCLK113_309, 2628 {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x100, VCLK113_309,
2629 0x00, 0x47, 0x37, 1024, 768},/* 1b 1024x768x100Hz */ 2629 0x00, 0x47, 0x37, 1024, 768},/* 1b 1024x768x100Hz */
2630 {Support32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x120, VCLK139_054, 2630 {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x120, VCLK139_054,
2631 0x00, 0x47, 0x37, 1024, 768},/* 1c 1024x768x120Hz */ 2631 0x00, 0x47, 0x37, 1024, 768},/* 1c 1024x768x120Hz */
2632 {Support32Bpp + SupportLCD + SyncPP, RES1280x960x60, VCLK108_2, 2632 {Mode32Bpp + SupportLCD + SyncPP, RES1280x960x60, VCLK108_2_315,
2633 0x08, 0x58, 0x7b, 1280, 960},/* 1d 1280x960x60Hz */ 2633 0x08, 0x58, 0x7b, 1280, 960},/* 1d 1280x960x60Hz */
2634 {Support32Bpp + InterlaceMode + SyncPP, RES1280x1024x43, VCLK78_75, 2634 {Mode32Bpp + InterlaceMode + SyncPP, RES1280x1024x43, VCLK78_75,
2635 0x00, 0x58, 0x3a, 1280, 1024},/* 1e 1280x1024x43Hz */ 2635 0x00, 0x58, 0x3a, 1280, 1024},/* 1e 1280x1024x43Hz */
2636 {Support32Bpp + NoSupportTV + SyncPP, RES1280x1024x60, VCLK108_2, 2636 {Mode32Bpp + NoSupportTV + SyncPP, RES1280x1024x60, VCLK108_2_315,
2637 0x07, 0x58, 0x3a, 1280, 1024},/*1f 1280x1024x60Hz (LCD 1280x1024x60Hz)*/ 2637 0x07, 0x58, 0x3a, 1280, 1024},/*1f 1280x1024x60Hz (LCD 1280x1024x60Hz)*/
2638 {Support32Bpp + NoSupportTV + SyncPP, RES1280x1024x75, VCLK135_5, 2638 {Mode32Bpp + NoSupportTV + SyncPP, RES1280x1024x75, VCLK135_5,
2639 0x00, 0x58, 0x3a, 1280, 1024},/*20 1280x1024x75Hz (LCD 1280x1024x75Hz)*/ 2639 0x00, 0x58, 0x3a, 1280, 1024},/*20 1280x1024x75Hz (LCD 1280x1024x75Hz)*/
2640 {Support32Bpp + SyncPP, RES1280x1024x85, VCLK157_5, 2640 {Mode32Bpp + SyncPP, RES1280x1024x85, VCLK157_5,
2641 0x00, 0x58, 0x3a, 1280, 1024},/* 21 1280x1024x85Hz */ 2641 0x00, 0x58, 0x3a, 1280, 1024},/* 21 1280x1024x85Hz */
2642 /* 22 1600x1200x60Hz */ 2642 /* 22 1600x1200x60Hz */
2643 {Support32Bpp + SupportLCD + SyncPP + SupportCRT2in301C, 2643 {Mode32Bpp + SupportLCD + SyncPP + SupportCRT2in301C,
2644 RES1600x1200x60, VCLK162, 0x09, 0x7A, 0x3c, 1600, 1200}, 2644 RES1600x1200x60, VCLK162, 0x09, 0x7A, 0x3c, 1600, 1200},
2645 {Support32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x65, VCLK175, 2645 {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x65, VCLK175,
2646 0x00, 0x69, 0x3c, 1600, 1200},/* 23 1600x1200x65Hz */ 2646 0x00, 0x69, 0x3c, 1600, 1200},/* 23 1600x1200x65Hz */
2647 {Support32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x70, VCLK189, 2647 {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x70, VCLK189,
2648 0x00, 0x69, 0x3c, 1600, 1200},/* 24 1600x1200x70Hz */ 2648 0x00, 0x69, 0x3c, 1600, 1200},/* 24 1600x1200x70Hz */
2649 {Support32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x75, VCLK202_5, 2649 {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x75, VCLK202_5,
2650 0x00, 0x69, 0x3c, 1600, 1200},/* 25 1600x1200x75Hz */ 2650 0x00, 0x69, 0x3c, 1600, 1200},/* 25 1600x1200x75Hz */
2651 {Support32Bpp + SyncPP, RES1600x1200x85, VCLK229_5, 2651 {Mode32Bpp + SyncPP, RES1600x1200x85, VCLK229_5,
2652 0x00, 0x69, 0x3c, 1600, 1200},/* 26 1600x1200x85Hz */ 2652 0x00, 0x69, 0x3c, 1600, 1200},/* 26 1600x1200x85Hz */
2653 {Support32Bpp + SyncPP, RES1600x1200x100, VCLK269_655, 2653 {Mode32Bpp + SyncPP, RES1600x1200x100, VCLK269_655,
2654 0x00, 0x69, 0x3c, 1600, 1200},/* 27 1600x1200x100Hz */ 2654 0x00, 0x69, 0x3c, 1600, 1200},/* 27 1600x1200x100Hz */
2655 {Support32Bpp + SyncPP, RES1600x1200x120, VCLK323_586, 2655 {Mode32Bpp + SyncPP, RES1600x1200x120, VCLK323_586,
2656 0x00, 0x69, 0x3c, 1600, 1200},/* 28 1600x1200x120Hz */ 2656 0x00, 0x69, 0x3c, 1600, 1200},/* 28 1600x1200x120Hz */
2657 {Support32Bpp + SupportLCD + SyncNP, RES1920x1440x60, VCLK234, 2657 {Mode32Bpp + SupportLCD + SyncNP, RES1920x1440x60, VCLK234,
2658 0x00, 0x00, 0x68, 1920, 1440},/* 29 1920x1440x60Hz */ 2658 0x00, 0x00, 0x68, 1920, 1440},/* 29 1920x1440x60Hz */
2659 {Support32Bpp + SyncPN, RES1920x1440x65, VCLK254_817, 2659 {Mode32Bpp + SyncPN, RES1920x1440x65, VCLK254_817,
2660 0x00, 0x00, 0x68, 1920, 1440},/* 2a 1920x1440x65Hz */ 2660 0x00, 0x00, 0x68, 1920, 1440},/* 2a 1920x1440x65Hz */
2661 {Support32Bpp + SyncPN, RES1920x1440x70, VCLK277_015, 2661 {Mode32Bpp + SyncPN, RES1920x1440x70, VCLK277_015,
2662 0x00, 0x00, 0x68, 1920, 1440},/* 2b 1920x1440x70Hz */ 2662 0x00, 0x00, 0x68, 1920, 1440},/* 2b 1920x1440x70Hz */
2663 {Support32Bpp + SyncPN, RES1920x1440x75, VCLK291_132, 2663 {Mode32Bpp + SyncPN, RES1920x1440x75, VCLK291_132,
2664 0x00, 0x00, 0x68, 1920, 1440},/* 2c 1920x1440x75Hz */ 2664 0x00, 0x00, 0x68, 1920, 1440},/* 2c 1920x1440x75Hz */
2665 {Support32Bpp + SyncPN, RES1920x1440x85, VCLK330_615, 2665 {Mode32Bpp + SyncPN, RES1920x1440x85, VCLK330_615,
2666 0x00, 0x00, 0x68, 1920, 1440},/* 2d 1920x1440x85Hz */ 2666 0x00, 0x00, 0x68, 1920, 1440},/* 2d 1920x1440x85Hz */
2667 {Support16Bpp + SyncPN, RES1920x1440x100, VCLK388_631, 2667 {Mode16Bpp + SyncPN, RES1920x1440x100, VCLK388_631,
2668 0x00, 0x00, 0x68, 1920, 1440},/* 2e 1920x1440x100Hz */ 2668 0x00, 0x00, 0x68, 1920, 1440},/* 2e 1920x1440x100Hz */
2669 {Support32Bpp + SupportLCD + SyncPN, RES2048x1536x60, VCLK266_952, 2669 {Mode32Bpp + SupportLCD + SyncPN, RES2048x1536x60, VCLK266_952,
2670 0x00, 0x00, 0x6c, 2048, 1536},/* 2f 2048x1536x60Hz */ 2670 0x00, 0x00, 0x6c, 2048, 1536},/* 2f 2048x1536x60Hz */
2671 {Support32Bpp + SyncPN, RES2048x1536x65, VCLK291_766, 2671 {Mode32Bpp + SyncPN, RES2048x1536x65, VCLK291_766,
2672 0x00, 0x00, 0x6c, 2048, 1536},/* 30 2048x1536x65Hz */ 2672 0x00, 0x00, 0x6c, 2048, 1536},/* 30 2048x1536x65Hz */
2673 {Support32Bpp + SyncPN, RES2048x1536x70, VCLK315_195, 2673 {Mode32Bpp + SyncPN, RES2048x1536x70, VCLK315_195,
2674 0x00, 0x00, 0x6c, 2048, 1536},/* 31 2048x1536x70Hz */ 2674 0x00, 0x00, 0x6c, 2048, 1536},/* 31 2048x1536x70Hz */
2675 {Support32Bpp + SyncPN, RES2048x1536x75, VCLK340_477, 2675 {Mode32Bpp + SyncPN, RES2048x1536x75, VCLK340_477,
2676 0x00, 0x00, 0x6c, 2048, 1536},/* 32 2048x1536x75Hz */ 2676 0x00, 0x00, 0x6c, 2048, 1536},/* 32 2048x1536x75Hz */
2677 {Support16Bpp + SyncPN, RES2048x1536x85, VCLK375_847, 2677 {Mode16Bpp + SyncPN, RES2048x1536x85, VCLK375_847,
2678 0x00, 0x00, 0x6c, 2048, 1536},/* 33 2048x1536x85Hz */ 2678 0x00, 0x00, 0x6c, 2048, 1536},/* 33 2048x1536x85Hz */
2679 {Support32Bpp + SupportHiVisionTV + SupportRAMDAC2 + 2679 {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
2680 SyncPP + SupportYPbPr, RES800x480x60, VCLK39_77, 2680 SyncPP + SupportYPbPr750p, RES800x480x60, VCLK39_77,
2681 0x08, 0x00, 0x70, 800, 480},/* 34 800x480x60Hz */ 2681 0x08, 0x00, 0x70, 800, 480},/* 34 800x480x60Hz */
2682 {Support32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x75, VCLK49_5, 2682 {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x75, VCLK49_5,
2683 0x08, 0x00, 0x70, 800, 480},/* 35 800x480x75Hz */ 2683 0x08, 0x00, 0x70, 800, 480},/* 35 800x480x75Hz */
2684 {Support32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x85, VCLK56_25, 2684 {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x85, VCLK56_25,
2685 0x08, 0x00, 0x70, 800, 480},/* 36 800x480x85Hz */ 2685 0x08, 0x00, 0x70, 800, 480},/* 36 800x480x85Hz */
2686 {Support32Bpp + SupportHiVisionTV + SupportRAMDAC2 + 2686 {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
2687 SyncPP + SupportYPbPr, RES1024x576x60, VCLK65, 2687 SyncPP + SupportYPbPr750p, RES1024x576x60, VCLK65_315,
2688 0x09, 0x00, 0x71, 1024, 576},/* 37 1024x576x60Hz */ 2688 0x09, 0x00, 0x71, 1024, 576},/* 37 1024x576x60Hz */
2689 {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x75, VCLK78_75, 2689 {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x75, VCLK78_75,
2690 0x09, 0x00, 0x71, 1024, 576},/* 38 1024x576x75Hz */ 2690 0x09, 0x00, 0x71, 1024, 576},/* 38 1024x576x75Hz */
2691 {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x85, VCLK94_5, 2691 {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x85, VCLK94_5,
2692 0x09, 0x00, 0x71, 1024, 576},/* 39 1024x576x85Hz */ 2692 0x09, 0x00, 0x71, 1024, 576},/* 39 1024x576x85Hz */
2693 {Support32Bpp + SupportHiVisionTV + SupportRAMDAC2 + 2693 {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
2694 SyncPP + SupportYPbPr, RES1280x720x60, VCLK108_2, 2694 SyncPP + SupportYPbPr750p, RES1280x720x60, VCLK108_2_315,
2695 0x0A, 0x00, 0x75, 1280, 720},/* 3a 1280x720x60Hz*/ 2695 0x0A, 0x00, 0x75, 1280, 720},/* 3a 1280x720x60Hz*/
2696 {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x75, VCLK135_5, 2696 {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x75, VCLK135_5,
2697 0x0A, 0x00, 0x75, 1280, 720},/* 3b 1280x720x75Hz */ 2697 0x0A, 0x00, 0x75, 1280, 720},/* 3b 1280x720x75Hz */
2698 {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x85, VCLK157_5, 2698 {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x85, VCLK157_5,
2699 0x0A, 0x00, 0x75, 1280, 720},/* 3c 1280x720x85Hz */ 2699 0x0A, 0x00, 0x75, 1280, 720},/* 3c 1280x720x85Hz */
2700 {Support32Bpp + SupportTV + SyncNN, RES720x480x60, VCLK28_322, 2700 {Mode32Bpp + SupportTV + SyncNN, RES720x480x60, VCLK28_322,
2701 0x06, 0x00, 0x31, 720, 480},/* 3d 720x480x60Hz */ 2701 0x06, 0x00, 0x31, 720, 480},/* 3d 720x480x60Hz */
2702 {Support32Bpp + SupportTV + SyncPP, RES720x576x56, VCLK36, 2702 {Mode32Bpp + SupportTV + SyncPP, RES720x576x56, VCLK36,
2703 0x06, 0x00, 0x32, 720, 576},/* 3e 720x576x56Hz */ 2703 0x06, 0x00, 0x32, 720, 576},/* 3e 720x576x56Hz */
2704 {Support32Bpp + InterlaceMode + NoSupportLCD + SyncPP, RES856x480x79I, 2704 {Mode32Bpp + InterlaceMode + NoSupportLCD + SyncPP, RES856x480x79I,
2705 VCLK35_2, 0x00, 0x00, 0x00, 856, 480},/* 3f 856x480x79I */ 2705 VCLK35_2, 0x00, 0x00, 0x00, 856, 480},/* 3f 856x480x79I */
2706 {Support32Bpp + NoSupportLCD + SyncNN, RES856x480x60, VCLK35_2, 2706 {Mode32Bpp + NoSupportLCD + SyncNN, RES856x480x60, VCLK35_2,
2707 0x00, 0x00, 0x00, 856, 480},/* 40 856x480x60Hz */ 2707 0x00, 0x00, 0x00, 856, 480},/* 40 856x480x60Hz */
2708 {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES1280x768x60, 2708 {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1280x768x60,
2709 VCLK79_411, 0x08, 0x48, 0x23, 1280, 768},/* 41 1280x768x60Hz */ 2709 VCLK79_411, 0x08, 0x48, 0x23, 1280, 768},/* 41 1280x768x60Hz */
2710 {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES1400x1050x60, 2710 {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1400x1050x60,
2711 VCLK122_61, 0x08, 0x69, 0x26, 1400, 1050},/* 42 1400x1050x60Hz */ 2711 VCLK122_61, 0x08, 0x69, 0x26, 1400, 1050},/* 42 1400x1050x60Hz */
2712 {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x60, VCLK80_350, 2712 {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x60, VCLK80_350,
2713 0x37, 0x00, 0x20, 1152, 864},/* 43 1152x864x60Hz */ 2713 0x37, 0x00, 0x20, 1152, 864},/* 43 1152x864x60Hz */
2714 {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x75, VCLK107_385, 2714 {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x75, VCLK107_385,
2715 0x37, 0x00, 0x20, 1152, 864},/* 44 1152x864x75Hz */ 2715 0x37, 0x00, 0x20, 1152, 864},/* 44 1152x864x75Hz */
2716 {Support32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x75, 2716 {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x75,
2717 VCLK125_999, 0x3A, 0x88, 0x7b, 1280, 960},/* 45 1280x960x75Hz */ 2717 VCLK125_999, 0x3A, 0x88, 0x7b, 1280, 960},/* 45 1280x960x75Hz */
2718 {Support32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x85, 2718 {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x85,
2719 VCLK148_5, 0x0A, 0x88, 0x7b, 1280, 960},/* 46 1280x960x85Hz */ 2719 VCLK148_5, 0x0A, 0x88, 0x7b, 1280, 960},/* 46 1280x960x85Hz */
2720 {Support32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x120, 2720 {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x120,
2721 VCLK217_325, 0x3A, 0x88, 0x7b, 1280, 960},/* 47 1280x960x120Hz */ 2721 VCLK217_325, 0x3A, 0x88, 0x7b, 1280, 960},/* 47 1280x960x120Hz */
2722 {Support32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x160, VCLK139_054, 2722 {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x160, VCLK139_054,
2723 0x30, 0x47, 0x37, 1024, 768},/* 48 1024x768x160Hz */ 2723 0x30, 0x47, 0x37, 1024, 768},/* 48 1024x768x160Hz */
2724}; 2724};
2725 2725
@@ -2729,7 +2729,7 @@ static unsigned char XGI330_ScreenOffset[] = {
2729 0x57, 0x48 2729 0x57, 0x48
2730}; 2730};
2731 2731
2732static struct XGI_StResInfoStruct XGI330_StResInfo[] = { 2732static struct SiS_StResInfo_S XGI330_StResInfo[] = {
2733 {640, 400}, 2733 {640, 400},
2734 {640, 350}, 2734 {640, 350},
2735 {720, 400}, 2735 {720, 400},
@@ -2737,7 +2737,7 @@ static struct XGI_StResInfoStruct XGI330_StResInfo[] = {
2737 {640, 480} 2737 {640, 480}
2738}; 2738};
2739 2739
2740static struct XGI_ModeResInfoStruct XGI330_ModeResInfo[] = { 2740static struct SiS_ModeResInfo_S XGI330_ModeResInfo[] = {
2741 { 320, 200, 8, 8}, 2741 { 320, 200, 8, 8},
2742 { 320, 240, 8, 8}, 2742 { 320, 240, 8, 8},
2743 { 320, 400, 8, 8}, 2743 { 320, 400, 8, 8},
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
index 9e166bbb00c4..a7208e315815 100644
--- a/drivers/staging/xgifb/vgatypes.h
+++ b/drivers/staging/xgifb/vgatypes.h
@@ -2,6 +2,9 @@
2#define _VGATYPES_ 2#define _VGATYPES_
3 3
4#include <linux/ioctl.h> 4#include <linux/ioctl.h>
5#include <linux/fb.h> /* for struct fb_var_screeninfo for sis.h */
6#include "../../video/sis/vgatypes.h"
7#include "../../video/sis/sis.h" /* for LCD_TYPE */
5 8
6#ifndef XGI_VB_CHIP_TYPE 9#ifndef XGI_VB_CHIP_TYPE
7enum XGI_VB_CHIP_TYPE { 10enum XGI_VB_CHIP_TYPE {
@@ -19,6 +22,12 @@ enum XGI_VB_CHIP_TYPE {
19}; 22};
20#endif 23#endif
21 24
25
26#define XGI_LCD_TYPE
 27/* Since the merge with video/sis, the LCD_TYPEs are used from
 28 drivers/video/sis/sis.h. Nevertheless we keep this (for the moment) for
 29 future reference until the code is merged completely and we are sure
 30 none of this needs to be added to the sis.h header */
22#ifndef XGI_LCD_TYPE 31#ifndef XGI_LCD_TYPE
23enum XGI_LCD_TYPE { 32enum XGI_LCD_TYPE {
24 LCD_INVALID = 0, 33 LCD_INVALID = 0,
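
The new XGI_LCD_TYPE define interacts with the pre-existing #ifndef guard around the enum below it: because the macro is now always defined, the local enum drops out at preprocessing time and the LCD_TYPE values come from drivers/video/sis/sis.h instead. The pattern in miniature:

	#define XGI_LCD_TYPE			/* defined up front ... */
	#ifndef XGI_LCD_TYPE			/* ... so this guard is never taken */
	enum XGI_LCD_TYPE { LCD_INVALID = 0 };	/* dead code; sis.h provides these */
	#endif
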
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
index 7fabcb2bc80d..3ed2c8f656a5 100644
--- a/drivers/staging/zcache/Kconfig
+++ b/drivers/staging/zcache/Kconfig
@@ -1,13 +1,14 @@
1config ZCACHE 1config ZCACHE
2 tristate "Dynamic compression of swap pages and clean pagecache pages" 2 bool "Dynamic compression of swap pages and clean pagecache pages"
3 depends on CLEANCACHE || FRONTSWAP 3 # X86 dependency is because zsmalloc uses non-portable pte/tlb
4 select XVMALLOC 4 # functions
5 select LZO_COMPRESS 5 depends on (CLEANCACHE || FRONTSWAP) && CRYPTO && X86
6 select LZO_DECOMPRESS 6 select ZSMALLOC
7 select CRYPTO_LZO
7 default n 8 default n
8 help 9 help
9 Zcache doubles RAM efficiency while providing a significant 10 Zcache doubles RAM efficiency while providing a significant
 10 performance boost on many workloads. Zcache uses lzo1x 11 performance boost on many workloads. Zcache uses
11 compression and an in-kernel implementation of transcendent 12 compression and an in-kernel implementation of transcendent
12 memory to store clean page cache pages and swap in RAM, 13 memory to store clean page cache pages and swap in RAM,
13 providing a noticeable reduction in disk I/O. 14 providing a noticeable reduction in disk I/O.
diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
index ed147c4b110d..0d4aa82706b3 100644
--- a/drivers/staging/zcache/tmem.h
+++ b/drivers/staging/zcache/tmem.h
@@ -47,7 +47,7 @@
47#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0) 47#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
48#endif 48#endif
49 49
50#define ASSERT_SPINLOCK(_l) WARN_ON(!spin_is_locked(_l)) 50#define ASSERT_SPINLOCK(_l) lockdep_assert_held(_l)
51 51
52/* 52/*
53 * A pool is the highest-level data structure managed by tmem and 53 * A pool is the highest-level data structure managed by tmem and
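
The ASSERT_SPINLOCK change is more than cosmetic: spin_is_locked() only reports that some CPU holds the lock, and on uniprocessor builds it is constant false, so WARN_ON(!spin_is_locked(_l)) cannot tell whether the current task is actually the holder. lockdep_assert_held() verifies exactly that, and compiles to nothing when lockdep is disabled. A minimal sketch of the annotated style:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	static DEFINE_SPINLOCK(demo_lock);

	static void demo_update(void)
	{
		/* Documents and (under lockdep) enforces the locking contract:
		 * callers must hold demo_lock when calling this helper. */
		lockdep_assert_held(&demo_lock);
		/* ... mutate state guarded by demo_lock ... */
	}
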
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index ef7c52bb1df9..70734652f724 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -6,9 +6,10 @@
6 * 6 *
7 * Zcache provides an in-kernel "host implementation" for transcendent memory 7 * Zcache provides an in-kernel "host implementation" for transcendent memory
8 * and, thus indirectly, for cleancache and frontswap. Zcache includes two 8 * and, thus indirectly, for cleancache and frontswap. Zcache includes two
9 * page-accessible memory [1] interfaces, both utilizing lzo1x compression: 9 * page-accessible memory [1] interfaces, both utilizing the crypto compression
10 * API:
10 * 1) "compression buddies" ("zbud") is used for ephemeral pages 11 * 1) "compression buddies" ("zbud") is used for ephemeral pages
11 * 2) xvmalloc is used for persistent pages. 12 * 2) zsmalloc is used for persistent pages.
 12 * Xvmalloc (based on the TLSF allocator) has very low fragmentation 13 * Zsmalloc (a slab-based allocator) has very low fragmentation
13 * so maximizes space efficiency, while zbud allows pairs (and potentially, 14 * so maximizes space efficiency, while zbud allows pairs (and potentially,
14 * in the future, more than a pair of) compressed pages to be closely linked 15 * in the future, more than a pair of) compressed pages to be closely linked
@@ -23,15 +24,16 @@
23#include <linux/cpu.h> 24#include <linux/cpu.h>
24#include <linux/highmem.h> 25#include <linux/highmem.h>
25#include <linux/list.h> 26#include <linux/list.h>
26#include <linux/lzo.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/atomic.h> 30#include <linux/atomic.h>
31#include <linux/math64.h> 31#include <linux/math64.h>
32#include <linux/crypto.h>
33#include <linux/string.h>
32#include "tmem.h" 34#include "tmem.h"
33 35
34#include "../zram/xvmalloc.h" /* if built in drivers/staging */ 36#include "../zsmalloc/zsmalloc.h"
35 37
36#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP)) 38#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
37#error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP" 39#error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
@@ -60,7 +62,7 @@ MODULE_LICENSE("GPL");
60 62
61struct zcache_client { 63struct zcache_client {
62 struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT]; 64 struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
63 struct xv_pool *xvpool; 65 struct zs_pool *zspool;
64 bool allocated; 66 bool allocated;
65 atomic_t refcount; 67 atomic_t refcount;
66}; 68};
@@ -81,6 +83,38 @@ static inline bool is_local_client(struct zcache_client *cli)
81 return cli == &zcache_host; 83 return cli == &zcache_host;
82} 84}
83 85
86/* crypto API for zcache */
87#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
88static char zcache_comp_name[ZCACHE_COMP_NAME_SZ];
89static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms;
90
91enum comp_op {
92 ZCACHE_COMPOP_COMPRESS,
93 ZCACHE_COMPOP_DECOMPRESS
94};
95
96static inline int zcache_comp_op(enum comp_op op,
97 const u8 *src, unsigned int slen,
98 u8 *dst, unsigned int *dlen)
99{
100 struct crypto_comp *tfm;
101 int ret;
102
103 BUG_ON(!zcache_comp_pcpu_tfms);
104 tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
105 BUG_ON(!tfm);
106 switch (op) {
107 case ZCACHE_COMPOP_COMPRESS:
108 ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
109 break;
110 case ZCACHE_COMPOP_DECOMPRESS:
111 ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
112 break;
113 }
114 put_cpu();
115 return ret;
116}
117
84/********** 118/**********
85 * Compression buddies ("zbud") provides for packing two (or, possibly 119 * Compression buddies ("zbud") provides for packing two (or, possibly
86 * in the future, more) compressed ephemeral pages into a single "raw" 120 * in the future, more) compressed ephemeral pages into a single "raw"
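
The zcache_comp_op() helper introduced above pins the CPU and dispatches to that CPU's private transform; underneath it is the generic crypto compression API. A self-contained sketch of that API with error paths trimmed, using the "lzo" algorithm name as an example:

	#include <linux/crypto.h>
	#include <linux/err.h>

	static int demo_compress(const u8 *src, unsigned int slen,
				 u8 *dst, unsigned int *dlen)
	{
		struct crypto_comp *tfm;
		int ret;

		tfm = crypto_alloc_comp("lzo", 0, 0);	/* any registered ccomp alg */
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		crypto_free_comp(tfm);
		return ret;
	}
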
@@ -299,10 +333,12 @@ static void zbud_free_and_delist(struct zbud_hdr *zh)
299 struct zbud_page *zbpg = 333 struct zbud_page *zbpg =
300 container_of(zh, struct zbud_page, buddy[budnum]); 334 container_of(zh, struct zbud_page, buddy[budnum]);
301 335
336 spin_lock(&zbud_budlists_spinlock);
302 spin_lock(&zbpg->lock); 337 spin_lock(&zbpg->lock);
303 if (list_empty(&zbpg->bud_list)) { 338 if (list_empty(&zbpg->bud_list)) {
304 /* ignore zombie page... see zbud_evict_pages() */ 339 /* ignore zombie page... see zbud_evict_pages() */
305 spin_unlock(&zbpg->lock); 340 spin_unlock(&zbpg->lock);
341 spin_unlock(&zbud_budlists_spinlock);
306 return; 342 return;
307 } 343 }
308 size = zbud_free(zh); 344 size = zbud_free(zh);
@@ -310,7 +346,6 @@ static void zbud_free_and_delist(struct zbud_hdr *zh)
310 zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0]; 346 zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
311 if (zh_other->size == 0) { /* was unbuddied: unlist and free */ 347 if (zh_other->size == 0) { /* was unbuddied: unlist and free */
312 chunks = zbud_size_to_chunks(size) ; 348 chunks = zbud_size_to_chunks(size) ;
313 spin_lock(&zbud_budlists_spinlock);
314 BUG_ON(list_empty(&zbud_unbuddied[chunks].list)); 349 BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
315 list_del_init(&zbpg->bud_list); 350 list_del_init(&zbpg->bud_list);
316 zbud_unbuddied[chunks].count--; 351 zbud_unbuddied[chunks].count--;
@@ -318,7 +353,6 @@ static void zbud_free_and_delist(struct zbud_hdr *zh)
318 zbud_free_raw_page(zbpg); 353 zbud_free_raw_page(zbpg);
319 } else { /* was buddied: move remaining buddy to unbuddied list */ 354 } else { /* was buddied: move remaining buddy to unbuddied list */
320 chunks = zbud_size_to_chunks(zh_other->size) ; 355 chunks = zbud_size_to_chunks(zh_other->size) ;
321 spin_lock(&zbud_budlists_spinlock);
322 list_del_init(&zbpg->bud_list); 356 list_del_init(&zbpg->bud_list);
323 zcache_zbud_buddied_count--; 357 zcache_zbud_buddied_count--;
324 list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list); 358 list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
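
The two hunks above move the zbud_budlists_spinlock acquisition to the entry of zbud_free_and_delist(), so the list lock is now always taken before the per-page lock instead of being acquired while the page lock is already held. Schematically, the nesting the patch establishes (a sketch; the real function releases the page lock via the free path):

	spin_lock(&zbud_budlists_spinlock);	/* outer: list membership */
	spin_lock(&zbpg->lock);			/* inner: page contents */
	/* ... zbud_free(), list_del_init(), counter updates ... */
	spin_unlock(&zbpg->lock);
	spin_unlock(&zbud_budlists_spinlock);
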
@@ -407,7 +441,7 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
407{ 441{
408 struct zbud_page *zbpg; 442 struct zbud_page *zbpg;
409 unsigned budnum = zbud_budnum(zh); 443 unsigned budnum = zbud_budnum(zh);
410 size_t out_len = PAGE_SIZE; 444 unsigned int out_len = PAGE_SIZE;
411 char *to_va, *from_va; 445 char *to_va, *from_va;
412 unsigned size; 446 unsigned size;
413 int ret = 0; 447 int ret = 0;
@@ -424,8 +458,9 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
424 to_va = kmap_atomic(page, KM_USER0); 458 to_va = kmap_atomic(page, KM_USER0);
425 size = zh->size; 459 size = zh->size;
426 from_va = zbud_data(zh, size); 460 from_va = zbud_data(zh, size);
427 ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len); 461 ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
428 BUG_ON(ret != LZO_E_OK); 462 to_va, &out_len);
463 BUG_ON(ret);
429 BUG_ON(out_len != PAGE_SIZE); 464 BUG_ON(out_len != PAGE_SIZE);
430 kunmap_atomic(to_va, KM_USER0); 465 kunmap_atomic(to_va, KM_USER0);
431out: 466out:
@@ -622,8 +657,8 @@ static int zbud_show_cumul_chunk_counts(char *buf)
622#endif 657#endif
623 658
624/********** 659/**********
625 * This "zv" PAM implementation combines the TLSF-based xvMalloc 660 * This "zv" PAM implementation combines the slab-based zsmalloc
626 * with lzo1x compression to maximize the amount of data that can 661 * with the crypto compression API to maximize the amount of data that can
627 * be packed into a physical page. 662 * be packed into a physical page.
628 * 663 *
629 * Zv represents a PAM page with the index and object (plus a "size" value 664 * Zv represents a PAM page with the index and object (plus a "size" value
@@ -636,6 +671,7 @@ struct zv_hdr {
636 uint32_t pool_id; 671 uint32_t pool_id;
637 struct tmem_oid oid; 672 struct tmem_oid oid;
638 uint32_t index; 673 uint32_t index;
674 size_t size;
639 DECL_SENTINEL 675 DECL_SENTINEL
640}; 676};
641 677
@@ -657,72 +693,72 @@ static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
657static atomic_t zv_curr_dist_counts[NCHUNKS]; 693static atomic_t zv_curr_dist_counts[NCHUNKS];
658static atomic_t zv_cumul_dist_counts[NCHUNKS]; 694static atomic_t zv_cumul_dist_counts[NCHUNKS];
659 695
660static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id, 696static struct zv_hdr *zv_create(struct zs_pool *pool, uint32_t pool_id,
661 struct tmem_oid *oid, uint32_t index, 697 struct tmem_oid *oid, uint32_t index,
662 void *cdata, unsigned clen) 698 void *cdata, unsigned clen)
663{ 699{
664 struct page *page; 700 struct zv_hdr *zv;
665 struct zv_hdr *zv = NULL; 701 u32 size = clen + sizeof(struct zv_hdr);
666 uint32_t offset; 702 int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
667 int alloc_size = clen + sizeof(struct zv_hdr); 703 void *handle = NULL;
668 int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
669 int ret;
670 704
671 BUG_ON(!irqs_disabled()); 705 BUG_ON(!irqs_disabled());
672 BUG_ON(chunks >= NCHUNKS); 706 BUG_ON(chunks >= NCHUNKS);
673 ret = xv_malloc(xvpool, alloc_size, 707 handle = zs_malloc(pool, size);
674 &page, &offset, ZCACHE_GFP_MASK); 708 if (!handle)
675 if (unlikely(ret))
676 goto out; 709 goto out;
677 atomic_inc(&zv_curr_dist_counts[chunks]); 710 atomic_inc(&zv_curr_dist_counts[chunks]);
678 atomic_inc(&zv_cumul_dist_counts[chunks]); 711 atomic_inc(&zv_cumul_dist_counts[chunks]);
679 zv = kmap_atomic(page, KM_USER0) + offset; 712 zv = zs_map_object(pool, handle);
680 zv->index = index; 713 zv->index = index;
681 zv->oid = *oid; 714 zv->oid = *oid;
682 zv->pool_id = pool_id; 715 zv->pool_id = pool_id;
716 zv->size = clen;
683 SET_SENTINEL(zv, ZVH); 717 SET_SENTINEL(zv, ZVH);
684 memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen); 718 memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
685 kunmap_atomic(zv, KM_USER0); 719 zs_unmap_object(pool, handle);
686out: 720out:
687 return zv; 721 return handle;
688} 722}
689 723
690static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv) 724static void zv_free(struct zs_pool *pool, void *handle)
691{ 725{
692 unsigned long flags; 726 unsigned long flags;
693 struct page *page; 727 struct zv_hdr *zv;
694 uint32_t offset; 728 uint16_t size;
695 uint16_t size = xv_get_object_size(zv); 729 int chunks;
696 int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
697 730
731 zv = zs_map_object(pool, handle);
698 ASSERT_SENTINEL(zv, ZVH); 732 ASSERT_SENTINEL(zv, ZVH);
733 size = zv->size + sizeof(struct zv_hdr);
734 INVERT_SENTINEL(zv, ZVH);
735 zs_unmap_object(pool, handle);
736
737 chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
699 BUG_ON(chunks >= NCHUNKS); 738 BUG_ON(chunks >= NCHUNKS);
700 atomic_dec(&zv_curr_dist_counts[chunks]); 739 atomic_dec(&zv_curr_dist_counts[chunks]);
701 size -= sizeof(*zv); 740
702 BUG_ON(size == 0);
703 INVERT_SENTINEL(zv, ZVH);
704 page = virt_to_page(zv);
705 offset = (unsigned long)zv & ~PAGE_MASK;
706 local_irq_save(flags); 741 local_irq_save(flags);
707 xv_free(xvpool, page, offset); 742 zs_free(pool, handle);
708 local_irq_restore(flags); 743 local_irq_restore(flags);
709} 744}
710 745
711static void zv_decompress(struct page *page, struct zv_hdr *zv) 746static void zv_decompress(struct page *page, void *handle)
712{ 747{
713 size_t clen = PAGE_SIZE; 748 unsigned int clen = PAGE_SIZE;
714 char *to_va; 749 char *to_va;
715 unsigned size;
716 int ret; 750 int ret;
751 struct zv_hdr *zv;
717 752
753 zv = zs_map_object(zcache_host.zspool, handle);
754 BUG_ON(zv->size == 0);
718 ASSERT_SENTINEL(zv, ZVH); 755 ASSERT_SENTINEL(zv, ZVH);
719 size = xv_get_object_size(zv) - sizeof(*zv);
720 BUG_ON(size == 0);
721 to_va = kmap_atomic(page, KM_USER0); 756 to_va = kmap_atomic(page, KM_USER0);
722 ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv), 757 ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
723 size, to_va, &clen); 758 zv->size, to_va, &clen);
724 kunmap_atomic(to_va, KM_USER0); 759 kunmap_atomic(to_va, KM_USER0);
725 BUG_ON(ret != LZO_E_OK); 760 zs_unmap_object(zcache_host.zspool, handle);
761 BUG_ON(ret);
726 BUG_ON(clen != PAGE_SIZE); 762 BUG_ON(clen != PAGE_SIZE);
727} 763}
728 764
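
The zv_create()/zv_free()/zv_decompress() rewrites all follow zsmalloc's handle discipline: zs_malloc() hands back an opaque handle rather than a kernel pointer, and the object may only be dereferenced between zs_map_object() and zs_unmap_object(). The shape of the pattern, as a sketch using the signatures as they appear in this patch:

	void *handle = zs_malloc(pool, size);	/* opaque handle, not a pointer */
	if (handle) {
		struct zv_hdr *zv = zs_map_object(pool, handle);
		zv->size = clen;		/* fill header, then payload */
		memcpy(zv + 1, cdata, clen);	/* zv + 1 == just past the header */
		zs_unmap_object(pool, handle);	/* mapping must stay short-lived */
	}
	/* ... later, when the object is no longer needed: */
	zs_free(pool, handle);
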
@@ -948,8 +984,8 @@ int zcache_new_client(uint16_t cli_id)
948 goto out; 984 goto out;
949 cli->allocated = 1; 985 cli->allocated = 1;
950#ifdef CONFIG_FRONTSWAP 986#ifdef CONFIG_FRONTSWAP
951 cli->xvpool = xv_create_pool(); 987 cli->zspool = zs_create_pool("zcache", ZCACHE_GFP_MASK);
952 if (cli->xvpool == NULL) 988 if (cli->zspool == NULL)
953 goto out; 989 goto out;
954#endif 990#endif
955 ret = 0; 991 ret = 0;
@@ -1132,14 +1168,14 @@ static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
1132static unsigned long zcache_curr_pers_pampd_count_max; 1168static unsigned long zcache_curr_pers_pampd_count_max;
1133 1169
1134/* forward reference */ 1170/* forward reference */
1135static int zcache_compress(struct page *from, void **out_va, size_t *out_len); 1171static int zcache_compress(struct page *from, void **out_va, unsigned *out_len);
1136 1172
1137static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph, 1173static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
1138 struct tmem_pool *pool, struct tmem_oid *oid, 1174 struct tmem_pool *pool, struct tmem_oid *oid,
1139 uint32_t index) 1175 uint32_t index)
1140{ 1176{
1141 void *pampd = NULL, *cdata; 1177 void *pampd = NULL, *cdata;
1142 size_t clen; 1178 unsigned clen;
1143 int ret; 1179 int ret;
1144 unsigned long count; 1180 unsigned long count;
1145 struct page *page = (struct page *)(data); 1181 struct page *page = (struct page *)(data);
@@ -1180,7 +1216,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
1180 } 1216 }
1181 /* reject if mean compression is too poor */ 1217 /* reject if mean compression is too poor */
1182 if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) { 1218 if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
1183 total_zsize = xv_get_total_size_bytes(cli->xvpool); 1219 total_zsize = zs_get_total_size_bytes(cli->zspool);
1184 zv_mean_zsize = div_u64(total_zsize, 1220 zv_mean_zsize = div_u64(total_zsize,
1185 curr_pers_pampd_count); 1221 curr_pers_pampd_count);
1186 if (zv_mean_zsize > zv_max_mean_zsize) { 1222 if (zv_mean_zsize > zv_max_mean_zsize) {
@@ -1188,7 +1224,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
1188 goto out; 1224 goto out;
1189 } 1225 }
1190 } 1226 }
1191 pampd = (void *)zv_create(cli->xvpool, pool->pool_id, 1227 pampd = (void *)zv_create(cli->zspool, pool->pool_id,
1192 oid, index, cdata, clen); 1228 oid, index, cdata, clen);
1193 if (pampd == NULL) 1229 if (pampd == NULL)
1194 goto out; 1230 goto out;
@@ -1246,7 +1282,7 @@ static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
1246 atomic_dec(&zcache_curr_eph_pampd_count); 1282 atomic_dec(&zcache_curr_eph_pampd_count);
1247 BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0); 1283 BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
1248 } else { 1284 } else {
1249 zv_free(cli->xvpool, (struct zv_hdr *)pampd); 1285 zv_free(cli->zspool, pampd);
1250 atomic_dec(&zcache_curr_pers_pampd_count); 1286 atomic_dec(&zcache_curr_pers_pampd_count);
1251 BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0); 1287 BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
1252 } 1288 }
@@ -1285,25 +1321,24 @@ static struct tmem_pamops zcache_pamops = {
1285 * zcache compression/decompression and related per-cpu stuff 1321 * zcache compression/decompression and related per-cpu stuff
1286 */ 1322 */
1287 1323
1288#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
1289#define LZO_DSTMEM_PAGE_ORDER 1
1290static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
1291static DEFINE_PER_CPU(unsigned char *, zcache_dstmem); 1324static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
1325#define ZCACHE_DSTMEM_ORDER 1
1292 1326
1293static int zcache_compress(struct page *from, void **out_va, size_t *out_len) 1327static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
1294{ 1328{
1295 int ret = 0; 1329 int ret = 0;
1296 unsigned char *dmem = __get_cpu_var(zcache_dstmem); 1330 unsigned char *dmem = __get_cpu_var(zcache_dstmem);
1297 unsigned char *wmem = __get_cpu_var(zcache_workmem);
1298 char *from_va; 1331 char *from_va;
1299 1332
1300 BUG_ON(!irqs_disabled()); 1333 BUG_ON(!irqs_disabled());
1301 if (unlikely(dmem == NULL || wmem == NULL)) 1334 if (unlikely(dmem == NULL))
1302 goto out; /* no buffer, so can't compress */ 1335 goto out; /* no buffer or no compressor so can't compress */
1336 *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
1303 from_va = kmap_atomic(from, KM_USER0); 1337 from_va = kmap_atomic(from, KM_USER0);
1304 mb(); 1338 mb();
1305 ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem); 1339 ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
1306 BUG_ON(ret != LZO_E_OK); 1340 out_len);
1341 BUG_ON(ret);
1307 *out_va = dmem; 1342 *out_va = dmem;
1308 kunmap_atomic(from_va, KM_USER0); 1343 kunmap_atomic(from_va, KM_USER0);
1309 ret = 1; 1344 ret = 1;
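
One subtlety in the rewritten zcache_compress(): the crypto API treats *out_len as in/out, so it must be primed with the destination buffer's capacity before the call and holds the compressed length afterwards, which is why the function now sets it from ZCACHE_DSTMEM_ORDER. In isolation:

	unsigned int dlen = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;	/* in: dst capacity */
	ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE,
			     dmem, &dlen);
	/* out: on success, dlen is the compressed size */
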
@@ -1311,29 +1346,48 @@ out:
1311 return ret; 1346 return ret;
1312} 1347}
1313 1348
1349static int zcache_comp_cpu_up(int cpu)
1350{
1351 struct crypto_comp *tfm;
1352
1353 tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
1354 if (IS_ERR(tfm))
1355 return NOTIFY_BAD;
1356 *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
1357 return NOTIFY_OK;
1358}
1359
1360static void zcache_comp_cpu_down(int cpu)
1361{
1362 struct crypto_comp *tfm;
1363
1364 tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
1365 crypto_free_comp(tfm);
1366 *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
1367}
1314 1368
1315static int zcache_cpu_notifier(struct notifier_block *nb, 1369static int zcache_cpu_notifier(struct notifier_block *nb,
1316 unsigned long action, void *pcpu) 1370 unsigned long action, void *pcpu)
1317{ 1371{
1318 int cpu = (long)pcpu; 1372 int ret, cpu = (long)pcpu;
1319 struct zcache_preload *kp; 1373 struct zcache_preload *kp;
1320 1374
1321 switch (action) { 1375 switch (action) {
1322 case CPU_UP_PREPARE: 1376 case CPU_UP_PREPARE:
1377 ret = zcache_comp_cpu_up(cpu);
1378 if (ret != NOTIFY_OK) {
1379 pr_err("zcache: can't allocate compressor transform\n");
1380 return ret;
1381 }
1323 per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages( 1382 per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
1324 GFP_KERNEL | __GFP_REPEAT, 1383 GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
1325 LZO_DSTMEM_PAGE_ORDER),
1326 per_cpu(zcache_workmem, cpu) =
1327 kzalloc(LZO1X_MEM_COMPRESS,
1328 GFP_KERNEL | __GFP_REPEAT);
1329 break; 1384 break;
1330 case CPU_DEAD: 1385 case CPU_DEAD:
1331 case CPU_UP_CANCELED: 1386 case CPU_UP_CANCELED:
1387 zcache_comp_cpu_down(cpu);
1332 free_pages((unsigned long)per_cpu(zcache_dstmem, cpu), 1388 free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
1333 LZO_DSTMEM_PAGE_ORDER); 1389 ZCACHE_DSTMEM_ORDER);
1334 per_cpu(zcache_dstmem, cpu) = NULL; 1390 per_cpu(zcache_dstmem, cpu) = NULL;
1335 kfree(per_cpu(zcache_workmem, cpu));
1336 per_cpu(zcache_workmem, cpu) = NULL;
1337 kp = &per_cpu(zcache_preloads, cpu); 1391 kp = &per_cpu(zcache_preloads, cpu);
1338 while (kp->nr) { 1392 while (kp->nr) {
1339 kmem_cache_free(zcache_objnode_cache, 1393 kmem_cache_free(zcache_objnode_cache,
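
A compression transform carries private scratch state (for LZO, the removed per-CPU zcache_workmem used to serve that role), so a single tfm cannot safely be shared across CPUs without locking. The notifier above therefore gives each CPU its own transform as it comes online and frees it on the way down; the consumer side then only has to pin the CPU. A sketch of that consumer pattern:

	struct crypto_comp *tfm;

	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());	/* this CPU's tfm */
	ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
	put_cpu();						/* re-enable migration */
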
@@ -1918,6 +1972,44 @@ static int __init no_frontswap(char *s)
1918 1972
1919__setup("nofrontswap", no_frontswap); 1973__setup("nofrontswap", no_frontswap);
1920 1974
1975static int __init enable_zcache_compressor(char *s)
1976{
1977 strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
1978 zcache_enabled = 1;
1979 return 1;
1980}
1981__setup("zcache=", enable_zcache_compressor);
1982
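
With the __setup hook in place the compressor becomes a boot-time choice; zcache_comp_init() below validates the name with crypto_has_comp() and falls back to "lzo" when it is absent. Hypothetical usage, assuming the deflate ccomp algorithm is built into the kernel:

	/* On the kernel command line (hypothetical):
	 *
	 *	zcache=deflate		enable zcache, compress with "deflate"
	 *	zcache=lzo		enable zcache with the default compressor
	 *
	 * An unsupported name is reported and replaced by "lzo".
	 */
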
1983
1984static int zcache_comp_init(void)
1985{
1986 int ret = 0;
1987
1988 /* check crypto algorithm */
1989 if (*zcache_comp_name != '\0') {
1990 ret = crypto_has_comp(zcache_comp_name, 0, 0);
1991 if (!ret)
1992 pr_info("zcache: %s not supported\n",
1993 zcache_comp_name);
1994 }
1995 if (!ret)
1996 strcpy(zcache_comp_name, "lzo");
1997 ret = crypto_has_comp(zcache_comp_name, 0, 0);
1998 if (!ret) {
1999 ret = 1;
2000 goto out;
2001 }
2002 pr_info("zcache: using %s compressor\n", zcache_comp_name);
2003
2004 /* alloc percpu transforms */
2005 ret = 0;
2006 zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
2007 if (!zcache_comp_pcpu_tfms)
2008 ret = 1;
2009out:
2010 return ret;
2011}
2012
1921static int __init zcache_init(void) 2013static int __init zcache_init(void)
1922{ 2014{
1923 int ret = 0; 2015 int ret = 0;
@@ -1940,6 +2032,11 @@ static int __init zcache_init(void)
1940 pr_err("zcache: can't register cpu notifier\n"); 2032 pr_err("zcache: can't register cpu notifier\n");
1941 goto out; 2033 goto out;
1942 } 2034 }
2035 ret = zcache_comp_init();
2036 if (ret) {
2037 pr_err("zcache: compressor initialization failed\n");
2038 goto out;
2039 }
1943 for_each_online_cpu(cpu) { 2040 for_each_online_cpu(cpu) {
1944 void *pcpu = (void *)(long)cpu; 2041 void *pcpu = (void *)(long)cpu;
1945 zcache_cpu_notifier(&zcache_cpu_notifier_block, 2042 zcache_cpu_notifier(&zcache_cpu_notifier_block,
@@ -1975,7 +2072,7 @@ static int __init zcache_init(void)
1975 2072
1976 old_ops = zcache_frontswap_register_ops(); 2073 old_ops = zcache_frontswap_register_ops();
1977 pr_info("zcache: frontswap enabled using kernel " 2074 pr_info("zcache: frontswap enabled using kernel "
1978 "transcendent memory and xvmalloc\n"); 2075 "transcendent memory and zsmalloc\n");
1979 if (old_ops.init != NULL) 2076 if (old_ops.init != NULL)
1980 pr_warning("zcache: frontswap_ops overridden"); 2077 pr_warning("zcache: frontswap_ops overridden");
1981 } 2078 }
diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig
index 3bec4dba3fe5..9d11a4cb99b7 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/staging/zram/Kconfig
@@ -1,11 +1,9 @@
1config XVMALLOC
2 bool
3 default n
4
5config ZRAM 1config ZRAM
6 tristate "Compressed RAM block device support" 2 tristate "Compressed RAM block device support"
7 depends on BLOCK && SYSFS 3 # X86 dependency is because zsmalloc uses non-portable pte/tlb
8 select XVMALLOC 4 # functions
5 depends on BLOCK && SYSFS && X86
6 select ZSMALLOC
9 select LZO_COMPRESS 7 select LZO_COMPRESS
10 select LZO_DECOMPRESS 8 select LZO_DECOMPRESS
11 default n 9 default n
diff --git a/drivers/staging/zram/Makefile b/drivers/staging/zram/Makefile
index 2a6d3213a756..7f4a3019e9c4 100644
--- a/drivers/staging/zram/Makefile
+++ b/drivers/staging/zram/Makefile
@@ -1,4 +1,3 @@
1zram-y := zram_drv.o zram_sysfs.o 1zram-y := zram_drv.o zram_sysfs.o
2 2
3obj-$(CONFIG_ZRAM) += zram.o 3obj-$(CONFIG_ZRAM) += zram.o
4obj-$(CONFIG_XVMALLOC) += xvmalloc.o
\ No newline at end of file
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 2a2a92d389e6..7f138196b3c9 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -40,7 +40,7 @@ static int zram_major;
40struct zram *zram_devices; 40struct zram *zram_devices;
41 41
42/* Module params (documentation at end) */ 42/* Module params (documentation at end) */
43unsigned int zram_num_devices; 43static unsigned int num_devices;
44 44
45static void zram_stat_inc(u32 *v) 45static void zram_stat_inc(u32 *v)
46{ 46{
@@ -135,13 +135,9 @@ static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
135 135
136static void zram_free_page(struct zram *zram, size_t index) 136static void zram_free_page(struct zram *zram, size_t index)
137{ 137{
138 u32 clen; 138 void *handle = zram->table[index].handle;
139 void *obj;
140 139
141 struct page *page = zram->table[index].page; 140 if (unlikely(!handle)) {
142 u32 offset = zram->table[index].offset;
143
144 if (unlikely(!page)) {
145 /* 141 /*
146 * No memory is allocated for zero filled pages. 142 * No memory is allocated for zero filled pages.
147 * Simply clear zero page flag. 143 * Simply clear zero page flag.
@@ -154,27 +150,24 @@ static void zram_free_page(struct zram *zram, size_t index)
154 } 150 }
155 151
156 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) { 152 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
157 clen = PAGE_SIZE; 153 __free_page(handle);
158 __free_page(page);
159 zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED); 154 zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
160 zram_stat_dec(&zram->stats.pages_expand); 155 zram_stat_dec(&zram->stats.pages_expand);
161 goto out; 156 goto out;
162 } 157 }
163 158
164 obj = kmap_atomic(page, KM_USER0) + offset; 159 zs_free(zram->mem_pool, handle);
165 clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
166 kunmap_atomic(obj, KM_USER0);
167 160
168 xv_free(zram->mem_pool, page, offset); 161 if (zram->table[index].size <= PAGE_SIZE / 2)
169 if (clen <= PAGE_SIZE / 2)
170 zram_stat_dec(&zram->stats.good_compress); 162 zram_stat_dec(&zram->stats.good_compress);
171 163
172out: 164out:
173 zram_stat64_sub(zram, &zram->stats.compr_size, clen); 165 zram_stat64_sub(zram, &zram->stats.compr_size,
166 zram->table[index].size);
174 zram_stat_dec(&zram->stats.pages_stored); 167 zram_stat_dec(&zram->stats.pages_stored);
175 168
176 zram->table[index].page = NULL; 169 zram->table[index].handle = NULL;
177 zram->table[index].offset = 0; 170 zram->table[index].size = 0;
178} 171}
179 172
180static void handle_zero_page(struct bio_vec *bvec) 173static void handle_zero_page(struct bio_vec *bvec)
@@ -196,7 +189,7 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
196 unsigned char *user_mem, *cmem; 189 unsigned char *user_mem, *cmem;
197 190
198 user_mem = kmap_atomic(page, KM_USER0); 191 user_mem = kmap_atomic(page, KM_USER0);
199 cmem = kmap_atomic(zram->table[index].page, KM_USER1); 192 cmem = kmap_atomic(zram->table[index].handle, KM_USER1);
200 193
201 memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len); 194 memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
202 kunmap_atomic(cmem, KM_USER1); 195 kunmap_atomic(cmem, KM_USER1);
@@ -227,7 +220,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
227 } 220 }
228 221
229 /* Requested page is not present in compressed area */ 222 /* Requested page is not present in compressed area */
230 if (unlikely(!zram->table[index].page)) { 223 if (unlikely(!zram->table[index].handle)) {
231 pr_debug("Read before write: sector=%lu, size=%u", 224 pr_debug("Read before write: sector=%lu, size=%u",
232 (ulong)(bio->bi_sector), bio->bi_size); 225 (ulong)(bio->bi_sector), bio->bi_size);
233 handle_zero_page(bvec); 226 handle_zero_page(bvec);
@@ -254,11 +247,10 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
254 uncmem = user_mem; 247 uncmem = user_mem;
255 clen = PAGE_SIZE; 248 clen = PAGE_SIZE;
256 249
257 cmem = kmap_atomic(zram->table[index].page, KM_USER1) + 250 cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
258 zram->table[index].offset;
259 251
260 ret = lzo1x_decompress_safe(cmem + sizeof(*zheader), 252 ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
261 xv_get_object_size(cmem) - sizeof(*zheader), 253 zram->table[index].size,
262 uncmem, &clen); 254 uncmem, &clen);
263 255
264 if (is_partial_io(bvec)) { 256 if (is_partial_io(bvec)) {
@@ -267,7 +259,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
267 kfree(uncmem); 259 kfree(uncmem);
268 } 260 }
269 261
270 kunmap_atomic(cmem, KM_USER1); 262 zs_unmap_object(zram->mem_pool, zram->table[index].handle);
271 kunmap_atomic(user_mem, KM_USER0); 263 kunmap_atomic(user_mem, KM_USER0);
272 264
273 /* Should NEVER happen. Return bio error if it does. */ 265 /* Should NEVER happen. Return bio error if it does. */
@@ -290,13 +282,12 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
290 unsigned char *cmem; 282 unsigned char *cmem;
291 283
292 if (zram_test_flag(zram, index, ZRAM_ZERO) || 284 if (zram_test_flag(zram, index, ZRAM_ZERO) ||
293 !zram->table[index].page) { 285 !zram->table[index].handle) {
294 memset(mem, 0, PAGE_SIZE); 286 memset(mem, 0, PAGE_SIZE);
295 return 0; 287 return 0;
296 } 288 }
297 289
298 cmem = kmap_atomic(zram->table[index].page, KM_USER0) + 290 cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
299 zram->table[index].offset;
300 291
301 /* Page is stored uncompressed since it's incompressible */ 292 /* Page is stored uncompressed since it's incompressible */
302 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) { 293 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
@@ -306,9 +297,9 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
306 } 297 }
307 298
308 ret = lzo1x_decompress_safe(cmem + sizeof(*zheader), 299 ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
309 xv_get_object_size(cmem) - sizeof(*zheader), 300 zram->table[index].size,
310 mem, &clen); 301 mem, &clen);
311 kunmap_atomic(cmem, KM_USER0); 302 zs_unmap_object(zram->mem_pool, zram->table[index].handle);
312 303
313 /* Should NEVER happen. Return bio error if it does. */ 304 /* Should NEVER happen. Return bio error if it does. */
314 if (unlikely(ret != LZO_E_OK)) { 305 if (unlikely(ret != LZO_E_OK)) {
@@ -326,6 +317,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
326 int ret; 317 int ret;
327 u32 store_offset; 318 u32 store_offset;
328 size_t clen; 319 size_t clen;
320 void *handle;
329 struct zobj_header *zheader; 321 struct zobj_header *zheader;
330 struct page *page, *page_store; 322 struct page *page, *page_store;
331 unsigned char *user_mem, *cmem, *src, *uncmem = NULL; 323 unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
@@ -355,7 +347,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
355 * System overwrites unused sectors. Free memory associated 347 * System overwrites unused sectors. Free memory associated
356 * with this sector now. 348 * with this sector now.
357 */ 349 */
358 if (zram->table[index].page || 350 if (zram->table[index].handle ||
359 zram_test_flag(zram, index, ZRAM_ZERO)) 351 zram_test_flag(zram, index, ZRAM_ZERO))
360 zram_free_page(zram, index); 352 zram_free_page(zram, index);
361 353
@@ -407,26 +399,22 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
407 store_offset = 0; 399 store_offset = 0;
408 zram_set_flag(zram, index, ZRAM_UNCOMPRESSED); 400 zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
409 zram_stat_inc(&zram->stats.pages_expand); 401 zram_stat_inc(&zram->stats.pages_expand);
410 zram->table[index].page = page_store; 402 handle = page_store;
411 src = kmap_atomic(page, KM_USER0); 403 src = kmap_atomic(page, KM_USER0);
404 cmem = kmap_atomic(page_store, KM_USER1);
412 goto memstore; 405 goto memstore;
413 } 406 }
414 407
415 if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader), 408 handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
416 &zram->table[index].page, &store_offset, 409 if (!handle) {
417 GFP_NOIO | __GFP_HIGHMEM)) {
418 pr_info("Error allocating memory for compressed " 410 pr_info("Error allocating memory for compressed "
419 "page: %u, size=%zu\n", index, clen); 411 "page: %u, size=%zu\n", index, clen);
420 ret = -ENOMEM; 412 ret = -ENOMEM;
421 goto out; 413 goto out;
422 } 414 }
415 cmem = zs_map_object(zram->mem_pool, handle);
423 416
424memstore: 417memstore:
425 zram->table[index].offset = store_offset;
426
427 cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
428 zram->table[index].offset;
429
430#if 0 418#if 0
431 /* Back-reference needed for memory defragmentation */ 419 /* Back-reference needed for memory defragmentation */
432 if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) { 420 if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
@@ -438,9 +426,15 @@ memstore:
438 426
439 memcpy(cmem, src, clen); 427 memcpy(cmem, src, clen);
440 428
441 kunmap_atomic(cmem, KM_USER1); 429 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
442 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) 430 kunmap_atomic(cmem, KM_USER1);
443 kunmap_atomic(src, KM_USER0); 431 kunmap_atomic(src, KM_USER0);
432 } else {
433 zs_unmap_object(zram->mem_pool, handle);
434 }
435
436 zram->table[index].handle = handle;
437 zram->table[index].size = clen;
444 438
445 /* Update stats */ 439 /* Update stats */
446 zram_stat64_add(zram, &zram->stats.compr_size, clen); 440 zram_stat64_add(zram, &zram->stats.compr_size, clen);
@@ -598,25 +592,20 @@ void __zram_reset_device(struct zram *zram)
598 592
599 /* Free all pages that are still in this zram device */ 593 /* Free all pages that are still in this zram device */
600 for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) { 594 for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
601 struct page *page; 595 void *handle = zram->table[index].handle;
602 u16 offset; 596 if (!handle)
603
604 page = zram->table[index].page;
605 offset = zram->table[index].offset;
606
607 if (!page)
608 continue; 597 continue;
609 598
610 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) 599 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
611 __free_page(page); 600 __free_page(handle);
612 else 601 else
613 xv_free(zram->mem_pool, page, offset); 602 zs_free(zram->mem_pool, handle);
614 } 603 }
615 604
616 vfree(zram->table); 605 vfree(zram->table);
617 zram->table = NULL; 606 zram->table = NULL;
618 607
619 xv_destroy_pool(zram->mem_pool); 608 zs_destroy_pool(zram->mem_pool);
620 zram->mem_pool = NULL; 609 zram->mem_pool = NULL;
621 610
622 /* Reset stats */ 611 /* Reset stats */
@@ -674,7 +663,7 @@ int zram_init_device(struct zram *zram)
674 /* zram devices sort of resembles non-rotational disks */ 663 /* zram devices sort of resembles non-rotational disks */
675 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); 664 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
676 665
677 zram->mem_pool = xv_create_pool(); 666 zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
678 if (!zram->mem_pool) { 667 if (!zram->mem_pool) {
679 pr_err("Error creating memory pool\n"); 668 pr_err("Error creating memory pool\n");
680 ret = -ENOMEM; 669 ret = -ENOMEM;
@@ -790,13 +779,18 @@ static void destroy_device(struct zram *zram)
790 blk_cleanup_queue(zram->queue); 779 blk_cleanup_queue(zram->queue);
791} 780}
792 781
782unsigned int zram_get_num_devices(void)
783{
784 return num_devices;
785}
786
793static int __init zram_init(void) 787static int __init zram_init(void)
794{ 788{
795 int ret, dev_id; 789 int ret, dev_id;
796 790
797 if (zram_num_devices > max_num_devices) { 791 if (num_devices > max_num_devices) {
798 pr_warning("Invalid value for num_devices: %u\n", 792 pr_warning("Invalid value for num_devices: %u\n",
799 zram_num_devices); 793 num_devices);
800 ret = -EINVAL; 794 ret = -EINVAL;
801 goto out; 795 goto out;
802 } 796 }
@@ -808,20 +802,20 @@ static int __init zram_init(void)
808 goto out; 802 goto out;
809 } 803 }
810 804
811 if (!zram_num_devices) { 805 if (!num_devices) {
812 pr_info("num_devices not specified. Using default: 1\n"); 806 pr_info("num_devices not specified. Using default: 1\n");
813 zram_num_devices = 1; 807 num_devices = 1;
814 } 808 }
815 809
816 /* Allocate the device array and initialize each one */ 810 /* Allocate the device array and initialize each one */
817 pr_info("Creating %u devices ...\n", zram_num_devices); 811 pr_info("Creating %u devices ...\n", num_devices);
818 zram_devices = kzalloc(zram_num_devices * sizeof(struct zram), GFP_KERNEL); 812 zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
819 if (!zram_devices) { 813 if (!zram_devices) {
820 ret = -ENOMEM; 814 ret = -ENOMEM;
821 goto unregister; 815 goto unregister;
822 } 816 }
823 817
824 for (dev_id = 0; dev_id < zram_num_devices; dev_id++) { 818 for (dev_id = 0; dev_id < num_devices; dev_id++) {
825 ret = create_device(&zram_devices[dev_id], dev_id); 819 ret = create_device(&zram_devices[dev_id], dev_id);
826 if (ret) 820 if (ret)
827 goto free_devices; 821 goto free_devices;
@@ -844,7 +838,7 @@ static void __exit zram_exit(void)
844 int i; 838 int i;
845 struct zram *zram; 839 struct zram *zram;
846 840
847 for (i = 0; i < zram_num_devices; i++) { 841 for (i = 0; i < num_devices; i++) {
848 zram = &zram_devices[i]; 842 zram = &zram_devices[i];
849 843
850 destroy_device(zram); 844 destroy_device(zram);
@@ -858,8 +852,8 @@ static void __exit zram_exit(void)
858 pr_debug("Cleanup done!\n"); 852 pr_debug("Cleanup done!\n");
859} 853}
860 854
861module_param(zram_num_devices, uint, 0); 855module_param(num_devices, uint, 0);
862MODULE_PARM_DESC(zram_num_devices, "Number of zram devices"); 856MODULE_PARM_DESC(num_devices, "Number of zram devices");
863 857
864module_init(zram_init); 858module_init(zram_init);
865module_exit(zram_exit); 859module_exit(zram_exit);
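
The pattern behind the zram_drv.c hunks above: each table entry now stores
an opaque zsmalloc handle plus the stored object size, so accesses are
bracketed by zs_map_object()/zs_unmap_object() instead of kmap_atomic() on
a page/offset pair. A hedged sketch of a read helper in that style
(illustrative, not code from the patch; the mapping holds a per-cpu area,
so the caller must not sleep between map and unmap):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include "../zsmalloc/zsmalloc.h"

/* Copy len bytes out of a zsmalloc-backed object identified by handle. */
static int copy_from_handle(struct zs_pool *pool, void *handle,
			    void *dst, size_t len)
{
	void *src;

	if (!handle)
		return -EINVAL;			/* sector never written */

	src = zs_map_object(pool, handle);	/* pins a per-cpu mapping */
	memcpy(dst, src, len);
	zs_unmap_object(pool, handle);		/* releases it */
	return 0;
}

Note that the module parameter is renamed as well: module_param() keys on
the variable name, so the device count is now passed as num_devices=N
rather than zram_num_devices=N when loading the module.
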
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index e5cd2469b6a0..fbe8ac98704c 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -18,7 +18,7 @@
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20 20
21#include "xvmalloc.h" 21#include "../zsmalloc/zsmalloc.h"
22 22
23/* 23/*
24 * Some arbitrary value. This is just to catch 24 * Some arbitrary value. This is just to catch
@@ -51,7 +51,7 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
51 51
52/* 52/*
53 * NOTE: max_zpage_size must be less than or equal to: 53 * NOTE: max_zpage_size must be less than or equal to:
54 * XV_MAX_ALLOC_SIZE - sizeof(struct zobj_header) 54 * ZS_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
55 * otherwise, zs_malloc() would always return failure. 55 * otherwise, zs_malloc() would always return failure.
56 */ 56 */
57 57
@@ -81,8 +81,8 @@ enum zram_pageflags {
81 81
82/* Allocated for each disk page */ 82/* Allocated for each disk page */
83struct table { 83struct table {
84 struct page *page; 84 void *handle;
85 u16 offset; 85 u16 size; /* object size (excluding header) */
86 u8 count; /* object ref count (not yet used) */ 86 u8 count; /* object ref count (not yet used) */
87 u8 flags; 87 u8 flags;
88} __attribute__((aligned(4))); 88} __attribute__((aligned(4)));
@@ -102,7 +102,7 @@ struct zram_stats {
102}; 102};
103 103
104struct zram { 104struct zram {
105 struct xv_pool *mem_pool; 105 struct zs_pool *mem_pool;
106 void *compress_workmem; 106 void *compress_workmem;
107 void *compress_buffer; 107 void *compress_buffer;
108 struct table *table; 108 struct table *table;
@@ -124,7 +124,7 @@ struct zram {
124}; 124};
125 125
126extern struct zram *zram_devices; 126extern struct zram *zram_devices;
127extern unsigned int zram_num_devices; 127unsigned int zram_get_num_devices(void);
128#ifdef CONFIG_SYSFS 128#ifdef CONFIG_SYSFS
129extern struct attribute_group zram_disk_attr_group; 129extern struct attribute_group zram_disk_attr_group;
130#endif 130#endif
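
A quick cost check on the reworked table entry, assuming a 64-bit build
with 4 KiB pages: the fields take 8 (handle) + 2 (size) + 1 (count) +
1 (flags) = 12 bytes, padded to 16 by pointer alignment. A 1 GiB disksize
therefore needs 262144 entries, roughly 4 MiB of metadata, or about 0.4%
of the device; on a 32-bit build the entry shrinks to 8 bytes.
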
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index d521122826f6..a7f377175525 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -34,7 +34,7 @@ static struct zram *dev_to_zram(struct device *dev)
34 int i; 34 int i;
35 struct zram *zram = NULL; 35 struct zram *zram = NULL;
36 36
37 for (i = 0; i < zram_num_devices; i++) { 37 for (i = 0; i < zram_get_num_devices(); i++) {
38 zram = &zram_devices[i]; 38 zram = &zram_devices[i];
39 if (disk_to_dev(zram->disk) == dev) 39 if (disk_to_dev(zram->disk) == dev)
40 break; 40 break;
@@ -187,7 +187,7 @@ static ssize_t mem_used_total_show(struct device *dev,
187 struct zram *zram = dev_to_zram(dev); 187 struct zram *zram = dev_to_zram(dev);
188 188
189 if (zram->init_done) { 189 if (zram->init_done) {
190 val = xv_get_total_size_bytes(zram->mem_pool) + 190 val = zs_get_total_size_bytes(zram->mem_pool) +
191 ((u64)(zram->stats.pages_expand) << PAGE_SHIFT); 191 ((u64)(zram->stats.pages_expand) << PAGE_SHIFT);
192 } 192 }
193 193
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
new file mode 100644
index 000000000000..a5ab7200626f
--- /dev/null
+++ b/drivers/staging/zsmalloc/Kconfig
@@ -0,0 +1,14 @@
1config ZSMALLOC
2 tristate "Memory allocator for compressed pages"
3 # X86 dependency is because of the use of __flush_tlb_one and set_pte
4 # in zsmalloc-main.c.
5 # TODO: convert these to portable functions
6 depends on X86
7 default n
8 help
9 zsmalloc is a slab-based memory allocator designed to store
10 compressed RAM pages. zsmalloc uses virtual memory mapping
11 in order to reduce fragmentation. However, this results in a
12 non-standard allocator interface where a handle, not a pointer, is
13 returned by an alloc(). This handle must be mapped in order to
14 access the allocated space.
diff --git a/drivers/staging/zsmalloc/Makefile b/drivers/staging/zsmalloc/Makefile
new file mode 100644
index 000000000000..b134848a590d
--- /dev/null
+++ b/drivers/staging/zsmalloc/Makefile
@@ -0,0 +1,3 @@
1zsmalloc-y := zsmalloc-main.o
2
3obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
new file mode 100644
index 000000000000..09caa4f2687e
--- /dev/null
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -0,0 +1,745 @@
1/*
2 * zsmalloc memory allocator
3 *
4 * Copyright (C) 2011 Nitin Gupta
5 *
6 * This code is released using a dual license strategy: BSD/GPL
7 * You can choose the license that better fits your requirements.
8 *
9 * Released under the terms of 3-clause BSD License
10 * Released under the terms of GNU General Public License Version 2.0
11 */
12
13#ifdef CONFIG_ZSMALLOC_DEBUG
14#define DEBUG
15#endif
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/bitops.h>
20#include <linux/errno.h>
21#include <linux/highmem.h>
22#include <linux/init.h>
23#include <linux/string.h>
24#include <linux/slab.h>
25#include <asm/tlbflush.h>
26#include <asm/pgtable.h>
27#include <linux/cpumask.h>
28#include <linux/cpu.h>
29#include <linux/vmalloc.h>
30
31#include "zsmalloc.h"
32#include "zsmalloc_int.h"
33
34/*
35 * A zspage's class index and fullness group
36 * are encoded in its (first)page->mapping
37 */
38#define CLASS_IDX_BITS 28
39#define FULLNESS_BITS 4
40#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)
41#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1)
42
43/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
44static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
45
46static int is_first_page(struct page *page)
47{
48 return test_bit(PG_private, &page->flags);
49}
50
51static int is_last_page(struct page *page)
52{
53 return test_bit(PG_private_2, &page->flags);
54}
55
56static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
57 enum fullness_group *fullness)
58{
59 unsigned long m;
60 BUG_ON(!is_first_page(page));
61
62 m = (unsigned long)page->mapping;
63 *fullness = m & FULLNESS_MASK;
64 *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
65}
66
67static void set_zspage_mapping(struct page *page, unsigned int class_idx,
68 enum fullness_group fullness)
69{
70 unsigned long m;
71 BUG_ON(!is_first_page(page));
72
73 m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
74 (fullness & FULLNESS_MASK);
75 page->mapping = (struct address_space *)m;
76}
77
78static int get_size_class_index(int size)
79{
80 int idx = 0;
81
82 if (likely(size > ZS_MIN_ALLOC_SIZE))
83 idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
84 ZS_SIZE_CLASS_DELTA);
85
86 return idx;
87}
88
89static enum fullness_group get_fullness_group(struct page *page)
90{
91 int inuse, max_objects;
92 enum fullness_group fg;
93 BUG_ON(!is_first_page(page));
94
95 inuse = page->inuse;
96 max_objects = page->objects;
97
98 if (inuse == 0)
99 fg = ZS_EMPTY;
100 else if (inuse == max_objects)
101 fg = ZS_FULL;
102 else if (inuse <= max_objects / fullness_threshold_frac)
103 fg = ZS_ALMOST_EMPTY;
104 else
105 fg = ZS_ALMOST_FULL;
106
107 return fg;
108}
109
110static void insert_zspage(struct page *page, struct size_class *class,
111 enum fullness_group fullness)
112{
113 struct page **head;
114
115 BUG_ON(!is_first_page(page));
116
117 if (fullness >= _ZS_NR_FULLNESS_GROUPS)
118 return;
119
120 head = &class->fullness_list[fullness];
121 if (*head)
122 list_add_tail(&page->lru, &(*head)->lru);
123
124 *head = page;
125}
126
127static void remove_zspage(struct page *page, struct size_class *class,
128 enum fullness_group fullness)
129{
130 struct page **head;
131
132 BUG_ON(!is_first_page(page));
133
134 if (fullness >= _ZS_NR_FULLNESS_GROUPS)
135 return;
136
137 head = &class->fullness_list[fullness];
138 BUG_ON(!*head);
139 if (list_empty(&(*head)->lru))
140 *head = NULL;
141 else if (*head == page)
142 *head = (struct page *)list_entry((*head)->lru.next,
143 struct page, lru);
144
145 list_del_init(&page->lru);
146}
147
148static enum fullness_group fix_fullness_group(struct zs_pool *pool,
149 struct page *page)
150{
151 int class_idx;
152 struct size_class *class;
153 enum fullness_group currfg, newfg;
154
155 BUG_ON(!is_first_page(page));
156
157 get_zspage_mapping(page, &class_idx, &currfg);
158 newfg = get_fullness_group(page);
159 if (newfg == currfg)
160 goto out;
161
162 class = &pool->size_class[class_idx];
163 remove_zspage(page, class, currfg);
164 insert_zspage(page, class, newfg);
165 set_zspage_mapping(page, class_idx, newfg);
166
167out:
168 return newfg;
169}
170
171/*
172 * We have to decide on how many pages to link together
173 * to form a zspage for each size class. This is important
174 * to reduce wastage due to unusable space left at end of
175 * each zspage which is given as:
 176 * wastage = Zp % size_class
177 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
178 *
179 * For example, for size class of 3/8 * PAGE_SIZE, we should
180 * link together 3 PAGE_SIZE sized pages to form a zspage
181 * since then we can perfectly fit in 8 such objects.
182 */
183static int get_zspage_order(int class_size)
184{
185 int i, max_usedpc = 0;
186 /* zspage order which gives maximum used size per KB */
187 int max_usedpc_order = 1;
188
189 for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
190 int zspage_size;
191 int waste, usedpc;
192
193 zspage_size = i * PAGE_SIZE;
194 waste = zspage_size % class_size;
195 usedpc = (zspage_size - waste) * 100 / zspage_size;
196
197 if (usedpc > max_usedpc) {
198 max_usedpc = usedpc;
199 max_usedpc_order = i;
200 }
201 }
202
203 return max_usedpc_order;
204}
205
206/*
207 * A single 'zspage' is composed of many system pages which are
208 * linked together using fields in struct page. This function finds
209 * the first/head page, given any component page of a zspage.
210 */
211static struct page *get_first_page(struct page *page)
212{
213 if (is_first_page(page))
214 return page;
215 else
216 return page->first_page;
217}
218
219static struct page *get_next_page(struct page *page)
220{
221 struct page *next;
222
223 if (is_last_page(page))
224 next = NULL;
225 else if (is_first_page(page))
226 next = (struct page *)page->private;
227 else
228 next = list_entry(page->lru.next, struct page, lru);
229
230 return next;
231}
232
233/* Encode <page, obj_idx> as a single handle value */
234static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
235{
236 unsigned long handle;
237
238 if (!page) {
239 BUG_ON(obj_idx);
240 return NULL;
241 }
242
243 handle = page_to_pfn(page) << OBJ_INDEX_BITS;
244 handle |= (obj_idx & OBJ_INDEX_MASK);
245
246 return (void *)handle;
247}
248
249/* Decode <page, obj_idx> pair from the given object handle */
250static void obj_handle_to_location(void *handle, struct page **page,
251 unsigned long *obj_idx)
252{
253 unsigned long hval = (unsigned long)handle;
254
255 *page = pfn_to_page(hval >> OBJ_INDEX_BITS);
256 *obj_idx = hval & OBJ_INDEX_MASK;
257}
258
259static unsigned long obj_idx_to_offset(struct page *page,
260 unsigned long obj_idx, int class_size)
261{
262 unsigned long off = 0;
263
264 if (!is_first_page(page))
265 off = page->index;
266
267 return off + obj_idx * class_size;
268}
269
270static void free_zspage(struct page *first_page)
271{
272 struct page *nextp, *tmp;
273
274 BUG_ON(!is_first_page(first_page));
275 BUG_ON(first_page->inuse);
276
277 nextp = (struct page *)page_private(first_page);
278
279 clear_bit(PG_private, &first_page->flags);
280 clear_bit(PG_private_2, &first_page->flags);
281 set_page_private(first_page, 0);
282 first_page->mapping = NULL;
283 first_page->freelist = NULL;
284 reset_page_mapcount(first_page);
285 __free_page(first_page);
286
287 /* zspage with only 1 system page */
288 if (!nextp)
289 return;
290
291 list_for_each_entry_safe(nextp, tmp, &nextp->lru, lru) {
292 list_del(&nextp->lru);
293 clear_bit(PG_private_2, &nextp->flags);
294 nextp->index = 0;
295 __free_page(nextp);
296 }
297}
298
299/* Initialize a newly allocated zspage */
300static void init_zspage(struct page *first_page, struct size_class *class)
301{
302 unsigned long off = 0;
303 struct page *page = first_page;
304
305 BUG_ON(!is_first_page(first_page));
306 while (page) {
307 struct page *next_page;
308 struct link_free *link;
309 unsigned int i, objs_on_page;
310
311 /*
312 * page->index stores offset of first object starting
313 * in the page. For the first page, this is always 0,
314 * so we use first_page->index (aka ->freelist) to store
315 * head of corresponding zspage's freelist.
316 */
317 if (page != first_page)
318 page->index = off;
319
320 link = (struct link_free *)kmap_atomic(page) +
321 off / sizeof(*link);
322 objs_on_page = (PAGE_SIZE - off) / class->size;
323
324 for (i = 1; i <= objs_on_page; i++) {
325 off += class->size;
326 if (off < PAGE_SIZE) {
327 link->next = obj_location_to_handle(page, i);
328 link += class->size / sizeof(*link);
329 }
330 }
331
332 /*
333 * We now come to the last (full or partial) object on this
334 * page, which must point to the first object on the next
335 * page (if present)
336 */
337 next_page = get_next_page(page);
338 link->next = obj_location_to_handle(next_page, 0);
339 kunmap_atomic(link);
340 page = next_page;
341 off = (off + class->size) % PAGE_SIZE;
342 }
343}
344
345/*
346 * Allocate a zspage for the given size class
347 */
348static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
349{
350 int i, error;
351 struct page *first_page = NULL;
352
353 /*
354 * Allocate individual pages and link them together as:
355 * 1. first page->private = first sub-page
356 * 2. all sub-pages are linked together using page->lru
357 * 3. each sub-page is linked to the first page using page->first_page
358 *
359 * For each size class, First/Head pages are linked together using
360 * page->lru. Also, we set PG_private to identify the first page
361 * (i.e. no other sub-page has this flag set) and PG_private_2 to
362 * identify the last page.
363 */
364 error = -ENOMEM;
365 for (i = 0; i < class->zspage_order; i++) {
366 struct page *page, *prev_page;
367
368 page = alloc_page(flags);
369 if (!page)
370 goto cleanup;
371
372 INIT_LIST_HEAD(&page->lru);
373 if (i == 0) { /* first page */
374 set_bit(PG_private, &page->flags);
375 set_page_private(page, 0);
376 first_page = page;
377 first_page->inuse = 0;
378 }
379 if (i == 1)
380 first_page->private = (unsigned long)page;
381 if (i >= 1)
382 page->first_page = first_page;
383 if (i >= 2)
384 list_add(&page->lru, &prev_page->lru);
385 if (i == class->zspage_order - 1) /* last page */
386 set_bit(PG_private_2, &page->flags);
387
388 prev_page = page;
389 }
390
391 init_zspage(first_page, class);
392
393 first_page->freelist = obj_location_to_handle(first_page, 0);
394 /* Maximum number of objects we can store in this zspage */
395 first_page->objects = class->zspage_order * PAGE_SIZE / class->size;
396
397 error = 0; /* Success */
398
399cleanup:
400 if (unlikely(error) && first_page) {
401 free_zspage(first_page);
402 first_page = NULL;
403 }
404
405 return first_page;
406}
407
408static struct page *find_get_zspage(struct size_class *class)
409{
410 int i;
411 struct page *page;
412
413 for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
414 page = class->fullness_list[i];
415 if (page)
416 break;
417 }
418
419 return page;
420}
421
422
423/*
424 * If this becomes a separate module, register zs_init() with
425 * module_init(), zs_exit() with module_exit(), and remove zs_initialized.
426 */
427static int zs_initialized;
428
429static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
430 void *pcpu)
431{
432 int cpu = (long)pcpu;
433 struct mapping_area *area;
434
435 switch (action) {
436 case CPU_UP_PREPARE:
437 area = &per_cpu(zs_map_area, cpu);
438 if (area->vm)
439 break;
440 area->vm = alloc_vm_area(2 * PAGE_SIZE, area->vm_ptes);
441 if (!area->vm)
442 return notifier_from_errno(-ENOMEM);
443 break;
444 case CPU_DEAD:
445 case CPU_UP_CANCELED:
446 area = &per_cpu(zs_map_area, cpu);
447 if (area->vm)
448 free_vm_area(area->vm);
449 area->vm = NULL;
450 break;
451 }
452
453 return NOTIFY_OK;
454}
455
456static struct notifier_block zs_cpu_nb = {
457 .notifier_call = zs_cpu_notifier
458};
459
460static void zs_exit(void)
461{
462 int cpu;
463
464 for_each_online_cpu(cpu)
465 zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
466 unregister_cpu_notifier(&zs_cpu_nb);
467}
468
469static int zs_init(void)
470{
471 int cpu, ret;
472
473 register_cpu_notifier(&zs_cpu_nb);
474 for_each_online_cpu(cpu) {
475 ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
476 if (notifier_to_errno(ret))
477 goto fail;
478 }
479 return 0;
480fail:
481 zs_exit();
482 return notifier_to_errno(ret);
483}
484
485struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
486{
487 int i, error, ovhd_size;
488 struct zs_pool *pool;
489
490 if (!name)
491 return NULL;
492
493 ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
494 pool = kzalloc(ovhd_size, GFP_KERNEL);
495 if (!pool)
496 return NULL;
497
498 for (i = 0; i < ZS_SIZE_CLASSES; i++) {
499 int size;
500 struct size_class *class;
501
502 size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
503 if (size > ZS_MAX_ALLOC_SIZE)
504 size = ZS_MAX_ALLOC_SIZE;
505
506 class = &pool->size_class[i];
507 class->size = size;
508 class->index = i;
509 spin_lock_init(&class->lock);
510 class->zspage_order = get_zspage_order(size);
511
512 }
513
514 /*
515 * If this becomes a separate module, register zs_init with
516 * module_init, and remove this block
517 */
518 if (!zs_initialized) {
519 error = zs_init();
520 if (error)
521 goto cleanup;
522 zs_initialized = 1;
523 }
524
525 pool->flags = flags;
526 pool->name = name;
527
528 error = 0; /* Success */
529
530cleanup:
531 if (error) {
532 zs_destroy_pool(pool);
533 pool = NULL;
534 }
535
536 return pool;
537}
538EXPORT_SYMBOL_GPL(zs_create_pool);
539
540void zs_destroy_pool(struct zs_pool *pool)
541{
542 int i;
543
544 for (i = 0; i < ZS_SIZE_CLASSES; i++) {
545 int fg;
546 struct size_class *class = &pool->size_class[i];
547
548 for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
549 if (class->fullness_list[fg]) {
550 pr_info("Freeing non-empty class with size "
551 "%db, fullness group %d\n",
552 class->size, fg);
553 }
554 }
555 }
556 kfree(pool);
557}
558EXPORT_SYMBOL_GPL(zs_destroy_pool);
559
560/**
561 * zs_malloc - Allocate block of given size from pool.
562 * @pool: pool to allocate from
563 * @size: size of block to allocate
564 *
565 * On success, an opaque handle identifying the allocated block
566 * is returned; it is not a dereferenceable pointer and must be
567 * passed to zs_map_object() before the memory can be accessed.
568 * On failure, NULL is returned.
569 *
570 * Allocation requests with size zero or size greater than
571 * ZS_MAX_ALLOC_SIZE will fail.
572 */
573void *zs_malloc(struct zs_pool *pool, size_t size)
574{
575 void *obj;
576 struct link_free *link;
577 int class_idx;
578 struct size_class *class;
579
580 struct page *first_page, *m_page;
581 unsigned long m_objidx, m_offset;
582
583 if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
584 return NULL;
585
586 class_idx = get_size_class_index(size);
587 class = &pool->size_class[class_idx];
588 BUG_ON(class_idx != class->index);
589
590 spin_lock(&class->lock);
591 first_page = find_get_zspage(class);
592
593 if (!first_page) {
594 spin_unlock(&class->lock);
595 first_page = alloc_zspage(class, pool->flags);
596 if (unlikely(!first_page))
597 return NULL;
598
599 set_zspage_mapping(first_page, class->index, ZS_EMPTY);
600 spin_lock(&class->lock);
601 class->pages_allocated += class->zspage_order;
602 }
603
604 obj = first_page->freelist;
605 obj_handle_to_location(obj, &m_page, &m_objidx);
606 m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
607
608 link = (struct link_free *)kmap_atomic(m_page) +
609 m_offset / sizeof(*link);
610 first_page->freelist = link->next;
611 memset(link, POISON_INUSE, sizeof(*link));
612 kunmap_atomic(link);
613
614 first_page->inuse++;
615 /* Now move the zspage to another fullness group, if required */
616 fix_fullness_group(pool, first_page);
617 spin_unlock(&class->lock);
618
619 return obj;
620}
621EXPORT_SYMBOL_GPL(zs_malloc);
622
623void zs_free(struct zs_pool *pool, void *obj)
624{
625 struct link_free *link;
626 struct page *first_page, *f_page;
627 unsigned long f_objidx, f_offset;
628
629 int class_idx;
630 struct size_class *class;
631 enum fullness_group fullness;
632
633 if (unlikely(!obj))
634 return;
635
636 obj_handle_to_location(obj, &f_page, &f_objidx);
637 first_page = get_first_page(f_page);
638
639 get_zspage_mapping(first_page, &class_idx, &fullness);
640 class = &pool->size_class[class_idx];
641 f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
642
643 spin_lock(&class->lock);
644
645 /* Insert this object in containing zspage's freelist */
646 link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
647 + f_offset);
648 link->next = first_page->freelist;
649 kunmap_atomic(link);
650 first_page->freelist = obj;
651
652 first_page->inuse--;
653 fullness = fix_fullness_group(pool, first_page);
654
655 if (fullness == ZS_EMPTY)
656 class->pages_allocated -= class->zspage_order;
657
658 spin_unlock(&class->lock);
659
660 if (fullness == ZS_EMPTY)
661 free_zspage(first_page);
662}
663EXPORT_SYMBOL_GPL(zs_free);
664
665void *zs_map_object(struct zs_pool *pool, void *handle)
666{
667 struct page *page;
668 unsigned long obj_idx, off;
669
670 unsigned int class_idx;
671 enum fullness_group fg;
672 struct size_class *class;
673 struct mapping_area *area;
674
675 BUG_ON(!handle);
676
677 obj_handle_to_location(handle, &page, &obj_idx);
678 get_zspage_mapping(get_first_page(page), &class_idx, &fg);
679 class = &pool->size_class[class_idx];
680 off = obj_idx_to_offset(page, obj_idx, class->size);
681
682 area = &get_cpu_var(zs_map_area);
683 if (off + class->size <= PAGE_SIZE) {
684 /* this object is contained entirely within a page */
685 area->vm_addr = kmap_atomic(page);
686 } else {
687 /* this object spans two pages */
688 struct page *nextp;
689
690 nextp = get_next_page(page);
691 BUG_ON(!nextp);
692
693
694 set_pte(area->vm_ptes[0], mk_pte(page, PAGE_KERNEL));
695 set_pte(area->vm_ptes[1], mk_pte(nextp, PAGE_KERNEL));
696
697 /* We pre-allocated VM area so mapping can never fail */
698 area->vm_addr = area->vm->addr;
699 }
700
701 return area->vm_addr + off;
702}
703EXPORT_SYMBOL_GPL(zs_map_object);
704
705void zs_unmap_object(struct zs_pool *pool, void *handle)
706{
707 struct page *page;
708 unsigned long obj_idx, off;
709
710 unsigned int class_idx;
711 enum fullness_group fg;
712 struct size_class *class;
713 struct mapping_area *area;
714
715 BUG_ON(!handle);
716
717 obj_handle_to_location(handle, &page, &obj_idx);
718 get_zspage_mapping(get_first_page(page), &class_idx, &fg);
719 class = &pool->size_class[class_idx];
720 off = obj_idx_to_offset(page, obj_idx, class->size);
721
722 area = &__get_cpu_var(zs_map_area);
723 if (off + class->size <= PAGE_SIZE) {
724 kunmap_atomic(area->vm_addr);
725 } else {
726 set_pte(area->vm_ptes[0], __pte(0));
727 set_pte(area->vm_ptes[1], __pte(0));
728 __flush_tlb_one((unsigned long)area->vm_addr);
729 __flush_tlb_one((unsigned long)area->vm_addr + PAGE_SIZE);
730 }
731 put_cpu_var(zs_map_area);
732}
733EXPORT_SYMBOL_GPL(zs_unmap_object);
734
735u64 zs_get_total_size_bytes(struct zs_pool *pool)
736{
737 int i;
738 u64 npages = 0;
739
740 for (i = 0; i < ZS_SIZE_CLASSES; i++)
741 npages += pool->size_class[i].pages_allocated;
742
743 return npages << PAGE_SHIFT;
744}
745EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
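
The zspage-order heuristic and its wastage comment are easy to check
outside the kernel. A userspace rendition of get_zspage_order(), assuming
4 KiB pages and the ZS_MAX_PAGES_PER_ZSPAGE limit of 4 from
zsmalloc_int.h:

#include <stdio.h>

#define PAGE_SIZE		4096
#define ZS_MAX_PAGES_PER_ZSPAGE	4

/* Pick the zspage size (in pages) that leaves the least unusable tail. */
static int zspage_order(int class_size)
{
	int i, max_usedpc = 0, best = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int waste = (i * PAGE_SIZE) % class_size;
		int usedpc = (i * PAGE_SIZE - waste) * 100 / (i * PAGE_SIZE);

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	/* 3/8 of a page: three linked pages hold exactly eight objects */
	printf("class 1536 -> %d page(s) per zspage\n", zspage_order(1536));
	return 0;
}

This prints 3: three linked pages divide evenly into eight 1536-byte
objects, exactly the example given in the comment above
get_zspage_order().
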
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/drivers/staging/zsmalloc/zsmalloc.h
new file mode 100644
index 000000000000..949384ee7491
--- /dev/null
+++ b/drivers/staging/zsmalloc/zsmalloc.h
@@ -0,0 +1,31 @@
1/*
2 * zsmalloc memory allocator
3 *
4 * Copyright (C) 2011 Nitin Gupta
5 *
6 * This code is released using a dual license strategy: BSD/GPL
7 * You can choose the license that better fits your requirements.
8 *
9 * Released under the terms of 3-clause BSD License
10 * Released under the terms of GNU General Public License Version 2.0
11 */
12
13#ifndef _ZS_MALLOC_H_
14#define _ZS_MALLOC_H_
15
16#include <linux/types.h>
17
18struct zs_pool;
19
20struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
21void zs_destroy_pool(struct zs_pool *pool);
22
23void *zs_malloc(struct zs_pool *pool, size_t size);
24void zs_free(struct zs_pool *pool, void *obj);
25
26void *zs_map_object(struct zs_pool *pool, void *handle);
27void zs_unmap_object(struct zs_pool *pool, void *handle);
28
29u64 zs_get_total_size_bytes(struct zs_pool *pool);
30
31#endif
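
Read end to end, the header implies an allocate, map, access, unmap, free
lifecycle. A hedged smoke-test sketch against the exported API; the pool
name, flags, and sizes are illustrative:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include "zsmalloc.h"

static int zs_smoke_test(void)
{
	struct zs_pool *pool;
	void *handle, *mem;

	pool = zs_create_pool("demo", GFP_NOIO);
	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, 128);	/* a handle, not a pointer */
	if (handle) {
		mem = zs_map_object(pool, handle);
		memset(mem, 0xa5, 128);	/* only valid while mapped */
		zs_unmap_object(pool, handle);
		zs_free(pool, handle);
	}

	zs_destroy_pool(pool);
	return 0;
}
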
diff --git a/drivers/staging/zsmalloc/zsmalloc_int.h b/drivers/staging/zsmalloc/zsmalloc_int.h
new file mode 100644
index 000000000000..92eefc663afc
--- /dev/null
+++ b/drivers/staging/zsmalloc/zsmalloc_int.h
@@ -0,0 +1,155 @@
1/*
2 * zsmalloc memory allocator
3 *
4 * Copyright (C) 2011 Nitin Gupta
5 *
6 * This code is released using a dual license strategy: BSD/GPL
7 * You can choose the license that better fits your requirements.
8 *
9 * Released under the terms of 3-clause BSD License
10 * Released under the terms of GNU General Public License Version 2.0
11 */
12
13#ifndef _ZS_MALLOC_INT_H_
14#define _ZS_MALLOC_INT_H_
15
16#include <linux/kernel.h>
17#include <linux/spinlock.h>
18#include <linux/types.h>
19
20/*
 21 * This must be a power of 2 and greater than or equal to sizeof(link_free).
22 * These two conditions ensure that any 'struct link_free' itself doesn't
23 * span more than 1 page which avoids complex case of mapping 2 pages simply
24 * to restore link_free pointer values.
25 */
26#define ZS_ALIGN 8
27
28/*
29 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
30 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
31 */
32#define ZS_MAX_ZSPAGE_ORDER 2
33#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
34
35/*
 36 * Object location (<PFN>, <obj_idx>) is encoded
 37 * as a single (void *) handle value.
38 *
39 * Note that object index <obj_idx> is relative to system
40 * page <PFN> it is stored in, so for each sub-page belonging
41 * to a zspage, obj_idx starts with 0.
42 *
43 * This is made more complicated by various memory models and PAE.
44 */
45
46#ifndef MAX_PHYSMEM_BITS
47#ifdef CONFIG_HIGHMEM64G
48#define MAX_PHYSMEM_BITS 36
49#else /* !CONFIG_HIGHMEM64G */
50/*
51 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
52 * be PAGE_SHIFT
53 */
54#define MAX_PHYSMEM_BITS BITS_PER_LONG
55#endif
56#endif
57#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
58#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
59#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
60
61#define MAX(a, b) ((a) >= (b) ? (a) : (b))
62/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
63#define ZS_MIN_ALLOC_SIZE \
64 MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
65#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
66
67/*
68 * On systems with 4K page size, this gives 254 size classes! There is a
 69 * trade-off here:
 70 * - A large number of size classes is potentially wasteful as free pages are
 71 * spread across these classes
 72 * - A small number of size classes causes large internal fragmentation
 73 * - It is probably better to use specific size classes (empirically
 74 * determined). NOTE: all those class sizes must be set as multiples of
75 * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
76 *
 77 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiples of ZS_ALIGN
78 * (reason above)
79 */
80#define ZS_SIZE_CLASS_DELTA 16
81#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
82 ZS_SIZE_CLASS_DELTA + 1)
83
84/*
85 * We do not maintain any list for completely empty or full pages
86 */
87enum fullness_group {
88 ZS_ALMOST_FULL,
89 ZS_ALMOST_EMPTY,
90 _ZS_NR_FULLNESS_GROUPS,
91
92 ZS_EMPTY,
93 ZS_FULL
94};
95
96/*
97 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
98 * n <= N / f, where
99 * n = number of allocated objects
100 * N = total number of objects zspage can store
101 * f = 1/fullness_threshold_frac
102 *
103 * Similarly, we assign zspage to:
104 * ZS_ALMOST_FULL when n > N / f
105 * ZS_EMPTY when n == 0
106 * ZS_FULL when n == N
107 *
108 * (see: fix_fullness_group())
109 */
110static const int fullness_threshold_frac = 4;
111
112struct mapping_area {
113 struct vm_struct *vm;
114 pte_t *vm_ptes[2];
115 char *vm_addr;
116};
117
118struct size_class {
119 /*
120 * Size of objects stored in this class. Must be multiple
121 * of ZS_ALIGN.
122 */
123 int size;
124 unsigned int index;
125
126 /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
127 int zspage_order;
128
129 spinlock_t lock;
130
131 /* stats */
132 u64 pages_allocated;
133
134 struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
135};
136
137/*
138 * Placed within free objects to form a singly linked list.
139 * For every zspage, first_page->freelist gives head of this list.
140 *
 141 * This must be a power of 2 and less than or equal to ZS_ALIGN
142 */
143struct link_free {
144 /* Handle of next free chunk (encodes <PFN, obj_idx>) */
145 void *next;
146};
147
148struct zs_pool {
149 struct size_class size_class[ZS_SIZE_CLASSES];
150
151 gfp_t flags; /* allocation flags used when growing pool */
152 const char *name;
153};
154
155#endif
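
To make the handle layout concrete: with MAX_PHYSMEM_BITS equal to
BITS_PER_LONG (the common 64-bit case), OBJ_INDEX_BITS reduces to
PAGE_SHIFT, so a handle is simply (pfn << 12) | obj_idx on 4 KiB pages.
A userspace round trip mirroring obj_location_to_handle() and
obj_handle_to_location(), with the bit widths assumed as just described:

#include <assert.h>

#define OBJ_INDEX_BITS	12UL			/* == PAGE_SHIFT here */
#define OBJ_INDEX_MASK	((1UL << OBJ_INDEX_BITS) - 1)

static unsigned long encode(unsigned long pfn, unsigned long obj_idx)
{
	return (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);
}

static void decode(unsigned long handle,
		   unsigned long *pfn, unsigned long *obj_idx)
{
	*pfn = handle >> OBJ_INDEX_BITS;
	*obj_idx = handle & OBJ_INDEX_MASK;
}

int main(void)
{
	unsigned long pfn, idx;

	decode(encode(0x12345, 7), &pfn, &idx);
	assert(pfn == 0x12345 && idx == 7);
	return 0;
}
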